repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses: 1)
---|---|---|---|---|---|---|
hypercontractivity | hypercontractivity-master/HC_estimator.py | #Copyright Weihao Gao, UIUC
from math import log,pi,exp,sqrt
import numpy.random as nr
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
#Main Usage Function
def HC(x, y, bandwidth=1.06, n_trial = 10, n_iter=100, sigma = 0.1, eta = 0.1):
'''
Estimating Hypercontractivity s(X;Y) from samples (x_i, y_i)
See section 3 of arxiv.org/abs/1709.03XXX for details
Input:
--x: 2D array of size N*d_x (or 1D array of size N if d_x = 1)
--y: 2D array of size N*d_y (or 1D array of size N if d_y = 1)
Output:
--One scalar s(X;Y)
Parameters:
--bandwidth: constant in the rule-of-thumb bandwidth selection,
i.e., bw = bandwidth*std(data)*n**(-1/(d+4))
--n_trial: number of initializations used to search for the global maximum
--n_iter: number of iterations of gradient ascent per initialization
--sigma: weights are initialized as 1 + N(0, sigma**2)
--eta: step size of gradient ascent
'''
assert len(x)==len(y), "Lists should have same length"
n = len(x)
if x.ndim == 1:
x = x.reshape((n,1))
dx = len(x[0])
if y.ndim == 1:
y = y.reshape((n,1))
dy = len(y[0])
# Compute the bandwidth for KDE using rule-of-thumb bandwidth selection
bw_x = bandwidth*np.std(x)*n**(-1.0/(dx+4))
bw_y = bandwidth*np.std(y)*n**(-1.0/(dy+4))
# Get the matrix A(j,i) = P_{XY}(x_i,y_j)/P_X(x_i)P_Y(y_j) using KDE
# and normalize it such that A is doubly stochastic
A = get_PMI(x,y,bw_x,bw_y)
A = doubly_stochastic_normalize(A)
# Use gradient ascent to solve the maximization problem
# Try n_trial different initializations and take the max
s_xy = np.zeros(n_trial)
for T in range(n_trial):
weight = (np.ones(n) + sigma*nr.normal(0,1,n)).clip(1e-8,np.sqrt(n))
weight = weight/np.mean(weight)
for i in range(n_iter):
obj, grad = compute(A, weight)
weight += eta*np.sqrt(n)*grad
weight = weight.clip(1e-8,np.sqrt(n))
weight = weight/np.mean(weight)
s_xy[T] = exp(obj)
return max(s_xy)
#Compute the matrix A(j,i) = P_{XY}(x_i,y_j)/P_X(x_i)P_Y(y_j) using KDE
def get_PMI(x,y,bw_x,bw_y):
n = len(x)
Wx, Wy = np.zeros((n,n)), np.zeros((n,n))
for i in range(n):
for j in range(n):
Wx[i][j] = exp(-la.norm(x[i]-x[j])**2/(2*bw_x**2))
Wy[i][j] = exp(-la.norm(y[i]-y[j])**2/(2*bw_y**2))
Wx[i] = Wx[i]/sum(Wx[i])
Wy[i] = Wy[i]/sum(Wy[i])
A = np.dot(Wy, Wx.transpose())
return A
# Doubly Stochastic normalization of a matrix
def doubly_stochastic_normalize(X):
n = len(X)
return X + (1.0/n+sum(sum(X))/n**2)*np.ones((n,n)) - (np.dot(X,np.ones((n,n))) + np.dot(np.ones((n,n)),X))/n
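# Added illustration (not part of the original code): the affine correction above
# maps any square matrix to one whose rows and columns each sum to 1 (entries may
# still be negative, so it is doubly stochastic only in the row/column-sum sense):
#   M = doubly_stochastic_normalize(np.random.rand(5, 5))
#   assert np.allclose(M.sum(axis=0), 1.0) and np.allclose(M.sum(axis=1), 1.0)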
def f(x):
return x*log(x)
def g(x):
return 1+log(x)
#Evaluate objective function and gradient given A and w
def compute(A, w):
n = len(w)
v = np.dot(A,w).clip(1e-8,np.sqrt(n))
Dx, Dy = sum(map(f,w)), sum(map(f,v))
obj = log(Dy) - log(Dx)
grad = np.dot(A.transpose(), np.array(list(map(g, v))))
grad = grad/Dy - np.array(list(map(g, w)))/Dx
return obj, (grad-np.mean(grad))/la.norm(grad-np.mean(grad))
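# Added usage sketch (not part of the original module): a small self-test that
# runs only when this file is executed directly; sample sizes and the mixing
# factor below are illustrative.
if __name__ == '__main__':
    x_demo = nr.uniform(0, 1, 200)
    y_demo = 0.5 * x_demo + 0.5 * nr.uniform(0, 1, 200)
    print('s(X;Y), correlated toy data:', HC(x_demo, y_demo))
    print('s(X;Y), independent toy data:', HC(nr.uniform(0, 1, 200), nr.uniform(0, 1, 200)))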
| 2,959 | 27.190476 | 109 | py |
hypercontractivity | hypercontractivity-master/demo.py | import numpy.random as nr
import HC_estimator as hce
def main():
sample_size = 100
x_g = nr.uniform(0,1,[sample_size,1])
y_g = nr.uniform(0,1,[sample_size,1])
print('*'*100)
print('bandwidth = 0.53')
print('uncorrelated HC:', hce.HC(x_g,y_g,0.53))
print('correlated HC:', hce.HC(x_g,x_g,0.53))
print('*'*100)
print('bandwidth = 1.06')
print('uncorrelated HC:', hce.HC(x_g,y_g,1.06))
print('correlated HC:', hce.HC(x_g,x_g,1.06))
if __name__ == '__main__':
main()
| 476 | 24.105263 | 48 | py |
deep_bingham | deep_bingham-master/generate_lookup_table.py | """
Generates the lookup table for the Bingham normalization constant.
"""
from __future__ import print_function
import numpy as np
import time
import utils
def generate_bd_lookup_table():
coords = np.linspace(-500, 0, 40)
duration = time.time()
utils.build_bd_lookup_table(
"uniform", {"coords": coords, "bounds": (-500, 0), "num_points": 40},
"precomputed/lookup_-500_0_40.dill")
duration = time.time() - duration
print('lookup table function took %0.3f ms' % (duration * 1000.0))
if __name__ == "__main__":
generate_bd_lookup_table()
| 586 | 22.48 | 77 | py |
deep_bingham | deep_bingham-master/evaluate.py | import argparse
import os
import torch
import torchvision.transforms as transforms
import yaml
import data_loaders
import modules.network
from modules import angular_loss, BinghamFixedDispersionLoss, \
BinghamHybridLoss, BinghamLoss, BinghamMixtureLoss, \
CosineLoss, MSELoss, VonMisesLoss, VonMisesFixedKappaLoss
from utils.evaluation import run_evaluation
DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), "configs/upna_train.yaml")
LOSS_FUNCTIONS = {'mse': MSELoss,
'bingham': BinghamLoss,
'bingham_mdn': BinghamMixtureLoss,
'von_mises': VonMisesLoss,
'cosine': CosineLoss}
def get_dataset(config):
"""Returns the test data using the provided configuration"""
data_loader = config["data_loader"]
size = data_loader["input_size"]
data_transforms = transforms.Compose([transforms.CenterCrop(600),
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
data_transforms_idiap = transforms.Compose([
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
if data_loader["name"] == "UPNAHeadPose":
dataset = data_loaders.UpnaHeadPoseTrainTest(
data_loader["config"], data_transforms)
test_dataset = dataset.test
elif data_loader["name"] == "T_Less":
dataset = data_loaders.TLessTrainTest(data_loader["config"],
data_transforms_idiap)
test_dataset = dataset.test
else:
dataset = data_loaders.IDIAPTrainTest(
data_loader["config"], data_transforms_idiap)
test_dataset = dataset.test
return test_dataset
def get_data_loader(config, batch_size):
"""Returns a data loader for the test dataset."""
dataset = get_dataset(config)
test_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False)
return test_loader
def main():
"""Loads arguments and starts testing."""
parser = argparse.ArgumentParser(
description="Deep Orientation Estimation")
parser.add_argument('-c', '--config', default=DEFAULT_CONFIG, type=str)
args = parser.parse_args()
config_file = args.config
# Load config
assert os.path.exists(args.config), "Config file {} does not exist".format(
args.config)
with open(config_file) as fp:
config = yaml.load(fp)
if "loss_parameters" in config["test"]:
loss_parameters = config["test"]["loss_parameters"]
else:
loss_parameters = None
device = torch.device(config["test"][
"device"] if torch.cuda.is_available() else "cpu")
print("Using device: {}".format(device))
num_classes = config["test"]["num_outputs"]
# Build model architecture
num_channels = config["test"]["num_channels"]
model_name = config["test"]["model"]
model = modules.network.get_model(name=model_name,
pretrained=True,
num_channels=num_channels,
num_classes=num_classes)
model.to(device)
print("Model name: {}".format(model_name))
model_path = config["test"]["model_path"]
if os.path.isfile(model_path):
print("Loading model {}".format(model_path))
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint["state_dict"])
else:
assert "model not found"
# Get data loader
batch_size = 32
test_loader = get_data_loader(config, batch_size)
loss_function_name = config["test"]["loss_function"]
dataset_name = config["data_loader"]["name"]
if loss_parameters:
criterion = LOSS_FUNCTIONS[loss_function_name](**loss_parameters)
else:
criterion = LOSS_FUNCTIONS[loss_function_name]()
if "floating_point_type" in config["test"]:
floating_point_type = config["test"]["floating_point_type"]
else:
floating_point_type = "float"
if floating_point_type == "double":
model.double()
run_evaluation(
model, test_loader, criterion,
device, floating_point_type
)
if __name__ == '__main__':
main()
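# Added note (not part of the original script): sketch of the YAML layout this
# script expects. Key names are taken from the config accesses above; the values
# shown are illustrative placeholders only.
#
# test:
#   device: "cuda:0"
#   model: "resnet18"
#   num_channels: 3
#   num_outputs: 19
#   model_path: "path/to/checkpoint.tar"
#   loss_function: "bingham"
#   # optional: loss_parameters, floating_point_type
# data_loader:
#   name: "UPNAHeadPose"
#   input_size: 224
#   config: { ... }   # passed through to the dataset class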
| 4,542 | 30.769231 | 79 | py |
deep_bingham | deep_bingham-master/bingham_distribution.py | """
Bingham Distribution
This module implements the Bingham distribution as it was proposed in:
Christopher Bingham, *"An Antipodally Symmetric Distribution on the Sphere"*,
Annals of Statistics 2(6), 1974
"""
import logging
import scipy.integrate as integrate
import scipy.optimize
import scipy.special
import numpy as np
import sys
class BinghamDistribution(object):
"""Implementation of the Bingham Distribution.
We represent the Bingham distribution as
.. math::
f(x) = \\exp\\left( x^\\top M Z M^\\top x \\right)\\ , \\quad x\\in S^n
The current implementation supports the 2d and 4d case of the Bingham
Distribution (i.e. n=2 and n=4).
Parameters
----------
param_m : array of shape (dim,dim)
Location and noise direction parameter matrix M of the Bingham
distribution.
param_z : array of shape (dim)
Diagonal entries of dispersion parameter matrix Z of the Bingham
distribution.
options : dict
Dictionary containing additional options that may be:
"norm_const_mode":
Mode of computing the normalization constant as described for
the mode parameter of the normalization_constant method.
"norm_const_options":
Optional normalization constant computation options for the
normalization_constant method. Only processed if norm_const_mode
is provided.
"""
# Constant indicating which dimensions are implemented.
IMPLEMENTED_DIMENSIONS = [2, 4]
def __init__(self, param_m, param_z, options=dict()):
self.assert_parameters(param_m, param_z)
self._dim = param_m.shape[0]
self._param_m = np.copy(param_m.astype(float))
self._param_z = np.copy(param_z.astype(float))
self._mode = self._param_m[:, -1]
if "norm_const_mode" in options.keys():
nc_options = options["norm_const_options"] \
if "norm_const_options" in options.keys() else dict()
self._norm_const = self.normalization_constant(
param_z, mode=options["norm_const_mode"], options=nc_options)
else:
self._norm_const = self.normalization_constant(param_z)
self._norm_const_deriv \
= BinghamDistribution.normalization_constant_deriv(self._param_z)
self._logger = logging.getLogger(__name__)
##############
# Properties #
##############
@property
def dim(self):
return self._dim
@property
def m(self):
return self._param_m
@property
def mode(self):
return self._mode
@property
def norm_const(self):
return self._norm_const
@property
def norm_const_deriv(self):
return self._norm_const_deriv
@property
def z(self):
return self._param_z
##################
# Public Methods #
##################
def is_almost_equal(self, other):
return (np.allclose(self._param_m, other.m) and
np.allclose(self._param_z, other.z))
def pdf(self, data):
"""PDF of the Bingham Distribution
Parameters
----------
data : array of shape(n_points, dim)
The samples at which the density is evaluated.
Returns
-------
density : array of shape (n_points),
Value of the pdf evaluated at each data point.
"""
assert isinstance(data, np.ndarray), \
"Samples need bo be of type numpy.ndarray."
if len(data.shape) == 1:
data = np.array([data])
assert len(data.shape) == 2 and data.shape[1] == self._dim, \
"Sample dimension does not agree with distribution dimension."
# Here, the Bingham distribution parametrization we use is
# f(x) \propto exp(x^T M Z M^T x)
full_param_matrix = \
np.dot(self._param_m, np.dot(np.diag(self._param_z),
self._param_m.transpose()))
# This can be later vectorized for speed-up
num_data_points = np.shape(data)[0]
density = np.zeros(num_data_points)
for i in range(0, num_data_points):
density[i] = np.exp(
np.dot(data[i], np.dot(full_param_matrix, data[i])))
density = density / self._norm_const
return density
def random_samples(self, n):
"""Generates Bingham random samples.
The random sampling uses a rejection method that was originally
proposed in
J. T. Kent, A. M. Ganeiber, K. V. Mardia, "A New Method to Simulate
the Bingham and Related Distributions in Directional Data Analysis
with Applications", arXiv preprint arXiv:1310.8110, 2013.
Parameters
----------
n : integer
Number of random samples.
Returns
-------
samples : array of shape (n_points, dim)
Array with random samples.
"""
samples = np.zeros([n, self._dim])
a = -np.dot(
self._param_m, np.dot(np.diag(self._param_z), self._param_m.T))
b = scipy.optimize.fsolve(
lambda x: np.sum(1. / (x - 2. * self._param_z)) - 1,
1.0
)[0]
self._logger.debug("b=%g", b)
omega = np.eye(self._dim) + 2. * a / b
mbstar = np.exp(-(self._dim - b) / 2.) \
* (self._dim / b)**(self._dim / 2.)
def fb_likelihood(x):
return np.exp(np.dot(-x, np.dot(a, x.T)))
def acg_likelihood(x):
return np.dot(x, np.dot(omega, x.T)) ** (-self._dim / 2.)
current_sample = 0
while current_sample < n:
candidate = np.random.multivariate_normal(
np.zeros(self._dim), np.linalg.inv(omega), 1)
candidate = candidate / np.linalg.norm(candidate)
w = np.random.uniform()
if w < fb_likelihood(candidate) / (mbstar *
acg_likelihood(candidate)):
samples[current_sample] = candidate
current_sample += 1
return samples
def second_moment(self):
"""Calculate covariance matrix of Bingham distribution.
Returns
-------
s (d x d matrix): scatter/covariance matrix in R^d
"""
nc_deriv_ratio = np.diag(self._norm_const_deriv / self._norm_const)
# The diagonal entries of nc_deriv_ratio should sum to 1; they may not do so
# exactly because the normalization constant and its derivatives are computed
# using approximations.
s = np.dot(self._param_m,
np.dot(nc_deriv_ratio, self._param_m.transpose()))
s = (s + s.transpose()) / 2 # enforce symmetry
return s
##################
# Static Methods #
##################
@staticmethod
def multiply(b1, b2):
"""Computes the product of two Bingham pdfs
This method makes use of the fact that the Bingham distribution
is closed under Bayesian inference. Thus, the product of two
Bingham pdfs is itself the pdf of a Bingham distribution. This
method computes the parameters of the resulting distribution.
Parameters
----------
b1 : BinghamDistribution
First Bingham Distribution.
b2 : BinghamDistribution
Second Bingham Distribution.
Returns
-------
B : BinghamDistribution
Bingham distribution representing this*B2 (after
renormalization).
"""
assert isinstance(b2, BinghamDistribution), \
"Second argument needs to be of type BinghamDistribution"
assert b1.dim == b2.dim, \
"Dimensions do not match"
# new exponent
c = np.add(np.dot(b1.m,
np.dot(np.diag(b1.z),
b1.m.transpose())),
np.dot(b2.m, np.dot(np.diag(b2.z),
b2.m.transpose())))
# Ensure symmetry of c, asymmetry may arise as a consequence of a
# numerical instability earlier.
c = 0.5 * np.add(c, c.transpose())
eigvalues, eigvectors = np.linalg.eig(c) # Eigenvalue decomposition
eigvalues = eigvalues[::-1]
indx = eigvalues.argsort()
z_param = eigvalues[indx]
# Rows and columns are swapped in numpy's eig
m_param = eigvectors.transpose()[indx]
z_param = z_param - z_param[-1] # last entry should be zero
return BinghamDistribution(m_param, z_param)
@staticmethod
def compose(b1, b2):
"""Compose two Bingham distributions.
Using Moment Matching based approximation, we compose two Bingham
distributions. The mode of the new distribution should be the
quaternion multiplication of the original modes; the uncertainty
should be larger than before
Parameters
----------
b1 : BinghamDistribution
First Bingham Distribution.
b2 : BinghamDistribution
Second Bingham Distribution.
Returns
-------
b : BinghamDistribution
Bingham distribution representing the convolution
"""
assert b1.dim == b2.dim, \
"Dimensions not equal"
assert b1.dim == 2 or b1.dim == 4, \
"Unsupported dimension"
b1s = b1.second_moment()
b2s = b2.second_moment()
if b1.dim == 2:
# for complex numbers
# derived from complex multiplication
# Gerhard Kurz, Igor Gilitschenski, Simon Julier, Uwe D. Hanebeck,
# Recursive Bingham Filter for Directional Estimation Involving 180
# Degree Symmetry Journal of Advances in Information Fusion,
# 9(2):90 - 105, December 2014.
a11 = b1s[0, 0]
a12 = b1s[0, 1]
a22 = b1s[1, 1]
b11 = b2s[0, 0]
b12 = b2s[0, 1]
b22 = b2s[1, 1]
s11 = a11 * b11 - 2 * a12 * b12 + a22 * b22
s12 = a11 * b12 - a22 * b12 - a12 * b22 + a12 * b11
s21 = s12
s22 = a11 * b22 + 2 * a12 * b12 + a22 * b11
s = np.array([[s11, s12], [s21, s22]])
return BinghamDistribution.fit_to_moment(s)
else:
# adapted from Glover's C code in libBingham, see also
# Glover, J. & Kaelbling, L.P. Tracking 3 - D Rotations with
# the Quaternion Bingham Filter MIT, 2013
a11 = b1s[0, 0]
a12 = b1s[0, 1]
a13 = b1s[0, 2]
a14 = b1s[0, 3]
a22 = b1s[1, 1]
a23 = b1s[1, 2]
a24 = b1s[1, 3]
a33 = b1s[2, 2]
a34 = b1s[2, 3]
a44 = b1s[3, 3]
b11 = b2s[0, 0]
b12 = b2s[0, 1]
b13 = b2s[0, 2]
b14 = b2s[0, 3]
b22 = b2s[1, 1]
b23 = b2s[1, 2]
b24 = b2s[1, 3]
b33 = b2s[2, 2]
b34 = b2s[2, 3]
b44 = b2s[3, 3]
# can be derived from quaternion multiplication
s11 = \
a11*b11 - 2*a12*b12 - 2*a13*b13 - 2*a14*b14 + a22*b22 + \
2*a23*b23 + 2*a24*b24 + a33*b33 + 2*a34*b34 + a44*b44
s12 = \
a11*b12 + a12*b11 + a13*b14 - a14*b13 - a12*b22 - a22*b12 - \
a13*b23 - a23*b13 - a14*b24 - a24*b14 - a23*b24 + a24*b23 - \
a33*b34 + a34*b33 - a34*b44 + a44*b34
s21 = s12
s13 = \
a11*b13 + a13*b11 - a12*b14 + a14*b12 - a12*b23 - a23*b12 - \
a13*b33 + a22*b24 - a24*b22 - a33*b13 - a14*b34 - a34*b14 + \
a23*b34 - a34*b23 + a24*b44 - a44*b24
s31 = s13
s14 = \
a11*b14 + a12*b13 - a13*b12 + a14*b11 - a12*b24 - a24*b12 - \
a22*b23 + a23*b22 - a13*b34 - a34*b13 - a23*b33 + a33*b23 - \
a14*b44 - a24*b34 + a34*b24 - a44*b14
s41 = s14
s22 = \
2*a12*b12 + a11*b22 + a22*b11 + 2*a13*b24 - 2*a14*b23 + \
2*a23*b14 - 2*a24*b13 - 2*a34*b34 + a33*b44 + a44*b33
s23 = \
a12*b13 + a13*b12 + a11*b23 + a23*b11 - a12*b24 + a14*b22 - \
a22*b14 + a24*b12 + a13*b34 - a14*b33 + a33*b14 - a34*b13 + \
a24*b34 + a34*b24 - a23*b44 - a44*b23
s32 = s23
s24 = \
a12*b14 + a14*b12 + a11*b24 + a12*b23 - a13*b22 + a22*b13 - \
a23*b12 + a24*b11 - a14*b34 + a34*b14 + a13*b44 + a23*b34 - \
a24*b33 - a33*b24 + a34*b23 - a44*b13
s42 = s24
s33 = \
2*a13*b13 + 2*a14*b23 - 2*a23*b14 + a11*b33 + a33*b11 - \
2*a12*b34 + 2*a34*b12 - 2*a24*b24 + a22*b44 + a44*b22
s34 = \
a13*b14 + a14*b13 - a13*b23 + a23*b13 + a14*b24 - a24*b14 + \
a11*b34 + a12*b33 - a33*b12 + a34*b11 + a23*b24 + a24*b23 - \
a12*b44 - a22*b34 - a34*b22 + a44*b12
s43 = s34
s44 = \
2*a14*b14 - 2*a13*b24 + 2*a24*b13 + 2*a12*b34 - 2*a23*b23 - \
2*a34*b12 + a11*b44 + a22*b33 + a33*b22 + a44*b11
s = np.array([[s11, s12, s13, s14],
[s21, s22, s23, s24],
[s31, s32, s33, s34],
[s41, s42, s43, s44]])
return BinghamDistribution.fit_to_moment(s)
@staticmethod
def assert_parameters(param_m, param_z):
"""Asserts param_m and param_z to satisfy requirements of the Bingham"""
assert isinstance(param_m, np.ndarray), \
"m needs to be of type numpy.ndarray."
assert isinstance(param_z, np.ndarray), \
"z needs to be of type numpy.ndarray."
dist_dim = param_m.shape[0]
assert dist_dim in BinghamDistribution.IMPLEMENTED_DIMENSIONS, \
"Not supported distribution dimension."
# Currently we support only 2d Bingham distribution.
assert param_m.shape == (dist_dim, dist_dim), \
"m needs to be a square Matrix."
assert param_z.shape == (dist_dim, ), \
"z needs to be a vector and dimension needs to agree with m."
# TODO: Get rid of these 2 assertions by using properties for getting
# and setting the location parameter m and the dispersion parameter z.
assert param_z[-1] == 0., "Last entry of z needs to be 0."
assert all(param_z[:-1] <= param_z[1:]), \
"Entries of z need to be given in an ascending order."
# Check for orthogonality of m.
numerical_tolerance = 1e-10
product = np.dot(param_m, param_m.T)
diff = product - np.eye(dist_dim)
assert np.all(np.abs(diff) < numerical_tolerance), \
"param_m is not orthogonal."
@staticmethod
def decompose_parameters(param_matrix, correct_eigenvalues=True):
"""Decomposes a parameter matrix into location and dispersion part.
The entire parameter matrix M*Z*M^T is decomposed into M and Z, where Z
is a diagonal matrix returned as a vector.
Parameters
----------
param_matrix : array of shape(n_dim, n_dim)
Original full parameter matrix.
correct_eigenvalues : boolean
Sets largest eigenvalue to 0 if true by subtracting the largest
eigenvalue from Z (default).
"""
(bingham_dispersion, bingham_location) = np.linalg.eig(param_matrix)
eigval_order = np.argsort(bingham_dispersion)
bingham_location = bingham_location[:, eigval_order]
bingham_dispersion = bingham_dispersion[eigval_order]
offset = 0.0
if correct_eigenvalues:
offset = bingham_dispersion[-1]
bingham_dispersion = bingham_dispersion - offset
return bingham_location, bingham_dispersion, offset
@staticmethod
def fit(data):
"""Fits a bingham distribution to given data.
The implemented fitting procedure is based on the method of moments,
i.e. we compute the empirical second moment of the data and numerically
obtain the corresponding Bingham distribution parameters.
Parameters
----------
data : array of shape(n_points, 2)
The samples at which the density is evaluated.
Returns
-------
result : Bingham distribution object
"""
assert isinstance(data, np.ndarray), \
"data needs to be a np.ndarray"
bd_dim = data.shape[1]
assert bd_dim in BinghamDistribution.IMPLEMENTED_DIMENSIONS, \
"Not supported Bingham distribution dimensionality."
n_samples = data.shape[0]
second_moment = np.dot(data.T, data)/n_samples
return BinghamDistribution.fit_to_moment(second_moment)
@staticmethod
def fit_to_moment(second_moment):
"""Finds a Bingham distribution with a given second moment.
Parameters
----------
second_moment : (d x d matrix)
matrix representing second moment.
Returns
-------
b : BinghamDistribution
the MLE estimate for a Bingham distribution given the
scatter matrix S
"""
assert np.allclose(second_moment, second_moment.transpose()), \
"second moment must be symmetric"
bd_dim = second_moment.shape[1]
(moment_eigval, bingham_location) = np.linalg.eig(second_moment)
# Sort eigenvalues (and corresponding eigenvectors) in asc. order.
eigval_order = np.argsort(moment_eigval)
bingham_location = bingham_location[:, eigval_order]
moment_eigval = moment_eigval[eigval_order]
logger = logging.getLogger(__name__)
if logger.getEffectiveLevel() == logging.DEBUG:
logger.debug("second_moment=\n%s", second_moment)
logger.debug("moment_eigval=%s", moment_eigval)
logger.debug("eigval_order=%s", eigval_order)
logger.debug("bingham_location=\n%s", bingham_location)
def mle_goal_fun(z, rhs):
"""Goal function for MLE optimizer."""
z_param = np.append(z, 0)
norm_const = BinghamDistribution.normalization_constant(z_param)
norm_const_deriv \
= BinghamDistribution.normalization_constant_deriv(z_param)
res = (norm_const_deriv[0:(bd_dim-1)] / norm_const) \
- rhs[0:(bd_dim-1)]
return res
bingham_dispersion = scipy.optimize.fsolve(
lambda x: mle_goal_fun(x, moment_eigval), np.ones([(bd_dim-1)]))
bingham_dispersion = np.append(bingham_dispersion, 0)
bingham_dist = BinghamDistribution(bingham_location, bingham_dispersion)
# Remove this bloat code.
return bingham_dist
@staticmethod
def normalization_constant(param_z, mode="default", options=dict()):
"""Computes the Bingham normalization constant.
Parameters
----------
param_z : array of shape (dim)
Diagonal entries of dispersion parameter matrix Z of the Bingham
distribution.
mode : string
Method of computation (optional).
options : dict
Computation-method specific options.
"""
# Gerhard Kurz, Igor Gilitschenski, Simon Julier, Uwe D. Hanebeck,
# "Recursive Bingham Filter for Directional Estimation Involving 180
# Degree Symmetry", Journal of Advances in Information
# Fusion, 9(2):90 - 105, December 2014.
bd_dim = param_z.shape[0]
assert bd_dim in BinghamDistribution.IMPLEMENTED_DIMENSIONS \
and param_z.ndim == 1, \
"param_z needs to be a vector of supported dimension."
# TODO Check structure of Z
if bd_dim == 2:
if mode == "default" or mode == "bessel":
# Surface area of the unit sphere is a factor in the
# normalization constant. The formula is taken from
# https://en.wikipedia.org/wiki/N-sphere#Volume_and_surface_area
sphere_surface_area = 2.0 * (np.pi**(bd_dim / 2.0) /
scipy.special.gamma(bd_dim / 2.0))
norm_const = (np.exp(param_z[1]) * sphere_surface_area *
scipy.special.iv(
0, (param_z[0] - param_z[1]) / 2.0)
* np.exp((param_z[0] - param_z[1]) / 2.0))
return norm_const
elif bd_dim == 4:
if mode == "default" or mode == "saddlepoint":
f = BinghamDistribution.__norm_const_saddlepoint(
np.sort(-param_z)+1)
f *= np.exp(1)
return f[2]
elif mode == "numerical":
param_z_diag = np.diag(param_z)
def bd_likelihood(x):
return np.exp(np.dot(x, np.dot(param_z_diag, x)))
def integrand(phi1, phi2, phi3):
sp1 = np.sin(phi1)
sp2 = np.sin(phi2)
return bd_likelihood(np.array([
sp1 * sp2 * np.sin(phi3),
sp1 * sp2 * np.cos(phi3),
sp1 * np.cos(phi2),
np.cos(phi1)
])) * (sp1 ** 2.) * sp2
norm_const = integrate.tplquad(
integrand,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: np.pi, # phi2
lambda x, y: 0.0, lambda x, y: np.pi, # phi1
**options
)
return norm_const[0]
sys.exit("Invalid computation mode / dimension combination.")
@staticmethod
def normalization_constant_deriv(param_z, mode="default"):
"""Computes the derivatives (w.r.t. Z) of the normalization constant.
Parameters
----------
param_z : array of shape (dim)
Diagonal entries of dispersion parameter matrix Z of the Bingham
distribution.
mode : string
Method of computation (optional).
"""
bd_dim = param_z.shape[0]
assert bd_dim in BinghamDistribution.IMPLEMENTED_DIMENSIONS \
and param_z.ndim == 1, \
"param_z needs to be a vector of supported dimension."
derivatives = np.zeros(bd_dim)
if bd_dim == 2 and mode == "default":
derivatives = np.zeros(2)
z_param_diff = (param_z[0] - param_z[1]) / 2.0
z_param_mean = (param_z[0] + param_z[1]) / 2.0
b1 = scipy.special.iv(1, z_param_diff)
b0 = scipy.special.iv(0, z_param_diff)
derivatives[0] = np.pi * np.exp(z_param_mean) * (b1 + b0)
derivatives[1] = np.pi * np.exp(z_param_mean) * (-b1 + b0)
elif bd_dim == 4 and mode == "quad":
def bd_deriv_likelihood(x, j):
return x[j]**2 * np.exp(np.dot(x, np.dot(np.diag(param_z), x)))
for i in range(0, bd_dim):
derivatives[i] = integrate.tplquad(
lambda phi1, phi2, phi3:
bd_deriv_likelihood(np.flip(np.array([
np.cos(phi1),
np.sin(phi1) * np.cos(phi2),
np.sin(phi1) * np.sin(phi2) * np.cos(phi3),
np.sin(phi1) * np.sin(phi2) * np.sin(phi3),
])), i) * (np.sin(phi1) ** 2.) * np.sin(phi2),
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: np.pi, # phi2
lambda x, y: 0.0, lambda x, y: np.pi # phi1
)[0]
else:
if mode == "default" or mode == "saddlepoint":
derivatives = np.zeros(bd_dim)
for i in range(0, bd_dim):
modz = np.concatenate((param_z[0:i + 1],
np.array([param_z[i]]),
param_z[i:bd_dim + 1]))
t = BinghamDistribution.__norm_const_saddlepoint(
np.sort(-modz) + 1)
t *= np.exp(1) / (2 * np.pi)
derivatives[i] = t[2]
else:
sys.exit("No such computation mode.")
return derivatives
##########################
# Private Static Methods #
##########################
@staticmethod
def __xi2cgfderiv(t, dim, la, deriv):
"""Calculates first 4 derivatives of the cumulant generating function"""
res = [0] * 4
for i in range(dim):
if i == deriv:
scale = 3.0
else:
scale = 1.0
res[0] += scale*0.5/(la[i]-t)
res[1] += scale*0.5/((la[i]-t)*(la[i]-t))
res[2] += scale*1/((la[i]-t)*(la[i]-t)*(la[i]-t))
res[3] += scale*3/((la[i]-t)*(la[i]-t)*(la[i]-t)*(la[i]-t))
return res
@staticmethod
def __find_root_newton(dim, la, min_el):
"""Root finding algorithm using Newton's Method"""
prec = 1E-10 # Precision
x = min_el - 0.5 # Upper bound for initial evaluation point
i = 0
while True:
val = BinghamDistribution.__xi2cgfderiv(x, dim, la, -1)
val[0] -= 1
x += -val[0] / val[1]
i += 1
if not ((val[0] > prec or val[0] < -prec) and i < 1000):
break
return x
@staticmethod
def __find_multiple_roots_newton(dim, la, min_el):
"""Multiple roots finding algorithm using Newton's Method"""
prec = 1E-10
ubound = min_el - 0.5
retval = [ubound] * (dim + 1) # set starting value of Newton method
i = 0
while True:
err = 0
# Iterate over the Norm const and each partial derivative
for j in range(dim + 1):
v0 = 0
v1 = 0
for k in range(dim):
if k != j - 1:
v0 += 0.5 / (la[k] - retval[j])
v1 += 0.5 / ((la[k] - retval[j]) * (la[k]-retval[j]))
else:
v0 += 3 * 0.5/(la[k] - retval[j])
v1 += 3 * 0.5/((la[k] - retval[j]) * (la[k]-retval[j]))
v0 -= 1 # because we want to solve K(t)=1
err += abs(v0)
retval[j] += -v0 / v1 # Newton iteration
i += 1
if not (err > prec and i < 1000):
break
return retval
@staticmethod
def __norm_const_saddlepoint(eigval, deriv=False):
""" Saddlepoint based approximation of the normalization constant. """
assert isinstance(eigval, np.ndarray), \
"input needs to be of type numpy.ndarray."
assert eigval.ndim == 1, \
"input needs to be a vector"
dim = eigval.shape[0]
min_el = np.amin(eigval)
result = np.zeros(3)
derivatives = {}
la = eigval
scale_factor = 1.0
if min_el <= 0:
la = eigval - (min_el - 0.1)
scale_factor = np.exp(-min_el + 0.1)
min_el = 0.1
if deriv:
r = BinghamDistribution.__find_multiple_roots_newton(
dim, la, min_el)
hk = BinghamDistribution.__xi2cgfderiv(r[0], dim, la, -1)
t = (1.0 / 8 * (hk[3] / (hk[1] * hk[1])) - 5.0 / 24 *
(hk[2] * hk[2] / (hk[1] * hk[1] * hk[1])))
result[0] = (np.sqrt(2 * pow(np.pi, dim - 1)) * np.exp(-r[0]) /
np.sqrt(hk[1]) * scale_factor)
for i in range(dim):
result[0] /= np.sqrt(la[i] - r[0])
result[1] = result[0] * (1 + t)
result[2] = result[0] * np.exp(t)
for i in range(dim):
hk = BinghamDistribution.__xi2cgfderiv(r[i + 1], dim, la, i)
t = (1.0 / 8 * (hk[3] / (hk[1] * hk[1])) - 5.0 / 24 *
(hk[2] * hk[2] / (hk[1] * hk[1] * hk[1])))
derivatives[3*i] = (np.sqrt(2*pow(np.pi, dim+1))*np.exp(-r[i+1])
/ (np.sqrt(hk[1]) * 2 * np.pi) *
scale_factor)
for j in range(dim):
if j != i:
derivatives[3 * i] /= np.sqrt(la[j] - r[i + 1])
else:
derivatives[3 * i] /= pow(np.sqrt(la[j] - r[i + 1]), 3)
derivatives[3 * i + 1] = derivatives[3 * i] * (1 + t)
derivatives[3 * i + 2] = derivatives[3 * i] * np.exp(t)
return result, derivatives
else:
r = BinghamDistribution.__find_root_newton(dim, la, min_el)
hk = BinghamDistribution.__xi2cgfderiv(r, dim, la, -1)
t = (1.0 / 8 * (hk[3] / (hk[1] * hk[1])) - 5.0 / 24 *
(hk[2] * hk[2] / (hk[1] * hk[1] * hk[1])))
result[0] = (np.sqrt(2 * pow(np.pi, dim - 1)) * np.exp(-r) /
np.sqrt(hk[1]) * scale_factor)
for i in range(dim):
result[0] /= np.sqrt(la[i] - r)
result[1] = result[0] * (1 + t)
result[2] = result[0] * np.exp(t)
return result
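# Added usage sketch (not part of the original file): exercises the public API
# above when the module is run directly. The parameter values are illustrative;
# M must be orthogonal and Z ascending with a trailing zero, as checked by
# assert_parameters.
if __name__ == "__main__":
    example_m = np.eye(4)
    example_z = np.array([-10., -4., -1., 0.])
    bd = BinghamDistribution(example_m, example_z)
    print("mode:", bd.mode)
    print("pdf at mode:", bd.pdf(bd.mode))
    fitted = BinghamDistribution.fit(bd.random_samples(500))
    print("dispersion recovered from 500 samples:", fitted.z)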
| 29,792 | 36.102117 | 80 | py |
deep_bingham | deep_bingham-master/train.py | """
Deep Orientation Estimation Training
"""
import argparse
import os
import sys
import torch
import torch.optim as optim
import torchvision.transforms as transforms
import yaml
from tensorboardX import SummaryWriter
import data_loaders
import modules.network
from modules import BinghamLoss, BinghamMixtureLoss, \
VonMisesLoss, MSELoss, CosineLoss
from training import Trainer
torch.manual_seed(0)
DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), "configs/upna_train.yaml")
LOSS_FUNCTIONS = {'mse': MSELoss,
'bingham': BinghamLoss,
'bingham_mdn': BinghamMixtureLoss,
'von_mises': VonMisesLoss,
'cosine': CosineLoss}
def get_dataset(config):
""" Returns the training data using the provided configuration."""
data_loader = config["data_loader"]
size = data_loader["input_size"]
data_transforms = transforms.Compose([
transforms.CenterCrop(600),
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
data_transforms_idiap = transforms.Compose([
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
data_transforms_depth = transforms.Compose([
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
if data_loader["name"] == "UPNAHeadPose":
dataset = data_loaders.UpnaHeadPoseTrainTest(
data_loader["config"], data_transforms)
train_dataset = dataset.train
elif data_loader["name"] == "IDIAP":
dataset = data_loaders.IDIAPTrainTest(data_loader["config"],
data_transforms_idiap)
train_dataset = dataset.train
elif data_loader["name"] == "T_Less":
dataset = data_loaders.TLessTrainTest(
data_loader["config"], data_transforms_idiap)
train_dataset = dataset.train
else:
sys.exit("Unknown data loader " + config['data_loader']["name"] + ".")
training_size = int(len(train_dataset) * 0.90)
val_size = len(train_dataset) - training_size
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [training_size, val_size])
return train_dataset, val_dataset
def main():
""" Loads arguments and starts training."""
parser = argparse.ArgumentParser(description="Deep Orientation Estimation")
parser.add_argument('-c', '--config', default=DEFAULT_CONFIG, type=str)
args = parser.parse_args()
config_file = args.config
# Load config
assert os.path.exists(args.config), "Config file {} does not exist".format(
args.config)
with open(config_file) as fp:
config = yaml.load(fp)
if not os.path.exists(config["train"]["save_dir"]):
os.makedirs(config["train"]["save_dir"])
device = torch.device(
config["train"]["device"] if torch.cuda.is_available() else "cpu")
print("Using device: {}".format(device))
# Build model architecture
num_channels = config["train"]["num_channels"] or 3
model_name = config["train"]["model"] or 'vgg11'
num_classes = config["train"].get("num_outputs", None)
model = modules.network.get_model(name=model_name,
pretrained=True,
num_channels=num_channels,
num_classes=num_classes)
model.to(device)
print("Model name: {}".format(model_name))
# optionally resume from checkpoint
resume = config["train"]["resume"]
if resume:
if os.path.isfile(resume):
print("Loading checkpoint {}".format(resume))
checkpoint = torch.load(resume)
start_epoch = checkpoint["epoch"]
model.load_state_dict(checkpoint["state_dict"])
else:
start_epoch = 0
print("No checkpoint found at {}".format(resume))
else:
start_epoch = 0
# Get dataset
train_dataset, val_dataset = get_dataset(config)
b_size = config["train"]["batch_size"] or 4
# This should not be necessary but it surprisingly is. In the presence of a
# GPU, PyTorch tries to allocate GPU memory when pin_memory is set to true
# in the data loader. This happens even if training is to happen on CPU and
# all objects are on CPU.
if config["train"]["device"] != "cpu":
use_memory_pinning = True
else:
use_memory_pinning = False
validationloader = torch.utils.data.DataLoader(
val_dataset, batch_size=b_size, shuffle=True, num_workers=1,
pin_memory=use_memory_pinning)
trainloader = torch.utils.data.DataLoader(
train_dataset, batch_size=b_size, shuffle=True, num_workers=1,
pin_memory=use_memory_pinning)
print("batch size: {}".format(b_size))
# Define loss function (criterion) and optimizer
learning_rate = config["train"]["learning_rate"] or 0.0001
loss_function_name = config["train"]["loss_function"]
if "loss_parameters" in config["train"]:
loss_parameters = config["train"]["loss_parameters"]
else:
loss_parameters = None
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
print(optimizer)
# Set up tensorboard writer
writer_train = SummaryWriter(
"runs/{}/training".format(config["train"]["save_as"]))
writer_val = SummaryWriter(
"runs/{}/validation".format(config["train"]["save_as"]))
# Train the network
num_epochs = config["train"]["num_epochs"] or 2
print("Number of epochs: {}".format(num_epochs))
if loss_parameters is not None:
loss_function = LOSS_FUNCTIONS[loss_function_name](**loss_parameters)
else:
loss_function = LOSS_FUNCTIONS[loss_function_name]()
if "floating_point_type" in config["train"]:
floating_point_type = config["train"]["floating_point_type"]
else:
floating_point_type = "float"
trainer = Trainer(device, floating_point_type)
for epoch in range(start_epoch, num_epochs):
trainer.train_epoch(
trainloader, model, loss_function, optimizer,
epoch, writer_train, writer_val, validationloader)
save_checkpoint(
{'epoch': epoch + 1, 'state_dict': model.state_dict()},
filename=os.path.join(config["train"]["save_dir"],
'checkpoint_{}_{}.tar'.format(
model_name, epoch))
)
print('Finished training')
def save_checkpoint(state, filename='checkpoint.pth.tar'):
"""
Save the training model
"""
torch.save(state, filename)
if __name__ == '__main__':
main()
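# Added note (not part of the original script): sketch of the YAML layout this
# script expects. Key names mirror the config accesses in main()/get_dataset();
# the values shown are illustrative placeholders only.
#
# train:
#   device: "cuda:0"
#   model: "resnet18"
#   num_channels: 3
#   num_outputs: 19
#   batch_size: 32
#   learning_rate: 0.0001
#   num_epochs: 50
#   loss_function: "bingham"
#   # optional: loss_parameters, floating_point_type
#   resume: false
#   save_dir: "models/"
#   save_as: "experiment_name"
# data_loader:
#   name: "UPNAHeadPose"
#   input_size: 224
#   config: { ... }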
| 6,909 | 32.543689 | 105 | py |
deep_bingham | deep_bingham-master/modules/maad.py | import torch
from modules.gram_schmidt import gram_schmidt, gram_schmidt_batched
from modules.quaternion_matrix import quaternion_matrix
from utils.utils import \
convert_euler_to_quaternion
from modules.vm_operations import *
import math
def angular_loss_single_sample(target, predicted):
""" Returns the angle between two quaternions.
Note that for a quaternion q, -q = q so the
angle of rotation must be less than 180 degrees.
Inputs:
target = target quaternion
predicted = predicted quaternion
"""
quat_ang = torch.clamp(torch.abs(torch.dot(target, predicted)), min=0,
max=1)
acos_val = torch.acos(quat_ang)
diff_ang = acos_val * 2
return diff_ang
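# Added worked example (not part of the original code): for the identity rotation
# q1 = [1, 0, 0, 0] and a 90-degree rotation about a single axis
# q2 = [cos(45 deg), sin(45 deg), 0, 0], |<q1, q2>| = cos(45 deg) ~ 0.7071, so the
# returned angle is 2 * acos(0.7071) ~ 1.5708 rad, i.e. 90 degrees.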
def maad_mse(target, predicted):
"""
Computes the MAAD over a batch of
target, predicted quaternion pairs
Inputs
target = batch of target quaternions
predicted = batch of predicted quaternions
"""
angular_loss = 0
for i in range(target.shape[0]):
angular_loss += angular_loss_single_sample(target[i], predicted[i])
return angular_loss / target.shape[0]
def maad_cosine(target, predicted):
angular_dev = 0
for i in range(target.shape[0]):
angles = output_to_angles(predicted[i])
pan = torch.atan2(angles[1], angles[0])
tilt = torch.atan2(angles[3], angles[2])
roll = torch.atan2(angles[5], angles[4])
pan_target = target[i][0]
tilt_target = target[i][1]
roll_target = target[i][2]
target_quat = convert_euler_to_quaternion(pan_target, tilt_target,
roll_target)
predicted_quat = convert_euler_to_quaternion(math.degrees(pan),
math.degrees(tilt),
math.degrees(roll))
angular_dev += angular_loss_single_sample(torch.from_numpy(target_quat),
torch.from_numpy(
predicted_quat))
return angular_dev / target.shape[0]
def maad_biternion(target, predicted):
angular_dev = 0
for i in range(target.shape[0]):
angles, kappas = output_to_angles_and_kappas(predicted[i])
pan = torch.atan2(angles[1], angles[0])
tilt = torch.atan2(angles[3], angles[2])
roll = torch.atan2(angles[5], angles[4])
pan_target = target[i][0]
tilt_target = target[i][1]
roll_target = target[i][2]
target_quat = convert_euler_to_quaternion(pan_target, tilt_target,
roll_target)
predicted_quat = convert_euler_to_quaternion(math.degrees(pan),
math.degrees(tilt),
math.degrees(roll))
angular_dev += angular_loss_single_sample(torch.from_numpy(target_quat),
torch.from_numpy(
predicted_quat))
return angular_dev / target.shape[0]
def maad_bingham(target, predicted, orthogonalization="gram_schmidt"):
""" Computes mean absolute angular deviation between a pair of quaternions
Parameters:
predicted (torch.Tensor): Output from network of shape (N, 16) if
orthogonalization is "gram_schmidt" and (N, 4) if it is
"quaternion_matrix".
target (torch.Tensor): Ground truth of shape N x 4
orthogonalization (str): Orthogonalization method to use. Can be
"gram_schmidt" for usage of the classical gram-schmidt method.
"modified_gram_schmidt" for a more robust variant, or
"quaternion_matrix" for usage of a orthogonal matrix representation
of an output quaternion.
"""
angular_dev = 0
if orthogonalization == "gram_schmidt":
batch_size = target.shape[0]
reshaped_output = predicted.reshape(batch_size, 4, 4)
param_m = gram_schmidt_batched(reshaped_output)
for i in range(batch_size):
angular_dev += angular_loss_single_sample(
target[i], param_m[i, :, 3])
else:
for i in range(target.shape[0]):
if orthogonalization == "modified_gram_schmidt":
reshaped_output = predicted[i][-16:].reshape(4, 4)
param_m = gram_schmidt(reshaped_output, modified=True)
elif orthogonalization == "quaternion_matrix":
param_m = quaternion_matrix(predicted[i])
else:
raise ValueError("Invalid orthogonalization method.")
angular_dev += angular_loss_single_sample(target[i], param_m[:, 3])
return angular_dev / target.shape[0]
| 4,931 | 37.834646 | 80 | py |
deep_bingham | deep_bingham-master/modules/vm_operations.py | import torch
def output_to_kappas(output):
zero_vec = torch.zeros(len(output), 3)
if output.is_cuda:
device = output.get_device()
zero_vec = torch.zeros(len(output), 3).to(device)
kappas = torch.where(output[:, :3] > 0, output[:, :3], zero_vec)
return kappas
def output_to_angles(output):
pan = normalize_cosine_sine(output[:2])
tilt = normalize_cosine_sine(output[2:4])
roll = normalize_cosine_sine(output[4:])
angles = torch.cat((pan, tilt, roll), 0)
return angles
def output_to_angles_and_kappas(output):
pan = normalize_cosine_sine(output[3:5])
tilt = normalize_cosine_sine(output[5:7])
roll = normalize_cosine_sine(output[7:])
angles = torch.cat((pan, tilt, roll), 0)
zero_vec = torch.zeros(3)
if output.is_cuda:
device = output.get_device()
zero_vec = torch.zeros(3).to(device)
kappas = torch.where(output[:3] > 0, output[:3], zero_vec)
return angles, kappas
def normalize_cosine_sine(angle_tensor):
return angle_tensor / torch.sqrt(torch.sum(torch.pow(angle_tensor, 2)))
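# Added worked example (not part of the original file): normalize_cosine_sine
# rescales a (cos, sin) pair to unit length, e.g.
#   normalize_cosine_sine(torch.tensor([3.0, 4.0]))  ->  tensor([0.6000, 0.8000])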
| 1,093 | 27.051282 | 75 | py |
deep_bingham | deep_bingham-master/modules/bingham_mixture_loss.py | """Implementation of the Bingham Mixture Loss"""
import torch
from .maad import angular_loss_single_sample
from .bingham_fixed_dispersion import BinghamFixedDispersionLoss
from .bingham_loss import BinghamLoss
from .gram_schmidt import gram_schmidt_batched
from utils import vec_to_bingham_z_many
class BinghamMixtureLoss(object):
""" Bingham Mixture Loss
Computes the log likelihood bingham mixture loss on a batch. Can be
configured such that for a predefined number of epochs
Arguments:
lookup_table_file (str): Path to the location of the lookup table.
mixture_component_count (int): Number of Bingham mixture components.
interpolation_kernel (str): The kernel to use for rbf interpolation
(can be "multiquadric" or "gaussian").
fixed_dispersion_stage (int): Number of epochs in which the network is
trained using a fixed dispersion parameter z.
fixed_param_z (list): The fixed dispersion parameter Z used for all
mixture components during the fixed dispersion stage.
Inputs:
target (torch.Tensor): Target values at which the likelihood is
evaluated of shape (N, 4)
output (torch.Tensor): Output values from which M and Z are extracted of
shape (N, MIXTURE_COMPONENT_COUNT * 20). The first of the 20 values
per mixture component is for computing the weight of that component.
The remaining 19 are passed on to the BinghamLoss class.
"""
def __init__(self, lookup_table_file, mixture_component_count,
interpolation_kernel="multiquadric", fixed_dispersion_stage=25,
fixed_param_z=[-1, -1, -1, 0]):
self._num_components = mixture_component_count
self._fixed_dispersion_stage = fixed_dispersion_stage
self._softmax = torch.nn.Softmax(dim=1)
self._bingham_fixed_dispersion_loss = BinghamFixedDispersionLoss(
fixed_param_z, orthogonalization="gram_schmidt")
self._bingham_loss = BinghamLoss(
lookup_table_file, interpolation_kernel,
orthogonalization="gram_schmidt")
def __call__(self, target, output, epoch):
batch_size = output.shape[0]
weights = self._softmax(output[:, 0:-1:20])
log_likelihood = torch.tensor(0., device=output.device, dtype=output.dtype)
for i in range(batch_size):
current_likelihood = torch.tensor(
0., device=output.device, dtype=output.dtype)
for j in range(self._num_components):
if epoch < self._fixed_dispersion_stage:
bd_log_likelihood = self._bingham_fixed_dispersion_loss(
target[i].unsqueeze(0),
output[i, (j*20+4):((j+1)*20)].unsqueeze(0))[1]
else:
bd_log_likelihood = self._bingham_loss(
target[i].unsqueeze(0),
output[i, (j*20+1):((j+1)*20)].unsqueeze(0))[1]
current_likelihood += weights[i, j] * \
torch.exp(bd_log_likelihood).squeeze()
log_likelihood += torch.log(current_likelihood)
loss = -log_likelihood
log_likelihood /= batch_size
return loss, log_likelihood
def statistics(self, target, output, epoch):
""" Reports some additional loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): NN output shaped as loss output parameter.
epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): Bingham parameters and angular deviation.
"""
batch_size = output.shape[0]
weights = self._softmax(output[:, 0:-1:20])
maad = torch.zeros(
batch_size, device=output.device, dtype=output.dtype)
mode_stats = dict()
for j in range(self._num_components):
bd_z = torch.mean(vec_to_bingham_z_many(
output[:, (j*20+1):(j*20+4)]
).squeeze(0), 0)
mode_stats["mode_" + str(j) + "_weight"] \
= float(torch.mean(weights[:, j]))
if epoch >= self._fixed_dispersion_stage:
mode_stats["mode_" + str(j) + "_z_0"] = float(bd_z[0])
mode_stats["mode_" + str(j) + "_z_1"] = float(bd_z[1])
mode_stats["mode_" + str(j) + "_z_2"] = float(bd_z[2])
param_m = torch.zeros((batch_size, self._num_components, 4, 4),
device=output.device, dtype=output.dtype)
for j in range(self._num_components):
param_m[:, j, :, :] = gram_schmidt_batched(
output[:, (j * 20 + 4):((j + 1) * 20)].reshape(batch_size, 4, 4)
)
# Setting mmaad to 10 such that the minimum succeeds in the first run.
mmaad = 10. * torch.ones(
batch_size, device=output.device, dtype=output.dtype)
for i in range(batch_size):
for j in range(self._num_components):
cur_angular_deviation = angular_loss_single_sample(
target[i], param_m[i, j, :, 3])
maad[i] += cur_angular_deviation * weights[i, j]
mmaad[i] = torch.min(mmaad[i], cur_angular_deviation)
maad = torch.mean(maad)
mmaad = torch.mean(mmaad)
stats = {
"maad": float(maad),
"mmaad": float(mmaad)
}
stats.update(mode_stats)
return stats
| 5,574 | 41.234848 | 83 | py |
deep_bingham | deep_bingham-master/modules/bingham_fixed_dispersion.py | import torch
from modules.gram_schmidt import gram_schmidt_batched
from modules.bingham_loss import batched_logprob
from modules.maad import maad_bingham
from modules.quaternion_matrix import quaternion_matrix
class BinghamFixedDispersionLoss(object):
"""
Class for calculating bingham loss assuming a fixed Z.
Parameters:
bd_z (list): Values of parameter matrix Z of size 3 (the bingham is four
dimensional but the last parameter is assumed to be 0). All must be
negative and in ascending order.
orthogonalization (str): Orthogonalization method to use. Can be
"gram_schmidt" for usage of the classical gram-schmidt method.
"modified_gram_schmidt" for a more robust variant, or
"quaternion_matrix" for usage of a orthogonal matrix representation
of an output quaternion.
"""
def __init__(self, bd_z, orthogonalization="gram_schmidt"):
self.name = "bingham_fixed_z"
self.bd_z = bd_z
self.orthogonalization = orthogonalization
def __call__(self, target, output):
"""
Calculates the bingham fixed dispersion log likelihood loss
on a batch of target-output values.
Inputs:
target: Target values at which the likelihood is evaluated
of shape (N, 4)
output: Output values from which M is computed, shape
(N, 16) if orthogonalization is "gram_schmidt" and (N, 4) if it
is "quaternion_matrix".
Result:
loss: The loss of the current batch.
log_likelihood: Average log likelihood.
"""
if type(self.bd_z) != torch.Tensor:
bd_z = torch.tensor([
[self.bd_z[0], 0, 0, 0],
[0, self.bd_z[1], 0, 0],
[0, 0, self.bd_z[2], 0],
[0, 0, 0, 0]
], device=output.device, dtype=output.dtype)
log_likelihood = 0.0
bd_m = self._output_to_m(output)
for i in range(output.shape[0]):
log_likelihood \
+= self._bingham_loss_fixed_dispersion_single_sample(
target[i], bd_m[i], bd_z)
loss = -log_likelihood
return loss, log_likelihood / output.shape[0]
def statistics(self, target, output, epoch):
""" Reports some additional loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): Network output.
epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): Bingham parameters and angular deviation.
"""
stats = {
"maad": float(maad_quaternion(
target, output, self.orthogonalization))
}
return stats
@staticmethod
def _bingham_loss_fixed_dispersion_single_sample(target, bd_m, bd_z):
"""
Calculates the bingham likelihood loss on
a single sample.
Parameters:
target: Target value at which the likelihood is
evaluated
bd_m: Bingham distribution location and axes parameter of shape
(1, 4, 4)
bd_z: Z parameter matrix of shape (1, 4, 4)
"""
target = target.reshape(1, 4)
loss = torch.mm(torch.mm(torch.mm(torch.mm(
target, bd_m), bd_z), torch.t(bd_m)), torch.t(target))
return loss
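# Added note (not part of the original code): when `target` equals the mode of
# the distribution (the last column of bd_m), M^T x is the unit vector e_4 and
# the exponent above evaluates to Z[3, 3] = 0, which is the maximum attainable
# value since the remaining diagonal entries of Z are non-positive.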
def _output_to_m(self, output):
""" Creates orthogonal matrix from output.
Parameters:
output (torch.Tensor): Output values from which M is extracted,
shape (batch_size, 16) for gram-schmidt orthogonalization
and (batch_size, 4) for quaternion_matrix orthogonalization.
"""
batch_size = output.shape[0]
if self.orthogonalization == "gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output)
elif self.orthogonalization == "modified_gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output, modified=True)
elif self.orthogonalization == "quaternion_matrix":
#bd_m = quaternion_matrix(output)
raise NotImplementedError
else:
raise ValueError("Invalid orthogonalization type.")
return bd_m
| 4,440 | 36.008333 | 80 | py |
deep_bingham | deep_bingham-master/modules/network.py | import torch.nn as nn
from torchvision import models
def get_model(name, pretrained, num_channels, num_classes):
"""
Method that returns a torchvision model given a model
name, pretrained (or not), number of channels,
and number of outputs
Inputs:
name - string corresponding to model name
pretrained- Boolean for whether a pretrained
model is requested
num_channels- int number of channels
num_classes- number of outputs of the network
"""
function = getattr(models, name)
model = function(pretrained=pretrained)
if "resnet" in name:
if num_channels == 1:
model = ResNet18Grayscale(models.resnet.BasicBlock,
[2, 2, 2, 2],
num_classes)
else:
model.fc = nn.Linear(512, num_classes)
else:
# Replace the final fully-connected layer of the classifier head so the
# network outputs num_classes values.
model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, num_classes)
return model
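# Added usage sketch (not part of the original file): an illustrative call; the
# model name and output size below are placeholders.
#   model = get_model(name="resnet18", pretrained=True, num_channels=3,
#                     num_classes=4)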
class ResNet18Grayscale(models.resnet.ResNet):
"""
A class that inherits the torchvision model
Resnet and makes it compatible with grayscale
images.
"""
def __init__(self, block, layers, num_classes):
super(ResNet18Grayscale, self).__init__(block, layers, num_classes)
self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.fc = nn.Linear(512, num_classes)
| 1,587 | 32.083333 | 80 | py |
deep_bingham | deep_bingham-master/modules/von_mises.py | """Implementation of von Mises loss function
Code based on:
https://github.com/sergeyprokudin/deep_direct_stat/blob/master/utils/losses.py
"""
import numpy as np
import torch
import math
import sys
from scipy.interpolate import Rbf
import utils
from utils import generate_coordinates
from modules.maad import maad_biternion
from modules.vm_operations import *
class VonMisesLoss(object):
"""
Computes the von Mises log likelihood loss on a batch of target-output
values.
"""
def __init__(self):
self._bessel_taylor_coefs = torch.tensor(
[1.00000000e+00, 2.50000000e-01, 1.56250000e-02,
4.34027778e-04, 6.78168403e-06])
def __call__(self, target, output):
"""
Calculates the von mises likelihood loss on a batch of target-output
values.
parameters:
target: target Euler angles (pan, tilt, roll) in degrees, of shape (n, 3)
output: output values from which the kappas and the biternion representations
of the angles are extracted, shape (n, 9)
returns:
neg_log_likelihood_loss: the negative sum of the log-likelihood of
each sample.
log_likelihood: the average log likelihood.
"""
log_likelihood = 0
data_type = output.type()
for i in range(output.shape[0]):
angles, kappas = output_to_angles_and_kappas(output[i])
x = math.radians(target[i][0])
y = math.radians(target[i][1])
z = math.radians(target[i][2])
pan_target = torch.tensor([math.cos(x), math.sin(x)])
tilt_target = torch.tensor([math.cos(y), math.sin(y)])
roll_target = torch.tensor([math.cos(z), math.sin(z)])
if output.is_cuda:
device = output.get_device()
pan_target = pan_target.to(device)
tilt_target = tilt_target.to(device)
roll_target = roll_target.to(device)
log1 = self._von_mises_log_likelihood_single_angle(
pan_target, angles[:2], kappas[0:1])
log2 = self._von_mises_log_likelihood_single_angle(
tilt_target, angles[2:4], kappas[1:2])
log3 = self._von_mises_log_likelihood_single_angle(
roll_target, angles[4:], kappas[2:])
log_likelihood += log1 + log2 + log3
loss = -log_likelihood
return loss, log_likelihood / output.shape[0]
def _von_mises_log_likelihood_single_angle(self, y_true, mu_pred,
kappa_pred):
r"""
Compute log-likelihood given data samples and predicted von-mises model
parameters
Parameters:
y_true: true values of an angle in biternion (cos, sin)
representation.
mu_pred: predicted mean values of an angle in biternion (cos, sin)
representation.
kappa_pred: predicted kappa (inverse variance) values of an angle
Returns:
log_likelihood: the von Mises log likelihood.
"""
cosine_dist = torch.sum(torch.mul(y_true, mu_pred)).reshape([-1, 1])
if kappa_pred.is_cuda:
device = kappa_pred.get_device()
cosine_dist = cosine_dist.to(device)
norm_const = self._log_bessel_approx_dds(kappa_pred) \
+ torch.log(torch.tensor(2. * 3.14159))
log_likelihood = (kappa_pred * cosine_dist) - norm_const
return log_likelihood.reshape([-1, 1])
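# Added note (not part of the original code): for kappa_pred = 0 the von Mises
# density reduces to the uniform distribution on the circle, so the value above
# becomes -log(2 * pi) ~ -1.8379 regardless of the predicted and true angles.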
def _log_bessel_approx_dds(self, kappa):
kappa.reshape([-1, 1])
def _log_bessel_approx_taylor(cls, x):
num_coef = cls._bessel_taylor_coefs.shape[0]
arg = torch.arange(0, num_coef, 1) * 2
deg = arg.reshape([1, -1])
n_rows = x.shape[0]
x_tiled = x.repeat([1, num_coef])
deg_tiled = deg.repeat([n_rows, 1]).float()
coef_tiled = cls._bessel_taylor_coefs[0:num_coef].reshape(
1, num_coef).repeat([n_rows, 1])
if x.is_cuda:
device = x.get_device()
x_tiled = x_tiled.to(device)
deg_tiled = deg_tiled.to(device)
coef_tiled = coef_tiled.to(device)
val = torch.log(
torch.sum(torch.pow(x_tiled, deg_tiled) * coef_tiled, 1))
return val.reshape([-1, 1])
def _log_bessel_approx_large(x):
return x - 0.5 * torch.log(2 * np.pi * x)
if kappa[0] > 5:
return _log_bessel_approx_large(kappa)
else:
return _log_bessel_approx_taylor(self, kappa)
def statistics(self, target, output, epoch=None):
param_kappas = output_to_kappas(output)
stats = {"maad" : float(maad_biternion(target, output)),
"kappa_0": float(param_kappas[:, 0].mean()),
"kappa_1": float(param_kappas[:, 1].mean()),
"kappa_2": float(param_kappas[:, 2].mean())}
return stats
| 5,116 | 33.574324 | 79 | py |
deep_bingham | deep_bingham-master/modules/gram_schmidt.py | import torch
def gram_schmidt(input_mat, reverse=False, modified=False):
""" Carries out the Gram-Schmidt orthogonalization of a matrix.
Arguments:
input_mat (torch.Tensor): A quadratic matrix that will be turned into an
orthogonal matrix.
reverse (bool): Starts gram Schmidt method beginning from the last
column if set to True.
modified (bool): Uses modified Gram-Schmidt as described.
"""
mat_size = input_mat.shape[0]
Q = torch.zeros(mat_size, mat_size,
device=input_mat.device, dtype=input_mat.dtype)
if modified:
if reverse:
outer_iterator = range(mat_size - 1, -1, -1)
def inner_iterator(k): return range(k, -1, -1)
else:
outer_iterator = range(mat_size)
def inner_iterator(k): return range(k+1, mat_size)
# This implementation mostly follows the description from
# https://www.math.uci.edu/~ttrogdon/105A/html/Lecture23.html
# The more complex form is due to pytorch not allowing for inplace
# operations of variables needed for gradient computation.
v = input_mat
for j in outer_iterator:
Q[:, j] = v[:, j] / torch.norm(v[:, j])
v_old = v
v = torch.zeros(mat_size, mat_size,
device=input_mat.device, dtype=input_mat.dtype)
for i in inner_iterator(j):
v[:, i] = v_old[:, i] \
- (torch.dot(Q[:, j].clone(), v_old[:, i])
* Q[:, j].clone())
elif not modified:
if reverse:
outer_iterator = range(mat_size - 1, -1, -1)
def inner_iterator(k): return range(mat_size - 1, k, -1)
else:
outer_iterator = range(mat_size)
def inner_iterator(k): return range(k)
for j in outer_iterator:
v = input_mat[:, j]
for i in inner_iterator(j):
p = torch.dot(Q[:, i].clone(), v) * Q[:, i].clone()
v = v - p
Q[:, j] = v / torch.norm(v)
return Q
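# Added usage sketch (not part of the original file): for a full-rank square
# input the returned matrix is orthogonal, e.g.
#   Q = gram_schmidt(torch.randn(4, 4))
#   assert torch.allclose(Q.t() @ Q, torch.eye(4), atol=1e-5)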
def gram_schmidt_batched(input_mat, reverse=False, modified=False):
""" Carries out the Gram-Schmidt orthogonalization of a matrix on an
entire batch.
Arguments:
input_mat (torch.Tensor): A tensor containing quadratic matrices each of
which will be orthogonalized of shape (batch_size, m, m).
reverse (bool): Starts gram Schmidt method beginning from the last
column if set to True.
modified (bool): Uses modified Gram-Schmidt as described.
Returns:
Q (torch.Tensor): A batch of orthogonal matrices of same shape as
input_mat.
"""
batch_size = input_mat.shape[0]
mat_size = input_mat.shape[1]
Q = torch.zeros(batch_size, mat_size, mat_size,
device=input_mat.device, dtype=input_mat.dtype)
if modified:
#TODO implement batched version
for i in range(input_mat.shape[0]):
q = gram_schmidt(input_mat[i], reverse, modified)
Q[i] = q
elif not modified:
if reverse:
raise NotImplementedError
else:
outer_iterator = range(mat_size)
def inner_iterator(k): return range(k)
for j in outer_iterator:
v = input_mat[:, :, j].view(batch_size, mat_size, 1)
for i in inner_iterator(j):
q_squeezed = Q[:, :, i].view(batch_size, 1, mat_size).clone()
dot_products = torch.bmm(q_squeezed, v)
p = dot_products.repeat((1, mat_size, 1)) \
* Q[:, :, i].unsqueeze(2).clone()
v = v - p
Q[:, :, j] = v.squeeze() / torch.norm(v, dim=1).repeat(1, mat_size)
return Q
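# Minimal usage sketch (not part of the original module): orthogonalizing a
# random 4x4 matrix and checking Q^T Q = I. Matrix sizes and tolerances are
# chosen arbitrarily for illustration.
if __name__ == "__main__":
    torch.manual_seed(0)
    mat = torch.randn(4, 4, dtype=torch.float64)
    q = gram_schmidt(mat, modified=True)
    identity_error = torch.norm(q.t() @ q - torch.eye(4, dtype=torch.float64))
    print("Deviation from orthogonality:", float(identity_error))
    batch = torch.randn(8, 4, 4, dtype=torch.float64)
    q_batched = gram_schmidt_batched(batch)
    print("Batched result shape:", tuple(q_batched.shape))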
| 3,837 | 35.207547 | 80 | py |
deep_bingham | deep_bingham-master/modules/mse.py | import torch
import torch.nn as nn
from modules.maad import maad_mse
class MSELoss(object):
"""
Class for the MSE loss function
"""
def __init__(self):
self.loss = nn.MSELoss(reduction='sum')
def __call__(self, target, output):
"""
Calculates the MSE loss on a batch of target-output values.
Target value is the true unit quaternion pose. Output is the predicted
quaternion after normalization.
Arguments:
target (torch.Tensor): Target values at which the loss is evaluated of shape (N, 4)
output (torch.Tensor): Output values of shape (N, 4)
Returns:
loss: The loss of the current batch
log_likelihood: 0. This loss function does not calculate a log likelihood so 0
is returned.
"""
return self.loss(target, output), torch.Tensor([0])
def statistics(self, target, output, cur_epoch=None):
""" Reports loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): Network output.
cur_epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): angular deviation.
"""
return {"maad": maad_mse(target, output.detach())}
| 1,325 | 32.15 | 95 | py |
deep_bingham | deep_bingham-master/modules/bingham_loss.py | """Implementation of the Bingham loss function"""
from __future__ import print_function
import dill
import os
import bingham_distribution as ms
import numpy as np
import torch
from scipy.interpolate import Rbf
import utils
from modules.maad import maad_bingham
from modules.gram_schmidt import gram_schmidt, gram_schmidt_batched
from modules.quaternion_matrix import quaternion_matrix
from utils import generate_coordinates, vec_to_bingham_z_many
def batched_logprob(target, mu, sigma):
""" Mean of log probability of targets given mu and sigmas of a Gaussian
distribution """
target = target.reshape(mu.shape)
dist = torch.distributions.normal.Normal(mu, sigma)
return torch.mean(dist.log_prob(target))
def batched_norm(target, output):
""" Mean of norm error between target and output matrices """
target = target.reshape(output.shape)
diff = target - output
loss = torch.mean(torch.norm(diff, dim=-1))
return loss
class BinghamLoss(object):
"""
    Calculates the Bingham log likelihood loss on a batch of target-output
    values.
    Arguments:
        lookup_table_file (str): Path to the location of the lookup table.
        interpolation_kernel (str): The kernel to use for RBF interpolation.
            Can be "multiquadric" (default) or "gaussian".
        orthogonalization (str): Orthogonalization method to use. Can be
            "gram_schmidt" for usage of the classical Gram-Schmidt method,
            "modified_gram_schmidt" for a more robust variant, or
            "quaternion_matrix" for usage of an orthogonal matrix
            representation of an output quaternion.
Inputs:
target (torch.Tensor): Target values at which the likelihood is
evaluated of shape (N, 4)
output (torch.Tensor): Output values from which M and Z are extracted of
shape (N, 19) if orthogonalization is "gram_schmidt" and shape (N,7)
if it is "quaternion_matrix"
Result:
loss: The loss of the current batch.
log_likelihood: Average log likelihood.
"""
def __init__(self, lookup_table_file,
interpolation_kernel="multiquadric",
orthogonalization="gram_schmidt"):
self.orthogonalization = orthogonalization
_, _, nc_lookup_table, coords \
= utils.load_lookup_table(lookup_table_file)
print("Bingham Interpolation Kernel: " + interpolation_kernel)
self.interp_options = {
"interp_data": torch.from_numpy(nc_lookup_table),
"interp_coords": torch.from_numpy(coords)
}
rbf_file = os.path.splitext(lookup_table_file)[0] + ".rbf"
if os.path.exists(rbf_file):
with open(rbf_file, 'rb') as file:
self.rbf = dill.load(file)
else:
x, y, z = generate_coordinates(
self.interp_options["interp_coords"])
# Limit found empirically.
assert len(x) < 71000, "Lookup table too large."
print("Creating the interpolator... (this usually takes a while)")
self.rbf = Rbf(x, y, z, torch.log(
self.interp_options["interp_data"]
).numpy().ravel().squeeze())
with open(rbf_file, 'wb') as file:
dill.dump(self.rbf, file)
def __call__(self, target, output):
if target.is_cuda:
device = target.get_device()
M, Z = self._output_to_m_z(output)
log_likelihood = torch.sum(
self._log_bingham_loss(
target, M, Z.squeeze(0),
self.rbf))
loss = -log_likelihood
return loss, log_likelihood / target.shape[0]
def statistics(self, target, output, epoch):
""" Reports some additional loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): Network output.
epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): Bingham parameters and angular deviation.
"""
bd_z = torch.mean(vec_to_bingham_z_many(output[:, :3]).squeeze(0), 0)
cur_maad = maad_bingham(target, output[:, 3:], self.orthogonalization)
stats = {
"z_0": float(bd_z[0]),
"z_1": float(bd_z[1]),
"z_2": float(bd_z[2]),
"maad": float(cur_maad)
}
return stats
@staticmethod
def _log_bingham_loss(target, M, Z, rbf=None):
r"""Log Bingham likelihood loss.
        The Bingham distribution is parametrized as
            f(x) = 1/N(Z) * exp(x^T M Z M^T x),
        where N(Z) is the normalization constant and x is defined on the unit
        hypersphere, i.e. ||x|| = 1.
Note: This has been developed using CPU-only storage of Tensors and may
require adaptation when used with GPU.
Parameters:
target: Target values at which the likelihood is evaluated of shape
(N, 4).
M: Bingham distribution location and axes parameter of shape
(N,4,4). M is expected to be an orthonormal matrix.
Z: Tensor representing the Z parameter matrix of shape (N, 3).
The parameters are expected to be negative and given in an
ascending order.
rbf: RBF object
Returns:
log likelihood: log value of the pdf for each of the target samples.
"""
assert target.dim() == 2 and target.shape[1] == 4, \
"Wrong dimensionality of target tensor."
assert M.dim() == 3 and M.shape[1:3] == (4, 4), \
"Wrong dimensionality of location parameter matrix M."
assert Z.dim() == 2 and Z.shape[1] == 3, \
"Wrong dimensionality of location parameter matrix Z."
assert Z.shape[0] == M.shape[0] and Z.shape[0] == target.shape[0], \
"Number of samples does not agree with number of parameters."
if target.is_cuda:
device = target.get_device()
else:
device = "cpu"
# Adds missing 0 to vectors Z and turns them into diagonal matrices.
z_padded = torch.cat(
(Z, torch.zeros((Z.shape[0], 1), device=device, dtype=M.dtype)),
dim=1)
z_as_matrices = torch.diag_embed(z_padded)
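        # Note: the RBF interpolates the *log* of the Bingham normalization
        # constant, so it can be subtracted directly from the exponent below.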
norm_const = BinghamInterpolationRBF.apply(Z, rbf)
likelihoods = (torch.bmm(torch.bmm(torch.bmm(torch.bmm(
target.unsqueeze(1),
M),
z_as_matrices),
M.transpose(1, 2)),
target.unsqueeze(2))
).squeeze() - norm_const
return likelihoods
def _output_to_m_z(self, output):
""" Creates orthogonal matrix from output.
This method does not support vectorization yet.
Parameters:
output (torch.Tensor): Output values from which M is extracted,
shape (19,) for gram-schmidt orthogonalization and (7,) for
quaternion_matrix orthogonalization.
"""
bd_z = utils.vec_to_bingham_z_many(output[:, :3])
bd_m = vec_to_bingham_m(output[:, 3:], self.orthogonalization)
return bd_m, bd_z
class BinghamInterpolationRBF(torch.autograd.Function):
r"""Computes the Bingham interpolation constant and its derivatives.
Input:
Z: Tensor representing the Z parameters of shape (N, 3).
Returns:
norm_const: Von Mises normalization constants evaluated for each set of kappas
in matrix.
"""
@staticmethod
def forward(ctx, Z, rbfi):
norm_const = np.zeros(Z.shape[0])
ctx.save_for_backward(Z)
ctx.constant = rbfi
v = Z.detach().cpu().numpy()
for idx in range(Z.shape[0]):
norm_const[idx] = rbfi(v[idx][0], v[idx][1], v[idx][2])
tensor_type = Z.type()
if Z.is_cuda:
device = Z.get_device()
result = torch.tensor(norm_const, device=device).type(tensor_type)
else:
result = torch.tensor(norm_const).type(tensor_type)
return result
@staticmethod
def _compute_derivatives(rbfi, Z):
"""
        Computes the gradient of the interpolated log normalization constant
        with respect to Z via finite differences.
        Parameters:
            rbfi: an RBF interpolation object
            Z: an array with the three Bingham dispersion parameters
        Returns:
            a torch tensor with the gradient with respect to Z
"""
delta = 0.0001
x = rbfi(Z[0], Z[1], Z[2])
finite_diff_x = (rbfi(Z[0] + delta, Z[1], Z[2]) - x) / delta
finite_diff_y = (rbfi(Z[0], Z[1] + delta, Z[2]) - x) / delta
finite_diff_z = (rbfi(Z[0], Z[1], Z[2] + delta) - x) / delta
return torch.tensor([finite_diff_x, finite_diff_y, finite_diff_z])
@staticmethod
def backward(ctx, grad_output):
if not ctx.needs_input_grad[0]:
return None
Z = ctx.saved_tensors[0]
rbfi = ctx.constant
grad_Z = torch.zeros(Z.shape[0], 3)
v = Z.detach().cpu().numpy()
for idx in range(grad_output.shape[0]):
grad_Z[idx] = \
grad_output[idx] \
* BinghamInterpolationRBF._compute_derivatives(rbfi, v[idx])
tensor_type = grad_output.type()
if grad_output.is_cuda:
device = grad_output.get_device()
result = torch.tensor(grad_Z, device=device).type(tensor_type)
else:
result = torch.tensor(grad_Z).type(tensor_type)
return result, None
def vec_to_bingham_m(output, orthogonalization):
""" Creates orthogonal matrix from output.
This operates on an entire batch.
Parameters:
output (torch.Tensor): Output values from which M is extracted,
shape (batch_size, 16) for gram-schmidt orthogonalization
and (batch_size, 4) for quaternion_matrix orthogonalization.
        orthogonalization (str): Orthogonalization method to use. Can be
            "gram_schmidt" for usage of the classical Gram-Schmidt method,
            "modified_gram_schmidt" for a more robust variant, or
            "quaternion_matrix" for usage of an orthogonal matrix
            representation of an output quaternion. The latter is currently
            handled per sample in a loop rather than in batched form.
"""
batch_size = output.shape[0]
if orthogonalization == "gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output)
elif orthogonalization == "modified_gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output, modified=True)
elif orthogonalization == "quaternion_matrix":
#TODO batched version
bd_m = torch.zeros(output.shape[0], 4, 4).to(device=output.device, dtype=output.dtype)
for i in range(output.shape[0]):
            bd_m[i] = quaternion_matrix(output[i])
else:
raise ValueError("Invalid orthogonalization type.")
return bd_m
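# Minimal usage sketch (not part of the original module): converting a raw
# 19-dimensional network output into Bingham parameters. The first three
# entries parametrize Z, the remaining 16 are orthogonalized into M.
if __name__ == "__main__":
    torch.manual_seed(0)
    raw_output = torch.randn(2, 19, dtype=torch.float64)
    bd_z = utils.vec_to_bingham_z_many(raw_output[:, :3])
    bd_m = vec_to_bingham_m(raw_output[:, 3:], "gram_schmidt")
    print("Z parameters (ascending, non-positive):", bd_z)
    print("M orthogonality error:",
          float(torch.norm(torch.bmm(bd_m.transpose(1, 2), bd_m)
                           - torch.eye(4, dtype=torch.float64))))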
| 11,124 | 34.205696 | 94 | py |
deep_bingham | deep_bingham-master/modules/cosine.py | from modules.maad import output_to_angles, maad_cosine
from utils import radians
import torch
class CosineLoss():
"""
Class for calculating Cosine Loss assuming biternion representation of pose.
"""
def __init__(self):
self.stats = 0
def __call__(self, target, output):
"""
Calculates the cosine loss on a batch of target-output values.
Arguments:
            target: Target values at which the loss is evaluated, of shape (N, 3).
            output: Output values of shape (N, 6) predicted by the network,
                prior to normalization of each sin/cos pair.
        Result:
            loss: The loss of the current batch.
            log_likelihood: 0. This loss function does not calculate a log
                likelihood, so 0 is returned.
"""
loss = 0
for i in range(output.shape[0]):
loss += self._cosine_single_sample(target[i], output[i])
return loss, torch.Tensor([0])
def statistics(self, target, output, cur_epoch):
stats = {"maad": float(maad_cosine(target, output))}
self.stats = stats
return stats
def _cosine_single_sample(self, target, output):
"""
Calculates cosine loss for a single sample.
Arguments:
            target: Target value at which the loss is evaluated, of shape (1, 3).
            output: Output value of shape (1, 6) predicted by the network,
                prior to normalization of each sin/cos pair.
Returns:
loss: The loss of a single sample.
"""
radian_target = radians(target)
radian_target_cos = torch.cos(radian_target)
radian_target_sin = torch.sin(radian_target)
target_biternion = []
for i in range(3):
target_biternion.append(radian_target_cos[i])
target_biternion.append(radian_target_sin[i])
target = torch.tensor(target_biternion)
if output.is_cuda:
device = output.get_device()
target = target.to(device)
angles = output_to_angles(output)
return 3 - torch.dot(angles, target)
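# Minimal usage sketch (not part of the original module): encoding a target of
# Euler angles (in degrees) as a biternion vector [cos, sin] per angle, which
# is the representation this loss compares network outputs against.
if __name__ == "__main__":
    target_degrees = torch.tensor([10.0, 20.0, 30.0])
    target_radians = radians(target_degrees)
    biternion = torch.stack(
        [torch.cos(target_radians), torch.sin(target_radians)],
        dim=1).reshape(-1)
    print("Biternion encoding of the target:", biternion)
    # For a perfect prediction the loss 3 - <output, target> is zero.
    print("Loss at the target itself:",
          float(3 - torch.dot(biternion, biternion)))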
| 2,160 | 33.301587 | 86 | py |
deep_bingham | deep_bingham-master/modules/__init__.py | from .maad import maad_biternion, maad_bingham, maad_mse
from .bingham_loss import BinghamLoss
from .bingham_mixture_loss import BinghamMixtureLoss
from .mse import MSELoss
from .von_mises import VonMisesLoss
from .cosine import CosineLoss
| 241 | 33.571429 | 57 | py |
deep_bingham | deep_bingham-master/modules/quaternion_matrix.py | import torch
def quaternion_matrix(quat):
""" Computes an orthogonal matrix from a quaternion.
We use the representation from the NeurIPS 2018 paper "Bayesian Pose
Graph Optimization via Bingham Distributions and Tempred Geodesic MCMC" by
Birdal et al. There, the presentation is given above eq. (6). In practice
any similar scheme will do.
Parameters:
        quat (torch.tensor): Tensor of shape (4,) representing a quaternion.
"""
# This cumbersome way is necessary because copy constructors seem not to
# preserve gradients.
indices = torch.tensor([
[0, 1, 2, 3],
[1, 0, 3, 2],
[2, 3, 0, 1],
[3, 2, 1, 0]
], device=quat.device)
sign_mask = torch.tensor([
[1, -1, -1, 1],
[1, 1, 1, 1],
[1, -1, 1, -1],
[1, 1, -1, -1]
], device=quat.device, dtype=quat.dtype)
quat_normalized = quat / torch.norm(quat)
quat_mat = torch.take(quat_normalized, indices)
quat_mat = sign_mask * quat_mat
return quat_mat
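# Minimal usage sketch (not part of the original module): the resulting matrix
# is orthogonal and its first column equals the normalized input quaternion.
if __name__ == "__main__":
    quat = torch.tensor([0.8, 0.1, -0.3, 0.5], dtype=torch.float64)
    mat = quaternion_matrix(quat)
    print("First column:", mat[:, 0])
    print("Orthogonality error:",
          float(torch.norm(mat.t() @ mat - torch.eye(4, dtype=torch.float64))))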
| 1,042 | 27.189189 | 78 | py |
deep_bingham | deep_bingham-master/training/__init__.py | from .trainer import Trainer | 28 | 28 | 28 | py |
deep_bingham | deep_bingham-master/training/trainer.py | import time
import torch
from modules import maad
from utils import AverageMeter
class Trainer(object):
""" Trainer for Bingham Orientation Uncertainty estimation.
Arguments:
device (torch.device): The device on which the training will happen.
"""
def __init__(self, device, floating_point_type="float"):
self._device = device
self._floating_point_type = floating_point_type
@staticmethod
def adjust_learning_rate(optimizer):
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] / 2
def train_epoch(self, train_loader, model, loss_function,
optimizer, epoch, writer_train, writer_val, val_loader):
"""
        Trains the model for one epoch on the training set and reports losses
        to Tensorboard using writer_train.
        Arguments:
            train_loader: A DataLoader that contains the shuffled training set.
            model: The model being trained.
            loss_function: Loss function object.
            optimizer: The optimizer in use.
            epoch: Integer epoch number.
            writer_train: A Tensorboard summary writer for reporting the
                average loss during training.
            writer_val: A Tensorboard summary writer for reporting the
                average loss during validation.
            val_loader: A DataLoader that contains the shuffled validation set.
"""
losses = AverageMeter()
model.train()
if self._floating_point_type == "double":
model = model.double()
if hasattr(model, 'is_sequential'):
is_sequential = True
else:
is_sequential = False
timings_start = time.time()
for i, data in enumerate(train_loader):
if i % 20 == 0:
if i > 0 and i % 100 == 0:
print("Elapsed time: {}".format(
str(time.time()-timings_start)))
timings_start = time.time()
if is_sequential:
model.reset_state(batch=data['image'].shape[0],
device=self._device)
self.validate(self._device, val_loader, model,
loss_function, writer_val, i, epoch,
len(train_loader), 0.1)
# switch to train mode
model.train()
if self._floating_point_type == "double":
target_var = data["pose"].double().to(self._device)
input_var = data["image"].double().to(self._device)
else:
target_var = data["pose"].float().to(self._device)
input_var = data["image"].float().to(self._device)
if torch.sum(torch.isnan(target_var)) > 0:
continue
# compute output
if is_sequential:
model.reset_state(batch=data['image'].shape[0],
device=self._device)
model.to(self._device)
output = model(input_var)
if loss_function.__class__.__name__ == "MSELoss":
# norm over the last dimension (i.e. orientations)
norms \
= torch.norm(output, dim=-1, keepdim=True).to(self._device)
output = output / norms
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
loss, log_likelihood = loss_function(target_var, output, epoch)
else:
loss, log_likelihood = loss_function(target_var, output)
# compute gradient and do optimization step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if self._floating_point_type == "double":
loss = loss.double() / data["image"].shape[0]
else:
loss = loss.float() / data["image"].shape[0]
losses.update(loss.item(), data["image"].size(0))
            if (i + len(train_loader) * epoch) % 1000 == 0:
Trainer.adjust_learning_rate(optimizer)
writer_train.add_scalar('data/loss', loss,
i + len(train_loader) * epoch)
writer_train.add_scalar('data/log_likelihood', log_likelihood,
i + len(train_loader) * epoch)
cur_iter = epoch * len(train_loader) + i
stats = loss_function.statistics(target_var, output, epoch)
Trainer.report_stats(writer_train, stats, cur_iter)
print("Epoch: [{0}][{1}/{2}]\t Loss {loss.last_val:.4f} "
"({loss.avg:.4f})\t".format(
epoch, i, len(train_loader), loss=losses))
def validate(self, device, val_loader, model, loss_function, writer,
index=None, cur_epoch=None, epoch_length=None, eval_fraction=1):
"""
        Validates the model on the validation set and reports losses to
        Tensorboard using the writer.
        Arguments:
            device: The device on which evaluation runs, e.g. "cuda:0" or "cpu".
            val_loader: A DataLoader that contains the shuffled validation set.
            model: The model being evaluated.
            loss_function: Loss function object.
            writer: A Tensorboard summary writer for reporting the average
                loss during validation.
            index: The batch number we are on within the training set.
            cur_epoch: Integer number of the training epoch we are currently on.
            epoch_length: The number of batches in an epoch.
            eval_fraction: Fraction of the validation set used per evaluation.
"""
# switch to evaluate mode
model.eval()
losses = AverageMeter()
log_likelihoods = AverageMeter()
maads = AverageMeter()
averaged_stats = AverageMeter()
val_load_iter = iter(val_loader)
for i in range(int(len(val_loader) * eval_fraction)):
            data = next(val_load_iter)
if self._floating_point_type == "double":
target_var = data["pose"].double().to(device)
input_var = data["image"].double().to(device)
else:
target_var = data["pose"].float().to(device)
input_var = data["image"].float().to(device)
if torch.sum(torch.isnan(target_var)) > 0:
continue
# compute output
output = model(input_var)
if loss_function.__class__.__name__ == "MSELoss":
# norm over the last dimension (ie. orientations)
norms = torch.norm(output, dim=-1, keepdim=True).to(device)
output = output / norms
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
loss, log_likelihood = loss_function(target_var, output, cur_epoch)
else:
loss, log_likelihood = loss_function(target_var, output)
if self._floating_point_type == "double":
loss = loss.double() / data["image"].shape[0]
else:
loss = loss.float() / data["image"].shape[0]
# measure accuracy and record loss
losses.update(loss.item(), data["image"].size(0))
log_likelihoods.update(log_likelihood.item(), data["image"].size(0))
# TODO: Unify reporting to the style below.
stats = loss_function.statistics(target_var, output, cur_epoch)
averaged_stats.update(stats, data["image"].size(0))
if index is not None:
cur_iter = cur_epoch * epoch_length + index
writer.add_scalar('data/loss', losses.avg, cur_iter)
writer.add_scalar('data/log_likelihood', log_likelihoods.avg,
cur_iter)
Trainer.report_stats(writer, averaged_stats.avg, cur_iter)
print('Test:[{0}][{1}/{2}]\tLoss {loss.last_val:.4f} '
'({loss.avg:.4f})\t'.format(
cur_epoch, index, epoch_length, loss=losses))
@staticmethod
def report_stats(writer, stats, cur_iter):
for key in stats:
writer.add_scalar(
'data/' + key, stats[key], cur_iter)
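# Rough usage sketch (an assumption, not part of the original module); model,
# loaders, loss function, optimizer and SummaryWriter instances are expected to
# be provided by the surrounding training scripts:
#
#   trainer = Trainer(device=torch.device("cuda:0"))
#   for epoch in range(num_epochs):
#       trainer.train_epoch(train_loader, model, loss_function, optimizer,
#                           epoch, writer_train, writer_val, val_loader)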
| 8,449 | 39.430622 | 83 | py |
deep_bingham | deep_bingham-master/utils/visualization.py | import manstats as ms
import numpy as np
import quaternion
import matplotlib.pylab as plab
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
def plot_pose_bingham(bd_param_m, bd_param_z):
"""
Plots an uncertain orientation given as a 4d bingham.
Arguments:
bd_param_m, bd_param_z: The parameters of the Bingham distribution.
"""
# PART 0: Configuration
# Number of samples per sampled angular axis.
num_samples_axis = 100
# Number of sampled orientations.
num_orientation_samples = 200
# Bandwidth for the von Mises kernels used.
bandwidth = 50.
# PART 1: Sample poses.
bd = ms.BinghamDistribution(bd_param_m, bd_param_z)
orientations = bd.random_samples(num_orientation_samples)
mean_idx = 0
means = np.zeros([num_orientation_samples * 3, 3])
orientations = quaternion.from_float_array(orientations)
for orientation in orientations:
r_mat = quaternion.as_rotation_matrix(orientation)
means[mean_idx, :] = r_mat[:, 0]
means[mean_idx+1, :] = r_mat[:, 1]
means[mean_idx+2, :] = r_mat[:, 2]
mean_idx += 3
# PART 2: Generate values on the heatmap.
# Create a meshgrid sampling over the sphere.
phi = np.linspace(0, np.pi, num_samples_axis)
theta = np.linspace(0, 2 * np.pi, num_samples_axis)
phi, theta = np.meshgrid(phi, theta)
# The Cartesian coordinates of the unit sphere
x = np.reshape(np.sin(phi) * np.cos(theta), num_samples_axis**2)
y = np.reshape(np.sin(phi) * np.sin(theta), num_samples_axis**2)
z = np.reshape(np.cos(phi), num_samples_axis**2)
sphere_points = np.stack([x, y, z]).transpose()
intensities = _vmf_kernel(sphere_points, means, bandwidth)
intensities = np.reshape(intensities, [num_samples_axis, num_samples_axis])
x = np.reshape(x, [num_samples_axis, num_samples_axis])
y = np.reshape(y, [num_samples_axis, num_samples_axis])
z = np.reshape(z, [num_samples_axis, num_samples_axis])
fmax, fmin = intensities.max(), intensities.min()
intensities = (intensities - fmin) / (fmax - fmin)
# PART 3: Plot the heatmap.
fig = plt.figure(figsize=plt.figaspect(1.))
base_coordinates = quaternion.as_rotation_matrix(
quaternion.from_float_array(bd.mode)).transpose()
ax = fig.add_subplot(1, 1, 1, projection='3d')
_plot_coordinate_axes(base_coordinates)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
# Choose colormap and make it transparent.
cmap = plab.cm.viridis
my_cmap = cmap(np.arange(cmap.N))
my_cmap[:, -1] = np.linspace(0, 1, cmap.N)
my_cmap = ListedColormap(my_cmap)
ax.plot_surface(x, y, z, rstride=1, cstride=1,
facecolors=my_cmap(intensities))
# Turn off the axis planes
ax.view_init()
ax.azim = 0
ax.elev = 0
ax.set_axis_off()
plt.show()
def _plot_coordinate_axes(coordinates):
zeros = np.zeros(3)
x, y, z = zip(zeros, coordinates[0])
plt.plot(x, y, z, '-k', linewidth=3)
x, y, z = zip(zeros, coordinates[1])
plt.plot(x, y, z, '-k', linewidth=3)
x, y, z = zip(zeros, coordinates[2])
plt.plot(x, y, z, '-k', linewidth=3)
def _vmf_kernel(points, means, bandwidth):
# Evaluates a von Mises-Fisher mixture type kernel on given inputs.
num_points = points.shape[0]
result = np.zeros(num_points)
for cur_mean in means:
# Use of np.einsum for optimizing dot product computation
# performance. Based on approach presented in:
# https://stackoverflow.com/a/15622926/812127
result += \
np.exp(bandwidth * np.einsum(
'ij,ij->i',
np.repeat(np.expand_dims(cur_mean, 0), num_points, axis=0),
points))
return result | 3,863 | 31.745763 | 79 | py |
deep_bingham | deep_bingham-master/utils/utils.py | """ Utilities for learning pipeline."""
from __future__ import print_function
import copy
import dill
import hashlib
import itertools
import bingham_distribution as ms
import math
import numpy as np
import os
import scipy
import scipy.integrate as integrate
import scipy.special
import sys
import torch
from pathos.multiprocessing import ProcessingPool as Pool
def convert_euler_to_quaternion(roll, yaw, pitch):
"""Converts roll, yaw, pitch to a quaternion.
"""
# roll (z), yaw (y), pitch (x)
cy = math.cos(math.radians(roll) * 0.5)
sy = math.sin(math.radians(roll) * 0.5)
cp = math.cos(math.radians(yaw) * 0.5)
sp = math.sin(math.radians(yaw) * 0.5)
cr = math.cos(math.radians(pitch) * 0.5)
sr = math.sin(math.radians(pitch) * 0.5)
w = cy * cp * cr + sy * sp * sr
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
quat = np.array([w, x, y, z])
quat = quat / np.linalg.norm(quat)
return quat
def radians(degree_tensor):
"""
Method to convert a torch tensor of angles in degree format to radians.
Arguments:
degree_tensor (torch.Tensor): Tensor consisting of angles in degree format.
Returns:
radian_tensor (torch.Tensor): Tensor consisting of angles in radian format.
"""
radian_tensor = degree_tensor/180 * math.pi
return radian_tensor
def generate_coordinates(coords):
"""
    A function that returns all possible triples of coords.
    Parameters:
        coords: a torch tensor of coordinates
    Returns:
        x: the first coordinate of all possible triples
        y: the second coordinate of all possible triples
        z: the third coordinate of all possible triples
"""
x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()
y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))
z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))
return x, y, z
def ensure_dir_exists(path):
""" Checks if a directory exists and creates it otherwise. """
if not os.path.exists(path):
os.makedirs(path)
def load_lookup_table(path):
"""
Loads lookup table from dill serialized file.
Returns a table specific tuple. For the Bingham case, the tuple containins:
table_type (str):
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
coords (numpy.ndarray): Coordinates at which lookup table was evaluated.
For the von Mises case, it contains:
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
"""
assert os.path.exists(path), "Lookup table file not found."
with open(path, "rb") as dillfile:
return dill.load(dillfile)
def eaad_von_mises(kappas, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
kappas: Von Mises kappa parameters for roll, pitch, yaw.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2.0 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-2, "epsabs": 1e-2}
param_mu = np.array([0., 0., 0.]) # radians
quat_mu = convert_euler_to_quaternion(
math.degrees(param_mu[0]), math.degrees(param_mu[1]),
math.degrees(param_mu[2])
)
param_kappa = kappas
direct_norm_const = 8.0 * (np.pi ** 3) \
* scipy.special.iv(0, param_kappa[0]) \
* scipy.special.iv(0, param_kappa[1]) \
* scipy.special.iv(0, param_kappa[2])
def integrand_aad(phi1, phi2, phi3):
return np.exp(param_kappa[0] * np.cos(phi1)) \
* np.exp(param_kappa[1] * np.cos(phi2)) \
* np.exp(param_kappa[2] * np.cos(phi3)) \
* aad(quat_mu,
convert_euler_to_quaternion(
math.degrees(phi1), math.degrees(phi2),
math.degrees(phi3)
))
eaad_int = integrate.tplquad(
integrand_aad,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: 2. * np.pi, # phi2
lambda x, y: 0.0, lambda x, y: 2. * np.pi, # phi1
**integral_options
)
return eaad_int[0]/direct_norm_const
def eaad_bingham(bingham_z, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
bingham_z: Bingham dispersion parameter in the format expected by the
manstats BinghamDistribution class.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
# acos_val = np.arccos(np.dot(quat_a, quat_b))
# diff_ang = 2 * np.min([acos_val, np.pi - acos_val])
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-4, "epsabs": 1e-4}
bd = ms.BinghamDistribution(
np.eye(4), bingham_z,
{"norm_const_mode": "numerical",
"norm_const_options": integral_options}
)
def integrand_transformed(x):
# To avoid unnecessary divisions, this term does not contain the
# normalization constant. At the end, the result of the integration is
# divided by it.
return aad(x, bd.mode) \
* np.exp(np.dot(x, np.dot(np.diag(bingham_z), x)))
def integrand(phi1, phi2, phi3):
sp1 = np.sin(phi1)
sp2 = np.sin(phi2)
return integrand_transformed(np.array([
sp1 * sp2 * np.sin(phi3),
sp1 * sp2 * np.cos(phi3),
sp1 * np.cos(phi2),
np.cos(phi1)
])) * (sp1 ** 2.) * sp2
eaad_int = integrate.tplquad(
integrand,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: np.pi, # phi2
lambda x, y: 0.0, lambda x, y: np.pi, # phi1
**integral_options
)
return eaad_int[0] / bd.norm_const
def build_bd_lookup_table(table_type, options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
table_type: Type of lookup table used. May be 'uniform' or 'nonuniform'
        options: Dict containing type specific options.
If type is "uniform" this dict must contain:
"bounds" = Tuple (lower_bound, upper_bound) representing bounds.
"num_points" = Number of points per dimension.
If type is "nonuniform" this dict must contain a key "coords" which
is a numpy arrays representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
    hash_obj.update(table_type.encode("utf-8"))
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_type, serialized_options, res_table, coords) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
        hash_obj.update(serialized_type.encode("utf-8"))
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
elif table_type == "uniform":
# Number of points per axis.
(lbound, rbound) = options["bounds"]
num_points = options["num_points"]
assert num_points > 1, \
"Grid must have more than one point per dimension."
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = np.linspace(lbound, rbound, num_points)
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
elif table_type == "nonuniform":
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = options["coords"]
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
else:
sys.exit("Unknown lookup table type")
return res_table
def build_vm_lookup_table(options, path=None):
"""
    Builds a lookup table for interpolating the von Mises normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
options: Dict cotaining table options. It must contain a key "coords"
which is a numpy arrays representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_options, res_table) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
else:
coords = options["coords"]
res_table = _compute_vm_lookup_table(coords)
with open(path, "wb") as dillfile:
dill.dump((options, res_table), dillfile)
return res_table
def _compute_bd_lookup_table(coords, nc_options):
num_points = len(coords)
pool = Pool()
def nc_wrapper(idx):
pt_idx = point_indices[idx]
# Indexing pt_idx in the order 2,1,0 vs. 0,1,2 has no impact
# on the result as the Bingham normalization constant is agnostic to it.
# However, the numpy integration that is used to compute it, combines
# numerical 2d and 1d integration which is why the order matters for the
# actual computation time.
#
# TODO: Make pymanstats choose best order automatically.
norm_const = ms.BinghamDistribution.normalization_constant(
np.array(
[coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]),
"numerical", nc_options)
print("Computing NC for Z=[{}, {}, {}, 0.0]: {}".format(
coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]],
norm_const))
return norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
class AverageMeter(object):
"""Computes and stores the averages over a numbers or dicts of numbers.
For the dict, this class assumes that no new keys are added during
the computation.
"""
def __init__(self):
self.last_val = 0
self.avg = 0
self.count = 0
def update(self, val, n=1):
self.last_val = val
n = float(n)
if type(val) == dict:
if self.count == 0:
self.avg = copy.deepcopy(val)
else:
for key in val:
self.avg[key] *= self.count / (self.count + n)
self.avg[key] += val[key] * n / (self.count + n)
else:
self.avg *= self.count / (self.count + n)
self.avg += val * n / (self.count + n)
self.count += n
self.last_val = val
def _compute_vm_lookup_table(coords):
num_points = len(coords)
pool = Pool()
def nc_wrapper(idx):
cur_pt_idx = point_indices[idx]
log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[2]]))
print("Computing NC for kappas=[{}, {}, {}]: {}".format(
coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]],
log_norm_const))
return log_norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
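# The helpers below map an unconstrained 3-vector y to a valid Bingham
# dispersion vector Z: exponentiation makes the entries positive, the
# cumulative sum makes them increasing, and negation plus reversal yields
# non-positive values in ascending order, as expected by the Bingham loss.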
def vec_to_bingham_z_many(y):
z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0)
return z
def vec_to_bingham_z(y):
z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0)
if not all(z[0][:-1] <= z[0][1:]):
print(z)
return z
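# Minimal usage sketch (not part of the original module): mapping a raw network
# output to Bingham Z parameters and enumerating lookup-table coordinates.
if __name__ == "__main__":
    raw = torch.tensor([[0.1, -0.2, 0.3]])
    print("Z parameters:", vec_to_bingham_z_many(raw))
    grid = torch.tensor([-10.0, -5.0, 0.0])
    x, y, z = generate_coordinates(grid)
    print("Number of generated triples:", len(x))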
| 15,063 | 33.629885 | 83 | py |
deep_bingham | deep_bingham-master/utils/__init__.py | from .utils import *
| 21 | 10 | 20 | py |
deep_bingham | deep_bingham-master/utils/evaluation.py | import torch
from modules import maad
from utils import AverageMeter, eaad_bingham, eaad_von_mises
import numpy as np
def run_evaluation(model, dataset, loss_function, device, floating_point_type="float"):
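    """ Runs the model over a fixed fraction of the given dataset and prints
    average loss, log likelihood, MAAD and, for Bingham and von Mises losses,
    EAAD statistics.
    """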
model.eval()
losses = AverageMeter()
log_likelihoods = AverageMeter()
maads = AverageMeter()
averaged_stats = AverageMeter()
eaads = AverageMeter()
min_eaads = AverageMeter()
min_maads = AverageMeter()
val_load_iter = iter(dataset)
eval_fraction = 0.1
for i in range(int(len(dataset)*eval_fraction)):
        data = next(val_load_iter)
if floating_point_type == "double":
target_var = data["pose"].double().to(device)
input_var = data["image"].double().to(device)
else:
target_var = data["pose"].float().to(device)
input_var = data["image"].float().to(device)
if torch.sum(torch.isnan(target_var)) > 0:
continue
# compute output
output = model(input_var)
if loss_function.__class__.__name__ == "MSELoss":
# norm over the last dimension (ie. orientations)
norms = torch.norm(output, dim=-1, keepdim=True).to(device)
output = output / norms
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
loss, log_likelihood = loss_function(target_var, output, 49)
else:
loss, log_likelihood = loss_function(target_var, output)
if floating_point_type == "double":
loss = loss.double() / data["image"].shape[0]
else:
loss = loss.float() / data["image"].shape[0]
# measure accuracy and record loss
losses.update(loss.item(), data["image"].size(0))
log_likelihoods.update(log_likelihood.item(), data["image"].size(0))
if loss_function.__class__.__name__ == "VonMisesLoss":
angular_deviation = maad(loss_function, target_var, output, None)
maads.update(angular_deviation)
min_maads.update(angular_deviation)
eaad, min_eaad = kappas_to_eaad(output)
eaads.update(eaad, data["image"].size(0))
min_eaads.update(min_eaad, data["image"].size(0))
else:
stats = loss_function.statistics(target_var, output, 31)
averaged_stats.update(stats, data["image"].size(0))
maads.update(stats["maad"])
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
min_maads.update(stats["mmaad"])
else:
min_maads.update(stats["maad"])
if "Bingham" in loss_function.__class__.__name__:
eaad, min_eaad = bingham_z_to_eaad(
stats, loss_function
)
eaads.update(eaad, data["image"].size(0))
min_eaads.update(min_eaad, data["image"].size(0))
if "Bingham" or "VonMises" in loss_function.__class__.__name__:
print("Loss: {}, Log Likelihood: {}, MAAD: {}, Min MAAD: {}, EAAD: {}, Min EAAD: {}".format(
losses.avg, log_likelihoods.avg, maads.avg, min_maads.avg, eaads.avg, min_eaads.avg))
else:
print("Loss: {}, Log Likelhood: {}, MAAD: {}".format(losses.avg, log_likelihoods.avg, maads.avg))
def kappas_to_eaad(output):
kappas = torch.mean(output[:, :3], 0).detach().cpu().numpy()
eaad = eaad_von_mises(kappas)
return eaad, eaad
def bingham_z_to_eaad(stats, loss_function):
eaads = []
if loss_function.__class__.__name__ == "BinghamLoss":
z_0, z_1, z_2 = stats["z_0"], stats["z_1"], stats["z_2"]
bingham_z = np.array([z_0, z_1, z_2, 0])
eaad = eaad_bingham(bingham_z)
eaads.append(eaad)
elif loss_function.__class__.__name__ == "BinghamMixtureLoss":
for j in range(loss_function._num_components):
bingham_z = [stats["mode_" + str(j) + "_z_{}".format(i)] for i in range(3)]
bingham_z.append(0)
bingham_z = np.array(bingham_z)
eaad = eaad_bingham(bingham_z)
eaads.append(eaad)
return sum(eaads)/len(eaads), min(eaads)
| 4,120 | 40.21 | 105 | py |
deep_bingham | deep_bingham-master/data_loaders/t_less_dataset.py | from .utils import *
from torch.utils.data import Dataset, random_split, Subset
import yaml
import os
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from PIL import Image
import numpy as np
from skimage import io
import torch
import quaternion
import cv2
import h5py
torch.manual_seed(0)
def make_hdf5_file(config, image_transform):
dataset_path = config["dataset_path"]
if config["dirs"]:
dirs = config["dirs"]
tless_full = TLessSplit(
TLessFullDataset(dataset_path, dirs, image_transform), shuffle=config.get("shuffle", True))
train = tless_full.train
test = tless_full.test
else:
train_dirs = config["train_dirs"]
test_dirs = config["test_dirs"]
train = TLessFullDataset(dataset_path, train_dirs, image_transform)
test = TLessFullDataset(dataset_path, test_dirs, image_transform)
file_dataset = config["hdf5"]
img_shape = train[0]["image"].shape
label_shape = train[0]["pose"].shape[-1]
f = h5py.File("datasets/{}".format(file_dataset), "w")
train_img = f.create_dataset("train_img", (
len(train), img_shape[0], img_shape[1], img_shape[2]))
train_label = f.create_dataset("train_label", (len(train), label_shape))
test_img = f.create_dataset("test_img", (
len(test), img_shape[0], img_shape[1], img_shape[2]))
test_label = f.create_dataset("test_label", (len(test), label_shape))
print("Making HDF5")
for i in range(len(train)):
f["train_img"][i, :, :, :] = train[i]["image"]
f["train_label"][i, :] = train[i]["pose"]
for i in range(len(test)):
f["test_img"][i, :, :, :] = test[i]["image"]
f["test_label"][i, :] = test[i]["pose"]
class TLessTrainTest():
"""
Stores a training and test set for the TLess Dataset
Parameters:
        config_file: a yaml file or dictionary that contains data loading
            information, e.g. see configs/upna_train.yaml. The dataset_path
            stores the location of the original downloaded dataset. The
            preprocess_path is where the processed images and poses will be
            stored.
        image_transform: A list of composed pytorch transforms to be applied
            to a PIL image.
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
config = yaml.load(fp)
file_dataset = config["hdf5"]
if not os.path.isfile("datasets/{}".format(file_dataset)):
make_hdf5_file(config_file, image_transform)
f = h5py.File("datasets/{}".format(file_dataset), 'r')
biterion = config["biterion"]
blur = config["blur"]
self.train = TLessHDF5(f.get('train_img'), f.get('train_label'),
f.get("train_bb"),
biterion, blur)
self.test = TLessHDF5(f.get('test_img'), f.get('test_label'),
f.get("test_bb"), biterion, blur)
class TLessHDF5(Dataset):
"""
    Loads the T-LESS dataset from an HDF5 dataset, converts the poses to
    biternion or quaternion form and optionally blurs the images.
    biterion: format of the pose. If true, biternion (Euler angles in
        degrees); if false, quaternion.
    blur: if true, a box filter is applied to each image.
"""
def __init__(self, images, labels, bb, biterion, blur):
self.images = images
self.labels = labels
self.bb = bb
self.biterion = biterion
self.blur = blur
def __getitem__(self, idx):
image = self.images[idx, :, :, :]
pose = self.labels[idx, :]
if self.blur:
size = 10
kernel = np.ones((size, size), np.float32) / size ** 2
blurred_img = cv2.filter2D(image, -1, kernel)
image = blurred_img
if self.biterion:
convert_to_rad = quaternion_to_euler(pose[0], pose[1], pose[2],
pose[3])
sample = {'image': torch.from_numpy(image),
'pose': torch.Tensor([math.degrees(convert_to_rad[0]),
math.degrees(convert_to_rad[1]),
math.degrees(convert_to_rad[2])])}
else:
sample = {'image': torch.from_numpy(image),
'pose': torch.Tensor(pose)}
return sample
def __len__(self):
return self.images.shape[0]
class TLessSplit(object):
def __init__(self, dataset, shuffle=True):
train_size = int(len(dataset) * 0.75)
if shuffle:
self.train, self.test = random_split(dataset, [train_size, len(
dataset) - train_size])
else:
self.train = Subset(dataset, list(range(train_size)))
self.test = Subset(dataset, list(range(train_size, len(dataset))))
class TLessFullDataset(Dataset):
def __init__(self, path, dirs, image_transform):
self.subdatasets = []
self.size = [0]
self.image_transform = image_transform
for i in range(len(dirs)):
self.subdatasets.append(
TLessSingleDataset(path, dirs[i], self.image_transform))
self.size.append(len(self.subdatasets[i]) + self.size[-1])
def __getitem__(self, idx):
data_bin = 0
if not type(idx) == int:
idx = idx.item()
for i in range(1, len(self.size)):
if self.size[i] > idx >= self.size[i - 1]:
data_bin = i - 1
new_index = idx - self.size[data_bin]
return self.subdatasets[data_bin][new_index]
def __len__(self):
return self.size[-1]
class TLessSingleDataset(Dataset):
def __init__(self, path, direc, image_transform):
self.dir_to_gt = {}
self.full_path = os.path.join(path, direc)
with open(self.full_path + "/gt.yml") as fp:
self.dir_to_gt = yaml.load(fp, Loader=Loader)
self.size = len(self.dir_to_gt.keys())
self.image_transform = image_transform
def __getitem__(self, index):
name_img = str(index).zfill(4)
img_path = os.path.join(self.full_path, "rgb",
"{}.png".format(name_img))
bb = np.array(self.dir_to_gt[index][0]["obj_bb"])
image = Image.fromarray(
io.imread(img_path)[int(bb[1]): int(bb[1] + bb[3]),
int(bb[0]): int(bb[0] + bb[2]), :])
pose = np.array(self.dir_to_gt[index][0]["cam_R_m2c"]).reshape(3, 3)
if self.image_transform:
image = self.image_transform(image).numpy()
pose = rotation_matrix_to_quaternion(pose)
assert (sum(np.array(self.dir_to_gt[index][0]["obj_bb"])) != 0)
return {"image": image, "pose": torch.Tensor(pose)}
def __len__(self):
return self.size
def rotation_matrix_to_quaternion(rot_mat):
quat = quaternion.as_float_array(quaternion.from_rotation_matrix(rot_mat))
return quat
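# Minimal usage note (not part of the original module): the identity rotation
# maps to the unit quaternion, e.g.
#   rotation_matrix_to_quaternion(np.eye(3))  ->  array([1., 0., 0., 0.])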
| 7,101 | 34.688442 | 103 | py |
deep_bingham | deep_bingham-master/data_loaders/utils.py | import math
import numpy as np
import quaternion
def convert_euler_to_quaternion(roll, yaw, pitch):
"""Converts roll, yaw, pitch to a quaternion.
"""
cy = math.cos(math.radians(roll) * 0.5)
sy = math.sin(math.radians(roll) * 0.5)
cp = math.cos(math.radians(yaw) * 0.5)
sp = math.sin(math.radians(yaw) * 0.5)
cr = math.cos(math.radians(pitch) * 0.5)
sr = math.sin(math.radians(pitch) * 0.5)
w = cy * cp * cr + sy * sp * sr
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
quat = np.array([w, x, y, z])
quat = quat / np.linalg.norm(quat)
return quat
def get_relative_rotation(src_orientation, dest_orientation):
a = quaternion.quaternion(src_orientation[0], src_orientation[1],
src_orientation[2], src_orientation[3])
b = quaternion.quaternion(dest_orientation[0], dest_orientation[1],
dest_orientation[2], dest_orientation[3])
relative_rotation = quaternion.as_float_array(b * a.inverse())
return relative_rotation
def get_frame_index(name, frame):
for idx in range(len(frame)):
if frame.iloc[idx, 0] == name:
return idx
raise Exception(
"Could not find image {} in data frame, unsuccessful in finding frame index".format(
name))
def convert_euler_to_quaternion_idiap(yaw, pitch, roll):
yaw = math.radians(yaw)
roll = math.radians(roll)
pitch = math.radians(pitch)
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
w = cy * cp * cr + sy * sp * sr
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
quat = np.array([w, x, y, z])
quat = quat / np.linalg.norm(quat)
return quat
def quaternion_to_euler(w, x, y, z):
sinr_cosp = +2.0 * (w * x + y * z)
cosr_cosp = +1.0 - 2.0 * (x * x + y * y)
pitch = math.atan2(sinr_cosp, cosr_cosp)
sinp = +2.0 * (w * y - z * x)
sinp = +1.0 if sinp > +1.0 else sinp
sinp = -1.0 if sinp < -1.0 else sinp
yaw = math.asin(sinp)
siny_cosp = +2.0 * (w * z + x * y)
cosy_cosp = +1.0 - 2.0 * (y * y + z * z)
roll = math.atan2(siny_cosp, cosy_cosp)
return roll, yaw, pitch
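# Minimal usage sketch (not part of the original module): Euler angles given in
# degrees survive a round trip through the quaternion representation.
if __name__ == "__main__":
    quat = convert_euler_to_quaternion(10, 20, 30)
    roll, yaw, pitch = quaternion_to_euler(*quat)
    print("Recovered angles (deg):",
          math.degrees(roll), math.degrees(yaw), math.degrees(pitch))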
| 2,407 | 27 | 92 | py |
deep_bingham | deep_bingham-master/data_loaders/upna_preprocess.py | from __future__ import print_function, division
import os
import pandas as pd
import numpy as np
import yaml
# Ignore warnings
import warnings
import csv
warnings.filterwarnings("ignore")
import cv2
TRAIN_SET = set(
["User_01", "User_02", "User_03", "User_04", "User_05", "User_06"])
TEST_SET = set(["User_07", "User_08", "User_09", "User_10"])
class UpnaHeadPoseDataPreprocess:
def __init__(self, config_file):
if type(config_file) != dict:
with open(config_file) as fp:
config = yaml.load(fp)
else:
config = config_file
print(os.path.abspath(config["dataset_path"]))
assert os.path.isdir(os.path.abspath(config["dataset_path"]))
self.dataset_path = config["dataset_path"]
self.processed_path = config["preprocess_path"]
if not os.path.isdir(config["preprocess_path"]):
os.mkdir(config["preprocess_path"])
print("preprocessing dataset")
self._preprocess_data()
# Create data frames for training and validation sets. Maps image names
# to quaternions.
train_data_frame, test_data_frame = self._make_data_frame()
self.frame_train = pd.read_csv(train_data_frame)
self.frame_test = pd.read_csv(test_data_frame)
print("dataset preprocessing finished")
def _extract_frames(self, videofile, videofolder):
"""Extracts frames from video and stores them as jpg image."""
# Create folder for processed video if necessary.
if videofolder in TRAIN_SET:
videopath = self.processed_path + "/train/" + videofolder
else:
videopath = self.processed_path + "/test/" + videofolder
if not os.path.isdir(videopath):
os.makedirs(videopath)
file_prefix = videopath + "/" + os.path.splitext(videofile)[
0] + "_frame"
# Open and process video.
filepath = self.dataset_path + "/" + videofolder + "/" + videofile
video_capture = cv2.VideoCapture(filepath)
success, image = video_capture.read()
count = 1
while success:
# save frame as jpg file.
cv2.imwrite(file_prefix + ("%d.jpg" % (count)), image)
success, image = video_capture.read()
count += 1
def _transform_labels(self, labelfile, labelfolder):
""" Transforms labels from Euler angles into dual quaternions. """
# Create folder for processed labels if necessary.
if labelfolder in TRAIN_SET:
labeldir = self.processed_path + "/train/" + labelfolder
else:
labeldir = self.processed_path + "/test/" + labelfolder
if not os.path.isdir(labeldir):
os.makedirs(labeldir)
        # Open the label files and keep only the three orientation angles.
        # Downstream code converts these Euler angles to quaternions as needed.
file_path = self.dataset_path + "/" + labelfolder + "/" + labelfile
quaternion_list = []
with open(file_path, 'rt') as groundtruth_file:
csv_reader = csv.reader(groundtruth_file, delimiter='\t',
lineterminator='\r\n\r\n\t')
for row in csv_reader:
orientation_quat = np.array(
[float(row[3]), float(row[4]), float(row[5])])
quaternion_list.append(orientation_quat)
processed_file_path = labeldir + "/" + labelfile
with open(processed_file_path, "wt") as processed_file:
csv_writer = csv.writer(processed_file, delimiter="\t")
for row in quaternion_list:
row = np.round(row, 4).tolist()
csv_writer.writerow(row)
def _preprocess_data(self):
"""Transforms video frames into image frames."""
print("Transforming video frames into image frames")
# Iterate over subfolders.
iter_dirs = iter(os.walk(self.dataset_path))
next(iter_dirs)
# first = True
for cur_dir in iter_dirs:
cur_dir_basename = os.path.basename(os.path.normpath(cur_dir[0]))
for cur_file in cur_dir[2]:
if cur_file.endswith('groundtruth3D_zeroed.txt'): # and first:
# Transform orientation labels into quaternions.
print("Processing " + cur_file)
self._transform_labels(cur_file, cur_dir_basename)
elif cur_file.endswith('mp4'): # and first:
# Extract frames from videos.
self._extract_frames(cur_file, cur_dir_basename)
def _make_data_frame(self):
print("Creating a csv mapping image file names to quaternion poses")
train_csv_file = self.processed_path + "/train/input.csv"
test_csv_file = self.processed_path + "/test/input.csv"
iter_dirs = iter(os.walk(self.processed_path))
next(iter_dirs)
for cur_dir in iter_dirs:
cur_dir_basename = os.path.basename(os.path.normpath(cur_dir[0]))
for cur_file in cur_dir[2]:
if cur_file.endswith('groundtruth3D_zeroed.txt'):
if cur_dir_basename in TRAIN_SET:
self._add_images_poses_to_csv(cur_file,
cur_dir_basename,
train_csv_file)
else:
self._add_images_poses_to_csv(cur_file,
cur_dir_basename,
test_csv_file)
return train_csv_file, test_csv_file
def _add_images_poses_to_csv(self, cur_file, cur_dir_basename, csv_file):
image_name_list = []
if cur_dir_basename in TRAIN_SET:
quat_path = self.processed_path + "/train/" + cur_dir_basename \
+ "/" + cur_file
else:
quat_path = self.processed_path + "/test/" + cur_dir_basename \
+ "/" + cur_file
with open(quat_path, 'rt') as quaternion_file:
csv_reader = csv.reader(quaternion_file, delimiter='\t')
data = list(csv_reader)
row_count = len(data)
for i in range(1, row_count + 1):
words = os.path.splitext(cur_file)[0].split("_")
sub_name = "_".join(words[:4])
name = cur_dir_basename + "/" + sub_name + "_frame{}.jpg".format(i)
image_name_list.append(name)
with open(csv_file, "a") as fp:
field_names = ["image_name", "q0", "q1", "q2", "q3"]
csv_writer = csv.DictWriter(fp, fieldnames=field_names)
print("writing to csv file", image_name_list[0])
for i in range(row_count):
quat = data[i]
csv_writer.writerow(
{'image_name': image_name_list[i], "q0": quat[0],
"q1": quat[1], "q2": quat[2]})
| 7,036 | 40.639053 | 79 | py |
deep_bingham | deep_bingham-master/data_loaders/__init__.py | from .idiap_dataset import *
from .upna_dataset import *
from .t_less_dataset import *
| 87 | 21 | 29 | py |
deep_bingham | deep_bingham-master/data_loaders/upna_dataset.py | import os
import torch
from PIL import Image
from skimage import io
from torch.utils.data import Dataset
import h5py
from .upna_preprocess import *
from .utils import *
from bingham_distribution import BinghamDistribution
def make_hdf5_file(config, image_transform):
dataset_path = config["preprocess_path"]
csv_train = dataset_path + "/train/input.csv"
csv_test = dataset_path + "/test/input.csv"
biterion = config["biterion"]
if os.path.isfile(csv_train) and os.path.isfile(csv_test):
test_frame = pd.read_csv(csv_test)
train_frame = pd.read_csv(csv_train)
else:
preprocess = UpnaHeadPoseDataPreprocess(config)
test_frame = preprocess.frame_test
train_frame = preprocess.frame_train
train = UpnaHeadPoseSplitSet(dataset_path + "/train",
train_frame, image_transform)
test = UpnaHeadPoseSplitSet(dataset_path + "/test",
test_frame, image_transform)
img_shape = train[0]["image"].shape
label_shape = train[0]["pose"].shape[-1]
f = h5py.File(dataset_path + "/dataset.hdf5", "w")
f.create_dataset("train_img", (
len(train), img_shape[0], img_shape[1], img_shape[2]))
f.create_dataset("train_label", (len(train), label_shape))
f.create_dataset("test_img", (
len(test), img_shape[0], img_shape[1], img_shape[2]))
f.create_dataset("test_label", (len(test), label_shape))
for i, data in enumerate(train):
f["train_img"][i, :, :, :] = train[i]["image"]
f["train_label"][i, :] = train[i]["pose"]
print("train", i)
for i, data in enumerate(test):
f["test_img"][i, :, :, :] = test[i]["image"]
f["test_label"][i, :] = test[i]["pose"]
print("test", i)
class UpnaHeadPoseTrainTest():
"""
Stores a training and test set for the UPNA Head Pose Dataset
Parameters:
        config_file: a yaml file or dictionary that contains data loading
            information, e.g. see configs/upna_train.yaml. The dataset_path
            stores the location of the original downloaded dataset. The
            preprocess_path is where the processed images and poses will be
            stored.
        image_transform: A list of composed pytorch transforms to be applied
            to a PIL image.
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
config = yaml.load(fp)
if not os.path.isfile(config["preprocess_path"] + "/dataset.hdf5"):
make_hdf5_file(config_file, image_transform)
f = h5py.File(config["preprocess_path"] + "/dataset.hdf5", 'r')
noise = config["euler_noise"]
quat_noise = config["quat_noise"]
biterion = config["biterion"]
self.train = UpnaHDF5(f.get('train_img'), f.get('train_label'),
biterion, noise, quat_noise)
self.test = UpnaHDF5(f.get('test_img'), f.get('test_label'), biterion,
noise, quat_noise)
class UpnaHDF5(Dataset):
"""
    Loads the UPNA dataset from an HDF5 dataset, converts the poses to
    biternion or quaternion form and optionally adds noise to the labels.
    biterion: format of the pose. If true, biternion (Euler angles in
        degrees); if false, quaternion.
    euler_noise: the standard deviation of the Gaussian distribution that the
        Euler angle noise is sampled from.
    quat_noise: the Z parameters of a Bingham distribution that quaternion
        noise is sampled from.
"""
def __init__(self, images, labels, biterion, euler_noise, quat_noise):
self.images = images
self.labels = labels
self.biterion = biterion
if euler_noise:
s = np.random.normal(0, euler_noise, 3 * len(self.labels))
self.euler_noise = []
for i in range(len(self.labels)):
self.euler_noise.append([s[i * 3], s[i * 3 + 1], s[i * 3 + 2]])
else:
self.euler_noise = None
if quat_noise:
quat_noise = [float(quat_noise[0]), float(quat_noise[1]),
float(quat_noise[2]), 0]
bd = BinghamDistribution(np.identity(4), np.array(quat_noise))
samples = bd.random_samples(len(labels))
perm = [3, 0, 1, 2]
re_samples = samples[:, perm]
self.quat_noise = quaternion.as_quat_array(re_samples)
else:
self.quat_noise = []
def __getitem__(self, idx):
image = torch.from_numpy(self.images[idx, :, :, :]).float()
if self.euler_noise:
pose = np.array([self.labels[idx][0] + self.euler_noise[idx][0],
self.labels[idx][1] + self.euler_noise[idx][1],
self.labels[idx][2] + self.euler_noise[idx][2]])
else:
pose = self.labels[idx, :]
if len(self.quat_noise) != 0:
w, x, y, z = convert_euler_to_quaternion(pose[0], pose[1], pose[2])
quat_pose = quaternion.quaternion(w, x, y, z)
res = quaternion.as_float_array(quat_pose * self.quat_noise[idx])
roll, pitch, yaw = quaternion_to_euler(res[0], res[1], res[2],
res[3])
pose = np.array(
[math.degrees(roll), math.degrees(pitch), math.degrees(yaw)])
if self.biterion:
sample = {'image': image,
'pose': torch.from_numpy(pose)}
else:
sample = {'image': image,
'pose': convert_euler_to_quaternion(pose[0],
pose[1],
pose[2])}
return sample
def __len__(self):
return self.images.shape[0]
class UpnaHeadPoseSplitSet(Dataset):
def __init__(self, dataset_path, frame, image_transform):
"""
Stores a training or test set for the UPNA Head Pose Dataset
Parameters:
            dataset_path: the location where processed images and poses will be stored.
            frame: the csv frame that stores the poses
            image_transforms: A list of composed pytorch transforms to be applied to a PIL image
"""
self.frame = frame
self.image_transform = image_transform
self.dataset_path = dataset_path
def __len__(self):
return len(self.frame)
def __getitem__(self, idx):
name = self.frame.iloc[idx, 0]
frame_index = idx
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
head_pose = self.frame.iloc[frame_index, 1:4].as_matrix()
head_pose = head_pose.astype('float').reshape(-1, 3)[0]
if self.image_transform:
image = self.image_transform(image)
sample = {'image': image,
'pose': head_pose}
return sample
# TODO: GET RID OF THIS- REDUNDANT. except for the images field. need to incorporate that elsewhere...
class UpnaHeadPoseDataset(Dataset):
"""
Stores a test set for the UPNA Head Pose Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
information ex. See configs/upna_train.yaml. The dataset_path stores
            the location of the original downloaded dataset. The
preprocess_path is where the processed images and poses will be stored.
        image_transforms: (optional) A list of composed pytorch transforms to
be applied to a PIL image
images: (optional) Can provide a list of image names and a dataset will
be constructed with those images
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
config = yaml.load(fp)
self.dataset_path = config["preprocess_path"] + "/test"
self.csv_path = self.dataset_path + "/input.csv"
self.user = config["user"]
self.video = config["video"]
if os.path.isfile(self.csv_path):
self.frame = pd.read_csv(self.csv_path)
else:
self.frame = UpnaHeadPoseDataPreprocess(config_file).frame_test
self.image_transform = image_transform
self.images = self._generate_file_names()
def __len__(self):
return len(self.images)
def _generate_file_names(self):
"""
From user number and video number, generate a list of corresponding frames.
Parameters:
user_num: string user number ex. "07"
video_num: string video number ex. "03"
Returns:
names: a list of file names.
"""
names = []
for i in range(1, 300):
string_name = "User_{}/user_{}_video_{}_frame{}.jpg".format(
self.user, self.user, self.video, i)
names.append(string_name)
return names
def __getitem__(self, idx):
name = self.images[idx]
frame_index = get_frame_index(name, self.frame)
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
head_pose = self.frame.iloc[frame_index, 1:4].as_matrix()
head_pose = head_pose.astype('float').reshape(-1, 3)[0]
if self.image_transform:
image = self.image_transform(image)
sample = {'image': image,
'pose': torch.from_numpy(
convert_euler_to_quaternion(head_pose[0], head_pose[1],
head_pose[2]))}
return sample
| 9,853 | 36.9 | 102 | py |
deep_bingham | deep_bingham-master/data_loaders/idiap_dataset.py | """
Data loading methods from matlab file from:
https://github.com/lucasb-eyer/BiternionNet
"""
import os
import h5py
import yaml
import torch
from PIL import Image
from skimage import io
from torch.utils.data import Dataset
from .utils import *
from bingham_distribution import BinghamDistribution
class IDIAPTrainTest(object):
"""
Stores a training and test set for the IDIAP Head Pose Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
            information ex. See configs/upna_train.yaml. The dataset_path stores
            the location of the original downloaded dataset. The
preprocess_path is where the processed images and poses will be
stored.
        image_transforms: A list of composed pytorch transforms to be applied to a PIL image
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
config = yaml.load(fp)
self.dataset_path = config["dataset_path"]
mat_file = self.dataset_path + "/or_label_full.mat"
self.image_transform = image_transform
pose_file = h5py.File(mat_file)
train_table = load("train", pose_file)
test_table = load("test", pose_file)
euler_noise = config["euler_noise"]
biterion = config["biterion"]
quat_noise = config["quat_noise"]
self.train = IDIAPSplitSet(train_table, image_transform,
self.dataset_path + "/train", euler_noise,
quat_noise,
biterion)
self.test = IDIAPSplitSet(test_table, image_transform,
self.dataset_path + "/test", euler_noise,
quat_noise, biterion)
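    # Example usage of IDIAPTrainTest (a sketch; the path is hypothetical --
    # dataset_path must contain or_label_full.mat plus the train/ and test/
    # image folders used above):
    #
    #   cfg = {"dataset_path": "/data/IDIAPHeadPose",
    #          "euler_noise": 0, "quat_noise": None, "biterion": False}
    #   data = IDIAPTrainTest(cfg, image_transform=None)
    #   sample = data.train[0]   # {'image': ..., 'pose': quaternion as a tensor}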
def matlab_string(obj):
"""
Return a string parsed from a matlab file
"""
return ''.join(chr(c) for c in obj[:, 0])
def matlab_strings(mat, ref):
"""
Returns an array of strings parsed from matlab file
"""
return [matlab_string(mat[r]) for r in ref[:, 0]]
def matlab_array(mat, ref, dtype):
"""
Parses the relevant information (ref) with type
dtype from a matlab file (mat) and returns
a numpy array.
Parameter:
mat: matlab file containing pose information
ref: the column of the file of interest
dtype: data type of data
"""
N = len(ref)
arr = np.empty(N, dtype=dtype)
for i in range(N):
arr[i] = mat[ref[i, 0]][0, 0]
return arr
def load(traintest, mat_full):
"""
    Loads train or test data from a matlab file containing both train
and test data and returns the relevant information in numpy arrays
Parameters:
traintest: a string that denotes "train" or "test"
mat_full: the matlab file containing pose information
Returns:
pan: a numpy array containing pan angles from the dataset
tilt: a numpy array containing tilt angles from the dataset
roll: a numpy array containing roll angles from the dataset
names: a numpy array containing image names from the dataset
"""
container = mat_full['or_label_' + traintest]
pan = matlab_array(mat_full, container['pan'], np.float32)
tilt = matlab_array(mat_full, container['tilt'], np.float32)
roll = matlab_array(mat_full, container['roll'], np.float32)
names = matlab_strings(mat_full, container['name'])
return pan, tilt, roll, names
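# For example (mirroring how IDIAPTrainTest calls it above):
#   pan, tilt, roll, names = load("train", h5py.File(dataset_path + "/or_label_full.mat"))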
class IDIAPSplitSet(Dataset):
"""
Stores a training or test set for the UPNA Head Pose Dataset
Parameters:
        dataset_path: the location where processed images and poses will
be stored.
        image_transforms: A list of composed pytorch transforms to be
applied to a PIL image
euler_noise: the standard deviation of the Gaussian distribution
that we sample noise from
quat_noise: the Z of the bingham distribution that we sample noise
from
"""
def __init__(self, table, image_transform, dataset_path, euler_noise,
quat_noise, biterion):
self.image_transform = image_transform
self.pan, self.tilt, self.roll, self.names = table
self.dataset_path = dataset_path
if euler_noise:
s = np.random.normal(0, euler_noise, 3 * len(self.names))
self.euler_noise = []
for i in range(len(self.names)):
self.euler_noise.append([s[i * 3], s[i * 3 + 1], s[i * 3 + 2]])
else:
self.euler_noise = None
if quat_noise:
bd = BinghamDistribution(np.eye(4), np.array(quat_noise))
self.quat_noise = quaternion.as_quat_array(
bd.random_samples(len(self.pan)))
else:
self.quat_noise = []
self.biterion = biterion
def __len__(self):
return len(self.names)
def __getitem__(self, idx):
pan = self.pan[idx]
tilt = self.tilt[idx]
roll = self.roll[idx]
name = self.names[idx]
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
if self.image_transform:
image = self.image_transform(image)
if self.euler_noise:
pan = math.degrees(pan) + self.euler_noise[idx][0]
tilt = math.degrees(tilt) + self.euler_noise[idx][1]
roll = math.degrees(roll) + self.euler_noise[idx][2]
else:
pan = math.degrees(pan)
tilt = math.degrees(tilt)
roll = math.degrees(roll)
if len(self.quat_noise) != 0:
w, x, y, z = convert_euler_to_quaternion(pan, tilt, roll)
quat_pose = quaternion.quaternion(w, x, y, z)
res = quaternion.as_float_array(quat_pose * self.quat_noise[idx])
euler_res = quaternion_to_euler(res[0], res[1], res[2], res[3])
pan = math.degrees(euler_res[0])
tilt = math.degrees(euler_res[1])
roll = math.degrees(euler_res[2])
if self.biterion:
sample = {'image': image,
'pose': torch.Tensor([pan, tilt, roll])}
else:
sample = {'image': image,
'pose': torch.from_numpy(
convert_euler_to_quaternion(pan,
tilt,
roll))}
return sample
| 6,671 | 34.679144 | 95 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/main.py | import argparse
import time
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score
from dataloader import DatasetLoader, collate_fn
from primary_pred_module import primModel
from ancillary_pred_module import anclModel
from self_correction_module import selfcorrModel
parser = argparse.ArgumentParser()
#dataset
parser.add_argument('--dataset', type=str, default='salad', help='dataset', choices=['breakfast', 'salad'])
parser.add_argument('--feature_type', type=str, default='fisher', help='feature type; for salad, only the first three choices are available.', choices=['fisher', 'gt', 'I3D', 'fisher_label', 'I3D_label', 'fisher_label_cat', 'I3D_label_cat'])
parser.add_argument('--n_classes', type=int, default=19, help='action classes, corresponding to dataset', choices=[48, 19])
parser.add_argument('--observation', type=str, default='obs-0.3', help='portion of observed video', choices=['obs-0.2', 'obs-0.3'])
parser.add_argument('--prediction', type=float, default=0.1, help='portion of predicted video', choices=[0.1, 0.2, 0.3, 0.5])
parser.add_argument('--fps', type=int, default=30, help='fps of video, corresponding to dataset', choices=[15, 30])
#video preprocessing
parser.add_argument('--len_S_list', nargs='+', type=int, default=[5, 10, 15], help='S to be divided into how many clips')
parser.add_argument('--len_R', type=int, default=5, help='R to be divided into how many clips')
parser.add_argument('--startpoints_R', nargs='+', type=float, default=[5, 10, 15], help='startpoints of R (how many seconds before the current time point)')
#model hypermeters
parser.add_argument('--conv_dim_NLB', type=int, default=128, help='out_channel dimension of the convolution layer in NLB')
parser.add_argument('--linear_dim', type=int, default=1024, help='dimension of the linear layer in CB.')
parser.add_argument('--dropout_NLB', type=float, default=0.3, help='dropout rate of the dropout layer in NLB')
parser.add_argument('--dropout_CB', type=float, default=0.3, help='dropout rate of the dropout layer in CB')
parser.add_argument('--dropout_TAB', type=float, default=0.3, help='dropout rate of the dropout layer in TAB')
parser.add_argument('--hidden_dim_LSTM', type=int, default=512, help='hidden layer of LSTM (decoder of dense prediction)')
parser.add_argument('--max_len', type=int, default=25, help='maximum number of LSTM recurrence steps (should be long enough that no video has more clips to predict than this number; breakfast is 24, salad is 25)')
parser.add_argument('--light', type=bool, default=True, help='whether to use light version model (refer to block.py for details)')
#self correction module
parser.add_argument('--self_correction_method', type=str, default='auto', help='which method to use in self correction module', choices=['no', 'linear', 'auto'])
parser.add_argument('--alpha', nargs=2, type=float, default=[30, 0.5], help='start and end value of alpha in self correction module (start>end), only needed when self correction module method is "linear"')
#other
parser.add_argument('--model', type=str, default='/model', help='path to save model')
parser.add_argument('--batch', type=int, default=2, help='batch size (salad is 2, breakfast is 16)')
args = parser.parse_args()
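# Example invocation (a sketch; the feature/split files must already exist under
# <dataset>/features/ as loaded in data_preprocessing.py):
#   python main.py --dataset breakfast --feature_type fisher --n_classes 48 \
#                  --observation obs-0.2 --prediction 0.2 --fps 15 --batch 16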
datapath = args.dataset + '/features/' #change to your datapath
modelpath = args.dataset + args.model #change to your modelpath (path to save trained models)
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
if args.dataset == 'breakfast':
if args.feature_type == 'gt' or args.feature_type == 'fisher_label' or args.feature_type == 'I3D_label':
video_feat_dim = args.n_classes
elif args.feature_type == 'fisher':
video_feat_dim = 64
elif args.feature_type == 'I3D':
video_feat_dim = 400
elif args.feature_type == 'fisher_label_cat':
video_feat_dim = 64 + args.n_classes
elif args.feature_type == 'I3D_label_cat':
video_feat_dim = 400 + args.n_classes
else: #args.dataset == 'salad'
if args.feature_type == 'gt':
video_feat_dim = args.n_classes
elif args.feature_type == 'fisher':
video_feat_dim = 64
elif args.feature_type == 'I3D':
video_feat_dim = 2048
def mycrossentropy(prob, gt):
    # soft-label cross entropy: gt is a per-step probability distribution (e.g. the
    # self-corrected labels), unlike nn.CrossEntropyLoss which expects class indices
    loss = 0
prob = F.softmax(prob, 1)
for i in range(len(prob)):
loss -= torch.sum(gt[i]*torch.log(prob[i]))
return loss
def main():
alpha = args.alpha[0]
end = args.alpha[1]
full = 8 #how many data in full set
total = 40 #how many data in training set (including full set and weak set)
light = 'light' if args.light else 'heavy'
anci = anclModel(args, video_feat_dim).to(device)
prim = primModel(args, video_feat_dim).to(device)
loss_fn = nn.CrossEntropyLoss(reduction='sum')
loss_mse = nn.MSELoss(reduction='sum')
if not args.self_correction_method == 'auto':
#step1: train ancillary model using full set
anci.train()
optimizer1 = optim.Adam(anci.parameters(), lr=0.001, betas=(0.99, 0.9999))
scheduler1 = optim.lr_scheduler.MultiStepLR(optimizer1, milestones=[5, 15])
print('-------Start training ancillary model-------')
for e in range(20):
s = time.time()
fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle='True',
collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer1.zero_grad()
label, prob, curr_action, pred_action_durations, _ = anci(S, R, wl)
loss += loss_fn(curr_action, wl)
for i in range(prob.shape[0]):
loss += loss_mse(pred_action_durations[i][:len(dl[i])-1], dl[i][:-1])
if len(fl[i]) >0:
loss += loss_fn(prob[i][:len(fl[i])], fl[i])
loss.backward()
optimizer1.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
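                # dl[i][:-1] holds the segment durations (current action first, then the
                # future actions) as fractions of the video length, and dl[i][-1] is the
                # video length in frames (see DataClass), so dl[i][j] * dl[i][-1] below
                # recovers each segment's length in frames.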
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
scheduler1.step()
            print('step1 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/full, time.time()-s, acc))
#step2: train primary model using full set and weak set with ancillary model fixed
optimizer2 = optim.Adam(prim.parameters(), lr=0.001, betas=(0.99, 0.9999))
scheduler2 = optim.lr_scheduler.MultiStepLR(optimizer2, milestones=[3, 15])
anci.eval()
prim.train()
print('-------Start training primary model-------')
for e in range(25):
s = time.time()
fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle='True',
collate_fn=collate_fn)
weakset = DataLoader(dataset=DatasetLoader(args, datapath, 'weak'), batch_size=args.batch*2, shuffle='True',
collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer2.zero_grad()
label, prob, curr_action, pred_action_durations, _ = prim(S, R)
loss += loss_fn(curr_action, wl)
for i in range(prob.shape[0]):
loss += loss_mse(pred_action_durations[i][:len(dl[i])-1], dl[i][:-1])
if len(fl[i]) >0:
loss += loss_fn(prob[i][:len(fl[i])], fl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
for S, R, _, wl, _ in weakset:
loss = 0
optimizer2.zero_grad()
_, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
if args.self_correction_method == 'no':
sfl = prob_a
sfad = pred_action_durations_a
else:
corr = selfcorrModel(args, alpha)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
loss += loss_mse(pred_action_durations_p, sfad)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += mycrossentropy(prob_p[i], sfl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
scheduler2.step()
alpha = max(alpha*0.95, end)
            print('step2 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/total, time.time()-s, acc))
#step3: test
prim.eval()
with torch.no_grad():
            testset = DataLoader(dataset=DatasetLoader(args, datapath, 'test'), batch_size=args.batch, shuffle=False,
collate_fn=collate_fn)
total_acc = 0
n = 0
for S, R, fl, wl, dl in testset:
label, _, curr_action, pred_action_durations, _ = prim(S, R)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
print('frame-wise accuracy on test set is %.4f' % acc)
else:
#step1: train ancillary model using half full set
anci.train()
optimizer1 = optim.Adam(anci.parameters(), lr=0.001, betas=(0.99, 0.9999))
scheduler1 = optim.lr_scheduler.MultiStepLR(optimizer1, milestones=[5])
print('-------Start training ancillary model-------')
for e in range(15):
s = time.time()
fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full', half=True), batch_size=int(args.batch/2), shuffle='True',
collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer1.zero_grad()
label, prob, curr_action, pred_action_durations, _ = anci(S, R, wl)
loss += loss_fn(curr_action, wl)
for i in range(prob.shape[0]):
loss += loss_mse(pred_action_durations[i][:len(dl[i])-1], dl[i][:-1])
if len(fl[i]) > 0:
loss += loss_fn(prob[i][:len(fl[i])], fl[i])
loss.backward()
optimizer1.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
scheduler1.step()
            print('step1 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/int(full/2), time.time()-s, acc))
#step2: train primary model and self-correction model using full set with ancillary model fixed
corr = selfcorrModel(args, alpha).to(device)
params = [{'params':prim.parameters()}, {'params':corr.parameters()}]
optimizer2 = optim.Adam(params, lr=0.001, betas=(0.99, 0.9999))
scheduler2 = optim.lr_scheduler.MultiStepLR(optimizer2, milestones=[3])
anci.eval()
prim.train()
corr.train()
print('-------Start training primary model and self-correction model-------')
for e in range(20):
s = time.time()
fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle='True',
collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer2.zero_grad()
label, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += loss_mse(pred_action_durations_p[i][:len(dl[i])-1], dl[i][:-1])
loss += loss_mse(sfad[i][:len(dl[i])-1], dl[i][:-1])
if len(fl[i])>0:
loss += loss_fn(prob_p[i][:len(fl[i])], fl[i])
loss += loss_fn(sfl[i][:len(fl[i])], fl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations_p[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations_p[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
scheduler2.step()
            print('step2 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/full, time.time()-s, acc))
#step3: fine-tune primary model using full set and weak set and self-correction model using full set with ancillary model fixed
print('-------Start fine-tuning primary model and self-correction model-------')
for e in range(20):
s = time.time()
fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle='True',
collate_fn=collate_fn)
weakset = DataLoader(dataset=DatasetLoader(args, datapath, 'weak'), batch_size=args.batch*2, shuffle='True',
collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer2.zero_grad()
label, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += loss_mse(pred_action_durations_p[i][:len(dl[i])-1], dl[i][:-1])
loss += loss_mse(sfad[i][:len(dl[i])-1], dl[i][:-1])
if len(fl[i])>0:
loss += loss_fn(prob_p[i][:len(fl[i])], fl[i])
loss += loss_fn(sfl[i][:len(fl[i])], fl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations_p[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations_p[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
for S, R, _, wl, _ in weakset:
loss = 0
optimizer2.zero_grad()
_, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
loss += loss_mse(pred_action_durations_p, sfad)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += mycrossentropy(prob_p[i], sfl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
            print('step3 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/total, time.time()-s, acc))
#step4: test
prim.eval()
with torch.no_grad():
            testset = DataLoader(dataset=DatasetLoader(args, datapath, 'test'), batch_size=4, shuffle=False,
collate_fn=collate_fn)
total_acc = 0
n = 0
            for S, R, fl, wl, dl in testset:
label, _, curr_action, pred_action_durations, _ = prim(S, R)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
print('frame-wise accuracy on test set is %.4f' % acc)
torch.save(anci, os.path.join(modelpath, 'fullset_%d_%s_%s_pred-%f_%s_%s_anci' % (full, args.feature_type, args.observation, args.prediction, light, args.self_correction_method)))
torch.save(prim, os.path.join(modelpath, 'fullset_%d_%s_%s_pred-%f_%s_%s_prim' % (full, args.feature_type, args.observation, args.prediction, light, args.self_correction_method)))
if args.self_correction_method != 'no':
torch.save(corr, os.path.join(modelpath, 'fullset_%d_%s_%s_pred-%f_%s_%s_corr' % (full, args.feature_type, args.observation, args.prediction, light, args.self_correction_method)))
print('Done!')
if __name__ == "__main__":
main()
| 23,720 | 51.596452 | 228 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/data_preprocessing.py | import os.path
import pickle
import numpy as np
import torch
def read_mapping_dict(mapping_file):
file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]
actions_dict = dict()
for a in actions:
actions_dict[a.split()[1]] = int(a.split()[0])
return actions_dict
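# mapping.txt is assumed to hold one "<index> <action_name>" pair per line, e.g.
#   0 SIL
#   1 pour_milk
# (the action names here are only illustrative).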
def get_label_bounds(data_labels):
labels_uniq = []
labels_uniq_loc = []
for kki in range(len(data_labels)):
sequence_labels, sequence_durations = get_label_length_seq(data_labels[kki])
labels_uniq.append(sequence_labels)
labels_uniq_loc.append(sequence_durations)
return labels_uniq, labels_uniq_loc
def get_label_length_seq(content):
label_seq = []
length_seq = []
start = 0
length_seq.append(0)
for i in range(len(content)):
if content[i] != content[start]:
label_seq.append(content[start])
length_seq.append(i)
start = i
label_seq.append(content[start])
length_seq.append(len(content))
return label_seq, length_seq
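# For example (consistent with the loop above):
#   get_label_length_seq([5, 5, 7, 7, 7, 2]) returns ([5, 7, 2], [0, 2, 5, 6]),
# i.e. the unique labels in order plus each segment's start frame and the total length.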
class DataClass:
def __init__(self, args, path, mode='full', half=False):
self.path = path
self.GT_folder = os.path.join(self.path, 'groundTruth/')
self.mapping = os.path.join(self.path, 'mapping.txt')
self.full_split = os.path.join(self.path, 'split/full.split3.bundle')
self.weak_split = os.path.join(self.path, 'split/weak.split3.bundle')
self.test_split = os.path.join(self.path, 'split/test.split.bundle')
self.obs = float(args.observation[-3:]) #observation portion
self.pred = args.prediction #prediction portion
self.fps = args.fps #video's fps
self.curr_label = dict()
self.future_labels = dict()
self.future_durations = dict()
actions_dict = read_mapping_dict(self.mapping)
if args.feature_type == 'gt' or args.feature_type == 'fisher' or args.feature_type == 'I3D':
if mode == 'full':
self.data_feat, data_labels = self.load_data_features(args, self.full_split, actions_dict, half)
elif mode == 'weak':
self.data_feat, data_labels = self.load_data_features(args, self.weak_split, actions_dict, half)
else:
self.data_feat, data_labels = self.load_data_features(args, self.test_split, actions_dict, half)
elif args.feature_type == 'fisher_label' or args.feature_type == 'I3D_label':
if mode == 'full':
self.data_feat, data_labels = self.load_seg_outs(args, self.full_split, actions_dict, mode, half)
elif mode == 'weak':
self.data_feat, data_labels = self.load_seg_outs(args, self.weak_split, actions_dict, mode, half)
else:
self.data_feat, data_labels = self.load_seg_outs(args, self.test_split, actions_dict, mode, half)
else:
if mode == 'full':
self.data_feat, data_labels = self.load_seg_outs_concat(args, self.full_split, actions_dict, mode, half)
elif mode == 'weak':
self.data_feat, data_labels = self.load_seg_outs_concat(args, self.weak_split, actions_dict, mode, half)
else:
self.data_feat, data_labels = self.load_seg_outs_concat(args, self.test_split, actions_dict, mode, half)
labels_uniq, labels_uniq_loc = get_label_bounds(data_labels)
counter_index = 0
for kki in range(0, len(data_labels)):
mi_labels = data_labels[kki] #a video's frame-wise label
video_len = len(mi_labels)
sequence_labels = labels_uniq[kki]
sequence_durations = labels_uniq_loc[kki]
current_stop = int(len(mi_labels) * self.obs) #the last frame of the observation part
pred_stop = int(len(mi_labels) * (self.obs + self.pred)) #the last frame of the prediction part
stop_index = 0
for ioi in range(len(sequence_durations) - 1):
if sequence_durations[ioi] <= current_stop:
stop_index = ioi
#the order of the last action in the observation part
list_future_labels = []
list_future_durations = [min(pred_stop, sequence_durations[stop_index+1]) - current_stop] #current action duration (within the prediction part)
val_curr_label = sequence_labels[stop_index]
if stop_index + 1 != len(sequence_labels):
for izi in range(stop_index + 1, len(sequence_labels)):
if sequence_durations[izi] <= pred_stop:
list_future_durations.append(min(pred_stop - sequence_durations[izi], sequence_durations[izi+1] - sequence_durations[izi]))
list_future_labels.append(sequence_labels[izi])
self.curr_label[str(counter_index)] = torch.tensor(val_curr_label).long() #current action
self.future_labels[str(counter_index)] = torch.Tensor(list_future_labels).long() #future actions
self.future_durations[str(counter_index)] = torch.cat((torch.Tensor(list_future_durations)/video_len, torch.Tensor([video_len]))) #future actions durations
counter_index = counter_index + 1
def load_data_features(self, args, split_load, actions_dict, half=False):
file_ptr = open(split_load, 'r')
if half:
content_all = file_ptr.read().split('\n')[:-1]
content_all = content_all[:int(len(content_all)/2)]
else:
content_all = file_ptr.read().split('\n')[:-1]
if args.dataset == 'breakfast':
content_all = [x.strip('./data/groundTruth/') + 't' for x in content_all]
data_all = []
label_all = []
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
curr_data = []
if args.feature_type == 'fisher':
if args.dataset == 'breakfast':
loc_curr_data = self.path + 'fisher/' + os.path.splitext(content)[0] + '.txt'
curr_data = np.loadtxt(loc_curr_data, dtype='float32')
curr_data = curr_data[:, 1:65] #n*64 (n is the number of frame)
else: #args.dataset == 'salad'
loc_curr_data = self.path + 'fisher/' + os.path.splitext(content)[0] + '-New.txt'
curr_data = np.loadtxt(loc_curr_data, dtype='float32')
curr_data = curr_data[:, 1:65] #n*64
elif args.feature_type == 'I3D':
if args.dataset == 'breakfast':
loc_curr_data = self.path + 'I3D/' + os.path.splitext(content)[0]
curr_data = np.loadtxt(loc_curr_data, dtype='float32') #n*400
else: #args.dataset == 'salad'
loc_curr_data = self.path + 'I3D/' + os.path.splitext(content)[0] + '.npy'
curr_data = np.load(loc_curr_data).T #n*2048
else: #args.feature_type == 'gt'
for iik in range(len(curr_gt)):
ind_label = actions_dict[curr_gt[iik]]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data) #n*n_classes(one-hot)
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
return data_all, label_all
def load_seg_outs(self, args, split_load, actions_dict, mode, half=False):
file_ptr = open(split_load, 'r')
if half:
content_all = file_ptr.read().split('\n')[:-1]
content_all = content_all[:int(len(content_all)/2)]
else:
content_all = file_ptr.read().split('\n')[:-1]
content_all = [x.strip('./data/groundTruth/') + 't' for x in content_all]
data_all = []
label_all = []
if mode == 'full' or mode == 'weak':
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
curr_data = []
for iik in range(len(label_curr_video)):
ind_label = label_curr_video[iik]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
else:
if args.feature_type == 'fisher_label':
# current split for fisher vector based segmentation labels
segmentation_location = os.path.join(self.path, 'seg_fisher')
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
# read fisher based segmentation labels
file_ptr_fisher = open(segmentation_location + '/split1/' + args.observation + '/' + content, 'r')
fisher_seg_labels = file_ptr_fisher.read().split('\n')[:-1]
curr_data = []
for iik in range(len(fisher_seg_labels)):
ind_label = actions_dict[fisher_seg_labels[iik]]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
else:
counter = 0
# read segmentation labels based on i3d features
file_name = os.path.join(self.path, 'seg_I3D') + '/' + 'seg_ours_2_split1.pickle'
with open(file_name, 'rb') as handle:
segmentation_data = pickle.load(handle)
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
# read i3d based segmentation labels
i3d_seg_labels = segmentation_data[counter]
counter = counter + 1
curr_data = []
for iik in range(len(i3d_seg_labels)):
ind_label = i3d_seg_labels[iik]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
return data_all, label_all
def load_seg_outs_concat(self, args, split_load, actions_dict, mode, half=False):
file_ptr = open(split_load, 'r')
if half:
content_all = file_ptr.read().split('\n')[:-1]
content_all = content_all[:int(len(content_all)/2)]
else:
content_all = file_ptr.read().split('\n')[:-1]
content_all = [x.strip('./data/groundTruth/') + 't' for x in content_all]
data_all = []
label_all = []
if args.feature_type == 'fisher_label_cat':
# current split for fisher vector based segmentation labels
segmentation_location = os.path.join(self.path, 'seg_fisher')
for content in content_all:
#fisher feature
loc_curr_data = self.path+'fisher/' + os.path.splitext(content)[0] + '.txt'
curr_data = np.loadtxt(loc_curr_data, dtype='float32')
                curr_data = curr_data[:, 1:65] #n*64 (n is the number of frames)
#gt label
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
#one-hot feature
curr_data_feat = []
if mode == 'full' or mode == 'weak':
#gt one-hot label
for iik in range(len(curr_gt)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[actions_dict[curr_gt[iik]]] = 1.0
curr_data_feat.append(curr_data_vec)
else:
# read fisher based segmentation labels
file_ptr_fisher = open(segmentation_location + '/split1/' + args.observation + '/' + content, 'r')
fisher_seg_labels = file_ptr_fisher.read().split('\n')[:-1]
for iik in range(len(fisher_seg_labels)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[actions_dict[fisher_seg_labels[iik]]] = 1.0
curr_data_feat.append(curr_data_vec)
curr_data_feat = np.array(curr_data_feat) #n*n_classes
minlen = min(len(curr_data_feat), len(curr_data))
curr_data = np.concatenate((curr_data_feat[:minlen], curr_data[:minlen]), axis=1)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
else:
# read segmentation labels based on i3d features
file_name = os.path.join(self.path, 'seg_I3D') + '/' + 'seg_ours_2_split1.pickle'
with open(file_name, 'rb') as handle:
segmentation_data = pickle.load(handle)
counter = 0
for content in content_all:
#I3D feature
loc_curr_data = self.path + 'I3D/' + os.path.splitext(content)[0]
curr_data = np.loadtxt(loc_curr_data, dtype='float32') #n*400
#gt label
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
#one-hot label
curr_data_feat = []
if mode == 'full' or mode == 'weak':
#gt one-hot label
for iik in range(len(curr_gt)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[actions_dict[curr_gt[iik]]] = 1.0
curr_data_feat.append(curr_data_vec)
else:
# read i3d based segmentation labels
i3d_seg_labels = segmentation_data[counter]
counter = counter + 1
for iik in range(len(i3d_seg_labels)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[i3d_seg_labels[iik]] = 1.0
curr_data_feat.append(curr_data_vec)
curr_data_feat = np.array(curr_data_feat) #n*n_classes
minlen = min(len(curr_data_feat), len(curr_data))
curr_data = np.concatenate((curr_data_feat[:minlen], curr_data[:minlen]), axis=1)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
return data_all, label_all
| 17,121 | 48.060172 | 167 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/ancillary_pred_module.py | '''
input: a video and its weak label
output: predicted frame-wise action
Ancillary prediction model outputs a frame-wise action prediction given a video and its weak (current-action) label.
This model generates an initial prediction for the weak set, which will aid training the primary model.
'''
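# forward() returns, in order (shapes as annotated inline below):
#   pred_class_labels     - list of max_len tensors, each of shape [batch]
#   pred_class_probs      - [batch, max_len, n_classes]
#   curr_action           - [batch, n_classes]
#   pred_action_durations - [batch, max_len + 1]
#   attentions            - list of max_len tensors, each [batch, linear_dim]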
import torch
import torch.nn as nn
import torch.nn.functional as F
from blocks import TABlock
class anclModel(nn.Module):
def __init__(self, args, video_feat_dim):
super(anclModel, self).__init__()
self.n_classes = args.n_classes
self.hidden_size = args.hidden_dim_LSTM
self.num_TAB = len(args.startpoints_R)
self.linear_dim = args.linear_dim
self.max_len = args.max_len
self.fps = args.fps
self.device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
self.TABs = nn.ModuleList([TABlock(args, video_feat_dim) for _ in range(self.num_TAB)])
self.cls_layer = nn.ModuleList([nn.Sequential(nn.Linear(in_features=2*self.linear_dim, out_features=self.n_classes), nn.Softmax(dim=1)) for _ in range(self.num_TAB)])
self.cls_curr_duration = nn.Linear(in_features=self.num_TAB*self.linear_dim, out_features=1)
self.lstm_linear = nn.Linear(in_features=(2*self.num_TAB)*self.linear_dim + self.num_TAB*self.n_classes, out_features=self.linear_dim+1)
self.lstm = nn.LSTM(self.linear_dim+1, self.hidden_size, batch_first=True)
self.pred_class = nn.Linear(in_features=self.hidden_size, out_features=self.n_classes)
self.pred_duration = nn.Linear(in_features=self.hidden_size+self.linear_dim, out_features=1)
self.embed = nn.Embedding(self.n_classes, self.linear_dim)
self.attn = nn.Linear(in_features=self.hidden_size, out_features=self.linear_dim, bias=False)
def forward(self, S_list, R_list, weak_label):
Y = []
R_ppps = []
S_ppps = []
for i in range(len(R_list)):
S_ppp, R_ppp = self.TABs[i](S_list, R_list[i])
R_ppps.append(R_ppp)
S_ppps.append(S_ppp)
Y.append(self.cls_layer[i](torch.cat((S_ppp, R_ppp), 1)))
curr_action_duration = self.cls_curr_duration(torch.cat(R_ppps, 1)) #batch_size*1
pred_action_durations = [curr_action_duration]
lstm_input = torch.cat((self.embed(weak_label).view(-1, self.linear_dim).contiguous(), curr_action_duration), 1).unsqueeze(1) #batch_size*1*(linear_dim+1)
batch_size = lstm_input.size(0)
pred_class_labels = []
pred_class_probs = []
attentions = []
states = None
prev_hiddens = torch.zeros(batch_size, self.hidden_size).to(self.device)
for i in range(self.max_len):
hiddens, states = self.lstm(lstm_input, states)
hiddens = hiddens.squeeze(1) #batch_size*hidden_size
outputs = self.pred_class(hiddens.squeeze(1))
attention = F.softmax(torch.matmul(self.attn(hiddens).unsqueeze(1)/(self.linear_dim ** 0.5), torch.stack(S_ppps, 1).permute(0,2,1)), dim=-1) #batch_size*1*3
attention = torch.matmul(attention, torch.stack(S_ppps, 1)).view(batch_size, -1) #batch_size*linear_dim
duration = self.pred_duration(torch.cat((attention, prev_hiddens), 1)) #batch_size*1
attentions.append(attention)
predicted_class = outputs.max(1)[1] #batch_size
pred_class_prob = F.softmax(outputs, 1) #batch_size*n_classes
pred_class_labels.append(predicted_class)
pred_class_probs.append(pred_class_prob)
pred_action_durations.append(duration)
lstm_input = torch.cat((self.embed(predicted_class), duration), 1).unsqueeze(1) #batch_size*1*(linear_dim+1)
prev_hiddens = hiddens
curr_action = torch.sum(torch.stack(Y, 0), 0) #current action: batch*n_classes
pred_class_probs = torch.stack(pred_class_probs, 1) #batch*max_len*n_classes
pred_action_durations = torch.cat(pred_action_durations, 1) #batch_size*(max_len+1)
return pred_class_labels, pred_class_probs, curr_action, pred_action_durations, attentions
| 4,229 | 51.875 | 174 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/dataloader.py | import torch
import torch.utils.data as data
from data_preprocessing import DataClass
class DatasetLoader(data.Dataset):
def __init__(self, args, path, mode, half=False):
self.dataset = DataClass(args, path, mode, half)
self.obs = float(args.observation[-3:]) #observation portion
self.pred = args.prediction #prediction portion
self.fps = args.fps
self.len_R = args.len_R
self.startpoints_R = args.startpoints_R
self.len_S_list = args.len_S_list
self.args = args
self.mode = mode
self.features = self.dataset.data_feat #list, one element is the feature of one video (tensor)
self.curr_label = self.dataset.curr_label #dict, key is video index, value is its current action label
self.future_labels = self.dataset.future_labels #dict, key is video index, value is its future labels list, could be an empty list
self.future_durations = self.dataset.future_durations #dict, key is video index, value is its current and future action duration (within the prediction part)
def cut(self, feature, curr_label, future_labels, durations):
'''
feature : tensor (n*dim)
feature of a video, n is the number of frames, dim is the dimension of each frame.
curr_label: torch.longtensor, label of current action
future_labels : torch.longtensor, zero or several labels
Return S_list, R_list, groundtruth label for predict part and weak label
'''
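        # e.g. with len_S_list = [5, 10, 15] the observed frames are max-pooled into
        # 5/10/15 "spanning" snippets, and for every start point sp in startpoints_R
        # the last sp seconds are max-pooled into len_R "recent" snippets.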
if (self.args.feature_type == 'fisher_label' or self.args.feature_type == 'I3D_label' or self.args.feature_type == 'fisher_label_cat' or self.args.feature_type == 'I3D_label_cat') and self.mode == 'test':
obs = feature
else:
obs = feature[:int(len(feature) * self.obs), :] #first obs (0.2 or 0.3) portion of videos as observation part
full_label = future_labels #ground truth for prediction part
weak_label = curr_label #weak label: current action label
durations = durations
recent_snippets = [] #R_list
spanning_snippets = [] #S_list
for scale in self.len_S_list:
curr = []
a = len(obs)/scale
for i in range(scale):
curr.append(torch.max(obs[int(i*a):int((i+1)*a)], 0)[0].squeeze())
spanning_snippets.append(torch.stack(curr))
for sp in self.startpoints_R:
curr = []
recent = obs[int(max(0, len(obs)-sp*self.fps)):, :]
a = len(recent)/self.len_R
for i in range(self.len_R):
curr.append(torch.max(recent[int(i*a):int((i+1)*a)], 0)[0].squeeze())
recent_snippets.append(torch.stack(curr))
return (spanning_snippets, recent_snippets, full_label, weak_label, durations)
def __getitem__(self, index):
return self.cut(self.features[index], self.curr_label[str(index)], self.future_labels[str(index)], self.future_durations[str(index)]) #a tuple
def __len__(self):
return len(self.features)
def collate_fn(data):
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
S_list = [[] for _ in range(3)] #3 is the length of len_S_list
R_list = [[] for _ in range(3)] #3 is the length of startpoints_R
fl = []
wl = []
dl = []
for d in data:
curr_s = d[0] #List: len_S_i*feat_dim
curr_r = d[1] #List: len_R*feat_dim
for i in range(len(curr_s)):
S_list[i].append(curr_s[i])
for i in range(len(curr_r)):
R_list[i].append(curr_r[i])
fl.append(d[2].to(device)) #List: each element is a tensor of future action labels
wl.append(d[3])
dl.append(d[4].to(device)) #List: each element is a tensor of current and future action durations
S_list = [torch.stack(s).to(device) for s in S_list] #List: each element is batch*len_S_i*feat_dim
R_list = [torch.stack(r).to(device) for r in R_list] #List: each element is batch*len_R*feat_dim
wl = torch.stack(wl, 0).to(device) #batch
return S_list, R_list, fl, wl, dl | 4,177 | 48.152941 | 212 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/self_correction_module.py | '''
input: outputs from ancillary module and primary module of weak set
output: full label of weak set
Self-correction module refines predictions generated by the ancillary model and the current primary model for the weak set.
'''
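# With method == 'linear' the refined soft label at every step is
#   softmax(prim_pred ** (1 / (alpha + 1)) * ancl_pred ** (alpha / (alpha + 1)))
# (durations are combined analogously), while 'auto' learns the combination
# through the small linear layers defined below.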
import torch
import torch.nn as nn
import torch.nn.functional as F
class selfcorrModel(nn.Module):
def __init__(self, args, alpha):
super(selfcorrModel, self).__init__()
self.method = args.self_correction_method
self.alpha = alpha
self.comb1 = nn.Sequential(
nn.Linear(in_features=2,out_features=1),
nn.ReLU())
self.comb2 = nn.Sequential(
nn.Linear(in_features=2,out_features=1),
nn.ReLU())
def forward(self, prim_pred, ancl_pred, prim_duration, ancl_duration):
#prim/ancl_pred: batch_size*max_len*n_classes
#prim/ancl_duration: batch_size*(max_len+1)
if self.method == 'linear':
self_corr_label = []
for i in range(prim_pred.shape[1]):
self_corr_label.append(F.softmax(torch.pow(prim_pred[:, i, :], 1/(self.alpha+1))*torch.pow(ancl_pred[:, i, :], self.alpha/(self.alpha+1)), 1))
self_corr_duration = torch.pow(prim_duration, 1/(self.alpha+1))*torch.pow(ancl_duration, self.alpha/(self.alpha+1)) if torch.min(ancl_duration) > 1e-3 and torch.min(prim_duration) > 1e-3 else ancl_duration
#this is to avoid exploding gradient if one element is too small, however,after the first epoches, duration should be big enough (at least bigger than 1) so that it will not damage the result.
return torch.stack(self_corr_label, 1), self_corr_duration #batch_size*max_len*n_classes, batch_size*(max_len+1)
else: #auto
self_corr_label = []
for i in range(prim_pred.shape[1]):
self_corr_label.append(F.softmax(self.comb1(torch.stack((prim_pred[:, i, :], ancl_pred[:, i, :]), 1).permute(0, 2, 1)).squeeze(), 1))
self_corr_duration = self.comb2(torch.stack((prim_duration, ancl_duration), 2)).squeeze()
return torch.stack(self_corr_label, 1), self_corr_duration #batch_size*max_len*n_classes, batch_size*(max_len+1)
| 2,235 | 53.536585 | 217 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/primary_pred_module.py | '''
input: a video
output: predicted frame-wise action
Primary prediction model generates a frame-wise prediction of actions given a video.
This is the main model that is subject to the training and is used at test time.
'''
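# Unlike the ancillary model, no weak label is given here: the first LSTM input is
# built from the TAB outputs and the per-TAB class scores (see forward()).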
import torch.nn as nn
from blocks import TABlock
import torch
import torch.nn.functional as F
class primModel(nn.Module):
def __init__(self, args, video_feat_dim):
super(primModel, self).__init__()
self.n_classes = args.n_classes
self.hidden_size = args.hidden_dim_LSTM
self.num_TAB = len(args.startpoints_R)
self.linear_dim = args.linear_dim
self.max_len = args.max_len
self.device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
self.TABs = nn.ModuleList([TABlock(args, video_feat_dim) for _ in range(self.num_TAB)])
self.cls_layer = nn.ModuleList([nn.Sequential(nn.Linear(in_features=2*self.linear_dim,out_features=self.n_classes), nn.Softmax(dim=1)) for _ in range(self.num_TAB)])
self.cls_curr_duration = nn.Linear(in_features=self.num_TAB*self.linear_dim, out_features=1)
self.lstm_linear = nn.Linear(in_features=(2*self.num_TAB)*self.linear_dim + self.num_TAB*self.n_classes, out_features=self.linear_dim+1)
self.lstm = nn.LSTM(self.linear_dim+1, self.hidden_size, batch_first=True)
self.pred_class = nn.Linear(in_features=self.hidden_size, out_features=self.n_classes)
self.pred_duration = nn.Linear(in_features=self.hidden_size+self.linear_dim, out_features=1)
self.embed = nn.Embedding(self.n_classes, self.linear_dim)
self.attn = nn.Linear(in_features=self.hidden_size, out_features=self.linear_dim, bias=False)
def forward(self, S_list, R_list):
S_ppps = []
R_ppps = []
Y = []
for i in range(len(R_list)):
S_ppp, R_ppp = self.TABs[i](S_list, R_list[i])
S_ppps.append(S_ppp)
R_ppps.append(R_ppp)
Y.append(self.cls_layer[i](torch.cat((S_ppp, R_ppp), 1)))
lstm_input = torch.cat((Y+S_ppps+R_ppps), 1) #batch_size*(2*num_TAB*linear_dim+num_TAB*n_classes)
lstm_input = self.lstm_linear(lstm_input).unsqueeze(1) #batch_size*1*(linear_dim+1)
curr_action_duration = self.cls_curr_duration(torch.cat(R_ppps, 1)) #batch_size*1
pred_action_durations = [curr_action_duration]
batch_size = lstm_input.size(0)
pred_class_labels = []
pred_class_probs = []
attentions = []
states = None
prev_hiddens = torch.zeros(batch_size, self.hidden_size).to(self.device)
for i in range(self.max_len):
hiddens, states = self.lstm(lstm_input, states)
hiddens = hiddens.squeeze(1) #batch_size*hidden_size
outputs = self.pred_class(hiddens)
attention = F.softmax(torch.matmul(self.attn(hiddens).unsqueeze(1)/(self.linear_dim ** 0.5), torch.stack(S_ppps, 1).permute(0,2,1)), dim=-1) #batch_size*1*3
attention = torch.matmul(attention, torch.stack(S_ppps, 1)).view(batch_size, -1) #batch_size*linear_dim
attentions.append(attention)
duration = self.pred_duration(torch.cat((attention, prev_hiddens), 1)) #batch_size*1
predicted_class = outputs.max(1)[1] #batch_size
pred_class_prob = F.softmax(outputs, 1) #batch_size*n_classes
pred_class_labels.append(predicted_class)
pred_class_probs.append(pred_class_prob)
pred_action_durations.append(duration)
lstm_input = torch.cat((self.embed(predicted_class), duration), 1).unsqueeze(1) #batch_size*1*(linear_dim+1)
prev_hiddens = hiddens
curr_action = torch.sum(torch.stack(Y, 0), 0) #current action: batch_size*n_classes
pred_class_probs = torch.stack(pred_class_probs, 1) #batch_size*max_len*n_classes
pred_action_durations = torch.cat(pred_action_durations, 1) #batch_size*(max_len+1)
return pred_class_labels, pred_class_probs, curr_action, pred_action_durations, attentions | 4,186 | 51.3375 | 173 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/blocks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class NONLocalBlock(nn.Module):
#Non Local Block
def __init__(self, args, dim_1, dim_2, video_feat_dim):
super(NONLocalBlock, self).__init__()
self.dim_1 = dim_1
self.dim_2 = dim_2
self.video_feat_dim = video_feat_dim
self.latent_dim = args.conv_dim_NLB
self.dropout = args.dropout_NLB
        self.initnn = True  # initnn is used in theta, phi, and g.
        self.initnn2 = True  # initnn2 is used in the final linear layer.
self.theta = nn.Conv1d(in_channels=self.dim_2,
out_channels=self.latent_dim,
kernel_size=1, stride=1, padding=0)
if self.initnn:
nn.init.xavier_normal_(self.theta.weight)
nn.init.constant_(self.theta.bias, 0)
self.phi = nn.Conv1d(in_channels=self.dim_1,
out_channels=self.latent_dim,
kernel_size=1, stride=1, padding=0)
if self.initnn:
nn.init.xavier_normal_(self.phi.weight)
nn.init.constant_(self.phi.bias, 0)
self.g = nn.Conv1d(in_channels=self.dim_1,
out_channels=self.latent_dim,
kernel_size=1, stride=1, padding=0)
if self.initnn:
nn.init.xavier_normal_(self.g.weight)
nn.init.constant_(self.g.bias, 0)
self.final_layers = nn.Sequential(
nn.LayerNorm(torch.Size([self.latent_dim, self.video_feat_dim])),
nn.ReLU(),
nn.Conv1d(in_channels=self.latent_dim,
out_channels=self.dim_2,
kernel_size=1, stride=1, padding=0),
nn.Dropout(p=self.dropout),
)
if self.initnn2:
nn.init.xavier_normal_(self.final_layers[2].weight)
nn.init.constant_(self.final_layers[2].bias, 0)
def forward(self, input1, input2):
#input1: batch_size*dim_1*video_feat_dim
#input2: batch_size*dim_2*video_feat_dim
theta_x = self.theta(input2).permute(0, 2, 1) #batch_size*video_feat_dim*latent_dim
phi_x = self.phi(input1) #batch_size*latent_dim*video_feat_dim
theta_phi = torch.matmul(theta_x, phi_x) #batch_size*video_feat_dim*video_feat_dim
p_x = F.softmax(theta_phi, dim=-1) #batch_size*video_feat_dim*video_feat_dim
g_x = self.g(input1).permute(0, 2, 1) #batch_size*video_feat_dim*latent_dim
t_x = torch.matmul(p_x, g_x).permute(0, 2, 1).contiguous() #batch_size*latent_dim*video_feat_dim
W_t = self.final_layers(t_x) #batch_size*dim_2*video_feat_dim
z_x = W_t + input2 #batch_size*dim_2*video_feat_dim
return z_x
class CouplingBlock(nn.Module):
#Coupling Block
def __init__(self, args, dim_S, dim_R, video_feat_dim):
super(CouplingBlock, self).__init__()
self.dropout = args.dropout_CB
self.video_feat_dim = video_feat_dim
self.linear_dim = args.linear_dim
self.dim_S = dim_S
self.dim_R = dim_R
self.coupBlock1 = NONLocalBlock(args, self.dim_S, self.dim_S, video_feat_dim)
self.coupBlock2 = NONLocalBlock(args, self.dim_S, self.dim_R, video_feat_dim)
self.final_SR = nn.Sequential(
nn.Linear(in_features = (self.dim_S+2*self.dim_R)*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
self.final_RR = nn.Sequential(
nn.Linear(in_features = 2*self.dim_R*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
def forward(self, S, R):
#S: batch_size*dim_S*video_feat_dim
#R: batch_size*dim_R*video_feat_dim
batch_size = S.size(0)
S_p = F.relu(self.coupBlock1(S, S)) #batch_size*dim_S*video_feat_dim
R_p = F.relu(self.coupBlock2(S_p, R)) #batch_size*dim_R*video_feat_dim
R_pp = torch.cat((R_p, R), 1).view(batch_size, -1) #batch_size*(2*dim_R*video_feat_dim)
S_pp = torch.cat((S_p.view(batch_size, -1).contiguous(), R_pp), 1).view(batch_size, -1) #batch_size*(dim_S*video_feat_dim+2*dim_R*video_feat_dim)
S_pp = self.final_SR(S_pp) #batch_size*linear_dim
R_pp = self.final_RR(R_pp) #batch_size*linear_dim
return S_pp, R_pp
class CouplingBlock_light(nn.Module):
    #Coupling Block (light variant: a single non-local block instead of two)
def __init__(self, args, dim_S, dim_R, video_feat_dim):
super(CouplingBlock_light, self).__init__()
self.dropout = args.dropout_CB
self.video_feat_dim = video_feat_dim
self.linear_dim = args.linear_dim
self.dim_S = dim_S
self.dim_R = dim_R
self.coupBlock = NONLocalBlock(args, self.dim_S, self.dim_R, video_feat_dim)
self.final_SR = nn.Sequential(
nn.Linear(in_features = (self.dim_S+2*self.dim_R)*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
self.final_RR = nn.Sequential(
nn.Linear(in_features = 2*self.dim_R*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
def forward(self, S, R):
#S: batch_size*dim_S*video_feat_dim
#R: batch_size*dim_R*video_feat_dim
batch_size = S.size(0)
R_p = F.relu(self.coupBlock(S, R)) #batch_size*dim_R*video_feat_dim
R_pp = torch.cat((R_p, R), 1).view(batch_size, -1) #batch_size*(2*dim_R*video_feat_dim)
S_pp = torch.cat((S.view(batch_size, -1).contiguous(), R_pp), 1).view(batch_size, -1) #batch_size*(dim_S*video_feat_dim+2*dim_R*video_feat_dim)
S_pp = self.final_SR(S_pp) #batch_size*linear_dim
R_pp = self.final_RR(R_pp) #batch_size*linear_dim
return S_pp, R_pp
class TABlock(nn.Module):
#Temporal Aggregation Block
def __init__(self, args, video_feat_dim):
super(TABlock, self).__init__()
self.linear_dim = args.linear_dim
self.video_feat_dim = video_feat_dim
self.len_R = args.len_R
self.len_S_list = args.len_S_list
self.S_num = len(self.len_S_list)
self.dropout = args.dropout_TAB
self.light = args.light
if self.light:
self.CBs = nn.ModuleList([CouplingBlock_light(args, len_S, self.len_R, video_feat_dim) for len_S in self.len_S_list])
else:
self.CBs = nn.ModuleList([CouplingBlock(args, len_S, self.len_R, video_feat_dim) for len_S in self.len_S_list])
# self.final_RRR = nn.Sequential(
# nn.Linear(in_features = self.S_num*self.linear_dim, out_features = self.linear_dim),
# nn.ReLU(),
# nn.Dropout(self.dropout))
self.final_RRR = nn.Linear(in_features = self.S_num*self.linear_dim, out_features = self.linear_dim)
def forward(self, S_list, R):
S_pps = []
R_pps = []
for i in range(len(S_list)):
S_pp, R_pp = self.CBs[i](S_list[i], R)
S_pps.append(S_pp)
R_pps.append(R_pp)
R_ppp = torch.cat(R_pps, 1) #batch_size*(3*linear_dim)
R_ppp = self.final_RRR(R_ppp) #batch_size*linear_dim
S_ppp = torch.stack(S_pps, 0) #3*batch_size*linear_dim
S_ppp = torch.max(S_ppp, 0)[0].view(-1, self.linear_dim) #batch_size*linear_dim
return S_ppp, R_ppp
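# Minimal smoke test (not part of the original training code). The hyperparameter
# values below are made up for illustration; in the repo they come from the
# argument parser (args).
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(conv_dim_NLB=8, dropout_NLB=0.1, dropout_CB=0.1,
                     dropout_TAB=0.1, linear_dim=32, len_R=4,
                     len_S_list=[2, 3, 5], light=False)
    video_feat_dim, batch_size = 16, 2
    S_list = [torch.randn(batch_size, l, video_feat_dim) for l in args.len_S_list]
    R = torch.randn(batch_size, args.len_R, video_feat_dim)
    S_out, R_out = TABlock(args, video_feat_dim)(S_list, R)
    print(S_out.shape, R_out.shape)  # both are batch_size x linear_dim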
| 7,659 | 41.555556 | 153 | py |
fmm2d | fmm2d-main/src/modified-biharmonic/jinjaroot.yaml.py |
fname="jinjaroot.yaml"
file = open(fname,"w")
file.write("mbhevalRouts:\n")
file.write(" -\n")
file.write(" out: p\n")
file.write(" -\n")
file.write(" out: g\n")
file.write(" -\n")
file.write(" out: h\n\n")
file.write("mbhDirectRouts:\n")
outs=["p","g","h"]
for out in outs:
for i in range(16):
i1 = i % 2
i2 = (i // 2) % 2
i3 = (i // 4) % 2
i4 = (i // 8) % 2
ker = ""
if (i1 == 1): ker += "c"
if (i2 == 1): ker += "d"
if (i3 == 1): ker += "q"
if (i4 == 1): ker += "o"
if ker != "":
name = "mbh2d_direct"+ker+out+"_vec"
file.write(" -\n")
file.write(" name: " + name + "\n")
file.write(" ker: " + ker + "\n")
file.write(" out: " + out + "\n")
| 862 | 21.710526 | 72 | py |
fmm2d | fmm2d-main/python/setup.py | import setuptools
import string
import os
from numpy.distutils.core import setup
from numpy.distutils.core import Extension
from sys import platform
pkg_name = "fmm2dpy"
## TODO: this should be populated automatically by reading the source directory
## TODO: fix problem with relative location for executable
list_helm=['hfmm2dwrap.f','hfmm2dwrap_vec.f','helmkernels2d.f']
list_lap=['rfmm2dwrap.f','rfmm2dwrap_vec.f','rlapkernels2d.f','lfmm2dwrap.f','lfmm2dwrap_vec.f','lapkernels2d.f','cfmm2dwrap.f','cfmm2dwrap_vec.f','cauchykernels2d.f']
list_bh=['bhfmm2dwrap.f','bhkernels2d.f']
list_common=[]
FLIBS = os.getenv('FMM_FLIBS')
FLIBS = FLIBS.rstrip().split(' ')
FLIBS = list(filter(None, FLIBS))
FLIBS.append('../lib-static/libfmm2d.a')
FFLAGS = os.getenv('FMM_FFLAGS')
FFLAGS = FFLAGS.rstrip().split(' ')
FFLAGS = list(filter(None, FFLAGS))
c_opts = ['_c','_d','_cd']
c_opts2 = ['c','d','cd']
st_opts = ['_s','_t','_st']
p_optsh = ['_p','_g']
p_optsh2 = ['p','g']
p_optsl = ['_p','_g','_h']
p_optsl2 = ['p','g','h']
list_int_helm = []
list_int_helm_vec = []
list_int_helm_dir = []
list_int_lap = []
list_int_lap_vec = []
list_int_lap_dir = []
for st in st_opts:
for cd in c_opts:
for pg in p_optsh:
list_int_helm.append('hfmm2d'+st+cd+pg)
list_int_helm_vec.append('hfmm2d'+st+cd+pg+'_vec')
for pg in p_optsl:
list_int_lap.append('rfmm2d'+st+cd+pg)
list_int_lap.append('lfmm2d'+st+cd+pg)
list_int_lap.append('cfmm2d'+st+cd+pg)
list_int_lap_vec.append('rfmm2d'+st+cd+pg+'_vec')
list_int_lap_vec.append('lfmm2d'+st+cd+pg+'_vec')
list_int_lap_vec.append('cfmm2d'+st+cd+pg+'_vec')
list_int_bh = []
list_int_bh_dir = []
for cd in c_opts2:
for pg in p_optsh2:
list_int_helm_dir.append('h2d_direct'+cd+pg)
list_int_bh_dir.append('bh2d_direct'+cd+pg)
for pg in p_optsl2:
list_int_lap_dir.append('r2d_direct'+cd+pg)
list_int_lap_dir.append('l2d_direct'+cd+pg)
list_int_lap_dir.append('c2d_direct'+cd+pg)
list_int_bh.append('bhfmm2dwrap_guru')
ext_helm = Extension(
name='fmm2dpy.hfmm2d_fortran',
sources=['../src/helmholtz/'+item for item in list_helm]+['../src/common/'+item for item in list_common],
f2py_options=['only:']+list_int_helm+list_int_helm_vec+list_int_helm_dir+[':'],
extra_f90_compile_args=FFLAGS,
extra_f77_compile_args=FFLAGS,
extra_link_args=FLIBS
)
ext_lap = Extension(
name='fmm2dpy.lfmm2d_fortran',
sources=['../src/laplace/'+item for item in list_lap]+['../src/common/'+item for item in list_common],
f2py_options=['only:']+list_int_lap+list_int_lap_vec+list_int_lap_dir+[':'],
extra_f90_compile_args=FFLAGS,
extra_f77_compile_args=FFLAGS,
extra_link_args=FLIBS
)
ext_bh = Extension(
name='fmm2dpy.bhfmm2d_fortran',
sources=['../src/biharmonic/'+item for item in list_bh]+['../src/common/'+item for item in list_common],
f2py_options=['only:']+list_int_bh+list_int_bh_dir+[':'],
extra_f90_compile_args=FFLAGS,
extra_f77_compile_args=FFLAGS,
extra_link_args=FLIBS
)
## TODO: fill in the info below
setup(
name=pkg_name,
python_requires='>=3.0.0',
version="0.0.5",
author="Travis Askham, Zydrunas Gimbutas, Leslie Greengard, Libin Lu, Michael O'Neil, Manas Rachh, and Vladimir Rokhlin",
author_email="[email protected]",
description="This pacakge contains basic routines for Laplace and Helmholtz fast multipole methods in two dimensions",
long_description=open('../README.md').read(),
long_description_content_type='text/markdown',
url="",
packages=['fmm2dpy'],
install_requires=[
"numpy",
"pytest"
],
ext_modules=[ext_helm,ext_lap,ext_bh],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
)
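# Note (not in the original file): the environment variables FMM_FLIBS and
# FMM_FFLAGS must be set before running this script (they are assumed to be
# exported by the top-level makefile); otherwise the os.getenv calls above
# return None and the subsequent .rstrip() calls fail.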
| 3,971 | 31.826446 | 167 | py |
fmm2d | fmm2d-main/python/cfmmexample.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
#
# This is a sample code to demonstrate how to use
# the fmm libraries
#
# sample with one density, sources to sources,
# charge interactions, and potential only
#
n = 2000000
nd = 1
sources = np.random.uniform(0,1,(2,n))
eps = 10**(-5)
charges = np.random.uniform(0,1,n) + 1j*np.random.uniform(0,1,n)
out = fmm.cfmm2d(eps=eps,sources=sources,charges=charges,pg=1)
# sample with a vector of densities, sources to
# sources and targets, dipole interactions,
# potentials and gradients
nd = 3
nt = 1870
targ = np.random.uniform(0,1,(2,nt))
dipstr = np.random.uniform(0,1,(nd,n)) + 1j*np.random.uniform(0,1,(nd,n))
out2 = fmm.cfmm2d(eps=eps,sources=sources,dipstr=dipstr,\
targets=targ,nd=nd,pg=2,pgt=2)
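# Extra consistency check (not part of the original example): each of the nd
# densities is handled independently, so a single-density cfmm2d call with the
# first dipole density should reproduce the first row of the vectorized result
# up to the requested tolerance eps.
out_single = fmm.cfmm2d(eps=eps,sources=sources,dipstr=dipstr[0],\
        targets=targ,pg=2,pgt=2)
print("max deviation from vectorized run:",\
        np.max(np.abs(out_single.pottarg-out2.pottarg[0])))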
| 776 | 21.852941 | 69 | py |
fmm2d | fmm2d-main/python/example1.py | import fmm2d as fmm
import numpy as np
n = 2000
sources = np.random.uniform(0,1,(2,n))
m = 1000
targets = np.random.uniform(0,1,(2,m))
zk = 1.1+ 1j*0
charges = np.random.uniform(0,1,n) + 1j*np.random.uniform(0,1,n)
eps = 10**(-5)
pottarg,ier = fmm.hfmm2d_t_c_p(eps,zk,sources,charges,targets)
print(pottarg[1:3])
| 320 | 15.894737 | 64 | py |
fmm2d | fmm2d-main/python/rfmmexample.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
#
# This is a sample code to demonstrate how to use
# the fmm libraries
#
# sample with one density, sources to sources,
# charge interactions, and potential only
#
n = 2000000
nd = 1
sources = np.random.uniform(0,1,(2,n))
eps = 10**(-5)
charges = np.random.uniform(0,1,n)
out = fmm.rfmm2d(eps=eps,sources=sources,charges=charges,pg=1)
# sample with a vector of densities, sources to
# sources and targets, dipole interactions,
# potentials and gradients
nd = 3
nt = 1870
targ = np.random.uniform(0,1,(2,nt))
dipstr = np.random.uniform(0,1,(nd,n))
dipvecs = np.random.uniform(0,1,(nd,2,n))
out2 = fmm.rfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvecs,\
targets=targ,nd=nd,pg=2,pgt=2)
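# Extra accuracy check (not part of the original example), assuming the direct
# evaluation routine r2ddir is exposed at the package level like rfmm2d:
# evaluate the potential directly at a few targets and compare with the FMM.
ntest = 10
out_dir = fmm.r2ddir(sources=sources,dipstr=dipstr,dipvec=dipvecs,\
        targets=targ[:,0:ntest],nd=nd,pgt=1)
rel_err = np.max(np.abs(out2.pottarg[:,0:ntest]-out_dir.pottarg))/np.max(np.abs(out_dir.pottarg))
print("relative error against direct evaluation:",rel_err)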
| 774 | 21.142857 | 72 | py |
fmm2d | fmm2d-main/python/lfmmexample.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
#
# This is a sample code to demonstrate how to use
# the fmm libraries
#
# sample with one density, sources to sources,
# charge interactions, and potential only
#
n = 2000000
nd = 1
sources = np.random.uniform(0,1,(2,n))
eps = 10**(-5)
charges = np.random.uniform(0,1,n) + 1j*np.random.uniform(0,1,n)
out = fmm.lfmm2d(eps=eps,sources=sources,charges=charges,pg=1)
# sample with a vector of densities, sources to
# sources and targets, dipole interactions,
# potentials and gradients
nd = 3
nt = 1870
targ = np.random.uniform(0,1,(2,nt))
dipstr = np.random.uniform(0,1,(nd,n)) + 1j*np.random.uniform(0,1,(nd,n))
dipvecs = np.random.uniform(0,1,(nd,2,n))
out2 = fmm.lfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvecs,\
targets=targ,nd=nd,pg=2,pgt=2)
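# Extra consistency check (not part of the original example): each of the nd
# densities is handled independently, so a single-density lfmm2d call with the
# first dipole density should reproduce the first row of the vectorized result
# up to the requested tolerance eps.
out_single = fmm.lfmm2d(eps=eps,sources=sources,dipstr=dipstr[0],dipvec=dipvecs[0],\
        targets=targ,pg=2,pgt=2)
print("max deviation from vectorized run:",\
        np.max(np.abs(out_single.pottarg-out2.pottarg[0])))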
| 834 | 22.857143 | 72 | py |
fmm2d | fmm2d-main/python/fmm2dpy/fmm2d.py | from . import hfmm2d_fortran as hfmm
from . import lfmm2d_fortran as lfmm
from . import bhfmm2d_fortran as bhfmm
import numpy as np
import numpy.linalg as la
class Output():
pot = None
grad = None
hess = None
pottarg = None
gradtarg = None
hesstarg = None
ier = 0
def hfmm2d(*,eps,zk,sources,charges=None,dipstr=None,dipvec=None,
targets=None,pg=0,pgt=0,nd=1):
r"""
    This subroutine computes the N-body Helmholtz interactions
    in two dimensions where the interaction kernel is given by the
    Hankel function H_{0}^{(1)}(kr) and its gradients.
    .. math::
        u(x) = \sum_{j=1}^{N} c_{j} H_{0}^{(1)}(k \|x-x_{j}\|) - d_{j} v_{j}\cdot \nabla \left( H_{0}^{(1)}(k \|x-x_{j}\|) \right) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are the dipole densities,
$v_{j}$ are the dipole orientation vectors, and
$x_{j}$ are the source locations.
When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
eps (float): precision requested
zk (complex): Helmholtz parameter
sources (float(2,n)): source locations ($x_{j}$)
charges (complex(nd,n) or complex(n)): charge densities ($c_{j}$)
dipstr (complex(nd,n) or complex(n)): dipole densities ($d_{j}$)
dipvec (float(nd,2,n) or float(2,n)): dipole orientation vectors ($v_{j}$)
targets (float(2,nt)): target locations (x)
        pg (integer): source eval flag. Potential at sources evaluated if pg = 1. Potential and gradient at sources evaluated if pg=2. Potential, gradient and hessian evaluated at sources if pg=3
        pgt (integer): target eval flag. Potential at targets evaluated if pgt = 1. Potential and gradient at targets evaluated if pgt=2. Potential, gradient and hessian evaluated at targets if pgt=3
nd (integer): number of densities
Returns:
Returns an object of type Output (out) with the following variables
out.pot: potential at source locations if requested
out.grad: gradient at source locations if requested
out.hess: hessian at source locations if requested
out.pottarg: potential at target locations if requested
out.gradtarg: gradient at target locations if requested
out.hesstarg: hessian at target locations if requested
Example:
see hfmmexample.py
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
iftarg = 0
if(pg == 0 and pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipvec is not None or dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2 and dipvec.shape[1] == ns, "dipole vectors must be of shape [2,number of sources]"
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2, "dipole vectors must be of shape [2,number of sources]"
if nd>1:
assert dipvec.shape[0] == nd and dipvec.shape[1] == 2 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,2,ns] where nd is number of densities, and ns is number of sources"
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
if(targets is not None):
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
iftarg = 1
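    #
    # sources -> sources routines
    #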
    if(iftarg == 0 or (pgt != 1 and pgt != 2)):
if(pg == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.ier = hfmm.hfmm2d_s_c_p_vec(eps,zk,sources,charges,nd)
if(nd == 1):
out.pot,out.ier = hfmm.hfmm2d_s_c_p(eps,zk,sources,charges)
if(pg == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.ier = hfmm.hfmm2d_s_c_g_vec(eps,zk,sources,charges,nd)
if(nd == 1):
out.pot,out.grad,out.ier = hfmm.hfmm2d_s_c_g(eps,zk,sources,charges)
if(pg == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = hfmm.hfmm2d_s_d_p_vec(eps,zk,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.ier = hfmm.hfmm2d_s_d_p(eps,zk,sources,dipstr,dipvec)
if(pg == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = hfmm.hfmm2d_s_d_g_vec(eps,zk,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.ier = hfmm.hfmm2d_s_d_g(eps,zk,sources,dipstr,dipvec)
if(pg == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = hfmm.hfmm2d_s_cd_p_vec(eps,zk,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.ier = hfmm.hfmm2d_s_cd_p(eps,zk,sources,charges,dipstr,dipvec)
if(pg == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = hfmm.hfmm2d_s_cd_g_vec(eps,zk,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.ier = hfmm.hfmm2d_s_cd_g(eps,zk,sources,charges,dipstr,dipvec)
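    #
    # sources -> targets routines
    #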
if(pg !=1 and pg !=2 and targets is not None):
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.ier = hfmm.hfmm2d_t_c_p_vec(eps,zk,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.ier = hfmm.hfmm2d_t_c_p(eps,zk,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_t_c_g_vec(eps,zk,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_t_c_g(eps,zk,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = hfmm.hfmm2d_t_d_p_vec(eps,zk,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.ier = hfmm.hfmm2d_t_d_p(eps,zk,sources,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_t_d_g_vec(eps,zk,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_t_d_g(eps,zk,sources,dipstr,dipvec,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = hfmm.hfmm2d_t_cd_p_vec(eps,zk,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.ier = hfmm.hfmm2d_t_cd_p(eps,zk,sources,charges,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_t_cd_g_vec(eps,zk,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_t_cd_g(eps,zk,sources,charges,dipstr,dipvec,targets)
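    #
    # sources to sources + targets
    #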
if((pg == 1 or pg == 2) and targets is not None):
        assert pg == pgt, "if output is requested at both sources and targets, then pg must equal pgt"
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.pottarg,out.ier = hfmm.hfmm2d_st_c_p_vec(eps,zk,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = hfmm.hfmm2d_st_c_p(eps,zk,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_st_c_g_vec(eps,zk,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_st_c_g(eps,zk,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = hfmm.hfmm2d_st_d_p_vec(eps,zk,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = hfmm.hfmm2d_st_d_p(eps,zk,sources,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_st_d_g_vec(eps,zk,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_st_d_g(eps,zk,sources,dipstr,dipvec,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = hfmm.hfmm2d_st_cd_p_vec(eps,zk,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = hfmm.hfmm2d_st_cd_p(eps,zk,sources,charges,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_st_cd_g_vec(eps,zk,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm2d_st_cd_g(eps,zk,sources,charges,dipstr,dipvec,targets)
return out
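# Minimal usage sketch for hfmm2d (hypothetical sizes; see hfmmexample.py for a
# complete script):
#   sources = np.random.uniform(0,1,(2,1000))
#   charges = np.random.uniform(0,1,1000) + 1j*np.random.uniform(0,1,1000)
#   out = hfmm2d(eps=1e-6,zk=1.1+0j,sources=sources,charges=charges,pg=1)
#   # out.pot holds one complex potential value per source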
def rfmm2d(*,eps,sources,charges=None,dipstr=None,dipvec=None,
targets=None,pg=0,pgt=0,nd=1):
r"""
This subroutine computes the N-body Laplace interactions
in two dimensions where the interaction kernel is given by log(r)
and its gradients.
.. math::
        u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) + d_{j} v_{j} \cdot \nabla( \log(\|x-x_{j}\|) ) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are the dipole strengths,
$v_{j}$ are the dipole orientation vectors, and
$x_{j}$ are the source locations.
When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
eps: float
precision requested
sources: float(2,n)
source locations (x_{j})
charges: float(nd,n) or float(n)
charge densities (c_{j})
dipstr: float(nd,n) or float(n)
dipole densities (d_{j})
dipvec: float(nd,2,n) or float(2,n)
dipole orientation vectors (v_{j})
targets: float(2,nt)
target locations (x)
pg: integer
source eval flag
potential at sources evaluated if pg = 1
            potential and gradient at sources evaluated if pg=2
potential, gradient and hessian at sources evaluated if pg=3
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
            potential and gradient at targets evaluated if pgt=2
potential, gradient and hessian at targets evaluated if pgt=3
nd: integer
number of densities
Returns:
out.pot: potential at source locations if requested
out.grad: gradient at source locations if requested
out.hess: hessian at source locations if requested
out.pottarg: potential at target locations if requested
out.gradtarg: gradient at target locations if requested
out.hesstarg: hessian at target locations if requested
Example:
see rfmmexample.py
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
iftarg = 0
if(pg == 0 and pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipvec is not None or dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2 and dipvec.shape[1] == ns, "dipole vectors must be of shape [2,number of sources]"
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2, "dipole vectors must be of shape [2,number of sources]"
if nd>1:
assert dipvec.shape[0] == nd and dipvec.shape[1] == 2 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,2,ns] where nd is number of densities, and ns is number of sources"
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
if(targets is not None):
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
iftarg = 1
#
# sources -> sources routines
#
    if(iftarg == 0 or (pgt != 1 and pgt != 2 and pgt != 3)):
if(pg == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.ier = lfmm.rfmm2d_s_c_p_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.ier = lfmm.rfmm2d_s_c_p(eps,sources,charges)
if(pg == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.rfmm2d_s_c_g_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.rfmm2d_s_c_g(eps,sources,charges)
if(pg == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.rfmm2d_s_c_h_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.rfmm2d_s_c_h(eps,sources,charges)
if(pg == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = lfmm.rfmm2d_s_d_p_vec(eps,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.ier = lfmm.rfmm2d_s_d_p(eps,sources,dipstr,dipvec)
if(pg == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.rfmm2d_s_d_g_vec(eps,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.rfmm2d_s_d_g(eps,sources,dipstr,dipvec)
if(pg == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.rfmm2d_s_d_h_vec(eps,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.rfmm2d_s_d_h(eps,sources,dipstr,dipvec)
if(pg == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = lfmm.rfmm2d_s_cd_p_vec(eps,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.ier = lfmm.rfmm2d_s_cd_p(eps,sources,charges,dipstr,dipvec)
if(pg == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.rfmm2d_s_cd_g_vec(eps,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.rfmm2d_s_cd_g(eps,sources,charges,dipstr,dipvec)
if(pg == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.rfmm2d_s_cd_h_vec(eps,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.rfmm2d_s_cd_h(eps,sources,charges,dipstr,dipvec)
#
# sources -> targets routines
#
if(pg !=1 and pg !=2 and pg !=3 and targets is not None):
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.ier = lfmm.rfmm2d_t_c_p_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.rfmm2d_t_c_p(eps,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_t_c_g_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_t_c_g(eps,sources,charges,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_t_c_h_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_t_c_h(eps,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = lfmm.rfmm2d_t_d_p_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.rfmm2d_t_d_p(eps,sources,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_t_d_g_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_t_d_g(eps,sources,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_t_d_h_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_t_d_h(eps,sources,dipstr,dipvec,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = lfmm.rfmm2d_t_cd_p_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.rfmm2d_t_cd_p(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_t_cd_g_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_t_cd_g(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_t_cd_h_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_t_cd_h(eps,sources,charges,dipstr,dipvec,targets)
#
# sources to sources + targets
#
if((pg == 1 or pg == 2 or pg == 3) and targets is not None):
        assert pg == pgt, "if output is requested at both sources and targets, then pg must equal pgt"
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.rfmm2d_st_c_p_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.rfmm2d_st_c_p(eps,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_st_c_g_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_st_c_g(eps,sources,charges,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_st_c_h_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_st_c_h(eps,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.rfmm2d_st_d_p_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.rfmm2d_st_d_p(eps,sources,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_st_d_g_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_st_d_g(eps,sources,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_st_d_h_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_st_d_h(eps,sources,dipstr,dipvec,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.rfmm2d_st_cd_p_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.rfmm2d_st_cd_p(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_st_cd_g_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.rfmm2d_st_cd_g(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_st_cd_h_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.rfmm2d_st_cd_h(eps,sources,charges,dipstr,dipvec,targets)
return out
def lfmm2d(*,eps,sources,charges=None,dipstr=None,dipvec=None,
targets=None,pg=0,pgt=0,nd=1):
r"""
This subroutine computes the N-body Laplace interactions
in two dimensions where the interaction kernel is given by log(r)
and its gradients.
.. math::
        u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) + d_{j} v_{j} \cdot \nabla( \log(\|x-x_{j}\|) ) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are the dipole strengths,
$v_{j}$ are the dipole orientation vectors, and
$x_{j}$ are the source locations.
When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
eps: float
precision requested
sources: float(2,n)
source locations (x_{j})
charges: complex(nd,n) or complex(n)
charge densities (c_{j})
dipstr: complex(nd,n) or complex(n)
dipole densities (d_{j})
dipvec: float(nd,2,n) or float(2,n)
dipole orientation vectors (v_{j})
targets: float(2,nt)
target locations (x)
pg: integer
source eval flag
potential at sources evaluated if pg = 1
            potential and gradient at sources evaluated if pg=2
potential, gradient and hessian at sources evaluated if pg=3
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
            potential and gradient at targets evaluated if pgt=2
potential, gradient and hessian at targets evaluated if pgt=3
nd: integer
number of densities
Returns:
out.pot: potential at source locations if requested
out.grad: gradient at source locations if requested
out.hess: hessian at source locations if requested
out.pottarg: potential at target locations if requested
out.gradtarg: gradient at target locations if requested
out.hesstarg: hessian at target locations if requested
Example:
see lfmmexample.py
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
iftarg = 0
if(pg == 0 and pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipvec is not None or dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2 and dipvec.shape[1] == ns, "dipole vectors must be of shape [2,number of sources]"
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2, "dipole vectors must be of shape [2,number of sources]"
if nd>1:
assert dipvec.shape[0] == nd and dipvec.shape[1] == 2 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,2,ns] where nd is number of densities, and ns is number of sources"
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
if(targets is not None):
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
iftarg = 1
#
# sources -> sources routines
#
    if(iftarg == 0 or (pgt != 1 and pgt != 2 and pgt != 3)):
if(pg == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.ier = lfmm.lfmm2d_s_c_p_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.ier = lfmm.lfmm2d_s_c_p(eps,sources,charges)
if(pg == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.lfmm2d_s_c_g_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.lfmm2d_s_c_g(eps,sources,charges)
if(pg == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.lfmm2d_s_c_h_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.lfmm2d_s_c_h(eps,sources,charges)
if(pg == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = lfmm.lfmm2d_s_d_p_vec(eps,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.ier = lfmm.lfmm2d_s_d_p(eps,sources,dipstr,dipvec)
if(pg == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.lfmm2d_s_d_g_vec(eps,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.lfmm2d_s_d_g(eps,sources,dipstr,dipvec)
if(pg == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.lfmm2d_s_d_h_vec(eps,sources,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.lfmm2d_s_d_h(eps,sources,dipstr,dipvec)
if(pg == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = lfmm.lfmm2d_s_cd_p_vec(eps,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.ier = lfmm.lfmm2d_s_cd_p(eps,sources,charges,dipstr,dipvec)
if(pg == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.lfmm2d_s_cd_g_vec(eps,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.lfmm2d_s_cd_g(eps,sources,charges,dipstr,dipvec)
if(pg == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.lfmm2d_s_cd_h_vec(eps,sources,charges,dipstr,dipvec,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.lfmm2d_s_cd_h(eps,sources,charges,dipstr,dipvec)
#
# sources -> targets routines
#
if(pg !=1 and pg !=2 and pg !=3 and targets is not None):
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.ier = lfmm.lfmm2d_t_c_p_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.lfmm2d_t_c_p(eps,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_t_c_g_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_t_c_g(eps,sources,charges,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_t_c_h_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_t_c_h(eps,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = lfmm.lfmm2d_t_d_p_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.lfmm2d_t_d_p(eps,sources,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_t_d_g_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_t_d_g(eps,sources,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_t_d_h_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_t_d_h(eps,sources,dipstr,dipvec,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = lfmm.lfmm2d_t_cd_p_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.lfmm2d_t_cd_p(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_t_cd_g_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_t_cd_g(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_t_cd_h_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_t_cd_h(eps,sources,charges,dipstr,dipvec,targets)
#
# sources to sources + targets
#
if((pg == 1 or pg == 2 or pg == 3) and targets is not None):
        assert pg == pgt, "if output is requested at both sources and targets, then pg must equal pgt"
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.lfmm2d_st_c_p_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.lfmm2d_st_c_p(eps,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_st_c_g_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_st_c_g(eps,sources,charges,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_st_c_h_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_st_c_h(eps,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.lfmm2d_st_d_p_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.lfmm2d_st_d_p(eps,sources,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_st_d_g_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_st_d_g(eps,sources,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_st_d_h_vec(eps,sources,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_st_d_h(eps,sources,dipstr,dipvec,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.lfmm2d_st_cd_p_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.lfmm2d_st_cd_p(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_st_cd_g_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm2d_st_cd_g(eps,sources,charges,dipstr,dipvec,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_st_cd_h_vec(eps,sources,charges,dipstr,dipvec,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm2d_st_cd_h(eps,sources,charges,dipstr,dipvec,targets)
return out
def cfmm2d(*,eps,sources,charges=None,dipstr=None,
targets=None,pg=0,pgt=0,nd=1):
r"""
    This subroutine computes the N-body Laplace interactions with the Cauchy
    kernel in two dimensions, where the interaction kernel is given by log(r),
    the Cauchy kernel 1/(x-x_{j}), and their gradients.
    .. math::
        u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) + d_{j}/(x-x_{j}) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are the dipole strengths,
and $x_{j}$ are the source locations.
When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
eps: float
precision requested
sources: float(2,n)
source locations (x_{j})
charges: complex(nd,n) or complex(n)
charge densities (c_{j})
dipstr: complex(nd,n) or complex(n)
dipole densities (d_{j})
targets: float(2,nt)
target locations (x)
pg: integer
source eval flag
potential at sources evaluated if pg = 1
            potential and gradient at sources evaluated if pg=2
potential, gradient and hessian at sources evaluated if pg=3
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
            potential and gradient at targets evaluated if pgt=2
potential, gradient and hessian at targets evaluated if pgt=3
nd: integer
number of densities
Returns:
out.pot: potential at source locations if requested
out.grad: gradient at source locations if requested
out.hess: hessian at source locations if requested
out.pottarg: potential at target locations if requested
out.gradtarg: gradient at target locations if requested
out.hesstarg: hessian at target locations if requested
Example:
see cfmmexample.py
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
iftarg = 0
if(pg == 0 and pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
charges = charges.reshape(1,ns)
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
if nd>1:
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
if(targets is not None):
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
iftarg = 1
#
# sources -> sources routines
#
    if(iftarg == 0 or (pgt != 1 and pgt != 2 and pgt != 3)):
if(pg == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.ier = lfmm.cfmm2d_s_c_p_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.ier = lfmm.cfmm2d_s_c_p(eps,sources,charges)
if(pg == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.cfmm2d_s_c_g_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.cfmm2d_s_c_g(eps,sources,charges)
if(pg == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.cfmm2d_s_c_h_vec(eps,sources,charges,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.cfmm2d_s_c_h(eps,sources,charges)
if(pg == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = lfmm.cfmm2d_s_d_p_vec(eps,sources,dipstr,nd)
if(nd == 1):
out.pot,out.ier = lfmm.cfmm2d_s_d_p(eps,sources,dipstr)
if(pg == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.cfmm2d_s_d_g_vec(eps,sources,dipstr,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.cfmm2d_s_d_g(eps,sources,dipstr)
if(pg == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.cfmm2d_s_d_h_vec(eps,sources,dipstr,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.cfmm2d_s_d_h(eps,sources,dipstr)
if(pg == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.ier = lfmm.cfmm2d_s_cd_p_vec(eps,sources,charges,dipstr,nd)
if(nd == 1):
out.pot,out.ier = lfmm.cfmm2d_s_cd_p(eps,sources,charges,dipstr)
if(pg == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.ier = lfmm.cfmm2d_s_cd_g_vec(eps,sources,charges,dipstr,nd)
if(nd == 1):
out.pot,out.grad,out.ier = lfmm.cfmm2d_s_cd_g(eps,sources,charges,dipstr)
if(pg == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.ier = lfmm.cfmm2d_s_cd_h_vec(eps,sources,charges,dipstr,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.ier = lfmm.cfmm2d_s_cd_h(eps,sources,charges,dipstr)
#
# sources -> targets routines
#
if(pg !=1 and pg !=2 and pg !=3 and targets is not None):
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.ier = lfmm.cfmm2d_t_c_p_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.cfmm2d_t_c_p(eps,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_t_c_g_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_t_c_g(eps,sources,charges,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_t_c_h_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_t_c_h(eps,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = lfmm.cfmm2d_t_d_p_vec(eps,sources,dipstr,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.cfmm2d_t_d_p(eps,sources,dipstr,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_t_d_g_vec(eps,sources,dipstr,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_t_d_g(eps,sources,dipstr,targets)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_t_d_h_vec(eps,sources,dipstr,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_t_d_h(eps,sources,dipstr,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.ier = lfmm.cfmm2d_t_cd_p_vec(eps,sources,charges,dipstr,targets,nd)
if(nd == 1):
out.pottarg,out.ier = lfmm.cfmm2d_t_cd_p(eps,sources,charges,dipstr,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_t_cd_g_vec(eps,sources,charges,dipstr,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_t_cd_g(eps,sources,charges,dipstr,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_t_cd_h_vec(eps,sources,charges,dipstr,targets,nd)
if(nd == 1):
out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_t_cd_h(eps,sources,charges,dipstr,targets)
#
# sources to sources + targets
#
if((pg == 1 or pg == 2 or pg == 3) and targets is not None):
        assert pg == pgt, "if output is requested at both sources and targets, then pg must equal pgt"
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.cfmm2d_st_c_p_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.cfmm2d_st_c_p(eps,sources,charges,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_st_c_g_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_st_c_g(eps,sources,charges,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_st_c_h_vec(eps,sources,charges,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_st_c_h(eps,sources,charges,targets)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.cfmm2d_st_d_p_vec(eps,sources,dipstr,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.cfmm2d_st_d_p(eps,sources,dipstr,targets)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_st_d_g_vec(eps,sources,dipstr,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_st_d_g(eps,sources,dipstr,targets)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_st_d_h_vec(eps,sources,dipstr,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_st_d_h(eps,sources,dipstr,targets)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.pottarg,out.ier = lfmm.cfmm2d_st_cd_p_vec(eps,sources,charges,dipstr,targets,nd)
if(nd == 1):
out.pot,out.pottarg,out.ier = lfmm.cfmm2d_st_cd_p(eps,sources,charges,dipstr,targets)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_st_cd_g_vec(eps,sources,charges,dipstr,targets,nd)
if(nd == 1):
out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.cfmm2d_st_cd_g(eps,sources,charges,dipstr,targets)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
if(nd > 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_st_cd_h_vec(eps,sources,charges,dipstr,targets,nd)
if(nd == 1):
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.cfmm2d_st_cd_h(eps,sources,charges,dipstr,targets)
return out
def bhfmm2d(*,eps,sources,charges=None,dipoles=None,
targets=None,pg=0,pgt=0,nd=1):
r"""
This subroutine computes the N-body biharmonic interactions
in two dimensions where the interaction kernel is related to the
    biharmonic Green's function r^2 log(r) and its derivatives
.. math::
        u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) +
        \overline{c}_{j} (x-x_{j})/(\overline{x-x_{j}}) + d_{j,1}/(x-x_{j}) +
        d_{j,2}/(\overline{x-x_{j}}) -
        \overline{d_{j,1}} (x-x_{j})/(\overline{x-x_{j}})^2 \, ,
where $c_{j}$ are the charge densities, $d_{j,1}$, $d_{j,2}$ are the dipole strengths,
and $x_{j}$ are the source locations.
When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
eps: float
precision requested
sources: float(2,n)
source locations (x_{j})
charges: complex(nd,n) or complex(n)
charge densities (c_{j})
dipoles: complex(nd,2,n) or complex(2,n)
dipole densities (d_{j,1}, d_{j,2})
targets: float(2,nt)
target locations (x)
pg: integer
source eval flag
potential at sources evaluated if pg = 1
            potential and gradient at sources evaluated if pg=2
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
            potential and gradient at targets evaluated if pgt=2
nd: integer
number of densities
Returns:
out.pot: potential at source locations if requested
out.grad: gradient at source locations if requested
out.pottarg: potential at target locations if requested
out.gradtarg: gradient at target locations if requested
Example:
see bhfmmexample.py
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
iftarg = 0
if(pg == 0 and pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
charges = charges.reshape(1,ns)
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
charges = charges.reshape([nd,ns])
else:
charges = np.zeros([nd,ns],dtype='complex')
if(dipoles is not None):
if nd == 1 and ns>1:
            assert dipoles.shape[0] == 2 and dipoles.shape[1] == ns, "Dipole must be of shape [2, number of sources]"
if nd == 1 and ns==1:
            assert dipoles.shape[0] == 2, "Dipole must be of shape [2, number of sources]"
if nd>1:
            assert dipoles.shape[0] == nd and dipoles.shape[1] == 2 and dipoles.shape[2]==ns, "Dipole must be of shape [nd,2, number of sources]"
dipoles = dipoles.reshape([nd,2,ns])
ifdipole = 1
else:
dipoles = np.zeros([nd,2,ns],dtype='complex')
if(targets is not None):
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
iftarg = 1
nt = np.shape(targets)[1]
else:
targets = np.zeros([2,0],dtype='double')
nt = 0
iper = 0
out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = bhfmm.bhfmm2dwrap_guru(eps,sources,ifcharge,charges,ifdipole,dipoles,iper,pg,targets,pgt)
out.hess = None
out.hesstarg = None
if(nd == 1):
if(pgt>0):
out.pottarg = out.pottarg.reshape(nt,)
if(pgt==2):
out.gradtarg = out.gradtarg.reshape(2,nt)
if(pg>0):
out.pot = out.pot.reshape(ns,)
if(pg==2):
out.grad = out.grad.reshape(2,ns)
if(pg<2):
out.grad = None
if(pgt<2):
out.gradtarg = None
return out
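# Minimal usage sketch for bhfmm2d (hypothetical sizes; see bhfmmexample.py for
# a complete script):
#   sources = np.random.uniform(0,1,(2,1000))
#   charges = np.random.uniform(0,1,1000) + 1j*np.random.uniform(0,1,1000)
#   dipoles = np.random.uniform(0,1,(2,1000)) + 1j*np.random.uniform(0,1,(2,1000))
#   out = bhfmm2d(eps=1e-6,sources=sources,charges=charges,dipoles=dipoles,pg=2)
#   # out.pot and out.grad hold the potential and gradient at the sources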
def h2ddir(*,zk,sources,targets,charges=None,dipstr=None,dipvec=None,
pgt=0,nd=1,thresh=1e-16):
r"""
This subroutine computes the N-body Helmholtz interactions
    in two dimensions where the interaction kernel is given by $H_{0}^{(1)}(kr)$
and its gradients.
.. math::
        u(x) = \sum_{j=1}^{N} c_{j} H_{0}^{(1)}(k \|x-x_{j}\|) - d_{j} v_{j}\cdot \nabla \left( H_{0}^{(1)}(k \|x-x_{j}\|) \right) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are dipole densities
$v_{j}$ are the dipole orientation vectors, and
$x_{j}$ are the source locations.
When |x-x_{m}| \leq thresh, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
eps: float
precision requested
zk: complex
Helmholtz parameter - k
sources: float(2,n)
source locations (x_{j})
charges: complex(nd,n) or complex(n)
charge densities (c_{j})
dipstr: complex(nd,n) or complex(n)
dipole densities (d_{j})
        dipvec: float(nd,2,n) or float(2,n)
            dipole orientation vectors (v_{j})
targets: float(2,nt)
target locations (x)
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
            potential and gradient at targets evaluated if pgt=2
potential, gradient and hessians at targets evaluated if pgt=3
nd: integer
number of densities
thresh: contribution of source x_i, at location x ignored if |x-x_i|<=thresh
Returns:
out.pottarg - potential at target locations if requested
out.gradtarg - gradient at target locations if requested
out.hesstarg - hessian at target locations if requested
Example:
see hfmmexample.py
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
if(pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
charges = charges.reshape(1,ns)
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipvec is not None or dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2 and dipvec.shape[1] == ns, "dipole vectors must be of shape [2,number of sources]"
dipvec = dipvec.reshape(1,2,ns)
dipstr = dipstr.reshape(1,ns)
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2, "dipole vectors must be of shape [2,number of sources]"
dipvec = dipvec.reshape(1,2,ns)
dipstr = dipstr.reshape(1,ns)
if nd>1:
assert dipvec.shape[0] == nd and dipvec.shape[1] == 2 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,2,ns] where nd is number of densities, and ns is number of sources"
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
nt = targets.shape[1]
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
out.pottarg = hfmm.h2d_directcp(zk,sources,charges,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg = hfmm.h2d_directcg(zk,sources,charges,targets,thresh)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
out.pottarg = hfmm.h2d_directdp(zk,sources,dipstr,dipvec,targets,thresh)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg = hfmm.h2d_directdg(zk,sources,dipstr,dipvec,targets,thresh)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
out.pottarg = hfmm.h2d_directcdp(zk,sources,charges,dipstr,dipvec,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg = hfmm.h2d_directcdg(zk,sources,charges,dipstr,dipvec,targets,thresh)
if(nd == 1):
if(ifcharge==1):
charges = charges.reshape(ns,)
if(ifdipole==1):
dipvec = dipvec.reshape(2,ns)
dipstr = dipstr.reshape(ns,)
if(pgt>0):
out.pottarg = out.pottarg.reshape(nt,)
if(pgt==2):
out.gradtarg = out.gradtarg.reshape(2,nt)
return out
def r2ddir(*,sources,targets,charges=None,dipstr=None,dipvec=None,
pgt=0,nd=1,thresh=1e-16):
r"""
This subroutine computes the N-body Laplace interactions
in two dimensions where the interaction kernel is given by $log(r)$
and its gradients.
.. math::
u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) + d_{j} v_{j} \cdot \nabla \left( \log(\|x-x_{j}\|) \right) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are the dipole strengths,
$v_{j}$ are the dipole orientation vectors, and
$x_{j}$ are the source locations.
When |x-x_{m}| \leq thresh, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
sources: float(2,n)
source locations (x_{j})
charges: float(nd,n) or float(n)
charge densities (c_{j})
dipstr: float(nd,n) or float(n)
dipole densities (d_{j})
dipvec: float(nd,2,n) or float(2,n)
dipole orientation vectors (v_{j})
targets: float(2,nt)
target locations (x)
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
potential and gradient at targets evaluated if pgt=2
potential, gradient, and hessians at targets evaluated if pgt=3
nd: integer
number of densities
thresh: contribution of source x_i at location x is ignored if |x-x_i|<=thresh
Returns:
out.pottarg - potential at target locations if requested
out.gradtarg - gradient at target locations if requested
out.hesstarg - hessian at target locations if requested
Example:
see rfmmexample.py
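A minimal usage sketch (illustrative only, not taken from rfmmexample.py;
the random data below are assumptions):
    import numpy as np
    import fmm2dpy as fmm
    n, nt = 1000, 500
    sources = np.random.uniform(0, 1, (2, n))
    targets = np.random.uniform(0, 1, (2, nt))
    charges = np.random.uniform(0, 1, n)
    # direct evaluation of the potential at the targets
    out = fmm.r2ddir(sources=sources, targets=targets, charges=charges, pgt=1)
    # out.pottarg has shape (nt,)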
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
if(pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
charges = charges.reshape(1,ns)
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipvec is not None or dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2 and dipvec.shape[1] == ns, "dipole vectors must be of shape [2,number of sources]"
dipvec = dipvec.reshape(1,2,ns)
dipstr = dipstr.reshape(1,ns)
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2, "dipole vectors must be of shape [2,number of sources]"
dipvec = dipvec.reshape(1,2,ns)
dipstr = dipstr.reshape(1,ns)
if nd>1:
assert dipvec.shape[0] == nd and dipvec.shape[1] == 2 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,2,ns] where nd is number of densities, and ns is number of sources"
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
nt = targets.shape[1]
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
out.pottarg = lfmm.r2d_directcp(sources,charges,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg = lfmm.r2d_directcg(sources,charges,targets,thresh)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.r2d_directch(sources,charges,targets,thresh)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
out.pottarg = lfmm.r2d_directdp(sources,dipstr,dipvec,targets,thresh)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg = lfmm.r2d_directdg(sources,dipstr,dipvec,targets,thresh)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.r2d_directdh(sources,dipstr,dipvec,targets,thresh)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
out.pottarg = lfmm.r2d_directcdp(sources,charges,dipstr,dipvec,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg = lfmm.r2d_directcdg(sources,charges,dipstr,dipvec,targets,thresh)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.r2d_directcdh(sources,charges,dipstr,dipvec,targets,thresh)
if(nd == 1):
if(ifcharge == 1):
charges = charges.reshape(ns,)
if(ifdipole ==1):
dipvec = dipvec.reshape(2,ns)
dipstr = dipstr.reshape(ns,)
if(pgt>0):
out.pottarg = out.pottarg.reshape(nt,)
if(pgt==2):
out.gradtarg = out.gradtarg.reshape(2,nt)
if(pgt==3):
out.hesstarg = out.hesstarg.reshape(3,nt)
return out
def l2ddir(*,sources,targets,charges=None,dipstr=None,dipvec=None,
pgt=0,nd=1,thresh=1e-16):
r"""
This subroutine computes the N-body Laplace interactions
in two dimensions where the interaction kernel is given by $log(r)$
and its gradients.
.. math::
u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) + d_{j} v_{j} \cdot \nabla \left( \log(\|x-x_{j}\|) \right) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are the dipole strengths,
$v_{j}$ are the dipole orientation vectors, and
$x_{j}$ are the source locations.
When |x-x_{m}| \leq thresh, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
sources: float(2,n)
source locations (x_{j})
charges: complex(nd,n) or complex(n)
charge densities (c_{j})
dipstr: complex(nd,n) or complex(n)
dipole densities (d_{j})
dipvec: float(nd,2,n) or float(2,n)
dipole orientation vectors (v_{j})
targets: float(2,nt)
target locations (x)
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
potential and gradient at targets evaluated if pgt=2
potential, gradient, and hessians at targets evaluated if pgt=3
nd: integer
number of densities
thresh: contribution of source x_i at location x is ignored if |x-x_i|<=thresh
Returns:
out.pottarg - potential at target locations if requested
out.gradtarg - gradient at target locations if requested
out.hesstarg - hessian at target locations if requested
Example:
see lfmmexample.py
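A minimal usage sketch (illustrative only, not taken from lfmmexample.py;
the random data below are assumptions):
    import numpy as np
    import fmm2dpy as fmm
    n, nt = 1000, 500
    sources = np.random.uniform(0, 1, (2, n))
    targets = np.random.uniform(0, 1, (2, nt))
    charges = np.random.uniform(0, 1, n) + 1j*np.random.uniform(0, 1, n)
    # direct evaluation of the potential and gradient at the targets
    out = fmm.l2ddir(sources=sources, targets=targets, charges=charges, pgt=2)
    # out.pottarg has shape (nt,), out.gradtarg has shape (2, nt)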
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
if(pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
charges = charges.reshape(1,ns)
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipvec is not None or dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2 and dipvec.shape[1] == ns, "dipole vectors must be of shape [2,number of sources]"
dipvec = dipvec.reshape(1,2,ns)
dipstr = dipstr.reshape(1,ns)
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
assert dipvec.shape[0] == 2, "dipole vectors must be of shape [2,number of sources]"
dipvec = dipvec.reshape(1,2,ns)
dipstr = dipstr.reshape(1,ns)
if nd>1:
assert dipvec.shape[0] == nd and dipvec.shape[1] == 2 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,2,ns] where nd is number of densities, and ns is number of sources"
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
nt = targets.shape[1]
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
out.pottarg = lfmm.l2d_directcp(sources,charges,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg = lfmm.l2d_directcg(sources,charges,targets,thresh)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.l2d_directch(sources,charges,targets,thresh)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
out.pottarg = lfmm.l2d_directdp(sources,dipstr,dipvec,targets,thresh)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg = lfmm.l2d_directdg(sources,dipstr,dipvec,targets,thresh)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.l2d_directdh(sources,dipstr,dipvec,targets,thresh)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
out.pottarg = lfmm.l2d_directcdp(sources,charges,dipstr,dipvec,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg = lfmm.l2d_directcdg(sources,charges,dipstr,dipvec,targets,thresh)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.l2d_directcdh(sources,charges,dipstr,dipvec,targets,thresh)
if(nd == 1):
if(ifcharge == 1):
charges = charges.reshape(ns,)
if(ifdipole ==1):
dipvec = dipvec.reshape(2,ns)
dipstr = dipstr.reshape(ns,)
if(pgt>0):
out.pottarg = out.pottarg.reshape(nt,)
if(pgt==2):
out.gradtarg = out.gradtarg.reshape(2,nt)
if(pgt==3):
out.hesstarg = out.hesstarg.reshape(3,nt)
return out
def c2ddir(*,sources,targets,charges=None,dipstr=None,
pgt=0,nd=1,thresh=1e-16):
r"""
This subroutine computes the N-body Laplace interactions
in two dimensions where the interaction kernel is given by $log(r)$
and its gradients.
.. math::
u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) + d_{j}/(x-x_{j}) \, ,
where $c_{j}$ are the charge densities, $d_{j}$ are the dipole strengths
and $x_{j}$ are the source locations.
When |x-x_{m}| \leq thresh, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
sources: float(2,n)
source locations (x_{j})
charges: complex(nd,n) or complex(n)
charge densities (c_{j})
dipstr: complex(nd,n) or complex(n)
dipole densities (d_{j})
targets: float(2,nt)
target locations (x)
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
potential and gradient at targets evaluated if pgt=2
potential, gradient, and hessians at targets evaluated if pgt=3
nd: integer
number of densities
thresh: contribution of source x_i at location x is ignored if |x-x_i|<=thresh
Returns:
out.pottarg - potential at target locations if requested
out.gradtarg - gradient at target locations if requested
out.hesstarg - hessian at target locations if requested
Example:
see cfmmexample.py
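A minimal usage sketch (illustrative only, not taken from cfmmexample.py;
the random data below are assumptions):
    import numpy as np
    import fmm2dpy as fmm
    n, nt = 1000, 500
    sources = np.random.uniform(0, 1, (2, n))
    targets = np.random.uniform(0, 1, (2, nt))
    dipstr = np.random.uniform(0, 1, n) + 1j*np.random.uniform(0, 1, n)
    # direct evaluation of the (complex) potential at the targets
    out = fmm.c2ddir(sources=sources, targets=targets, dipstr=dipstr, pgt=1)
    # out.pottarg has shape (nt,)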
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
if(pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
charges = charges.reshape(1,ns)
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifcharge = 1
if(dipstr is not None):
if nd == 1 and ns>1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
dipstr = dipstr.reshape(1,ns)
if nd == 1 and ns==1:
assert dipstr.shape[0] == ns, "Dipole strengths must be same length as second dimension of sources"
dipstr = dipstr.reshape(1,ns)
if nd>1:
assert dipstr.shape[0] == nd and dipstr.shape[1]==ns, "Dipole strengths must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
ifdipole = 1
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
nt = targets.shape[1]
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
out.pottarg = lfmm.c2d_directcp(sources,charges,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg = lfmm.c2d_directcg(sources,charges,targets,thresh)
if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.c2d_directch(sources,charges,targets,thresh)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
out.pottarg = lfmm.c2d_directdp(sources,dipstr,targets,thresh)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg = lfmm.c2d_directdg(sources,dipstr,targets,thresh)
if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.c2d_directdh(sources,dipstr,targets,thresh)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
out.pottarg = lfmm.c2d_directcdp(sources,charges,dipstr,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg = lfmm.c2d_directcdg(sources,charges,dipstr,targets,thresh)
if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg,out.hesstarg = lfmm.c2d_directcdh(sources,charges,dipstr,targets,thresh)
if(nd == 1):
if(ifcharge == 1):
charges = charges.reshape(ns,)
if(ifdipole ==1):
dipstr = dipstr.reshape(ns,)
if(pgt>0):
out.pottarg = out.pottarg.reshape(nt,)
if(pgt==2):
out.gradtarg = out.gradtarg.reshape(nt,)
if(pgt==3):
out.hesstarg = out.hesstarg.reshape(nt,)
return out
def bh2ddir(*,sources,targets,charges=None,dipoles=None,
pgt=0,nd=1,thresh=1e-16):
r"""
This subroutine computes the N-body biharmonic interactions
in two dimensions where the interaction kernel is related to the
biharmonic greens function r^2 log (r) and its derivatives
.. math::
u(x) = \sum_{j=1}^{N} c_{j} \log(\|x-x_{j}\|) +
\overline{c}_{j} (x-x_{j})/(\overline{x-x_{j}}) + d_{j,1}/(x-x_{j}) -
d_{j,2}/(\overline{x-x_{j}}) -
\overline{d_{j,1}} (x-x_{j})/(\overline{x-x_{j}})^2 \, ,
where $c_{j}$ are the charge densities, $d_{j,1}$, $d_{j,2}$ are the dipole strengths,
and $x_{j}$ are the source locations.
When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
sum
Args:
sources: float(2,n)
source locations (x_{j})
charges: complex(nd,n) or complex(n)
charge densities (c_{j})
dipoles: complex(nd,2,n) or complex(2,n)
dipole densities (d_{j,1}, d_{j,2})
targets: float(2,nt)
target locations (x)
pgt: integer
target eval flag
potential at targets evaluated if pgt = 1
potential and gradient at targets evaluated if pgt=2
nd: integer
number of densities
thresh: contribution of source x_i at location x is ignored if |x-x_i|<=thresh
Returns:
out.pottarg: potential at target locations if requested
out.gradtarg: gradient at target locations if requested
Example:
see bhfmmexample.py
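A minimal usage sketch (illustrative only, not taken from bhfmmexample.py;
the random data below are assumptions):
    import numpy as np
    import fmm2dpy as fmm
    n, nt = 1000, 500
    sources = np.random.uniform(0, 1, (2, n))
    targets = np.random.uniform(0, 1, (2, nt))
    charges = np.random.uniform(0, 1, n) + 1j*np.random.uniform(0, 1, n)
    dipoles = np.random.uniform(0, 1, (2, n)) + 1j*np.random.uniform(0, 1, (2, n))
    # direct evaluation of the potential at the targets
    out = fmm.bh2ddir(sources=sources, targets=targets, charges=charges, dipoles=dipoles, pgt=1)
    # out.pottarg has shape (nt,)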
r"""
out = Output()
assert sources.shape[0] == 2, "The first dimension of sources must be 2"
if(np.size(np.shape(sources))==2):
ns = sources.shape[1]
if(np.size(np.shape(sources))==1):
ns = 1
ifcharge = 0
ifdipole = 0
if(pgt == 0):
print("Nothing to compute, set either pg or pgt to non-zero")
return out
ifcharge = 0
ifdipole = 0
iftarg = 0
if charges is not None:
if nd == 1:
assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
charges = charges.reshape(1,ns)
if nd>1:
assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
charges = charges.reshape([nd,ns])
ifcharge = 1
else:
charges = np.zeros([nd,ns],dtype='complex')
if(dipoles is not None):
if nd == 1 and ns>1:
assert dipoles.shape[0] == 2 and dipoles.shape[1] == ns, "Dipole must of shape [2, number of sources]"
if nd == 1 and ns==1:
assert dipoles.shape[0] == 2, "Dipole must of shape [2, number of sources]"
if nd>1:
assert dipoles.shape[0] == nd and dipoles.shape[1] == 2 and dipoles.shape[2]==ns, "Dipole must of shape [nd,2, number of sources]"
dipoles = dipoles.reshape([nd,2,ns])
ifdipole = 1
else:
dipoles = np.zeros([nd,2,ns],dtype='complex')
assert targets.shape[0] == 2, "The first dimension of targets must be 2"
nt = targets.shape[1]
#
if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
out.pottarg = bhfmm.bh2d_directcp(sources,charges,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
out.pottarg,out.gradtarg = bhfmm.bh2d_directcg(sources,charges,targets,thresh)
if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
out.pottarg = bhfmm.bh2d_directdp(sources,dipoles,targets,thresh)
if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
out.pottarg,out.gradtarg = bhfmm.bh2d_directdg(sources,dipoles,targets,thresh)
if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
out.pottarg = bhfmm.bh2d_directcdp(sources,charges,dipoles,targets,thresh)
if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
out.pottarg,out.gradtarg = bhfmm.bh2d_directcdg(sources,charges,dipoles,targets,thresh)
if(nd == 1):
if(pgt>0):
out.pottarg = out.pottarg.reshape(nt,)
if(pgt==2):
out.gradtarg = out.gradtarg.reshape(2,nt,)
return out
def comperr(*,ntest,out,outex,pg=0,pgt=0,nd=1,cauchy=0):
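# comperr returns the relative l2 error sqrt(err/r) between the computed
# output `out` and the reference output `outex`, accumulated over the first
# ntest entries of whichever potentials/gradients/hessians the pg/pgt flags
# request; cauchy=1 compares only the real part of the potential and treats
# gradients/hessians as complex scalars (Cauchy-type kernels) rather than
# 2- and 3-component vectors.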
r = 0
err = 0
if(nd == 1):
if(pg > 0):
if(cauchy==0):
r = r+la.norm(outex.pot[0:ntest])**2
err = err+la.norm(outex.pot[0:ntest]-out.pot[0:ntest])**2
else:
r = r+la.norm(outex.pot.real[0:ntest])**2
err = err+la.norm(outex.pot.real[0:ntest]-out.pot.real[0:ntest])**2
if(pg >= 2):
if(cauchy==0):
g = out.grad[:,0:ntest].reshape(2*ntest,)
gex = outex.grad[:,0:ntest].reshape(2*ntest,)
else:
g = out.grad[0:ntest].reshape(ntest,)
gex = outex.grad[0:ntest].reshape(ntest,)
r = r +la.norm(gex)**2
err = err+la.norm(gex-g)**2
if( pg >= 3):
if(cauchy==0):
h = out.hess[:,0:ntest].reshape(3*ntest,)
hhex = outex.hess[:,0:ntest].reshape(3*ntest,)
else:
h = out.hess[0:ntest].reshape(ntest,)
hhex = outex.hess[0:ntest].reshape(ntest,)
r = r + la.norm(hhex)**2
err = err + la.norm(hhex-h)**2
if(pgt > 0):
if(cauchy==0):
r = r+la.norm(outex.pottarg[0:ntest])**2
err = err+la.norm(outex.pottarg[0:ntest]-out.pottarg[0:ntest])**2
else:
r = r+la.norm(outex.pottarg.real[0:ntest])**2
err = err+la.norm(outex.pottarg.real[0:ntest]-out.pottarg.real[0:ntest])**2
if(pgt >= 2):
if(cauchy==0):
g = out.gradtarg[:,0:ntest].reshape(2*ntest,)
gex = outex.gradtarg[:,0:ntest].reshape(2*ntest,)
else:
g = out.gradtarg[0:ntest].reshape(ntest,)
gex = outex.gradtarg[0:ntest].reshape(ntest,)
r = r +la.norm(gex)**2
err = err+la.norm(gex-g)**2
if( pgt >= 3):
if(cauchy==0):
h = out.hesstarg[:,0:ntest].reshape(3*ntest,)
hhex = outex.hesstarg[:,0:ntest].reshape(3*ntest,)
else:
h = out.hesstarg[0:ntest].reshape(ntest,)
hhex = outex.hesstarg[0:ntest].reshape(ntest,)
r = r + la.norm(hhex)**2
err = err + la.norm(hhex-h)**2
if(nd > 1):
if(pg > 0):
if(cauchy==0):
p = out.pot[:,0:ntest].reshape(nd*ntest,)
pex = outex.pot[:,0:ntest].reshape(nd*ntest,)
else:
p = out.pot.real[:,0:ntest].reshape(nd*ntest,)
pex = outex.pot.real[:,0:ntest].reshape(nd*ntest,)
r = r+la.norm(pex)**2
err = err+la.norm(p-pex)**2
if(pg >= 2):
if(cauchy==0):
g = out.grad[:,:,0:ntest].reshape(2*nd*ntest,)
gex = outex.grad[:,:,0:ntest].reshape(2*nd*ntest,)
else:
g = out.grad[:,0:ntest].reshape(nd*ntest,)
gex = outex.grad[:,0:ntest].reshape(nd*ntest,)
r = r +la.norm(gex)**2
err = err+la.norm(gex-g)**2
if( pg >= 3):
if(cauchy==0):
h = out.hess[:,:,0:ntest].reshape(3*nd*ntest,)
hhex = outex.hess[:,:,0:ntest].reshape(3*nd*ntest,)
else:
h = out.hess[:,0:ntest].reshape(nd*ntest,)
hhex = outex.hess[:,0:ntest].reshape(nd*ntest,)
r = r + la.norm(hhex)**2
err = err + la.norm(hhex-h)**2
if(pgt > 0):
if(cauchy==0):
p = out.pottarg[:,0:ntest].reshape(nd*ntest,)
pex = outex.pottarg[:,0:ntest].reshape(nd*ntest,)
else:
p = out.pottarg.real[:,0:ntest].reshape(nd*ntest,)
pex = outex.pottarg.real[:,0:ntest].reshape(nd*ntest,)
r = r+la.norm(pex)**2
err = err+la.norm(p-pex)**2
if(pgt >= 2):
if(cauchy==0):
g = out.gradtarg[:,:,0:ntest].reshape(2*nd*ntest,)
gex = outex.gradtarg[:,:,0:ntest].reshape(2*nd*ntest,)
else:
g = out.gradtarg[:,0:ntest].reshape(nd*ntest,)
gex = outex.gradtarg[:,0:ntest].reshape(nd*ntest,)
r = r +la.norm(gex)**2
err = err+la.norm(gex-g)**2
if( pgt >= 3):
if(cauchy==0):
h = out.hesstarg[:,:,0:ntest].reshape(3*nd*ntest,)
hhex = outex.hesstarg[:,:,0:ntest].reshape(3*nd*ntest,)
else:
h = out.hesstarg[:,0:ntest].reshape(nd*ntest,)
hhex = outex.hesstarg[:,0:ntest].reshape(nd*ntest,)
r = r + la.norm(hhex)**2
err = err + la.norm(hhex-h)**2
err = np.sqrt(err/r)
return err
| 83,367 | 45.264151 | 198 | py |
fmm2d | fmm2d-main/python/fmm2dpy/__init__.py | from .fmm2d import hfmm2d,rfmm2d,lfmm2d,cfmm2d,bhfmm2d,h2ddir,r2ddir,l2ddir,c2ddir,bh2ddir,comperr,Output
| 106 | 52.5 | 105 | py |
fmm2d | fmm2d-main/python/test/test_rfmm.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
import numpy.linalg as la
def main():
test_rfmm()
def test_rfmm():
ntests = 36
testres = np.zeros(ntests)
#
# This is a testing code for making sure all the
# fmm routines are accessible through fmm2d.py
#
n = 2000
ntest = 10
zk = 1.1 + 1j*0
sources = np.random.uniform(0,1,(2,n))
stmp = sources[:,0:ntest]
nt = 1880
targ = np.random.uniform(0,1,(2,nt))
ttmp = targ[:,0:ntest]
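# stmp/ttmp hold the first ntest sources/targets; direct evaluation with
# r2ddir at these points provides the reference values that comperr compares
# against the FMM output in the tests below.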
eps = 10**(-5)
charges = np.random.uniform(0,1,n)
dipstr = np.random.uniform(0,1,n)
dipvec = np.random.uniform(0,1,(2,n))
outex=fmm.Output()
itest = 0
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges,pg=1)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=1)
out2 = fmm.r2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=1)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges,pg=2)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=2)
out2 = fmm.r2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=2)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad")
itest=itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1)
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2=fmm.r2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2=fmm.r2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2)
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2=fmm.r2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2 =fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1)
out2=fmm.r2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2)
out2=fmm.r2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad")
nd = 2
charges = np.random.uniform(0,1,(nd,n))
dipstr = np.random.uniform(0,1,(nd,n))
dipvec = np.random.uniform(0,1,(nd,2,n))
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges,pg=1,nd=nd)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot, vectorized")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.r2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges,pg=2,nd=nd)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.r2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.r2ddir(sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad, vectorized")
itest=itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot, vectorized")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2 =fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad, vectorized")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot, vectorized")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.rfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.r2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.r2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad, vectorized")
if(sum(testres)==ntests):
print("all rfmm tests succeeded")
if __name__ == "__main__":
main()
| 19,099 | 32.333333 | 101 | py |
fmm2d | fmm2d-main/python/test/test_lfmm.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
import numpy.linalg as la
def main():
test_lfmm()
def test_lfmm():
ntests = 36
testres = np.zeros(ntests)
#
# This is a testing code for making sure all the
# fmm routines are accessible through fmm2d.py
#
n = 2000
ntest = 10
zk = 1.1 + 1j*0
sources = np.random.uniform(0,1,(2,n))
stmp = sources[:,0:ntest]
nt = 1880
targ = np.random.uniform(0,1,(2,nt))
ttmp = targ[:,0:ntest]
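# stmp/ttmp hold the first ntest sources/targets; direct evaluation with
# l2ddir at these points provides the reference values that comperr compares
# against the FMM output in the tests below.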
eps = 10**(-5)
charges = np.random.uniform(0,1,n)+1j*np.random.uniform(0,1,n)
dipstr = np.random.uniform(0,1,n)+1j*np.random.uniform(0,1,n)
dipvec = np.random.uniform(0,1,(2,n))
outex=fmm.Output()
itest = 0
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges,pg=1)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=1)
out2 = fmm.l2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=1)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges,pg=2)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=2)
out2 = fmm.l2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=2)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad")
itest=itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1)
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2=fmm.l2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2=fmm.l2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2)
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2=fmm.l2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2 =fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1)
out2=fmm.l2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2)
out2=fmm.l2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad")
nd = 2
charges = np.random.uniform(0,1,(nd,n))
dipstr = np.random.uniform(0,1,(nd,n))
dipvec = np.random.uniform(0,1,(nd,2,n))
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges,pg=1,nd=nd)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot, vectorized")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.l2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges,pg=2,nd=nd)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.l2ddir(sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.l2ddir(sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad, vectorized")
itest=itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot, vectorized")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2 =fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot, vectorized")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.lfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.l2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad, vectorized")
if(sum(testres)==ntests):
print("all lfmm tests succeeded")
if __name__ == "__main__":
main()
| 19,155 | 32.431065 | 101 | py |
fmm2d | fmm2d-main/python/test/test_hfmm.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
import numpy.linalg as la
def main():
test_hfmm()
def test_hfmm():
ntests = 36
testres = np.zeros(ntests)
#
# This is a testing code for making sure all the
# fmm routines are accessible through fmm2d.py
#
n = 2000
ntest = 10
zk = 1.1 + 1j*0
sources = np.random.uniform(0,1,(2,n))
stmp = sources[:,0:ntest]
nt = 1880
targ = np.random.uniform(0,1,(2,nt))
ttmp = targ[:,0:ntest]
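# stmp/ttmp hold the first ntest sources/targets; direct evaluation with
# h2ddir at these points provides the reference values that comperr compares
# against the FMM output in the tests below.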
eps = 10**(-5)
zk = 1.1 + 1j*0
charges = np.random.uniform(0,1,n)+ 1j*np.random.uniform(0,1,n)
dipstr = np.random.uniform(0,1,n)+ 1j*np.random.uniform(0,1,n)
dipvec = np.random.uniform(0,1,(2,n))
outex=fmm.Output()
itest = 0
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges,pg=1)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=1)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=1)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges,pg=2)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=2)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=2)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad")
itest=itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=1)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=2)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
out2 =fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=1,pg=1)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=2,pg=2)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad")
nd = 2
charges = np.random.uniform(0,1,(nd,n))+ 1j*np.random.uniform(0,1,(nd,n))
dipstr = np.random.uniform(0,1,(nd,n))+ 1j*np.random.uniform(0,1,(nd,n))
dipvec = np.random.uniform(0,1,(nd,2,n))
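    # vectorized tests: nd=2 independent densities are stacked along the
    # leading axis of charges, dipstr and dipvec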
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges,pg=1,nd=nd)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot, vectorized")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges,pg=2,nd=nd)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,dipstr=dipstr,dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad, vectorized")
itest=itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=1,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot, vectorized")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=2,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
out2 =fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad, vectorized")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=1,pg=1,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot, vectorized")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,pgt=2,pg=2,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,dipstr=dipstr,dipvec=dipvec, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.hfmm2d(eps=eps,zk=zk,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.h2ddir(zk=zk,sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.h2ddir(zk=zk,sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,dipvec=dipvec,pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad, vectorized")
if(sum(testres)==ntests):
print("all hfmm tests succeeded")
if __name__ == "__main__":
main()
| 19,754 | 33.356522 | 101 | py |
fmm2d | fmm2d-main/python/test/test_bhfmm.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
import numpy.linalg as la
def main():
test_bhfmm()
def test_bhfmm():
ntests = 36
testres = np.zeros(ntests)
#
# This is a testing code for making sure all the
# fmm routines are accessible through fmm2d.py
#
n = 2000
ntest = 100
zk = 1.1 + 1j*0
sources = np.random.uniform(0,1,(2,n))
stmp = sources[:,0:ntest]
nt = 1880
targ = np.random.uniform(0,1,(2,nt))
ttmp = targ[:,0:ntest]
eps = 10**(-5)
charges = np.random.uniform(0,1,n) + 0j
dipoles = np.random.uniform(0,1,(2,n))+1j*np.random.uniform(0,1,(2,n))
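    # for the biharmonic kernel the dipole strengths are complex and carry two
    # components per source, hence the complex (2,n) array; see the bhfmm2d
    # documentation for the precise kernel convention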
outex=fmm.Output()
itest = 0
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges,pg=1)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,dipoles=dipoles,pg=1)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,dipoles=dipoles,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges, \
dipoles=dipoles,pg=1)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
dipoles=dipoles,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges,pg=2)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,dipoles=dipoles,pg=2)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,dipoles=dipoles, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges, \
dipoles=dipoles,pg=2)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges,
dipoles=dipoles,pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad")
itest=itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=1)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,\
dipoles=dipoles,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipoles=dipoles,pgt=1)
out2=fmm.bh2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipoles=dipoles,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=2)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,\
dipoles=dipoles,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipoles=dipoles,pgt=2)
out2 =fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,\
dipoles=dipoles,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=1,pg=1)
out2=fmm.bh2ddir(sources=sources,targets=stmp, \
dipoles=dipoles,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp, \
dipoles=dipoles,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipoles=dipoles,pgt=1,pg=1)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
dipoles=dipoles,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges, \
dipoles=dipoles,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=2,pg=2)
out2=fmm.bh2ddir(sources=sources,targets=stmp, \
dipoles=dipoles,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,dipoles=dipoles, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipoles=dipoles,pgt=2,pg=2)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
dipoles=dipoles,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges, \
dipoles=dipoles,pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad")
nd = 2
charges = np.random.uniform(0,1,(nd,n))
dipoles = np.random.uniform(0,1,(nd,2,n))
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges,pg=1,nd=nd)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot, vectorized")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,dipoles=dipoles,pg=1,nd=nd)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,dipoles=dipoles,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges, \
dipoles=dipoles,pg=1,nd=nd)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
dipoles=dipoles,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges,pg=2,nd=nd)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,dipoles=dipoles,pg=2,nd=nd)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,dipoles=dipoles, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,charges=charges, \
dipoles=dipoles,pg=2,nd=nd)
out2 = fmm.bh2ddir(sources=sources,targets=stmp,charges=charges,
dipoles=dipoles,pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad, vectorized")
itest=itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot, vectorized")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=1,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,\
dipoles=dipoles,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipoles=dipoles,pgt=1,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipoles=dipoles,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=2,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=ttmp,\
dipoles=dipoles,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipoles=dipoles,pgt=2,nd=nd)
out2 =fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,\
dipoles=dipoles,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad, vectorized")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot, vectorized")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=1,pg=1,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=stmp, \
dipoles=dipoles,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp, \
dipoles=dipoles,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipoles=dipoles,pgt=1,pg=1,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
dipoles=dipoles,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges, \
dipoles=dipoles,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,\
dipoles=dipoles,pgt=2,pg=2,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=stmp, \
dipoles=dipoles,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,dipoles=dipoles, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.bhfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipoles=dipoles,pgt=2,pg=2,nd=nd)
out2=fmm.bh2ddir(sources=sources,targets=stmp,charges=charges, \
dipoles=dipoles,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.bh2ddir(sources=sources,targets=ttmp,charges=charges, \
dipoles=dipoles,pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad, vectorized")
if(sum(testres)==ntests):
print("all bhfmm tests succeeded")
if __name__ == "__main__":
main()
| 18,473 | 31.353765 | 101 | py |
fmm2d | fmm2d-main/python/test/test_cfmm.py | #!/usr/bin/env python
import fmm2dpy as fmm
import numpy as np
import numpy.linalg as la
def main():
test_cfmm()
def test_cfmm():
ntests = 36
testres = np.zeros(ntests)
#
# This is a testing code for making sure all the
# fmm routines are accessible through fmm2d.py
#
n = 2000
ntest = 100
zk = 1.1 + 1j*0
sources = np.random.uniform(0,1,(2,n))
stmp = sources[:,0:ntest]
nt = 1880
targ = np.random.uniform(0,1,(2,nt))
ttmp = targ[:,0:ntest]
eps = 10**(-5)
charges = np.random.uniform(0,1,n) + 0j
dipstr = np.random.uniform(0,1,n)+1j*np.random.uniform(0,1,n)
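    # the cauchy=1 flag passed to comperr below is assumed to tell the checker
    # that the outputs follow the complex (Cauchy) kernel convention of cfmm2d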
outex=fmm.Output()
itest = 0
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges,pg=1)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,dipstr=dipstr,pg=1)
out2 = fmm.c2ddir(sources=sources,targets=stmp,dipstr=dipstr,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,pg=1)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges,pg=2)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,dipstr=dipstr,pg=2)
out2 = fmm.c2ddir(sources=sources,targets=stmp,dipstr=dipstr, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,pg=2)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad")
itest=itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1)
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=1)
out2=fmm.c2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,pgt=1)
out2=fmm.c2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2)
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=2)
out2=fmm.c2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,pgt=2)
out2 =fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=1,pg=1)
out2=fmm.c2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp, \
dipstr=dipstr,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
    err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,pgt=1,pg=1)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=2,pg=2)
out2=fmm.c2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,dipstr=dipstr, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,pgt=2,pg=2)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad")
nd = 2
charges = np.random.uniform(0,1,(nd,n))
dipstr = np.random.uniform(0,1,(nd,n))
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges,pg=1,nd=nd)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot, vectorized")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,dipstr=dipstr,pg=1,nd=nd)
out2 = fmm.c2ddir(sources=sources,targets=stmp,dipstr=dipstr,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,pg=1,nd=nd)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges,pg=2,nd=nd)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,dipstr=dipstr,pg=2,nd=nd)
out2 = fmm.c2ddir(sources=sources,targets=stmp,dipstr=dipstr, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,charges=charges, \
dipstr=dipstr,pg=2,nd=nd)
out2 = fmm.c2ddir(sources=sources,targets=stmp,charges=charges,
dipstr=dipstr,pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad, vectorized")
itest=itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot, vectorized")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=1,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,pgt=1,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipstr=dipstr,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=2,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=ttmp,\
dipstr=dipstr,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,pgt=2,nd=nd)
out2 =fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,\
dipstr=dipstr,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad, vectorized")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot, vectorized")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=1,pg=1,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp, \
dipstr=dipstr,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipstr=dipstr,pgt=1,pg=1,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,\
dipstr=dipstr,pgt=2,pg=2,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=stmp, \
dipstr=dipstr,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,dipstr=dipstr, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.cfmm2d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipstr=dipstr,pgt=2,pg=2,nd=nd)
out2=fmm.c2ddir(sources=sources,targets=stmp,charges=charges, \
dipstr=dipstr,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.c2ddir(sources=sources,targets=ttmp,charges=charges, \
dipstr=dipstr,pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd,cauchy=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad, vectorized")
if(sum(testres)==ntests):
print("all cfmm tests succeeded")
if __name__ == "__main__":
main()
| 18,577 | 31.535902 | 101 | py |
fmm2d | fmm2d-main/test/modified-biharmonic/jinjaroot.yaml.py |
fname="jinjaroot.yaml"
file = open(fname,"w")
file.write("mbhDirectRouts:\n")
outs=["p","g","h"]
for out in outs:
for i in range(16):
i1 = i % 2
i2 = (i // 2) % 2
i3 = (i // 4) % 2
i4 = (i // 8) % 2
ker = ""
if (i1 == 1): ker += "c"
if (i2 == 1): ker += "d"
if (i3 == 1): ker += "q"
if (i4 == 1): ker += "o"
if ker != "":
name = "mbh2d_direct"+ker+out+"_vec"
file.write(" -\n")
file.write(" name: " + name + "\n")
file.write(" ker: " + ker + "\n")
file.write(" out: " + out + "\n")
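# each bit of i toggles one kernel letter, so, e.g., i=3 with out="p" yields
# the entry name mbh2d_directcdp_vec; close the handle so the YAML is flushed
file.close()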
| 688 | 21.966667 | 72 | py |
fmm2d | fmm2d-main/docs/genfortdocumentation_helm.py | from numpy import *
intro = "This subroutine evaluates the "
pgstr = ["potential ", "potential and its gradient ", "potential, its gradient, and its hessian "]
intro2 = "\n\n .. math::\n\n"
eq_start = "u(x) = "
eq_start2 = "\sum_{j=1}^{N} "
str1 = "c_{j} H_{0}^{(1)}(k\|x-x_{j}\|)"
str2 = "v_{j} d_{j}\cdot \\nabla \\left( H_{0}^{(1)}(k\|x-x_{j}\|)\\right)"
str3 = str1+" - "+str2
eq_cjs = [eq_start+eq_start2+str1,eq_start+"-"+eq_start2+str2,eq_start+eq_start2+str3]
eq_start = "u_{\ell}(x) = "
eq_start2 = "\sum_{j=1}^{N} "
str1 = "c_{\ell,j} H_{0}^{(1)}(k\|x-x_{j}\|)"
str2 = "v_{\ell,j} d_{\ell,j}\cdot \\nabla \\left( H_{0}^{(1)}(k\|x-x_{j}\|)\\right)"
str3 = str1+" - "+str2
eq_nd_cjs = [eq_start+eq_start2+str1,eq_start+"-"+eq_start2+str2,eq_start+eq_start2+str3]
stflag = ["at the source locations $x=x_{j}$.", "at the target locations $x=t_{i}$.", "at the source and target locations $x=x_{j},t_{i}$."]
intro3 = "When $x=x_{j}$, the term corresponding to $x_{j}$ is "
intro3_cont = "dropped from the sum."
inp_args ="Input arguments:"
nd_txt = ["- nd: integer","number of densities"]
eps_txt = ["- eps: double precision","precision requested"]
zk_txt = ["- zk: double complex","Helmholtz parameter, k"]
ns_txt = ["- nsource: integer","Number of sources"]
src_txt =["- source: double precision(3,nsource)","Source locations, $x_{j}$"]
charge_txt =["- charge: double complex(nsource)","Charge strengths, $c_{j}$"]
charge_nd_txt =["- charge: double complex(nd,nsource)","Charge strengths, $c_{\ell,j}$"]
dipole_txt =["- dipstr: double complex(nsource)","Dipole strengths, $v_{j}$"]
dipole_nd_txt =["- dipstr: double complex(nd,nsource)","Dipole strengths, $v_{\ell,j}$"]
dipvec_txt =["- dipvec: double precision(2,nsource)","Dipole orientation vectors, $d_{j}$"]
dipvec_nd_txt =["- dipvec: double precision(nd,2,nsource)","Dipole orientation vectors, $d_{\ell,j}$"]
nt_txt = ["- ntarg: integer","Number of targets"]
targ_txt =["- targ: double precision(2,ntarg)","Target locations, $t_{i}$"]
inp_returns = "Output arguments:"
pot_txt =["- pot: double complex(nsource)","Potential at source locations, $u(x_{j})$"]
grad_txt =["- grad: double complex(2,nsource)","Gradient at source locations, $\\nabla u(x_{j})$"]
hess_txt =["- hess: double complex(3,nsource)","Hessian at source locations, $\\nabla \\nabla u(x_{j})$"]
pottarg_txt =["- pottarg: double complex(ntarg)","Potential at target locations, $u(t_{i})$"]
gradtarg_txt =["- gradtarg: double complex(2,ntarg)","Gradient at target locations, $\\nabla u(t_{i})$"]
hesstarg_txt =["- hesstarg: double complex(3,ntarg)","Hessian at target locations, $\\nabla \\nabla u(t_{i})$"]
pot_nd_txt =["- pot: double complex(nd,nsource)","Potential at source locations, $u_{\ell}(x_{j})$"]
grad_nd_txt =["- grad: double complex(nd,2,nsource)","Gradient at source locations, $\\nabla u_{\ell}(x_{j})$"]
hess_nd_txt =["- hess: double complex(nd,3,nsource)","Hessian at source locations, $\\nabla \\nabla u_{\ell}(x_{j})$"]
pottarg_nd_txt =["- pottarg: double complex(nd,ntarg)","Potential at target locations, $u_{\ell}(t_{i})$"]
gradtarg_nd_txt =["- gradtarg: double complex(nd,2,ntarg)","Gradient at target locations, $\\nabla u_{\ell}(t_{i})$"]
hesstarg_nd_txt =["- hesstarg: double complex(nd,3,ntarg)","Hessian at target locations, $\\nabla \\nabla u_{\ell}(t_{i})$"]
ier_txt= ["- ier: integer","Error flag; ier=0 implies successful execution, and ier=4/8 implies insufficient memory"]
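# each *_txt entry pairs the bulleted argument line with its one-line
# description; the writer loops below emit them with sp1/sp2 indentation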
sp1 = " "
sp2 = " "
c_opts = ['_c','_d','_cd']
c_opts2 = ['Charges','Dipoles', 'Charges and Dipoles']
st_opts = ['_s','_t','_st']
st_opts2 = ['Sources','Targets','Sources and Targets']
p_opts = ['_p','_g','_h']
p_opts2 = ['Potential','Potential and Gradient','Potential, Gradient and Hessian']
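# the three option lists combine into the 27 documented routine names,
# e.g. 'hfmm2d'+st_opts[2]+c_opts[2]+p_opts[1] gives hfmm2d_st_cd_g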
###
# Generate webpage for helmholtz
#
f1 = open('fortrandocs_helm.raw','w')
for i in range(3):
for j in range(3):
for k in range(3):
subname0 = 'subroutine hfmm2d'+st_opts[i]+c_opts[j]+p_opts[k]+'(eps,zk,nsource,source,'
subname1 = 'subroutine hfmm2d'+st_opts[i]+c_opts[j]+p_opts[k]+'_vec(nd,eps,zk,nsource,source,'
subname = ''
if(j==0 or j==2):
subname=subname+'charge,'
if(j==1 or j==2):
subname=subname+'dipstr,dipvec,'
if(i==2 and k==0):
subname=subname+'pot,'
if(i==2 and k==1):
subname=subname+'pot,grad,'
if(i==2 and k==2):
subname=subname+'pot,grad,hess,'
if(i>0):
subname=subname+'ntarg,targ,'
if(i==0 and k==0):
subname=subname+'pot,ier)'
if(i==0 and k==1):
subname=subname+'pot,grad,ier)'
if(i==0 and k==2):
subname=subname+'pot,grad,hess,ier)'
if(i>=1 and k==0):
subname=subname+'pottarg,ier)'
if(i>=1 and k==1):
subname=subname+'pottarg,gradtarg,ier)'
if(i>=1 and k==2):
subname=subname+'pottarg,gradtarg,hesstarg,ier)'
str_ini = 'h'+st_opts[i][1::]+c_opts[j][1::]+p_opts[k][1::]
subname3 = 'hfmm2d'+st_opts[i]+c_opts[j]+p_opts[k]
f1.writelines('.. _'+str_ini+':\n\n')
f1.writelines(subname3+'\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n')
f1.writelines('- Evaluation points: '+st_opts2[i]+'\n')
f1.writelines('- Interaction kernel: '+c_opts2[j]+'\n')
f1.writelines('- Outputs requested: '+p_opts2[k]+'\n\n')
f1.writelines('-------------------------------------\n\n')
f1.writelines('.. code:: fortran\n\n')
f1.writelines(sp1+subname0+subname+'\n\n')
f1.writelines(intro+pgstr[k]+'\n'+intro2)
f1.writelines(" "+eq_cjs[j]+"\n\n"+stflag[i]+" "+intro3+intro3_cont+"\n\n")
f1.writelines(inp_args+"\n\n")
f1.writelines(sp1+eps_txt[0]+"\n"+sp2+eps_txt[1]+"\n")
f1.writelines(sp1+zk_txt[0]+"\n"+sp2+zk_txt[1]+"\n")
f1.writelines(sp1+ns_txt[0]+"\n"+sp2+ns_txt[1]+"\n")
f1.writelines(sp1+src_txt[0]+"\n"+sp2+src_txt[1]+"\n")
if(j==0):
f1.writelines(sp1+charge_txt[0]+"\n"+sp2+charge_txt[1]+"\n")
if(j==1):
f1.writelines(sp1+dipole_txt[0]+"\n"+sp2+dipole_txt[1]+"\n")
f1.writelines(sp1+dipvec_txt[0]+"\n"+sp2+dipvec_txt[1]+"\n")
if(j==2):
f1.writelines(sp1+charge_txt[0]+"\n"+sp2+charge_txt[1]+"\n")
f1.writelines(sp1+dipole_txt[0]+"\n"+sp2+dipole_txt[1]+"\n")
f1.writelines(sp1+dipvec_txt[0]+"\n"+sp2+dipvec_txt[1]+"\n")
if(i>0):
f1.writelines(sp1+nt_txt[0]+"\n"+sp2+nt_txt[1]+"\n")
f1.writelines(sp1+targ_txt[0]+"\n"+sp2+targ_txt[1]+"\n")
f1.writelines("\n\n")
f1.writelines(inp_returns+"\n\n")
if(i==0 or i==2):
f1.writelines(sp1+pot_txt[0]+"\n"+sp2+pot_txt[1]+"\n")
if(k>=1):
f1.writelines(sp1+grad_txt[0]+"\n"+sp2+grad_txt[1]+"\n")
if(k==2):
f1.writelines(sp1+hess_txt[0]+"\n"+sp2+hess_txt[1]+"\n")
if(i==1 or i==2):
f1.writelines(sp1+pottarg_txt[0]+"\n"+sp2+pottarg_txt[1]+"\n")
if(k>=1):
f1.writelines(sp1+gradtarg_txt[0]+"\n"+sp2+gradtarg_txt[1]+"\n")
if(k==2):
f1.writelines(sp1+hesstarg_txt[0]+"\n"+sp2+hesstarg_txt[1]+"\n")
f1.writelines(sp1+ier_txt[0]+"\n"+sp2+ier_txt[1]+" \n")
f1.writelines("\n\n--------------------------------\n\nVectorized version: \n\n")
f1.writelines('.. code:: fortran\n\n')
f1.writelines(sp1+subname1+subname+'\n\n')
f1.writelines(intro+pgstr[k]+'\n'+intro2)
f1.writelines(" "+eq_nd_cjs[j]+"\n\n"+stflag[i]+" "+intro3+intro3_cont+"\n\n")
f1.writelines(inp_args+"\n\n")
f1.writelines(sp1+nd_txt[0]+"\n"+sp2+nd_txt[1]+"\n")
if(j==0):
f1.writelines(sp1+charge_nd_txt[0]+"\n"+sp2+charge_nd_txt[1]+"\n")
if(j==1):
f1.writelines(sp1+dipole_nd_txt[0]+"\n"+sp2+dipole_nd_txt[1]+"\n")
f1.writelines(sp1+dipvec_nd_txt[0]+"\n"+sp2+dipvec_nd_txt[1]+"\n")
if(j==2):
f1.writelines(sp1+charge_nd_txt[0]+"\n"+sp2+charge_nd_txt[1]+"\n")
f1.writelines(sp1+dipole_nd_txt[0]+"\n"+sp2+dipole_nd_txt[1]+"\n")
f1.writelines(sp1+dipvec_nd_txt[0]+"\n"+sp2+dipvec_nd_txt[1]+"\n")
f1.writelines("\n\n")
f1.writelines(inp_returns+"\n\n")
if(i==0 or i==2):
f1.writelines(sp1+pot_nd_txt[0]+"\n"+sp2+pot_nd_txt[1]+"\n")
if(k>=1):
f1.writelines(sp1+grad_nd_txt[0]+"\n"+sp2+grad_nd_txt[1]+"\n")
if(k==2):
f1.writelines(sp1+hess_nd_txt[0]+"\n"+sp2+hess_nd_txt[1]+"\n")
if(i==1 or i==2):
f1.writelines(sp1+pottarg_nd_txt[0]+"\n"+sp2+pottarg_nd_txt[1]+"\n")
if(k>=1):
f1.writelines(sp1+gradtarg_nd_txt[0]+"\n"+sp2+gradtarg_nd_txt[1]+"\n")
if(k==2):
f1.writelines(sp1+hesstarg_nd_txt[0]+"\n"+sp2+hesstarg_nd_txt[1]+"\n")
f1.writelines(sp1+ier_txt[0]+"\n"+sp2+ier_txt[1]+" \n")
f1.writelines("\n\n.. container:: rttext\n\n `Back to Helmholtz FMM <fortran-c.html#helm>`__")
f1.writelines("\n\n.. container:: rttext\n\n `Back to top <fortran-c.html#fcexmp>`__\n\n\n")
f1.close()
######
# Write fortran comments for helmholtz
#
eq_start = "u(x) = "
eq_start2 = "\sum_{j=1}^{N} "
str1 = "c_{j} H_{0}^{(1)}(k\|x-x_{j}\|)"
str2 = "v_{j} d_{j} \cdot \\nabla \\left( \nc H_{0}^{(1)}(k\|x-x_{j}\|)\\right)"
str3 = str1+" - \nc "+str2
eq_cjs_fort = [eq_start+eq_start2+str1,eq_start+"-"+eq_start2+str2,eq_start+eq_start2+str3]
eq_start = "u_{\ell}(x) = "
eq_start2 = "\sum_{j=1}^{N} "
str1 = "c_{\ell,j} H_{0}^{(1)}(k\|x-x_{j}\|)"
str2 = "v_{\ell,j} d_{\ell,j} \cdot \\nabla \\left( \nc      H_{0}^{(1)}(k\|x-x_{j}\|)\\right)"
str3 = str1+" - \nc "+str2
eq_cjs_fort_nd = [eq_start+eq_start2+str1,eq_start+"-"+eq_start2+str2,eq_start+eq_start2+str3]
f1 = open('fortrandocs_helm2.raw','w')
f2 = open('fortrandocs_helm2_vec.raw','w')
f3 = open('fortrandocs_helm_header.raw','w')
f4 = open('fortrandocs_helm_header_vec.raw','w')
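# f1/f2 collect the full fortran comment blocks for the scalar and vectorized
# interfaces, while f3/f4 collect the short per-routine summary listings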
for i in range(3):
for j in range(3):
for k in range(3):
f1.writelines('c-------------------------------------\n')
f3.writelines('c -hfmm2d'+st_opts[i]+c_opts[j]+p_opts[k]+'\n')
f3.writelines('c - Evaluation points: '+st_opts2[i]+'\n')
f3.writelines('c - Interaction kernel: '+c_opts2[j]+'\n')
f3.writelines('c - Outputs requested: '+p_opts2[k]+'\n')
f3.writelines('c-------------------------------------\n')
f4.writelines('c -hfmm2d'+st_opts[i]+c_opts[j]+p_opts[k]+'_vec\n')
f4.writelines('c - Evaluation points: '+st_opts2[i]+'\n')
f4.writelines('c - Interaction kernel: '+c_opts2[j]+'\n')
f4.writelines('c - Outputs requested: '+p_opts2[k]+'\n')
f4.writelines('c-------------------------------------\n')
f1.writelines('c\n')
f1.writelines("c "+intro+pgstr[k]+'\n')
f1.writelines("c "+eq_cjs_fort[j]+"\nc\nc "+stflag[i]+"\nc "+intro3+"\nc "+intro3_cont+"\nc\n")
f1.writelines("c "+inp_args+"\nc\n")
f1.writelines("c "+sp1+eps_txt[0]+"\nc"+sp2+eps_txt[1]+"\n")
f1.writelines("c "+sp1+zk_txt[0]+"\nc"+sp2+zk_txt[1]+"\n")
f1.writelines("c "+sp1+ns_txt[0]+"\nc"+sp2+ns_txt[1]+"\n")
f1.writelines("c "+sp1+src_txt[0]+"\nc"+sp2+src_txt[1]+"\n")
if(j==0):
f1.writelines("c "+sp1+charge_txt[0]+"\nc"+sp2+charge_txt[1]+"\n")
if(j==1):
f1.writelines("c "+sp1+dipole_txt[0]+"\nc"+sp2+dipole_txt[1]+"\n")
f1.writelines("c "+sp1+dipvec_txt[0]+"\nc"+sp2+dipvec_txt[1]+"\n")
if(j==2):
f1.writelines("c "+sp1+charge_txt[0]+"\nc"+sp2+charge_txt[1]+"\n")
f1.writelines("c "+sp1+dipole_txt[0]+"\nc"+sp2+dipole_txt[1]+"\n")
f1.writelines("c "+sp1+dipvec_txt[0]+"\nc"+sp2+dipvec_txt[1]+"\n")
if(i>0):
f1.writelines("c "+sp1+nt_txt[0]+"\nc"+sp2+nt_txt[1]+"\n")
f1.writelines("c "+sp1+targ_txt[0]+"\nc"+sp2+targ_txt[1]+"\n")
f1.writelines("c\nc\n")
f1.writelines("c "+inp_returns+"\nc\n")
if(i==0 or i==2):
f1.writelines("c "+sp1+pot_txt[0]+"\nc"+sp2+pot_txt[1]+"\nc")
if(k>=1):
f1.writelines(" "+sp1+grad_txt[0]+"\nc"+sp2+grad_txt[1]+"\nc")
if(k==2):
f1.writelines(" "+sp1+hess_txt[0]+"\nc"+sp2+hess_txt[1]+"\nc")
if(i==1 or i==2):
f1.writelines("c "+sp1+pottarg_txt[0]+"\nc"+sp2+pottarg_txt[1]+"\nc")
if(k>=1):
f1.writelines(" "+sp1+gradtarg_txt[0]+"\nc"+sp2+gradtarg_txt[1]+"\nc")
if(k==2):
f1.writelines(" "+sp1+hesstarg_txt[0]+"\nc"+sp2+hesstarg_txt[1]+"\nc")
f1.writelines(" "+sp1+ier_txt[0]+"\nc"+sp2+ier_txt[1]+" \nc")
f1.writelines("\nc\nc--------------------------------\nc\n")
f2.writelines('c-------------------------------------\n')
f2.writelines('c\n')
f2.writelines("c "+intro+pgstr[k]+'\n')
f2.writelines("c "+eq_cjs_fort_nd[j]+"\nc\nc "+stflag[i]+"\nc "+intro3+"\nc "+intro3_cont+"\nc\n")
f2.writelines("c "+inp_args+"\nc\n")
f2.writelines("c "+sp1+nd_txt[0]+"\nc"+sp2+nd_txt[1]+"\n")
f2.writelines("c "+sp1+eps_txt[0]+"\nc"+sp2+eps_txt[1]+"\n")
f2.writelines("c "+sp1+zk_txt[0]+"\nc"+sp2+zk_txt[1]+"\n")
f2.writelines("c "+sp1+ns_txt[0]+"\nc"+sp2+ns_txt[1]+"\n")
f2.writelines("c "+sp1+src_txt[0]+"\nc"+sp2+src_txt[1]+"\n")
if(j==0):
f2.writelines("c "+sp1+charge_nd_txt[0]+"\nc"+sp2+charge_nd_txt[1]+"\n")
if(j==1):
f2.writelines("c "+sp1+dipole_nd_txt[0]+"\nc"+sp2+dipole_nd_txt[1]+"\n")
f2.writelines("c "+sp1+dipvec_nd_txt[0]+"\nc"+sp2+dipvec_nd_txt[1]+"\n")
if(j==2):
f2.writelines("c "+sp1+charge_nd_txt[0]+"\nc"+sp2+charge_nd_txt[1]+"\n")
f2.writelines("c "+sp1+dipole_nd_txt[0]+"\nc"+sp2+dipole_nd_txt[1]+"\n")
f2.writelines("c "+sp1+dipvec_nd_txt[0]+"\nc"+sp2+dipvec_nd_txt[1]+"\n")
if(i>0):
f2.writelines("c "+sp1+nt_txt[0]+"\nc"+sp2+nt_txt[1]+"\n")
f2.writelines("c "+sp1+targ_txt[0]+"\nc"+sp2+targ_txt[1]+"\n")
f2.writelines("c\nc\n")
f2.writelines("c "+inp_returns+"\nc\n")
if(i==0 or i==2):
f2.writelines("c "+sp1+pot_nd_txt[0]+"\nc"+sp2+pot_nd_txt[1]+"\nc")
if(k>=1):
f2.writelines(" "+sp1+grad_nd_txt[0]+"\nc"+sp2+grad_nd_txt[1]+"\nc")
if(k==2):
f2.writelines(" "+sp1+hess_nd_txt[0]+"\nc"+sp2+hess_nd_txt[1]+"\nc")
if(i==1 or i==2):
f2.writelines("c "+sp1+pottarg_nd_txt[0]+"\nc"+sp2+pottarg_nd_txt[1]+"\nc")
if(k>=1):
f2.writelines(" "+sp1+gradtarg_nd_txt[0]+"\nc"+sp2+gradtarg_nd_txt[1]+"\nc")
if(k==2):
f2.writelines(" "+sp1+hesstarg_nd_txt[0]+"\nc"+sp2+hesstarg_nd_txt[1]+"\nc")
f2.writelines(" "+sp1+ier_txt[0]+"\n"+sp2+ier_txt[1]+" \nc")
f2.writelines("\nc\nc--------------------------------\nc\n")
f1.close()
f2.close()
f3.close()
f4.close()
| 16,282 | 52.739274 | 140 | py |
fmm2d | fmm2d-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# fmm2d documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 1 16:19:13 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx.ext.autodoc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('sphinxext'))
sys.path.insert(0,os.path.abspath('../../texext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.6' # dylan, but I only have 1.3.6
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
# 'sphinx.ext.autosectionlabel', # needs v 1.4; can :ref: other files w/o this; removed 7/29/18
'texext',
# 'sphinxcontrib.bibtex',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fmm2d'
copyright = u'2018-2019 The Simons Foundation, Inc. - All Rights Reserved'
author = u"Zydrunas Gimbutas, Leslie Greengard, Mike O'Neil, Manas Rachh, and Vladimir Rokhlin"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'classic'
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'collapsiblesidebar': 'true', 'sidebarwidth': '270px'}
#html_theme_options = {"codebgcolor":"rgb(240,240,240)"}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'fmm2ddoc'
# To fix location of equation numbering. Barnett tried 6/19/18
# see https://samnicholls.net/2016/06/15/how-to-sphinx-readthedocs/
def setup(app):
app.add_css_file('theme_overrides.css')
app.add_css_file('custom.css')
# it doesn't fail if this file not found in _static :(
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fmm2d.tex', u'fmm2d Documentation',
u"Zydrunas Gimbutas \\and Leslie Greengard \\and Mike O'Neil \\and Manas Rachh \\and Vladimir Rokhlin", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fmm2d', u'fmm2d Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fmm2d', u'fmm2d Documentation',
author, 'fmm2d', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 10,222 | 32.299674 | 119 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/ssim.py | import torch
import torch.nn.functional as F
from math import exp
import numpy as np
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)]).double()
# gauss.requires_grad = True
return gauss/gauss.sum()
def create_window(window_size):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).double().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(1, 1, window_size, window_size).contiguous()
return window.cuda()
def ssim(img1, img2, window_size=11, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channels, height, width) = img1.size()
real_size = min(window_size, height, width)
window = create_window(real_size)
ret_channels = []
cs_channels = []
for ch in range(channels): # loop over channels, then average
img1_ch = torch.unsqueeze(img1[:, ch, :, :], 1)
img2_ch = torch.unsqueeze(img2[:, ch, :, :], 1)
mu1 = F.conv2d(img1_ch, window, padding=padd)
mu2 = F.conv2d(img2_ch, window, padding=padd)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1_ch * img1_ch, window, padding=padd) - mu1_sq
sigma2_sq = F.conv2d(img2_ch * img2_ch, window, padding=padd) - mu2_sq
sigma12 = F.conv2d(img1_ch * img2_ch, window, padding=padd) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
cs_channels.append(cs)
ret_channels.append(ret)
cs_mean = torch.mean(torch.stack(cs_channels), dim=-1)
ret_mean = torch.mean(torch.stack(ret_channels), dim=-1)
if full:
return ret_mean, cs_mean
return ret_mean
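# Illustrative usage sketch (not from the original file; the helper name is
# hypothetical). It shows the calling convention of ssim(): NCHW tensors in
# double precision (as eval.py uses it) and a CUDA device, since
# create_window() above places the Gaussian window on the GPU.
def _ssim_usage_example():
    a = torch.rand(2, 3, 64, 64).double().cuda()   # two batches of "images" in [0, 1]
    b = torch.rand(2, 3, 64, 64).double().cuda()
    # val_range=1 skips the max/min heuristic above and fixes L = 1
    return ssim(a, b, window_size=11, val_range=1)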
def msssim(img1, img2, window_size=11, size_average=True, val_range=None):
device = img1.device
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
levels = weights.size()[0]
mssim = []
mcs = []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
mssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
# # Normalize (to avoid NaNs)
#
# mssim = (mssim + 1) / 2
# mcs = (mcs + 1) / 2
pow1 = mcs ** weights
pow2 = mssim ** weights
# output = torch.prod(pow1 * pow2)
# From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
output = torch.prod(pow1[:-1] * pow2[-1])
return output | 3,447 | 30.925926 | 118 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/dataloader.py | from torch.utils.data.dataset import Dataset
import torchvision.transforms as transforms
from os.path import join
from PIL import Image
class CelebDataSet(Dataset):
"""CelebA dataset
Parameters:
        data_path (str)        -- CelebA dataset main directory (including '/Img' and '/Anno') path
state (str) -- dataset phase 'train' | 'val' | 'test'
    Center crop the aligned CelebA dataset to 178x178 to include the face area and then downsample to 128x128 (Step3).
    In addition, for progressive training, the target image for each step is resized to 32x32 (Step1) and 64x64 (Step2).
"""
def __init__(self, data_path = './dataset/', state = 'train', data_augmentation=None):
self.main_path = data_path
self.state = state
self.data_augmentation = data_augmentation
self.img_path = join(self.main_path, 'CelebA/Img/img_align_celeba')
self.eval_partition_path = join(self.main_path, 'Anno/list_eval_partition.txt')
train_img_list = []
val_img_list = []
test_img_list = []
f = open(self.eval_partition_path, mode='r')
while True:
line = f.readline().split()
if not line: break
if line[1] == '0':
train_img_list.append(line)
elif line[1] =='1':
val_img_list.append(line)
else:
test_img_list.append(line)
f.close()
if state=='train':
train_img_list.sort()
self.image_list = train_img_list
elif state=='val':
val_img_list.sort()
self.image_list = val_img_list
else:
test_img_list.sort()
self.image_list = test_img_list
if state=='train' and self.data_augmentation:
self.pre_process = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.CenterCrop((178, 178)),
transforms.Resize((128, 128)),
transforms.RandomRotation(20, resample=Image.BILINEAR),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
])
else:
self.pre_process = transforms.Compose([
transforms.CenterCrop((178, 178)),
transforms.Resize((128,128)),
])
self.totensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self._64x64_down_sampling = transforms.Resize((64, 64))
self._32x32_down_sampling = transforms.Resize((32, 32))
self._16x16_down_sampling = transforms.Resize((16,16))
def __getitem__(self, index):
image_path = join(self.img_path, self.image_list[index][0])
target_image = Image.open(image_path).convert('RGB')
target_image = self.pre_process(target_image)
x4_target_image = self._64x64_down_sampling(target_image)
x2_target_image = self._32x32_down_sampling(x4_target_image)
input_image = self._16x16_down_sampling(x2_target_image)
x2_target_image = self.totensor(x2_target_image)
x4_target_image = self.totensor(x4_target_image)
target_image = self.totensor(target_image)
input_image = self.totensor(input_image)
return x2_target_image, x4_target_image, target_image, input_image
def __len__(self):
return len(self.image_list)
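# Usage sketch (not from the original repo; the helper name is hypothetical):
# wiring CelebDataSet into a torch DataLoader. It assumes the CelebA images and
# the evaluation-partition file exist under ./dataset/ as described in the
# class docstring above.
def _celeb_loader_example(data_path='./dataset/'):
    from torch.utils.data import DataLoader
    dataset = CelebDataSet(data_path=data_path, state='test')
    loader = DataLoader(dataset, batch_size=16, shuffle=False)
    # each batch: 32x32 target, 64x64 target, 128x128 target, 16x16 network input
    x2_target, x4_target, target, lr_input = next(iter(loader))
    return lr_input.shape  # torch.Size([16, 3, 16, 16])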
| 3,841 | 39.87234 | 125 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/model.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from math import sqrt
"""Original EqualConv2d code is at
https://github.com/rosinality/style-based-gan-pytorch/blob/master/model.py
"""
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name+'_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2/fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super(EqualConv2d, self).__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
class ResBlock(nn.Module):
def __init__(self, dim, kernel_size=3, padding=1, stride=1):
super(ResBlock, self).__init__()
self.conv = nn.Sequential(
            EqualConv2d(dim, dim, kernel_size=kernel_size, padding=padding, stride=stride),
nn.BatchNorm2d(dim),
nn.ReLU(),
            EqualConv2d(dim, dim, kernel_size=kernel_size, padding=padding, stride=stride),
nn.BatchNorm2d(dim),
nn.ReLU()
)
def forward(self, x):
return self.conv(x) + x
class ConvBlock(nn.Module):
def __init__(self, in_plane, out_plane, kernel_size=3, padding=1, stride=1):
super(ConvBlock, self).__init__()
self.conv = nn.Sequential(
            EqualConv2d(in_plane, out_plane, kernel_size=kernel_size, padding=padding, stride=stride),
nn.LeakyReLU(0.2),
            EqualConv2d(out_plane, out_plane, kernel_size=kernel_size, padding=padding, stride=stride),
nn.LeakyReLU(0.2))
def forward(self, x):
return self.conv(x)
class Generator(nn.Module):
def __init__(self, ):
super(Generator, self).__init__()
step1 = [nn.Conv2d(3, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU()]
step1 += [ResBlock(dim=512, kernel_size=3, stride=1, padding=1),
nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.ReLU()
]
step2 = [ResBlock(dim=256, kernel_size=3, stride=1, padding=1),
nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(128),
nn.ReLU()
]
step3 = [ResBlock(dim=128, kernel_size=3, stride=1, padding=1),
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.ReLU()]
self.to_rgb = nn.ModuleList([nn.Conv2d(256, 3, kernel_size=1, stride=1, padding=0),
nn.Conv2d(128, 3, kernel_size=1, stride=1, padding=0),
nn.Conv2d(64, 3, kernel_size=1, stride=1, padding=0)])
self.step1 = nn.Sequential(*step1)
self.step2 = nn.Sequential(*step2)
self.step3 = nn.Sequential(*step3)
#self.model = nn.Sequential(self.step1, self.step2, self.step3)
def forward(self, input, step=1, alpha=-1):
"""Progressive generator forward"""
if step == 1:
out = self.step1(input)
out = self.to_rgb[step-1](out)
elif step == 2:
if 0 <= alpha < 1:
prev = self.step1(input)
skip_rgb = F.interpolate(self.to_rgb[step-2](prev), scale_factor=2, mode='nearest')
out = self.step2(prev)
out = (1-alpha)*skip_rgb + alpha*self.to_rgb[step-1](out)
else:
out = self.step2(self.step1(input))
out = self.to_rgb[step-1](out)
else:
if 0 <= alpha < 1:
prev = self.step2(self.step1(input))
skip_rgb = F.interpolate(self.to_rgb[step-2](prev), scale_factor=2, mode='nearest')
out = self.step3(prev)
out = (1-alpha)*skip_rgb + alpha*self.to_rgb[step-1](out)
else:
out = self.step3(self.step2(self.step1(input)))
out = self.to_rgb[step-1](out)
return out
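# Sketch of the progressive calling convention (not from the original code;
# the helper name is hypothetical): `step` picks the output resolution
# (1 -> 32x32, 2 -> 64x64, 3 -> 128x128) and `alpha` in [0, 1) fades in the
# newly added block; any other alpha uses the full path for that step.
def _generator_example():
    g = Generator()
    lr = torch.randn(4, 3, 16, 16)            # 16x16 low-resolution input
    sr32 = g(lr, step=1)                      # -> [4, 3, 32, 32]
    sr64 = g(lr, step=2, alpha=0.5)           # -> [4, 3, 64, 64], blended with upsampled step-1 RGB
    sr128 = g(lr, step=3, alpha=1.0)          # -> [4, 3, 128, 128]
    return sr32.shape, sr64.shape, sr128.shape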
class Discriminator(nn.Module):
"""Discriminator"""
def __init__(self,):
super(Discriminator, self).__init__()
self.from_rgb = nn.ModuleList([
nn.Conv2d(3, 256, kernel_size=1, stride=1, padding=0),
nn.Conv2d(3, 128, kernel_size=1, stride=1, padding=0),
nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0)])
step1 = [ConvBlock(256, 512, kernel_size=3, padding=1, stride=1), nn.AvgPool2d(kernel_size=2, stride=2)]
step2 = [ConvBlock(128, 256, kernel_size=3, padding=1, stride=1), nn.AvgPool2d(kernel_size=2, stride=2)]
step3 = [ConvBlock(64, 128, kernel_size=3, padding=1, stride=1), nn.AvgPool2d(kernel_size=2, stride=2)]
self.step1 = nn.Sequential(*step1)
self.step2 = nn.Sequential(*step2)
self.step3 = nn.Sequential(*step3)
#for last layer
self.equal_conv = EqualConv2d(513, 512, kernel_size=3, stride=1, padding=1)
self.linear = nn.Linear(512, 2048)
self.linear2 = nn.Linear(2048, 1)
def forward(self, input, step=1, alpha=-1):
"""Progressive discriminator forward
        Each step's output (generator output) is mixed with the previous generator output
stacked from step1 to step3.
| step1 -----> step2 ------> step3 |
"""
if step == 1:#32x32
out = self.from_rgb[step-1](input)
out = self.step1(out)
if step ==2:#64x64
out = self.from_rgb[step-1](input)#128x64x64
out = self.step2(out) #256x32x32
if 0 <= alpha < 1:
skip_rgb = F.avg_pool2d(input, kernel_size=2,stride=2)#F.interpolate(input, size=(32, 32), mode='nearest') #3x32x32
skip_rgb = self.from_rgb[step-2](skip_rgb) #256x32x32
out = (1-alpha)*skip_rgb + alpha * out
out = self.step1(out) #256x16x16
elif step ==3:#128x128
out = self.from_rgb[step-1](input) #64x128x128
out = self.step3(out) #128x64x64
if 0 <= alpha < 1:
skip_rgb = F.avg_pool2d(input, kernel_size=2,stride=2) #F.interpolate(input, size=(64, 64), mode='nearest') #3x64x64
skip_rgb = self.from_rgb[step-2](skip_rgb) #128x64x64
out = (1-alpha)*skip_rgb + alpha * out #128x64x64
out = self.step2(out) #256x32x32
out = self.step1(out) #512x16x16
mean_std = input.std(0).mean()
mean_std = mean_std.expand(input.size(0), 1, 16, 16)
out = torch.cat([out, mean_std], dim=1)
out = self.equal_conv(out)
out = F.avg_pool2d(out, 16, stride=1)
out = out.view(input.size(0), -1)
out = self.linear(out)
out = self.linear2(out)
out = out.squeeze_(dim=1)
return out
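# Companion sketch (not from the original code; the helper name is
# hypothetical): the discriminator mirrors the generator's schedule, so a
# step-3 call expects 128x128 inputs and returns one realness score per sample.
def _discriminator_example():
    d = Discriminator()
    fake = torch.randn(4, 3, 128, 128)
    scores = d(fake, step=3, alpha=1.0)       # -> shape [4]
    return scores.shape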
| 7,795 | 35.773585 | 132 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/demo.py | import torch
import argparse
from model import Generator
from PIL import Image
import torchvision.transforms as transforms
from torchvision import utils
if __name__ == '__main__':
parser = argparse.ArgumentParser('Demo of Progressive Face Super-Resolution')
parser.add_argument('--image-path', type=str)
parser.add_argument('--checkpoint-path', default='./checkpoints/generator_checkpoint_singleGPU.ckpt')
parser.add_argument('--output-path', type=str)
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
generator = Generator().to(device)
generator.eval()
g_checkpoint = torch.load(args.checkpoint_path)
generator.load_state_dict(g_checkpoint['model_state_dict'], strict=False)
step = g_checkpoint['step']
alpha = g_checkpoint['alpha']
iteration = g_checkpoint['iteration']
print('pre-trained model is loaded step:%d, alpha:%d iteration:%d'%(step, alpha, iteration))
input_image = Image.open(args.image_path).convert('RGB')
#for aligned CelebA evaluation images
#input_image = transforms.CenterCrop((178, 178))(input_image)
_16x16_down_sampling = transforms.Resize((16,16))
_64x64_down_sampling = transforms.Resize((64, 64))
_32x32_down_sampling = transforms.Resize((32, 32))
totensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
        #Note: Our network is trained on progressively downsampled images.
transformed_image = _16x16_down_sampling(_32x32_down_sampling(_64x64_down_sampling(input_image)))
transformed_image = totensor(transformed_image).unsqueeze(0).to(device)
output_image = generator(transformed_image, step, alpha)
utils.save_image(0.5*output_image+0.5, args.output_path)
| 2,016 | 41.020833 | 105 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/eval.py | import torch
from torch import optim, nn
import argparse
from dataloader import CelebDataSet
from torch.utils.data import DataLoader
from model import Generator
import os
from torch.autograd import Variable, grad
import sys
from torchvision import utils
from math import log10
from ssim import ssim, msssim
def test(dataloader, generator, MSE_Loss, step, alpha):
avg_psnr = 0
avg_ssim = 0
avg_msssim = 0
for i, (x2_target_image, x4_target_image, target_image, input_image) in enumerate(dataloader):
input_image = input_image.to(device)
if step==1:
target_image = x2_target_image.to(device)
elif step==2:
target_image = x4_target_image.to(device)
else:
target_image = target_image.to(device)
input_image = input_image.to(device)
predicted_image = generator(input_image, step, alpha)
predicted_image = predicted_image.double()
target_image = target_image.double()
mse_loss = MSE_Loss(0.5*predicted_image+0.5, 0.5*target_image+0.5)
psnr = 10*log10(1./mse_loss.item())
avg_psnr += psnr
_ssim = ssim(0.5*predicted_image+0.5, 0.5*target_image+0.5)
avg_ssim += _ssim.item()
ms_ssim = msssim(0.5*predicted_image+0.5, 0.5*target_image+0.5)
avg_msssim += ms_ssim.item()
sys.stdout.write('\r [%d/%d] Test progress... PSNR: %6.4f'%(i, len(dataloader), psnr))
save_image = torch.cat([predicted_image, target_image], dim=0)
if args.local_rank==0:
utils.save_image(0.5*save_image+0.5, os.path.join(args.result_path, '%d_results.jpg'%i))
print('Test done, Average PSNR:%6.4f, Average SSIM:%6.4f, Average MS-SSIM:%6.4f '%(avg_psnr/len(dataloader),avg_ssim/len(dataloader), avg_msssim/len(dataloader)))
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Implementation of Progressive Face Super-Resolution Attention to Face Landmarks')
parser.add_argument('--batch-size', default=16, type=int)
parser.add_argument('--checkpoint-path', default='./checkpoints/', type=str)
parser.add_argument('--data-path', default='./dataset/', type=str)
parser.add_argument('--result-path', default='./result/', type=str)
parser.add_argument('--workers', default=4, type=int)
parser.add_argument('--local_rank', default=0, type=int, help='node rank for distributed training')
parser.add_argument('--distributed', action='store_true')
args = parser.parse_args()
if args.local_rank == 0:
if not os.path.exists(args.result_path):
os.mkdir(args.result_path)
print('===>make directory', args.result_path)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args.gpu = 0
args.world_size = 1
dataset = CelebDataSet(data_path=args.data_path, state='test')
if args.distributed:
import apex.parallel as parallel
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=train_sampler)
else:
dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
generator = Generator().to(device)
if args.distributed:
g_checkpoint = torch.load(args.checkpoint_path, map_location = lambda storage, loc: storage.cuda(args.local_rank))
generator = parallel.DistributedDataParallel(generator)
generator = parallel.convert_syncbn_model(generator)
else:
g_checkpoint = torch.load(args.checkpoint_path)
generator.load_state_dict(g_checkpoint['model_state_dict'], strict=False)
step = g_checkpoint['step']
alpha = g_checkpoint['alpha']
iteration = g_checkpoint['iteration']
print('pre-trained model is loaded step:%d, alpha:%d iteration:%d'%(step, alpha, iteration))
MSE_Loss = nn.MSELoss()
generator.eval()
test(dataloader, generator, MSE_Loss, step, alpha)
| 4,296 | 41.97 | 166 | py |
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/totalComprRatio_generate.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
import pandas as pd
import datetime
import multiprocessing
import tqdm
import bpcUtils
import dataCollect
import analysisTools
import referenceImpl
analyzer = analysisTools.Analyzer(quantMethod='fixed16', compressor=None)
def zeroRLE_default(x, wordwidth):
return referenceImpl.zeroRLE(x, maxZeroBurstLen=16, wordwidth=wordwidth)
def ZVC_default(x, wordwidth):
return referenceImpl.ZVC(x, wordwidth=wordwidth)
def BPC_default(x, wordwidth):
return bpcUtils.BPC(x, chunkSize=8, variant='baseline', wordwidth=wordwidth)
def BPCplus_default(x, wordwidth):
# bpcVariant in ['ours-02', 'baseline', 'ours-03']
return bpcUtils.BPCplus(x, chunkSize=8, maxRLEBurstLen=16,
variant='baseline', bpcVariant='ours-04', wordwidth=wordwidth)
comprMethodsByName = {'zero-RLE': zeroRLE_default,
'ZVC': ZVC_default,
'BPC': BPC_default,
'ours': BPCplus_default}
training=True
batchSize = 250
modelNames = ['alexnet', 'resnet34', 'squeezenet', 'vgg16', 'mobilenet2']#, 'alexnet-cust', 'mobilenetV2-cust']
for modelName in modelNames:
print('running on model: %s' % modelName)
model, loss_func = dataCollect.getModel(modelName)
outputsReLU, _, _, gradsReLU = dataCollect.getFMs(model, loss_func,
training=training, computeGrads=True,
numBatches=1, batchSize=batchSize)#250)
numLayers = len(outputsReLU)
print('done getting FMs, now compressing...')
def configsGen():
if training:
dataDescrOpts = ['outputs', 'gradients']
else:
dataDescrOpts = ['outputs']
for netName in [modelName]:
for dataDescr in dataDescrOpts:
if dataDescr == 'outputs':
quantMethods = ['fixed8', 'fixed12', 'fixed16', 'float16']
elif dataDescr == 'gradients':
quantMethods = ['fixed16', 'float16']
else:
assert(False)
for qm in quantMethods:
for comprName in comprMethodsByName.keys():
for layerIdx in range(numLayers):
for intraBatchIdx in range(batchSize):
yield netName, dataDescr, qm, comprName, layerIdx, intraBatchIdx
def do_work(config):
netName, dataDescr, qm, comprName, layerIdx, intraBatchIdx = config
if dataDescr == 'outputs':
dataTensor = outputsReLU
elif dataDescr == 'gradients':
dataTensor = gradsReLU
else:
assert(False)
return analyzer.getComprProps(dataTensor[layerIdx][intraBatchIdx],
quant=qm,
compressor=lambda x: comprMethodsByName[comprName](
x, wordwidth=bpcUtils.getWW(qm))
)
configs = list(configsGen())
pool = multiprocessing.Pool(processes=20)
comprPropsByConfig = [r for r in tqdm.tqdm(pool.imap(do_work, configs), total=len(configs))]
# comprPropsByConfig = [do_work(c) for c in configs] # for debugging...
pool.close()
df = pd.DataFrame.from_records(configs,
columns=['modelName', 'dataDescr', 'quantMethod',
'comprName', 'layerIdx', 'intraBatchIdx'])
df = df.join(pd.DataFrame.from_records(comprPropsByConfig,
columns=['comprRatio', 'comprSize', 'comprSizeBaseline']))
df.to_pickle('results/results-%s.pkl' % (datetime.datetime.now().strftime('%y%m%d-%H%M%S')))
| 3,879 | 41.173913 | 111 | py |
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/groupedBarPlot.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
def groupedBarPlot(data, groupNames, legend=None, xtickRot=None):
import matplotlib.pyplot as plt
barWidth = 1/(1+len(data[0]))
xpos = list(range(len(data)))
for s in range(len(data[0])):
plt.bar([x + s*barWidth for x in xpos],
[data[i][s] for i in range(len(data))], width=barWidth)
plt.xticks([x + (len(data[0])-1)*barWidth/2 for x in xpos], groupNames, rotation=xtickRot)
if legend is not None:
plt.legend(legend)
if __name__ == "__main__":
data = [[1,2,3],[6,5,7],[-1,-2,3],[-6,-7,-1]]
groupNames = ['small', 'medium', 'neutral', 'negative']
legend = ['a', 'b', 'max']
groupedBarPlot(data, groupNames, legend) | 737 | 40 | 92 | py |
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/referenceImpl.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
import math
import numpy as np
from bpcUtils import valuesToBinary
def CSC(values, maxDist=16, wordwidth=None):
"""CSC: encode each non-zero value as (rel. pos + value)"""
#only transfer non-zero values, but with incremental coordinate
if maxDist == None:
maxDist = len(values)-1
distBits = math.ceil(math.log2(maxDist))
zeroVal = np.zeros((1,), dtype=values.dtype)[0]
debug = False
vals = [v for v in values if v != 0]
idxs = [-1] + [i for i, v in enumerate(values) if v != 0]
idxsRel = [i-iPrev for i, iPrev in zip(idxs[1:], idxs[:-1])]
codedStream = ''
for ir, v in zip(idxsRel, vals):
while ir > maxDist:
ir -= maxDist
codedStream += bin(0)[2:].zfill(distBits)
if debug: codedStream += '|'
codedStream += valuesToBinary(zeroVal, wordwidth=wordwidth)
codedStream += bin(ir-1)[2:].zfill(distBits)
if debug: codedStream += '|'
codedStream += valuesToBinary(v, wordwidth=wordwidth)
if debug:
codedStream += '||'
return codedStream
def zeroRLE(values, maxZeroBurstLen=64, wordwidth=None):
"""zeroRLE: values with prefix, or different prefix and number of zeros"""
#only transfer non-zero values, but with incremental coordinate
if maxZeroBurstLen == None:
maxZeroBurstLen = len(values)-1
maxZeroBurstLenBits = math.ceil(math.log2(maxZeroBurstLen))
zeroVal = np.zeros((1,), dtype=values.dtype)[0]
debug = False
codedStream = ''
zeroCnt = 0
for v in values:
if v == 0 and zeroCnt < maxZeroBurstLen:
zeroCnt += 1
else:
if zeroCnt > 0:
#emit symbol
codedStream += '0' + bin(zeroCnt-1+1)[2:].zfill(maxZeroBurstLenBits)
if debug: codedStream += '|'
#encode/keep track of current symbol
if v == 0:
zeroCnt = 1
else:
codedStream += '1' + valuesToBinary(v, wordwidth=wordwidth)
if debug: codedStream += '|'
zeroCnt = 0
if zeroCnt > 0:
#emit last symbol
codedStream += '0' + bin(zeroCnt-1+1)[2:].zfill(maxZeroBurstLenBits)
if debug: codedStream += '|'
return codedStream
def ZVC(values, wordwidth=None, debug=False):
"""ZVC: zero value coding -- bit-vector for zero or not"""
symbols = ''.join(['0' if v == 0 else '1' + valuesToBinary(v, wordwidth=wordwidth) for v in values])
if debug:
return '|'.join(symbols)
else:
return ''.join(symbols)
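# Quick illustration (not from the original file; the helper name is
# hypothetical): every reference coder returns the compressed block as a string
# of '0'/'1' characters, so the compressed size in bits is simply the string length.
def _reference_coders_example():
    values = np.array([0, 0, 0, 5, 0, -3, 0, 0], dtype=np.int8)
    zvc_bits = len(ZVC(values, wordwidth=8))   # 8 flag bits + 2 * 8 payload bits = 24
    rle_bits = len(zeroRLE(values, maxZeroBurstLen=16, wordwidth=8))
    csc_bits = len(CSC(values, maxDist=16, wordwidth=8))
    return zvc_bits, rle_bits, csc_bits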
# DEFLATE
#https://docs.python.org/3.7/library/zlib.html
import zlib
def deflate(values, level=9, wordwidth=None):
valuesBitStream = valuesToBinary(values, wordwidth=wordwidth)
valuesByteArr = int(valuesBitStream, 2).to_bytes(len(valuesBitStream) // 8, byteorder='big')
byteArrCompr = zlib.compress(valuesByteArr, level=level)
# intArrCompr = int.from_bytes(byteArrCompr, byteorder='big')
finalBitStream = 'x'*len(byteArrCompr)*8 #valuesToBinary(values, wordwidth=32)
return finalBitStream
| 2,994 | 26.227273 | 102 | py |
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/reporting.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
import csv
import os
def readTable(tableName='totalComprRate-alexnet-after ReLU'):
filename = './results/%s.csv' % tableName
with open(filename, 'r') as csvfile:
cr = csv.reader(csvfile)
tbl = [row for row in cr]
return tbl
def parseTable(tbl):
comprMethods = tbl[0][1:]
quantMethods = [r[0] for r in tbl[1:]]
data = [[float(v) for v in r[1:]] for r in tbl[1:]]
return comprMethods, quantMethods, data
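# Round-trip sketch (not from the original file; the helper name and example
# data are hypothetical): writeTable() below and readTable()/parseTable() above
# share the same CSV layout -- a header row of compressor names plus one row
# per quantization method. Assumes the ./results/ directory exists.
def _reporting_example():
    writeTable(tableName='example',
               table=[[4.0, 2.5], [3.1, 2.0]],
               tableHeader=['quant', 'BPC', 'ours'],
               tableRowHeader=['fixed8', 'fixed16'])
    comprMethods, quantMethods, data = parseTable(readTable(tableName='example'))
    return comprMethods, quantMethods, data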
def writeTable(tableName='test', table=None, tableHeader=None, tableRowHeader=None):
if table == None:
print('No data to be written for table %s.' % tableName)
return
if tableRowHeader != None:
table = [[rowheader] + row for rowheader, row in zip(tableRowHeader,table)]
filename = './results/%s.csv' % tableName
with open(filename, 'w', newline='') as csvfile:
cw = csv.writer(csvfile)
if tableHeader != None:
cw.writerow(tableHeader)
cw.writerows(table) | 1,061 | 34.4 | 84 | py |
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/bpcUtils.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
import numpy as np
import math
def valuesToBinary(t, wordwidth=None):
"""Converts a numpy array using its datatype to a string of 1/0 values"""
arr = t.byteswap().tobytes()
wordwidthInput = t.dtype.itemsize*8# t[0].nbytes*8
if wordwidth is None:
wordwidth = wordwidthInput
longbinarr = bin(int.from_bytes(arr, byteorder='big'))[2:].zfill(len(arr)*8)
#shorten the subwords
binarr = ''.join([longbinarr[i:i+wordwidthInput][-wordwidth:]
for i in range(0, len(longbinarr), wordwidthInput)])
return binarr
###############################
# Ingredients for BPC: delta compression, delta-binarization, and bit-place XOR-ing
###############################
def deltaCompr(values):
base = values[0]
diffs = values[1:] - values[0:-1]
return base, diffs
def deltaBP(values, wordwidth=None):
base, diffs = deltaCompr(values)
binDiffs = [valuesToBinary(v, wordwidth=wordwidth+1) for v in diffs]
DBPs = [''.join(dbp) for dbp in zip(*binDiffs)]
return valuesToBinary(base, wordwidth=wordwidth), DBPs
def DBX(values, wordwidth=None):
base, DBPs = deltaBP(values, wordwidth=wordwidth)
def xor(a, b):
y = int(a, 2)^int(b, 2)
return bin(y)[2:].zfill(len(a))
it1, it2 = iter(DBPs[::-1]), iter(DBPs[::-1])
baseDBP = next(it1)
DBXs = [xor(dbp, dbpPrev) for dbpPrev, dbp in zip(it1, it2)][::-1]
return base, DBPs, DBXs
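# Toy sketch of the delta / bit-plane / XOR chain (not from the original file;
# the helper name is hypothetical): deltaBP() returns the first value (encoded
# with `wordwidth` bits) plus the bit-planes of the deltas, and DBX()
# additionally XORs neighbouring planes, which is what the symbol coder below
# operates on.
def _dbx_example():
    block = np.array([5, 7, 6, 4], dtype=np.int16)
    base, dbps = deltaBP(block, wordwidth=16)   # 3 deltas -> every bit-plane is a 3-char string
    _, _, dbxs = DBX(block, wordwidth=16)
    return base, len(dbps), len(dbxs)           # len(dbxs) == len(dbps) - 1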
###############################
# BPC IMPLEMENTATION
###############################
bpcPerfCounter_init = {'multi-all-0 DBX': 0, 'all-0 DBX': 0, 'all-1 DBX': 0,
'all-0 DBP': 0, '2-consec 1s': 0, 'single-1': 0,
'uncompressed': 0,
'numBlocks': 0}
bpcPerfCounter = dict(bpcPerfCounter_init)
_bpcPerfCounterEnabled = True
def bpcPerfCounterEnable(enable=True, reset=True):
    # enable/disable (and optionally reset) the module-level symbol counters
    global _bpcPerfCounterEnabled, bpcPerfCounter
    _bpcPerfCounterEnabled = enable
    if reset:
        import copy
        bpcPerfCounter = copy.deepcopy(bpcPerfCounter_init)
def bpcPerfCounterIncr(propname, increment=1):
    if _bpcPerfCounterEnabled:
        bpcPerfCounter[propname] += increment
def BPC_block(values, variant='baseline', wordwidth=None, prevValue=0):
bpcPerfCounterIncr('numBlocks')
if len(values) < 2:
return '' # this is wrong, but just a minor boundary effect...
#sanitize input
if variant == 'ours-02' or variant == 'ours-03':
        # prepend the previous block's first value; np.insert keeps the array dtype,
        # whereas `[prevValue] + values` on a numpy slice would add element-wise
        values = np.insert(np.asarray(values), 0, prevValue)
base, DBPs, DBXs = DBX(values, wordwidth=wordwidth)
baseDBP = DBPs[-1]
#flags
debug = False
chunkSize = len(values)
chunkSizeL2C = math.ceil(math.log2(chunkSize))
wordwidthL2C = math.ceil(math.log2(wordwidth))
#encode first value before differences
codedStream = ''
if variant == 'ours-02' or variant == 'ours-03':
pass
else:
codedStream += base
if debug:
codedStream += '!'
DBPsymbols = DBXs + [baseDBP]
# perform code mapping of individual DBP/DBX symbols
def codeMapper(dbx, dbp):
if dbx == '0'*len(dbx):
bpcPerfCounterIncr('all-0 DBX')
if variant == 'ours-03' or variant == 'ours-04':
return '01'
else:
return '001'
if dbx == '1'*len(dbx):
bpcPerfCounterIncr('all-1 DBX')
return '00000'
if dbp == '0'*len(dbp): # and implicitly dbx != 0
bpcPerfCounterIncr('all-0 DBP')
return '00001'
# find first 1-bit and count number of 1-bits
numOnes = 0
firstIdx = -1
for idx, b in enumerate(dbx):
if b == '1':
numOnes += 1
if numOnes == 1:
firstIdx = idx
if numOnes == 2 and dbx[firstIdx+1] == '1': #two consec 1s
bpcPerfCounterIncr('2-consec 1s')
return '00010' + bin(firstIdx)[2:].zfill(chunkSizeL2C)
if numOnes == 1:
bpcPerfCounterIncr('single-1')
return '00011' + bin(firstIdx)[2:].zfill(chunkSizeL2C)
bpcPerfCounterIncr('uncompressed')
return '1' + dbx
def reencoder(codedDBXsymbols):
#postprocessing to merge multiple consecutive DBX==all-0 cases (001 symbols) to (01+cnt)
codedSymbols = []
runLen = 0
for idx, symb in enumerate(codedDBXsymbols):
if variant == 'ours-04':
if symb == '01':
runLen += 1
if idx == len(codedDBXsymbols)-1:
if runLen == 1:
codedSymbols.append('001')
elif runLen > 1:
bpcPerfCounterIncr('multi-all-0 DBX')
bpcPerfCounterIncr('all-0 DBX', -runLen)
codedSymbols.append('01' + bin(runLen-2)[2:].zfill(wordwidthL2C))
else:
if runLen == 1:
codedSymbols.append('001')
elif runLen > 1:
bpcPerfCounterIncr('multi-all-0 DBX')
bpcPerfCounterIncr('all-0 DBX', -runLen)
codedSymbols.append('01' + bin(runLen-2)[2:].zfill(wordwidthL2C))#zfill(5) for max. 32 values per block
runLen = 0
codedSymbols.append(symb)
elif variant == 'ours-03':
if symb == '01':
runLen += 1
if idx == len(codedDBXsymbols)-1:
if runLen == 1:
codedSymbols.append('01')
elif runLen > 1:
bpcPerfCounterIncr('multi-all-0 DBX')
bpcPerfCounterIncr('all-0 DBX', -runLen)
codedSymbols.append('001' + bin(runLen-2)[2:].zfill(wordwidthL2C))
else:
if runLen == 1:
codedSymbols.append('01')
elif runLen > 1:
bpcPerfCounterIncr('multi-all-0 DBX')
bpcPerfCounterIncr('all-0 DBX', -runLen)
codedSymbols.append('001' + bin(runLen-2)[2:].zfill(wordwidthL2C))#zfill(5) for max. 32 values per block
runLen = 0
codedSymbols.append(symb)
else:
if symb == '001':
runLen += 1
if idx == len(codedDBXsymbols) - 1:
if runLen == 1:
codedSymbols.append('001')
elif runLen > 1:
bpcPerfCounterIncr('multi-all-0 DBX')
bpcPerfCounterIncr('all-0 DBX', -runLen)
codedSymbols.append('01' + bin(runLen-2)[2:].zfill(wordwidthL2C))
else:
if runLen == 1:
codedSymbols.append('001')
elif runLen > 1:
bpcPerfCounterIncr('multi-all-0 DBX')
bpcPerfCounterIncr('all-0 DBX', -runLen)
codedSymbols.append('01' + bin(runLen-2)[2:].zfill(wordwidthL2C))
runLen = 0
codedSymbols.append(symb)
if debug:
codedStream = '|'.join(codedSymbols)
else:
codedStream = ''.join(codedSymbols)
return codedStream
codedDBXsymbols = [codeMapper(dbx, dbp) for dbx, dbp in zip(DBXs, DBPs[:-1])]
codedDBXsymbols += [codeMapper(baseDBP, baseDBP)]
codedStream += reencoder(codedDBXsymbols)
return codedStream
def BPC(values, chunkSize=32, variant='baseline', wordwidth=None):
if chunkSize == None:
chunkSize = int(1e12)
if variant == 'ours-02' or variant == 'ours-03':
chunkSize -= 1
def blocks(l, n):
"""Yield successive n-sized blocks from l."""
for i in range(0, len(l), n):
yield l[i:i+n]
strm = ''
prevBlock = [0]
for w in blocks(values, chunkSize):
strm += BPC_block(w, variant=variant, wordwidth=wordwidth, prevValue=prevBlock[0])
prevBlock = w
return strm
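# Usage sketch (not from the original file; the helper name is hypothetical):
# BPC() consumes a flat numpy array and returns the coded bit-string, so the
# compression ratio follows directly from the string length.
def _bpc_example():
    stream = np.array([0, 0, 1, 1, 2, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0], dtype=np.int16)
    coded = BPC(stream, chunkSize=8, variant='baseline', wordwidth=16)
    return stream.size * 16 / len(coded)        # uncompressed bits / compressed bits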
##############################
# Implementation BPCplus
##############################
def BPCplus(values, variant='baseline', bpcVariant='ours-02',
maxRLEBurstLen=16, chunkSize=16, wordwidth=None, debug=False):
# return BPCplus_golden(values, variant=variant, bpcVariant=bpcVariant,
# maxRLEBurstLen=maxRLEBurstLen, chunkSize=chunkSize, wordwidth=wordwidth, debug=debug)
return BPCplus_fastLength(values, variant=variant, bpcVariant=bpcVariant,
maxRLEBurstLen=maxRLEBurstLen, chunkSize=chunkSize, wordwidth=wordwidth, debug=debug)
def BPCplus_golden(values, variant='baseline', bpcVariant='ours-02',
maxRLEBurstLen=16, chunkSize=16, wordwidth=None, debug=False):
if maxRLEBurstLen == None:
maxRLEBurstLen = len(values)-1
maxRLEBurstLenBits = math.ceil(math.log2(maxRLEBurstLen))
def RLE(values):
#only transfer non-zero values, but with incremental coordinate
zeroVal = np.zeros((1,), dtype=values.dtype)[0]
zeroBlockLens = []
zeroCnt = 0
for v in values:
if v == 0 and zeroCnt < maxRLEBurstLen:
zeroCnt += 1
else:
zeroBlockLens += [zeroCnt]
zeroCnt = 1 if v == 0 else 0 #encode/keep track of current symbol
RLEblocks = []
obl = 0
for zbl in zeroBlockLens:
emitOB = False
if zbl > 0:
if obl > 0:
emitOB = True
RLEblocks += [(0,zbl)]
else:
obl += 1
if obl >= chunkSize:
emitOB = True
if emitOB:
RLEblocks += [(1,obl)]
obl = 0
return RLEblocks
def encodeRLEsym(symbol, count):
if symbol == 0:
return '0' + bin(count-1)[2:].zfill(maxRLEBurstLenBits)
elif symbol == 1:
return '1' + bin(count-1)[2:].zfill(maxRLEBurstLenBits)
else:
assert(False)
def encodeRLEBursts(RLEblocks, fast=True):
encodedSyms = [encodeRLEsym(symbol, count) for symbol, count in RLEblocks]
if debug:
return '|'.join(encodedSyms)
else:
return ''.join(encodedSyms)
global nonzeroValues
nonzeroValues = np.array(list(filter(lambda x: x != 0, values)),
dtype=values.dtype)
if variant == 'ZVC':
RLEBitStream = ''.join(['0' if v == 0 else '1' for v in values])
else:
RLEblocks = RLE(values)
RLEBitStream = encodeRLEBursts(RLEblocks)
nonzeroBitStream = BPC(nonzeroValues, chunkSize=chunkSize,
variant=bpcVariant, wordwidth=wordwidth)
if debug:
codedStream = RLEBitStream + '||' + nonzeroBitStream
else:
codedStream = RLEBitStream + nonzeroBitStream
return codedStream
def BPCplus_fastLength(values, variant='baseline', bpcVariant='ours-02',
maxRLEBurstLen=16, chunkSize=16, wordwidth=None, debug=False):
if maxRLEBurstLen == None:
maxRLEBurstLen = len(values)-1
maxRLEBurstLenBits = math.ceil(math.log2(maxRLEBurstLen))
nonzeroValues = np.array(values[values!=0],
dtype=values.dtype)
#fast bit counting
assert(variant != 'ZVC' and debug == False)
def symbolize(binData):
t = binData
sel = t[:-1] * (1-t[1:])
u = t.cumsum()
lens = (u[1:])[sel>0]
lens = lens - np.concatenate(([0],lens[:-1]))
symbsLen = ((lens-1)//maxRLEBurstLen + 1).sum()*(maxRLEBurstLenBits+1)
return symbsLen
zeroNonzeroStreamLen = symbolize(values == 0) + (values != 0).sum()#symbolize(values != 0)
RLEBitStream = 'x'*zeroNonzeroStreamLen
nonzeroBitStream = BPC(nonzeroValues, chunkSize=chunkSize,
variant=bpcVariant, wordwidth=wordwidth)
if debug:
codedStream = RLEBitStream + '||' + nonzeroBitStream
else:
codedStream = RLEBitStream + nonzeroBitStream
return codedStream
################################
# quantization function
################################
def quantize(x, quant, safetyFactor=1.0, normalize=False):
"""Quantizes the tensor 'x' using the method 'quant'.
@param x: tensor with data to quantize.
@param quant: quantization method to use (float32/16, fixed32/16/8, ufixed32/16/8)
@param normalize: select whether to normalize the value range of x here. If not the data is assumed to be normalized already.
@returns: 1) the quantized tensor (not done in-place),
2) the number of bits used,
3) the numpy data type to use to encode the data
@comments: 1) fixed32 and ufixed32 are of limited use if input data is float32
2) quantization is done based on the (symmetric; except for ufixed)
maximum range covered by the data in the tensor
"""
if quant[:5] == 'float':
numBit = int(quant[5:])
if x is not None:
x = x.clone() # perform no quantization; comes with dtype conversion
if numBit == 32:
dtype = np.float32
elif numBit == 16:
dtype = np.float16
else:
assert(False)
elif quant[:5] == 'fixed':
numBit = int(quant[5:])
if numBit > 16:
assert(numBit <= 32)
dtype = np.int32
elif numBit > 8:
dtype = np.int16
else:
dtype = np.int8
if x is not None:
if normalize:
x = x.div(x.abs().max().item()/safetyFactor) # now op in [-1.0, 1.0]
x = x.mul(2**(numBit-1)-1) # now quantized to use full range
elif quant[:6] == 'ufixed':
numBit = int(quant[6:])
if numBit > 16:
assert(numBit <= 32)
dtype = np.uint32
elif numBit > 8:
dtype = np.uint16
else:
dtype = np.uint8
        if x is not None:
            assert(x.ge(0).all().item())
            if normalize:
                x = x.div(x.max().item()/safetyFactor) # now op in [0, 1.0]
            x = x.mul(2**(numBit)-1) # now quantized to use full range
else:
assert(False)
return x, numBit, dtype
getWW = lambda qm: quantize(None, quant=qm)[1]
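# Hedged example (not from the original file; the helper name is hypothetical):
# quantize() maps a torch tensor onto the integer grid implied by the chosen
# method and reports the word width / numpy dtype the bit-stream coders above
# expect. The input is assumed to be pre-normalized to [-1, 1] (see
# dataCollect.getFMs), which is why normalize=False is the default.
def _quantize_example():
    import torch
    x = torch.rand(4, 8)                           # already within [-1, 1]
    xq, num_bit, np_dtype = quantize(x, quant='fixed8')
    # num_bit == 8, np_dtype is np.int8; xq has been scaled by 2**7 - 1
    return xq.view(-1).numpy().astype(np_dtype), num_bit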
| 13,302 | 31.446341 | 130 | py |
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/analysisTools.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
import functools
import numpy as np
from bpcUtils import quantize
class Analyzer:
def __init__(self, quantMethod, compressor):
""" quantMethod: provides the default quantization method. Can e.g. in ['float32', 'float16', 'fixed16', 'fixed12', 'fixed8', ...]
compressor: default compressor to use. A function taking a tensor and returning a string of bits.
"""
self.quantMethod_default = quantMethod
self.compressor_default = compressor
@functools.lru_cache(maxsize=8*200*100)
def getComprProps(self, t, quant=None, compressor=None):
if quant == None:
quant = self.quantMethod_default
if compressor == None:
compressor = self.compressor_default
t, numBit, dtype = quantize(t, quant=quant)
valueStream = t.view(-1).numpy().astype(dtype)
codeStream = compressor(valueStream)
if len(codeStream) > 0:
comprRatio = len(valueStream)*numBit/len(codeStream) #strmLen
else:
comprRatio = None
comprSize = len(codeStream) #strmLen
comprSizeBaseline = len(valueStream)*numBit
return comprRatio, comprSize, comprSizeBaseline
def getComprRatio(self, t, quant=None, compressor=None):
if quant == None:
quant = self.quantMethod_default
if compressor == None:
compressor = self.compressor_default
comprRatio, _, _ = self.getComprProps(t, quant=quant, compressor=compressor)
return comprRatio
def getSparsity(self, t, quant=None):
"""get the share of 0-valued entries"""
if quant == None:
quant = self.quantMethod_default
return t.contiguous().view(-1).eq(0).long().sum().item()/t.numel()
def getTotalComprProps(self, outputs, quant=None, compressor=None):
comprProps = [self.getComprProps(outp, quant=quant, compressor=compressor)
for outp in outputs]
totalLen = np.array([l for _, l, _ in comprProps]).sum()
uncomprLen = np.array([l for _, _, l in comprProps]).sum()
return uncomprLen/totalLen, totalLen, uncomprLen | 2,209 | 35.833333 | 138 | py |
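# Usage sketch (not from the original file; the helper name is hypothetical):
# an Analyzer bundles a default quantizer with a default compressor; any of the
# bit-string coders from bpcUtils or referenceImpl fits the interface. The
# feature-map tensor is assumed to be pre-normalized to [-1, 1].
def _analyzer_example():
    import torch
    from bpcUtils import BPC
    analyzer = Analyzer(quantMethod='fixed16',
                        compressor=lambda v: BPC(v, chunkSize=8, variant='baseline', wordwidth=16))
    fmap = torch.rand(8, 8)
    return analyzer.getComprRatio(fmap)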
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/dataCollect.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
import torch
import numpy as np
import tensorboard
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import os
import glob
import csv
import sys
sys.path.append('./quantLab')
def getModel(modelName, epoch=None, returnPath=False):
import torchvision as tv
import quantlab.ImageNet.topology as topo
loss_func = torch.nn.CrossEntropyLoss()
model = None
if modelName == 'alexnet':
model = tv.models.alexnet(pretrained=True)
elif modelName == 'squeezenet':
model = tv.models.squeezenet1_1(pretrained=True)
elif modelName == 'resnet34':
model = tv.models.resnet34(pretrained=True)
elif modelName == 'vgg16':
model = tv.models.vgg16_bn(pretrained=True)
elif modelName == 'mobilenet2':
model = tv.models.mobilenet_v2(pretrained=True)
elif modelName == 'alexnet-cust':
path = './quantLab/ImageNet/log/exp00/'
model = topo.AlexNetBaseline(capacity=1)
if epoch == None: epoch = 54
tmp = torch.load(path + '/save/epoch%04d.ckpt' % epoch, map_location=torch.device('cpu'))
model.load_state_dict(tmp['net'])
elif modelName == 'mobilenetV2-cust':
path = './quantLab/ImageNet/log/exp05/'
model = topo.MobileNetV2Baseline(capacity=1, expansion=6)
if epoch == None: epoch = 200
tmp = torch.load(path + '/save/epoch%04d.ckpt' % epoch, map_location=torch.device('cpu'))
model.load_state_dict(tmp['net'])
assert(model != None)
if returnPath:
return model, loss_func, path
else:
return model, loss_func
def getTensorBoardData(path, traceName):
"""Reads values form Tensorboard log files."""
if not os.path.isfile(path):
pathOpts = glob.glob(path + '/events.out.tfevents.*')
assert(len(pathOpts) == 1)
path = pathOpts[0]
event_acc = EventAccumulator(path)
event_acc.Reload()
# Show all tags in the log file: print(event_acc.Tags())
trace = event_acc.Scalars(traceName)
values = [v.value for v in trace]
steps = [v.step for v in trace]
return steps, values
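# Minimal sketch (not from the original file; the helper name is hypothetical):
# pull one scalar trace out of a TensorBoard event file. The log directory and
# the tag name below are placeholders and must match what was actually logged.
def _tensorboard_example(logdir='path/to/logdir', tag='loss'):
    steps, values = getTensorBoardData(logdir, tag)
    return list(zip(steps, values))[:5]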
def getFMs(model, loss_func, training=True, numBatches=1, batchSize=10, computeGrads=False, safetyFactor=0.75):
# CREATE DATASET LOADERS
import quantLab.quantlab.ImageNet.preprocess as pp
datasetTrain, datasetVal, _ = pp.load_datasets('./ilsvrc12/', augment=False)
if training:
dataset = datasetTrain
model.train()
else:
dataset = datasetVal
model.eval()
dataLoader = torch.utils.data.DataLoader(dataset, batch_size=batchSize, shuffle=False)
# SELECT MODULES
msReLU = list(filter(lambda m: type(m) == torch.nn.modules.ReLU or type(m) == torch.nn.modules.ReLU6, model.modules()))
msConv = list(filter(lambda m: type(m) == torch.nn.modules.Conv2d, model.modules()))
msBN = list(filter(lambda m: type(m) == torch.nn.modules.BatchNorm2d, model.modules()))
#register hooks to get intermediate outputs:
def setupFwdHooks(modules):
outputs = []
def hook(module, input, output):
outputs.append(output.detach().contiguous().clone())
for i, m in enumerate(modules):
m.register_forward_hook(hook)
return outputs
#register hooks to get gradient maps:
def setupGradHooks(modules):
grads = []
def hook(module, gradInput, gradOutput):
assert(len(gradInput) == 1)
grads.insert(0, gradInput[0].contiguous().clone())
for i, m in enumerate(modules):
m.register_backward_hook(hook)
return grads
outputsReLU = setupFwdHooks(msReLU)
outputsConv = setupFwdHooks(msConv)
outputsBN = setupFwdHooks(msBN)
gradsReLU = setupGradHooks(msReLU)
# PASS IMAGES THROUGH NETWORK
outputSetsMaxMulti, gradSetsMaxMulti = [], []
outputSets, gradSets = [outputsReLU, outputsConv, outputsBN], [gradsReLU]
dataIterator = iter(dataLoader)
for _ in range(numBatches):
for outputs in outputSets:
outputs.clear()
for grads in gradSets:
grads.clear()
(image, target) = next(dataIterator)
if training:
model.train()
outp = model(image)
if computeGrads:
loss = loss_func(outp, target)
loss.backward()
else:
model.eval()
outp = model(image)
tmp = [[outp.max().item() for outp in outputs]
for outputs in outputSets]
outputSetsMaxMulti.append(tmp)
if computeGrads:
tmp = [[grad.max().item() for grad in grads]
for grads in gradSets]
gradSetsMaxMulti.append(tmp)
outputSetsMax = [np.array([om2[i] for om2 in outputSetsMaxMulti]).max(axis=0) for i in range(len(outputSets))]
if computeGrads:
gradSetsMax = [np.array([om2[i] for om2 in gradSetsMaxMulti]).max(axis=0) for i in range(len(gradSets))]
# NORMALIZE
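    # Scale every captured tensor so that its largest value observed across the
    # evaluated batches maps to `safetyFactor` (presumably to keep some headroom
    # below 1.0 before quantizing the feature maps).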
for outputs, outputsMax in zip(outputSets, outputSetsMax):
for op, opmax in zip(outputs,outputsMax):
op.mul_(safetyFactor/opmax)
if computeGrads:
for grads, gradsMax in zip(gradSets, gradSetsMax):
for op, opmax in zip(grads,gradsMax):
op.mul_(safetyFactor/opmax)
else:
gradsReLU = []
return outputsReLU, outputsConv, outputsBN, gradsReLU | 5,473 | 33.64557 | 123 | py |
l2hmc | l2hmc-master/baseline_vae.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VAE baseline following Kingma et al. 2013
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time, sys, string
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from utils.func_utils import accept, jacobian, autocovariance, get_log_likelihood, get_data, binarize, normal_kl
from utils.distributions import Gaussian, GMM, GaussianFunnel, gen_ring
from utils.layers import Linear, Parallel, Sequential, Zip, ScaleTanh
from utils.dynamics import Dynamics
from tensorflow.examples.tutorials.mnist import input_data
def get_data():
mnist = input_data.read_data_sets("MNIST_data/", validation_size=0)
train_data = mnist.train.next_batch(60000, shuffle=False)[0]
test_data = mnist.test.next_batch(10000, shuffle=False)[0]
return train_data, test_data
def binarize_and_shuffle(x):
N = x.shape[0]
float_x_train = x[np.random.permutation(N), :]
x_train = binarize(float_x_train)
return x_train
def var_from_scope(scope_name):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name)
def loss_func(x, Lx, px):
v1 = tf.reduce_sum(tf.square(x - Lx), axis=1) * px + 1e-4
scale = 1.0
sampler_loss = 0.
sampler_loss += scale * (tf.reduce_mean(1.0 / v1))
sampler_loss += (- tf.reduce_mean(v1)) / scale
return sampler_loss
def tf_accept(x, Lx, px):
mask = (px - tf.random_uniform(tf.shape(px)) >= 0.)
return tf.where(mask, Lx, x)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('hparams', '', 'Comma sep list of name=value')
DEFAULT_HPARAMS = tf.contrib.training.HParams(
learning_rate=0.001,
epoch=300,
optimizer='adam',
batch_size=512,
latent_dim=50,
eval_samples_every=5,
)
OPTIMIZERS = {
'adam': tf.train.AdamOptimizer,
'rmsprop': tf.train.RMSPropOptimizer,
'nesterov': tf.train.MomentumOptimizer,
'sgd': tf.train.GradientDescentOptimizer,
}
def main(_):
hps = DEFAULT_HPARAMS
print(FLAGS.hparams)
hps.parse(FLAGS.hparams)
# hack for logdir
hps_values = hps.values()
del(hps_values['epoch'])
train_folder = string.join(
[
str(k)+'='+str(hps_values[k])
for k in hps_values
],
',',
)
logdir = 'logs/baseline/%s' % train_folder
print('Saving logs to %s' % logdir)
float_x_train, float_x_test = get_data()
N = float_x_train.shape[0]
with tf.variable_scope('encoder'):
encoder = Sequential([
Linear(784, 1024, scope='encoder_1'),
tf.nn.softplus,
Linear(1024, 1024, scope='encoder_2'),
tf.nn.softplus,
Parallel([
Linear(1024, hps.latent_dim, scope='encoder_mean'),
Linear(1024, hps.latent_dim, scope='encoder_std'),
])
])
with tf.variable_scope('decoder'):
decoder = Sequential([
Linear(hps.latent_dim, 1024, scope='decoder_1'),
tf.nn.softplus,
Linear(1024, 1024, scope='decoder_2'),
tf.nn.softplus,
Linear(1024, 784, scope='decoder_3', factor=0.01)
])
# Setting up the VAE
inp = tf.placeholder(tf.float32, shape=(None, 784))
mu, log_sigma = encoder(inp)
noise = tf.random_normal(tf.shape(mu))
latent_q = mu + noise * tf.exp(log_sigma)
logits = decoder(latent_q)
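    # Reparameterized sample z = mu + noise * sigma; the training objective below
    # is the negative ELBO: KL(q(z|x) || N(0, I)) plus the Bernoulli
    # reconstruction cross-entropy, averaged over the batch.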
kl = normal_kl(mu, tf.exp(log_sigma), 0., 1.)
bce = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=inp, logits=logits), axis=1)
elbo = tf.reduce_mean(kl+bce)
opt = tf.train.AdamOptimizer(hps.learning_rate)
tf.summary.scalar('elbo', elbo)
loss_summaries = tf.summary.merge_all()
elbo_train_op = opt.minimize(elbo)
    z_eval = tf.random_normal((64, hps.latent_dim))
x_eval = tf.nn.sigmoid(decoder(z_eval))
samples_summary = tf.summary.image(
'samples',
tf.reshape(x_eval, (-1, 28, 28, 1)),
64,
)
time0 = time.time()
batch_per_epoch = N / hps.batch_size
saver = tf.train.Saver()
writer = tf.summary.FileWriter(logdir)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
counter = 0
for e in range(hps.epoch):
x_train = binarize_and_shuffle(float_x_train)
for t in range(batch_per_epoch):
start = t * hps.batch_size
end = start + hps.batch_size
batch = x_train[start:end, :]
fetches = [
elbo, loss_summaries, elbo_train_op
]
fetched = sess.run(fetches, {inp: batch})
if t % 50 == 0:
                print('%d/%d::ELBO: %.2e::Time: %.2e'
                      % (t, batch_per_epoch, fetched[0], time.time()-time0))
time0 = time.time()
writer.add_summary(fetched[1], global_step=counter)
counter += 1
if e % hps.eval_samples_every == 0:
saver.save(sess, '%s/model.ckpt' % logdir)
samples_summary_ = sess.run(samples_summary)
writer.add_summary(samples_summary_, global_step=(e / hps.eval_samples_every))
if __name__ == '__main__':
tf.app.run(main)
| 5,886 | 27.57767 | 112 | py |
l2hmc | l2hmc-master/mnist_vae.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Train a decoder-based model using L2HMC (or HMC) as a posterior sampler
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time, sys, string, os
import tensorflow as tf
import numpy as np
from utils.func_utils import accept, jacobian, autocovariance, get_log_likelihood, get_data,\
var_from_scope, binarize, normal_kl, binarize_and_shuffle
from utils.distributions import Gaussian, GMM, GaussianFunnel, gen_ring
from utils.layers import Linear, Parallel, Sequential, Zip, ScaleTanh
from utils.dynamics import Dynamics
from utils.sampler import propose, tf_accept, chain_operator
from utils.losses import get_loss, loss_mixed
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('hparams', '', 'Comma sep list of name=value')
tf.app.flags.DEFINE_string('exp_id', '', 'exp_id')
DEFAULT_HPARAMS = tf.contrib.training.HParams(
learning_rate=0.001,
epoch=100,
leapfrogs=5,
MH=5,
optimizer='adam',
batch_size=512,
latent_dim=50,
update_sampler_every=1,
eval_samples_every=1,
random_lf_composition=0,
stop_gradient=False,
hmc=False,
eps=0.1,
energy_scale=0.,
)
# hardcode the loss
LOSS = 'mixed'
OPTIMIZERS = {
'adam': tf.train.AdamOptimizer,
'rmsprop': tf.train.RMSPropOptimizer,
'nesterov': tf.train.MomentumOptimizer,
'sgd': tf.train.GradientDescentOptimizer,
}
def main(_):
hps = DEFAULT_HPARAMS
print(FLAGS.hparams)
hps.parse(FLAGS.hparams)
# hack for logdir
hps_values = hps.values()
del(hps_values['epoch'])
del(hps_values['eval_samples_every'])
train_folder = string.join(
[
str(k)+'='+str(hps_values[k])
for k in hps_values
],
',',
)
logdir = 'logs/%s/%s' % (FLAGS.exp_id, train_folder)
print('Saving logs to %s' % logdir)
float_x_train, float_x_test = get_data()
N = float_x_train.shape[0]
with tf.variable_scope('encoder'):
encoder = Sequential([
Linear(784, 1024, scope='encoder_1'),
tf.nn.softplus,
Linear(1024, 1024, scope='encoder_2'),
tf.nn.softplus,
Parallel([
Linear(1024, hps.latent_dim, scope='encoder_mean'),
Linear(1024, hps.latent_dim, scope='encoder_std'),
])
])
with tf.variable_scope('decoder'):
decoder = Sequential([
Linear(hps.latent_dim, 1024, scope='decoder_1'),
tf.nn.softplus,
Linear(1024, 1024, scope='decoder_2'),
tf.nn.softplus,
Linear(1024, 784, scope='decoder_3', factor=0.01)
])
# Setting up the VAE
inp = tf.placeholder(tf.float32, shape=(None, 784))
mu, log_sigma = encoder(inp)
noise = tf.random_normal(tf.shape(mu))
latent_q = mu + noise * tf.exp(log_sigma)
logits = decoder(latent_q)
# Setting up sampler
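    # Target for the sampler: energy(z) = -log p(x|z) - log p(z), i.e. the
    # unnormalized negative log posterior over the latent code given the
    # auxiliary input x.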
def energy(z, aux=None):
logits = decoder(z)
log_posterior = -tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=aux, logits=logits), axis=1)
log_prior = -0.5 * tf.reduce_sum(tf.square(z), axis=1)
return (-log_posterior - log_prior)
energy_stop_grad = lambda z, aux=None: energy(tf.stop_gradient(z), aux=None)
sampler_loss = 0.
with tf.variable_scope('sampler'):
size1 = 200
size2 = 200
encoder_sampler = Sequential([
Linear(784, 512, scope='encoder_1'),
tf.nn.softplus,
Linear(512, 512, scope='encoder_2'),
tf.nn.softplus,
Linear(512, size1, scope='encoder_3'),
])
def net_factory(x_dim, scope, factor):
with tf.variable_scope(scope):
net = Sequential([
Zip([
Linear(hps.latent_dim, size1, scope='embed_1', factor=0.33),
Linear(hps.latent_dim, size1, scope='embed_2', factor=factor * 0.33),
Linear(2, size1, scope='embed_3', factor=0.33),
encoder_sampler,
]),
sum,
tf.nn.relu,
Linear(size1, size2, scope='linear_1'),
tf.nn.relu,
Parallel([
Sequential([
Linear(size2, hps.latent_dim, scope='linear_s', factor=0.01),
ScaleTanh(hps.latent_dim, scope='scale_s')
]),
Linear(size2, hps.latent_dim, scope='linear_t', factor=0.01),
Sequential([
Linear(size2, hps.latent_dim, scope='linear_f', factor=0.01),
ScaleTanh(hps.latent_dim, scope='scale_f'),
])
])
])
return net
dynamics = Dynamics(
hps.latent_dim,
energy,
T=hps.leapfrogs,
eps=hps.eps,
hmc=hps.hmc,
net_factory=net_factory,
eps_trainable=True,
use_temperature=False,
)
init_x = tf.stop_gradient(latent_q)
init_v = tf.random_normal(tf.shape(init_x))
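    # Run hps.MH Metropolis-Hastings steps of the learned sampler, starting from
    # the encoder's (stop-gradient) sample, and average the loss terms over the
    # steps.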
    inverse_term = 0.
    other_term = 0.
    energy_loss = 0.
    for t in range(hps.MH):
        if hps.stop_gradient:
            init_x = tf.stop_gradient(init_x)
        if hps.random_lf_composition > 0:
            nb_steps = tf.random_uniform((), minval=1, maxval=hps.random_lf_composition, dtype=tf.int32)
            final_x, _, px, MH = chain_operator(init_x, dynamics, nb_steps, aux=inp, do_mh_step=True)
        else:
            final_x, _, px, MH = propose(init_x, dynamics, aux=inp, do_mh_step=True)
#sampler_loss += 1.0 / hps.MH * loss_mixed(latent, Lx, px, scale=tf.stop_gradient(tf.exp(log_sigma)))
# distance
v = tf.square(final_x - init_x) / (tf.stop_gradient(tf.exp(2 * log_sigma)) + 1e-4)
v = tf.reduce_sum(v, 1) * px + 1e-4
# energy
energy_diff = tf.square(energy(final_x, aux=inp) - energy(init_x, aux=inp)) * px + 1e-4
inverse_term += 1.0 / hps.MH * tf.reduce_mean(1.0 / v)
other_term -= 1.0 / hps.MH * tf.reduce_mean(v)
energy_loss += 1.0 / hps.MH * (tf.reduce_mean(1.0 / energy_diff) - tf.reduce_mean(energy_diff))
init_x = MH[0]
latent_T = init_x
sampler_loss = inverse_term + other_term + hps.energy_scale * energy_loss
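    # The decoder is trained on the post-MH latents latent_T with gradients
    # stopped, so its objective does not backpropagate into the sampler networks;
    # 'likelihood' below is the negative log joint -log p(z) - log p(x|z),
    # averaged over the batch.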
logits_T = decoder(tf.stop_gradient(latent_T))
partition = tf.constant(np.sqrt((2 * np.pi) ** hps.latent_dim), dtype=tf.float32)
prior_probs = tf.log(partition) + \
0.5 * tf.reduce_sum(tf.square(tf.stop_gradient(latent_T)), axis=1)
posterior_probs = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=inp, logits=logits_T), axis=1)
likelihood = tf.reduce_mean(prior_probs+posterior_probs, axis=0)
kl = normal_kl(mu, tf.exp(log_sigma), 0., 1.)
bce = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=inp, logits=logits), axis=1)
elbo = tf.check_numerics(tf.reduce_mean(kl+bce), 'elbo NaN')
batch_per_epoch = N / hps.batch_size
# Setting up train ops
global_step = tf.Variable(0., trainable=False)
# learning_rate = tf.train.exponential_decay(
# hps.learning_rate,
# global_step,
# 750,
# 0.96,
# staircase=True
# )
learning_rate = tf.train.piecewise_constant(global_step, [batch_per_epoch * 500.], [1e-3, 1e-4])
opt_sampler = tf.train.AdamOptimizer(learning_rate)
opt = tf.train.AdamOptimizer(learning_rate)
elbo_train_op = opt.minimize(elbo, var_list=var_from_scope('encoder'))
if not hps.hmc:
gradients, variables = zip(*opt_sampler.compute_gradients(sampler_loss, var_list=var_from_scope('sampler')))
gradients, global_norm = tf.clip_by_global_norm(gradients, 5.0)
sampler_train_op = opt_sampler.apply_gradients(zip(gradients, variables))
# sampler_train_op = opt_sampler.minimize(sampler_loss, var_list=var_from_scope('sampler'), global_step=global_step)
else:
sampler_train_op = tf.no_op()
decoder_train_op = opt.minimize(likelihood, var_list=var_from_scope('decoder'), global_step=global_step)
# if not hps.hmc:
# tf.summary.scalar('sampler_grad_norm', global_norm)
tf.summary.scalar('inverse_term', inverse_term)
tf.summary.scalar('other_term', other_term)
tf.summary.scalar('energy_loss', energy_loss)
tf.summary.scalar('sampler_loss', sampler_loss)
tf.summary.scalar('log_prob', likelihood)
tf.summary.scalar('elbo', elbo)
tf.summary.scalar('p_accept', tf.reduce_mean(px))
loss_summaries = tf.summary.merge_all()
# For sample generation
z_eval = tf.placeholder(tf.float32, shape=(None, hps.latent_dim))
x_eval = tf.nn.sigmoid(decoder(z_eval))
samples_summary = tf.summary.image(
'samples',
tf.reshape(x_eval, (-1, 28, 28, 1)),
64,
)
saver = tf.train.Saver()
writer = tf.summary.FileWriter(logdir)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
counter = 0
# For graph restore
tf.add_to_collection('inp', inp)
tf.add_to_collection('latent_q', latent_q)
tf.add_to_collection('latent_T', latent_T)
tf.add_to_collection('logits_T', logits_T)
tf.add_to_collection('z_eval', z_eval)
tf.add_to_collection('x_eval', x_eval)
time0 = time.time()
for e in range(hps.epoch):
x_train = binarize_and_shuffle(float_x_train)
for t in range(batch_per_epoch):
start = t * hps.batch_size
end = start + hps.batch_size
batch = x_train[start:end, :]
fetches = [
elbo, sampler_loss, likelihood, loss_summaries, \
global_step, elbo_train_op, decoder_train_op, learning_rate
]
if t % hps.update_sampler_every == 0:
fetches += [sampler_train_op]
fetched = sess.run(fetches, {inp: batch})
if t % 50 == 0:
                print('Step:%d::%d/%d::ELBO: %.3e::Loss sampler: %.3e:: Log prob: %.3e:: Lr: %g:: Time: %.2e'
                      % (fetched[4], t, batch_per_epoch, fetched[0], fetched[1], fetched[2], fetched[-2], time.time()-time0))
time0 = time.time()
writer.add_summary(fetched[3], global_step=counter)
counter += 1
if e % hps.eval_samples_every == 0:
saver.save(sess, '%s/model.ckpt' % logdir)
samples_summary_ = sess.run(samples_summary, {z_eval: np.random.randn(64, hps.latent_dim)})
writer.add_summary(samples_summary_, global_step=(e / hps.eval_samples_every))
for AS in [64, 256, 1024, 4096, 8192]:
cmd = 'python eval_vae.py --path "%s/" --split %s --anneal_steps %d'
        print('Train fold evaluation. AS steps: %d' % AS)
        os.system(cmd % (logdir, 'train', AS))
        print('Test fold evaluation. AS steps: %d' % AS)
        os.system(cmd % (logdir, 'test', AS))
    print('Sampler eval')
os.system('python eval_sampler.py --path "%s"' % logdir)
if __name__ == '__main__':
tf.app.run(main)
| 12,163 | 33.556818 | 124 | py |
l2hmc | l2hmc-master/eval_vae.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluates decoder using ais
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time, argparse
import tensorflow as tf
import numpy as np
from utils.layers import Sequential, Linear
from utils.distributions import Gaussian
from utils.ais import ais_estimate
from utils.func_utils import get_data, binarize
from tensorflow.examples.tutorials.mnist import input_data
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str)
parser.add_argument('--leapfrogs', default=10, type=int)
parser.add_argument('--anneal_steps', default=100, type=int)
parser.add_argument('--split', default='test', type=str)
parser.add_argument('--latent_dim', default=50, type=int)
args = parser.parse_args()
with tf.variable_scope('decoder'):
decoder = Sequential([
Linear(args.latent_dim, 1024, scope='decoder_1'),
tf.nn.softplus,
Linear(1024, 1024, scope='decoder_2'),
tf.nn.softplus,
Linear(1024, 784, scope='decoder_3', factor=0.01)
])
inp = tf.placeholder(tf.float32, shape=(None, 784))
z = tf.random_normal((tf.shape(inp)[0], args.latent_dim))
gaussian = Gaussian(np.zeros((args.latent_dim,)), np.eye(args.latent_dim))
init_energy = gaussian.get_energy_function()
def final_energy(z, aux=None):
logits = decoder(z)
log_posterior = -tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=aux, logits=logits), axis=1)
log_prior = -0.5 * tf.reduce_sum(tf.square(z), axis=1)
return -log_posterior - log_prior
p_x_hat = ais_estimate(init_energy, final_energy, args.anneal_steps, z, x_dim=args.latent_dim, aux=inp, leapfrogs=args.leapfrogs, step_size=0.1, num_splits=50,) #refresh=True, refreshment=0.1)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(save_path=args.path+'model.ckpt', sess=sess)
_, float_x_test = get_data()
x_test = np.load(args.split+'.npy') # Fixed binarization of MNIST
N = x_test.shape[0]
est_log_p = 0.
time0 = time.time()
num_splits = 50
for i in xrange(0, N, num_splits):
ais_batch = x_test[i:i+num_splits]
        print(ais_batch.shape)
ais_batch = ais_batch[:, np.newaxis, :] + np.zeros([1, 20, 1]).astype('float32')
ais_batch = np.reshape(ais_batch, [-1, 784])
        print(ais_batch.shape)
if i > 0:
            print('%d / %d in %.2e seconds, est=%.2f' % (i, N, time.time() - time0, est_log_p / i))
            print(fetched[0])
time0 = time.time()
single = x_test[i, :]
tiled = np.tile(single, (20, 1))
fetched = sess.run(p_x_hat, {inp: ais_batch})
est_log_p += fetched[0]
        print(fetched[1])
print(est_log_p / N)
with open(args.path+args.split+'_ll.txt', 'a') as f:
f.write(str(est_log_p / N)+'\n')
| 3,278 | 31.465347 | 192 | py |
l2hmc | l2hmc-master/eval_sampler.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Given a trained decoder and sampler returns figure of auto-covariance
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from utils.layers import Sequential, Zip, Parallel, Linear, ScaleTanh
from utils.dynamics import Dynamics
from utils.func_utils import get_data, binarize, tf_accept, autocovariance
from utils.sampler import propose, chain_operator
parser = argparse.ArgumentParser()
parser.add_argument('--exp_id', default='09-24', type=str)
parser.add_argument('--leapfrogs', default=5, type=int)
parser.add_argument('--latent_dim', default=50, type=int)
parser.add_argument('--MH', default=5, type=int)
parser.add_argument('--eps', default=0.01, type=float)
parser.add_argument('--path', type=str)
args = parser.parse_args()
# First load the graph and grab the mask
logdir = 'logs/%s/optimizer=adam,learning_rate=0.001,latent_dim=%d,eps=%g,MH=%d,batch_size=512,update_sampler_every=1,leapfrogs=%d,hmc=False' \
% (args.exp_id, args.latent_dim, args.eps, args.MH, args.leapfrogs)
path = '%s/model.ckpt' % args.path
with tf.gfile.Open(path+'.meta'):
tf.reset_default_graph()
tf.train.import_meta_graph(path+'.meta')
mask = tf.get_default_graph().get_tensor_by_name('sampler/Const_%d:0' % 1)
with tf.Session() as sess:
mask = sess.run(mask)
tf.reset_default_graph()
# set up model variables
with tf.variable_scope('encoder'):
encoder = Sequential([
Linear(784, 1024, scope='encoder_1'),
tf.nn.softplus,
Linear(1024, 1024, scope='encoder_2'),
tf.nn.softplus,
Parallel([
Linear(1024, 50, scope='encoder_mean'),
Linear(1024, 50, scope='encoder_std'),
])
])
with tf.variable_scope('decoder'):
decoder = Sequential([
Linear(50, 1024, scope='decoder_1'),
tf.nn.softplus,
Linear(1024, 1024, scope='decoder_2'),
tf.nn.softplus,
Linear(1024, 784, scope='decoder_3', factor=0.01)
])
# Setting up the VAE
inp = tf.placeholder(tf.float32, shape=(None, 784))
mu, log_sigma = encoder(inp)
noise = tf.random_normal(tf.shape(mu))
latent_q = mu + noise * tf.exp(log_sigma)
logits = decoder(latent_q)
# Setting up sampler
def energy(z, aux=None):
logits = decoder(z)
log_posterior = -tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=aux, logits=logits), axis=1)
log_prior = -0.5 * tf.reduce_sum(tf.square(z), axis=1)
return -log_posterior - log_prior
with tf.variable_scope('sampler'):
size1 = 200
size2 = 200
encoder_sampler = Sequential([
Linear(784, 512, scope='encoder_1'),
tf.nn.softplus,
Linear(512, 512, scope='encoder_2'),
tf.nn.softplus,
Linear(512, size1, scope='encoder_3'),
])
def net_factory(x_dim, scope, factor):
with tf.variable_scope(scope):
net = Sequential([
Zip([
Linear(50, size1, scope='embed_1', factor=0.33),
Linear(50, size1, scope='embed_2', factor=factor * 0.33),
Linear(2, size1, scope='embed_3', factor=0.33),
encoder_sampler,
]),
sum,
tf.nn.relu,
Linear(size1, size2, scope='linear_1'),
tf.nn.relu,
Parallel([
Sequential([
Linear(size2, 50, scope='linear_s', factor=0.01),
ScaleTanh(50, scope='scale_s')
]),
Linear(size2, 50, scope='linear_t', factor=0.01),
Sequential([
Linear(size2, 50, scope='linear_f', factor=0.01),
ScaleTanh(50, scope='scale_f'),
])
])
])
return net
dynamics = Dynamics(
args.latent_dim,
energy,
T=args.leapfrogs,
eps=0.1,
hmc=False,
net_factory=net_factory,
eps_trainable=True,
use_temperature=False,
)
dynamics.mask = tf.constant(mask, tf.float32)
# CS placeholders
z_start = tf.placeholder(tf.float32, shape=(None, 50))
# _, _, _, MH = propose(z_start, dynamics, do_mh_step=True, aux=inp)
nb_steps = tf.random_uniform((), minval=1, maxval=4, dtype=tf.int32)
_, _, _, MH = chain_operator(z_start, dynamics, nb_steps, do_mh_step=True, aux=inp)
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(save_path=path, sess=sess)
# pull MNIST
train, test = get_data()
x_train = binarize(train)
x_0 = np.tile(x_train[456, :][None, :], (200, 1))
init_chain = sess.run(latent_q, {inp: x_0})
list_samples = []
samples = np.copy(init_chain)
for t in range(2000):
list_samples.append(np.copy(samples))
samples = sess.run(MH[0], {inp: x_0, z_start: samples})
F = np.array(list_samples)
mu = F[1000:, :, :].mean(axis=(0, 1))
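# Compare the learned sampler ('CS') against plain HMC at several step sizes by
# plotting the absolute autocovariance of the chains after a 1000-step burn-in,
# using the learned-sampler mean as the common centering.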
for eps in np.arange(0.05, 0.2, 0.025):
hmc_dynamics = Dynamics(
50,
energy,
T=args.leapfrogs,
eps=eps,
hmc=True,
)
z_start_hmc = tf.placeholder(tf.float32, shape=(None, 50))
_, _, _, MH_HMC = propose(z_start_hmc, hmc_dynamics, do_mh_step=True, aux=inp)
hmc_samples = []
samples = np.copy(init_chain)
for t in range(2000):
hmc_samples.append(np.copy(samples))
samples = sess.run(MH_HMC[0], {inp: x_0, z_start_hmc: samples})
G = np.array(hmc_samples[1000:])
    print(G.shape)
plt.plot(np.abs([autocovariance(G - mu, tau=t) for t in range(199)]), label='$\epsilon=%.2f$' % eps)
plt.plot(np.abs([autocovariance(F[1000:, :, :] - mu, tau=t) for t in range(199)]), label='CS')
plt.xlabel('# MH steps')
plt.ylabel('Autocovariance')
plt.legend()
plt.savefig('%s/sampler_eval.png' % args.path)
| 6,509 | 30 | 143 | py |
l2hmc | l2hmc-master/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 40.214286 | 74 | py |
l2hmc | l2hmc-master/utils/notebook_utils.py | import tensorflow as tf
import numpy as np
from dynamics import Dynamics
from sampler import propose
import matplotlib.pyplot as plt
def plot_grid(S, width=8):
sheet_width = width
plt.figure(figsize=(12, 12))
for i in xrange(S.shape[0]):
plt.subplot(sheet_width, sheet_width, i + 1)
plt.imshow(S[i], cmap='gray')
plt.grid('off')
plt.axis('off')
def plot_line(S):
sheet_width = S.shape[0]
plt.figure(figsize=(16, 3))
for i in xrange(S.shape[0]):
plt.subplot(1, sheet_width, i + 1)
plt.imshow(S[i], cmap='gray')
plt.grid('off')
plt.axis('off')
def get_hmc_samples(x_dim, eps, energy_function, sess, T=10, steps=200, samples=None):
hmc_dynamics = Dynamics(x_dim, energy_function, T=T, eps=eps, hmc=True)
hmc_x = tf.placeholder(tf.float32, shape=(None, x_dim))
Lx, _, px, hmc_MH = propose(hmc_x, hmc_dynamics, do_mh_step=True)
    if samples is None:
        # Default initialization (assumption): start 200 chains from a standard
        # normal when no initial samples are provided.
        samples = np.random.randn(200, x_dim)
final_samples = []
for t in range(steps):
final_samples.append(np.copy(samples))
Lx_, px_, samples = sess.run([Lx, px, hmc_MH[0]], {hmc_x: samples})
return np.array(final_samples)
| 1,248 | 30.225 | 86 | py |
l2hmc | l2hmc-master/utils/distributions.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Distribution object providing TF Energy function, sampling (when possible)
and numpy log-density
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
import numpy as np
from scipy.stats import multivariate_normal, ortho_group
def quadratic_gaussian(x, mu, S):
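    # Batched quadratic form 0.5 * (x - mu)^T S (x - mu); tf.diag_part keeps
    # only the per-row (per-sample) terms of the product.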
return tf.diag_part(0.5 * tf.matmul(tf.matmul(x - mu, S), tf.transpose(x - mu)))
def random_tilted_gaussian(dim, log_min=-2., log_max=2.):
mu = np.zeros((dim,))
R = ortho_group.rvs(dim)
sigma = np.diag(np.exp(np.log(10.) * np.random.uniform(log_min, log_max, size=(dim,)))) + 1e-6 * np.eye(dim)
S = R.T.dot(sigma).dot(R)
return Gaussian(mu, S)
class Gaussian(object):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
print(np.linalg.det(self.sigma), self.sigma.dtype)
self.i_sigma = np.linalg.inv(np.copy(sigma))
def get_energy_function(self):
def fn(x, *args, **kwargs):
S = tf.constant(self.i_sigma.astype('float32'))
mu = tf.constant(self.mu.astype('float32'))
return quadratic_gaussian(x, mu, S)
return fn
def get_samples(self, n):
'''
Sampling is broken in numpy for d > 10
'''
C = np.linalg.cholesky(self.sigma)
X = np.random.randn(n, self.sigma.shape[0])
return X.dot(C.T)
def log_density(self, X):
return multivariate_normal(mean=self.mu, cov=self.sigma).logpdf(X)
class TiltedGaussian(Gaussian):
def __init__(self, dim, log_min, log_max):
self.R = ortho_group.rvs(dim)
self.diag = np.diag(np.exp(np.log(10.) * np.random.uniform(log_min, log_max, size=(dim,)))) + 1e-8 * np.eye(dim)
S = self.R.T.dot(self.diag).dot(self.R)
self.dim = dim
Gaussian.__init__(self, np.zeros((dim,)), S)
def get_samples(self, n):
        X = np.random.randn(n, self.dim)
X = X.dot(np.sqrt(self.diag))
X = X.dot(self.R)
return X
class RoughWell(object):
def __init__(self, dim, eps, easy=False):
self.dim = dim
self.eps = eps
self.easy = easy
def get_energy_function(self):
def fn(x, *args, **kwargs):
n = tf.reduce_sum(tf.square(x), 1)
if not self.easy:
return 0.5 * n + self.eps * tf.reduce_sum(tf.cos(x / (self.eps * self.eps)), 1)
else:
return 0.5 * n + self.eps * tf.reduce_sum(tf.cos(x / self.eps), 1)
return fn
def get_samples(self, n):
# we can approximate by a gaussian for eps small enough
return np.random.randn(n, self.dim)
class GMM(object):
def __init__(self, mus, sigmas, pis):
assert len(mus) == len(sigmas)
assert sum(pis) == 1.0
self.mus = mus
self.sigmas = sigmas
self.pis = pis
self.nb_mixtures = len(pis)
self.k = mus[0].shape[0]
self.i_sigmas = []
self.constants = []
for i, sigma in enumerate(sigmas):
self.i_sigmas.append(np.linalg.inv(sigma).astype('float32'))
det = np.sqrt((2 * np.pi) ** self.k * np.linalg.det(sigma)).astype('float32')
self.constants.append((pis[i] / det).astype('float32'))
def get_energy_function(self):
def fn(x):
V = tf.concat([
tf.expand_dims(-quadratic_gaussian(x, self.mus[i], self.i_sigmas[i])
+ tf.log(self.constants[i]), 1)
for i in range(self.nb_mixtures)
], axis=1)
return -tf.reduce_logsumexp(V, axis=1)
return fn
def get_samples(self, n):
categorical = np.random.choice(self.nb_mixtures, size=(n,), p=self.pis)
counter_samples = collections.Counter(categorical)
samples = []
for k, v in counter_samples.iteritems():
samples.append(np.random.multivariate_normal(self.mus[k], self.sigmas[k], size=(v,)))
samples = np.concatenate(samples, axis=0)
np.random.shuffle(samples)
return samples
def log_density(self, X):
return np.log(sum([self.pis[i] * multivariate_normal(mean=self.mus[i], cov=self.sigmas[i]).pdf(X) for i in range(self.nb_mixtures)]))
class GaussianFunnel(object):
def __init__(self, dim=2, clip=6.):
self.dim = dim
self.sigma = 2.0
self.clip = 4 * self.sigma
def get_energy_function(self):
print('getting energy fn')
def fn(x):
v = x[:, 0]
log_p_v = tf.square(v / self.sigma)
s = tf.exp(v)
sum_sq = tf.reduce_sum(tf.square(x[:, 1:]), axis=1)
n = tf.cast(tf.shape(x)[1] - 1, tf.float32)
E = 0.5 * (log_p_v + sum_sq / s + n * tf.log(2.0 * np.pi * s))
s_min = tf.exp(-self.clip)
s_max = tf.exp(self.clip)
E_safe1 = 0.5 * (log_p_v + sum_sq / s_max + n * tf.log(2.0 * np.pi * s_max))
E_safe2 = 0.5 * (log_p_v + sum_sq / s_min + n * tf.log(2.0 * np.pi * s_min))
E_safe = tf.minimum(E_safe1, E_safe2)
E_ = tf.where(tf.greater(v, self.clip), E_safe1, E)
E_ = tf.where(tf.greater(-self.clip, v), E_safe2, E_)
return E_
return fn
def get_samples(self, n):
samples = np.zeros((n, self.dim))
for t in range(n):
v = self.sigma * np.random.randn()
s = np.exp(v / 2)
samples[t, 0] = v
samples[t, 1:] = s * np.random.randn(self.dim-1)
return samples
def log_density(self, x):
v = x[:, 0]
log_p_v = np.square(v / self.sigma)
s = np.exp(v)
sum_sq = np.square(x[:, 1:]).sum(axis=1)
        n = x.shape[1] - 1
        return 0.5 * (log_p_v + sum_sq / s + (n / 2) * np.log(2 * np.pi * s))
def gen_ring(r=1.0, var=1.0, nb_mixtures=2):
base_points = []
for t in range(nb_mixtures):
c = np.cos(2 * np.pi * t / nb_mixtures)
s = np.sin(2 * np.pi * t / nb_mixtures)
base_points.append(np.array([r * c, r * s]))
v = np.array(base_points)
sigmas = [var * np.eye(2) for t in range(nb_mixtures)]
pis = [1. / nb_mixtures] * nb_mixtures
pis[0] += 1-sum(pis)
return GMM(base_points, sigmas, pis)
| 6,397 | 28.897196 | 137 | py |
l2hmc | l2hmc-master/utils/losses.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of losses
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
def get_loss(name):
assoc = {
'mixed': loss_mixed,
'standard': loss_std,
'inverse': loss_inverse,
'logsumexp': loss_logsumexp,
}
return assoc[name]
def loss_vec(x, X, p):
return tf.multiply(tf.reduce_sum(tf.square(X - x), axis=1), p) + 1e-4
def loss_logsumexp(x, X, p):
v = loss_vec(x, X, p)
dN = tf.cast(tf.shape(v)[0], tf.float32)
return tf.reduce_logsumexp(-v) - tf.log(dN)
def loss_inverse(x, X, p):
v = loss_vec(x, X, p)
return -1.0 / tf.reduce_mean(1.0 / (v + 1e-4))
def loss_std(x, X, p):
v = loss_vec(x, X, p)
return - tf.reduce_mean(v, axis=0)
def loss_mixed(x, Lx, px, scale=1.0):
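    # L2HMC-style objective on the accept-weighted squared jump distance: the
    # reciprocal term penalizes proposals that barely move, while the negative
    # term rewards large accepted moves.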
v1 = loss_vec(x, Lx, px)
v1 /= scale
sampler_loss = 0.
sampler_loss += (tf.reduce_mean(1.0 / v1))
sampler_loss += (- tf.reduce_mean(v1))
return sampler_loss
| 1,566 | 25.116667 | 74 | py |
l2hmc | l2hmc-master/utils/sampler.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sampling functions given dynamics and placeholders
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
TF_FLOAT = tf.float32
def propose(x, dynamics, init_v=None, aux=None, do_mh_step=False, log_jac=False):
if dynamics.hmc:
Lx, Lv, px = dynamics.forward(x, init_v=init_v, aux=aux)
return Lx, Lv, px, [tf_accept(x, Lx, px)]
else:
# sample mask for forward/backward
mask = tf.cast(tf.random_uniform((tf.shape(x)[0], 1), maxval=2, dtype=tf.int32), TF_FLOAT)
Lx1, Lv1, px1 = dynamics.forward(x, aux=aux, log_jac=log_jac)
Lx2, Lv2, px2 = dynamics.backward(x, aux=aux, log_jac=log_jac)
Lx = mask * Lx1 + (1 - mask) * Lx2
Lv = None
if init_v is not None:
Lv = mask * Lv1 + (1 - mask) * Lv2
px = tf.squeeze(mask, axis=1) * px1 + tf.squeeze(1 - mask, axis=1) * px2
outputs = []
if do_mh_step:
outputs.append(tf_accept(x, Lx, px))
return Lx, Lv, px, outputs
def tf_accept(x, Lx, px):
mask = (px - tf.random_uniform(tf.shape(px)) >= 0.)
return tf.where(mask, Lx, x)
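# Compose a (possibly random) number of L2HMC operators: the log-Jacobians of
# the individual steps are accumulated and a single Metropolis-Hastings
# accept/reject is applied at the end of the chain.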
def chain_operator(init_x, dynamics, nb_steps, aux=None, init_v=None, do_mh_step=False):
    if init_v is None:
init_v = tf.random_normal(tf.shape(init_x))
def cond(latent, v, log_jac, t):
return tf.less(t, tf.cast(nb_steps, tf.float32))
def body(x, v, log_jac, t):
Lx, Lv, px, _ = propose(x, dynamics, init_v=v, aux=aux, log_jac=True, do_mh_step=False)
return Lx, Lv, log_jac+px, t+1
final_x, final_v, log_jac, _ = tf.while_loop(
cond=cond,
body=body,
loop_vars=[
init_x,
init_v,
tf.zeros((tf.shape(init_x)[0],)),
tf.constant(0.),
]
)
p_accept = dynamics.p_accept(init_x, init_v, final_x, final_v, log_jac, aux=aux)
outputs = []
if do_mh_step:
outputs.append(tf_accept(init_x, final_x, p_accept))
return final_x, final_v, p_accept, outputs
| 2,684 | 29.862069 | 95 | py |
l2hmc | l2hmc-master/utils/ais.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AIS implementation following Wu et al. 2016
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from dynamics import Dynamics
from func_utils import tf_accept
def ais_estimate(
init_energy,
final_energy,
anneal_steps,
initial_x,
aux=None,
step_size=0.5,
leapfrogs=25,
x_dim=5,
num_splits=1,
refresh=False,
refreshment=0.1
):
beta = tf.linspace(0., 1., anneal_steps+1)[1:]
beta_diff = beta[1] - beta[0]
refreshment = tf.constant(refreshment)
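    # Each annealing step adds log f_t(x) - log f_{t-1}(x) to the importance
    # weights (here beta_diff * (init_energy - final_energy)) and then applies an
    # HMC transition targeting the intermediate distribution with energy
    # (1 - beta) * init_energy + beta * final_energy.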
def body(a, beta):
def curr_energy(z, aux=None):
return (1-beta) * init_energy(z) + (beta) * final_energy(z, aux=aux)
last_x = a[1]
w = a[2]
v = a[3]
if refresh:
refreshed_v = v * tf.sqrt(1-refreshment) + tf.random_normal(tf.shape(v)) * tf.sqrt(refreshment)
else:
refreshed_v = tf.random_normal(tf.shape(v))
w = w + beta_diff * (- final_energy(last_x, aux=aux) \
+ init_energy(last_x, aux=aux))
dynamics = Dynamics(x_dim, energy_function=curr_energy, eps=step_size, hmc=True, T=leapfrogs)
Lx, Lv, px = dynamics.forward(last_x, aux=aux, init_v=refreshed_v)
mask = (px - tf.random_uniform(tf.shape(px)) >= 0.)
updated_x = tf.where(mask, Lx, last_x)
updated_v = tf.where(mask, Lv, -Lv)
return (px, updated_x, w, updated_v)
alpha, x, w, _ = tf.scan(body, beta,
(
tf.zeros_like(initial_x[:, 0]),
initial_x,
tf.zeros_like(initial_x[:, 0]),
tf.random_normal(tf.shape(initial_x))
)
)
logmeanexp = lambda z: tf.reduce_logsumexp(z) - tf.log(tf.cast(tf.shape(z)[0], tf.float32))
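    # With num_splits > 1 the chains are divided into equally sized groups and
    # the log-mean importance weight of each group is summed, so each group
    # provides an independent log-likelihood estimate (e.g. one per data point).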
if num_splits == 1:
return logmeanexp(w[-1]), tf.reduce_mean(alpha)
list_w = tf.split(w[-1], num_splits, axis=0)
return tf.reduce_sum(tf.stack(map(logmeanexp, list_w), axis=0), 0), tf.reduce_mean(alpha)
| 2,843 | 33.26506 | 107 | py |
l2hmc | l2hmc-master/utils/layers.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of useful layers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
TF_FLOAT = tf.float32
NP_FLOAT = np.float32
class Linear(object):
def __init__(self, in_, out_, scope='linear', factor=1.0):
with tf.variable_scope(scope):
initializer = tf.contrib.layers.variance_scaling_initializer(factor=factor * 2.0, mode='FAN_IN', uniform=False, dtype=TF_FLOAT)
self.W = tf.get_variable('W', shape=(in_, out_), initializer=initializer)
self.b = tf.get_variable('b', shape=(out_,), initializer=tf.constant_initializer(0., dtype=TF_FLOAT))
def __call__(self, x):
return tf.add(tf.matmul(x, self.W), self.b)
class ConcatLinear(object):
def __init__(self, ins_, out_, factors=None, scope='concat_linear'):
self.layers = []
with tf.variable_scope(scope):
for i, in_ in enumerate(ins_):
if factors is None:
factor = 1.0
else:
factor = factors[i]
self.layers.append(Linear(in_, out_, scope='linear_%d' % i, factor=factor))
def __call__(self, inputs):
output = 0.
for i, x in enumerate(inputs):
output += self.layers[i](x)
return output
class Parallel(object):
def __init__(self, layers=[]):
self.layers = layers
def add(self, layer):
self.layers.append(layer)
def __call__(self, x):
return [layer(x) for layer in self.layers]
class Sequential(object):
def __init__(self, layers = []):
self.layers = layers
def add(self, layer):
self.layers.append(layer)
def __call__(self, x):
y = x
for layer in self.layers:
y = layer(y)
return y
class ScaleTanh(object):
def __init__(self, in_, scope='scale_tanh'):
with tf.variable_scope(scope):
self.scale = tf.exp(tf.get_variable('scale', shape=(1, in_), initializer=tf.constant_initializer(0., dtype=TF_FLOAT)))
def __call__(self, x):
return self.scale * tf.nn.tanh(x)
class Zip(object):
def __init__(self, layers=[]):
self.layers = layers
def __call__(self, x):
assert len(x) == len(self.layers)
n = len(self.layers)
return [self.layers[i](x[i]) for i in range(n)]
| 2,826 | 28.14433 | 133 | py |
l2hmc | l2hmc-master/utils/config.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration to switch to float64
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
TF_FLOAT = tf.float32
NP_FLOAT = np.float32 | 818 | 29.333333 | 74 | py |
l2hmc | l2hmc-master/utils/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 40.214286 | 74 | py |
l2hmc | l2hmc-master/utils/func_utils.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Useful auxiliary functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from scipy.stats import multivariate_normal
from tensorflow.examples.tutorials.mnist import input_data
def prout():
return 3
def accept(x_i, x_p, p):
assert x_i.shape == x_p.shape
dN, dX = x_i.shape
u = np.random.uniform(size=(dN,))
m = (p - u >= 0).astype('int32')[:, None]
return x_i * (1 - m) + x_p * m
def autocovariance(X, tau=0):
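    # Empirical lag-tau autocovariance of a (time, chain, dim) array, averaged
    # over chains and over time; callers are expected to center X beforehand.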
dT, dN, dX = np.shape(X)
s = 0.
for t in range(dT - tau):
x1 = X[t, :, :]
x2 = X[t+tau, :, :]
s += np.sum(x1 * x2) / dN
return s / (dT - tau)
def jacobian(x, fx):
return tf.transpose(tf.stack([tf.gradients(component, x)[0][0] for component in tf.unstack(fx[0])]))
def get_log_likelihood(X, gaussian):
m = multivariate_normal(mean=gaussian.mu, cov=gaussian.sigma)
return m.logpdf(X).mean()
def get_data():
mnist = input_data.read_data_sets("MNIST_data/", validation_size=0)
train_data = mnist.train.next_batch(60000, shuffle=False)[0]
test_data = mnist.test.next_batch(10000, shuffle=False)[0]
return train_data, test_data
def binarize(x):
assert(x.max() <= 1.)
return (np.random.random(x.shape) < x).astype(np.float32)
def tf_accept(x, Lx, px):
mask = (px - tf.random_uniform(tf.shape(px)) >= 0.)
return tf.where(mask, Lx, x)
def normal_kl(q_means, q_stddevs, p_means, p_stddevs):
'''Returns the KL divergence between two normal distributions q and p.
KLs are summed over the inner dimension.
Args:
`q_means`: Means of q.
`q_stddevs`: Standard deviations of q.
`p_means`: Means of p.
`p_stddevs`: Standard deviations of p.
'''
# The log(2*pi) terms cancel, so no need to compute them.
q_entropy = 0.5 + tf.log(q_stddevs)
# E_q[(z - p_means)**2] = (q_means - p_means)**2 + q_stddevs**2
q_p_cross_entropy = 0.5 * tf.square(q_stddevs / p_stddevs)
q_p_cross_entropy += 0.5 * tf.square((q_means - p_means) / p_stddevs)
q_p_cross_entropy += tf.log(p_stddevs)
q_p_kl = tf.reduce_sum(-q_entropy + q_p_cross_entropy, -1)
return q_p_kl
def binarize_and_shuffle(x):
''' Given a numpy array, returns a shuffled binarization
Args:
'x': numpy array
'''
N = x.shape[0]
float_x_train = x[np.random.permutation(N), :]
x_train = binarize(float_x_train)
return x_train
def var_from_scope(scope_name):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name)
def acl_spectrum(X, scale):
n = X.shape[0]
return np.array([autocovariance(X / scale, tau=t) for t in range(n-1)])
def ESS(A):
A = A * (A > 0.05)
return 1. / (1. + 2 * np.sum(A[1:]))
| 3,310 | 26.363636 | 102 | py |
l2hmc | l2hmc-master/utils/dynamics.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dynamics object
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
# from config import TF_FLOAT, NP_FLOAT
TF_FLOAT = tf.float32
NP_FLOAT = np.float32
def safe_exp(x, name=None):
    return tf.exp(x)
    # NaN-checking variant, currently disabled:
    # return tf.check_numerics(tf.exp(x), message='%s is NaN' % name)
class Dynamics(object):
def __init__(self,
x_dim,
energy_function,
T=25,
eps=0.1,
hmc=False,
net_factory=None,
eps_trainable=True,
use_temperature=False):
self.x_dim = x_dim
self.use_temperature = use_temperature
self.temperature = tf.placeholder(TF_FLOAT, shape=())
if not hmc:
alpha = tf.get_variable(
'alpha',
initializer=tf.log(tf.constant(eps)),
trainable=eps_trainable,
)
else:
alpha = tf.log(tf.constant(eps, dtype=TF_FLOAT))
self.eps = safe_exp(alpha, name='alpha')
self._fn = energy_function
self.T = T
self.hmc = hmc
self._init_mask()
# m = np.zeros((x_dim,))
# m[np.arange(0, x_dim, 2)] = 1
# mb = 1 - m
# self.m = tf.constant(m, dtype=tf.float32)
# self.mb = tf.constant(mb, dtype=tf.float32)
# if HMC we just return all zeros
if hmc:
z = lambda x, *args, **kwargs: tf.zeros_like(x)
self.XNet = lambda inp: [tf.zeros_like(inp[0]) for t in range(3)]
self.VNet = lambda inp: [tf.zeros_like(inp[0]) for t in range(3)]
else:
self.XNet = net_factory(x_dim, scope='XNet', factor=2.0)
self.VNet = net_factory(x_dim, scope='VNet', factor=1.0)
# self.Sv, self.Tv, self.Fv = self.VNet.S, self.VNet.T, self.VNet.F
# self.Sx, self.Tx, self.Fx = self.XNet.S, self.XNet.T, self.XNet.F
def _init_mask(self):
mask_per_step = []
for t in range(self.T):
ind = np.random.permutation(np.arange(self.x_dim))[:int(self.x_dim / 2)]
m = np.zeros((self.x_dim,))
m[ind] = 1
mask_per_step.append(m)
self.mask = tf.constant(np.stack(mask_per_step), dtype=TF_FLOAT)
def _get_mask(self, step):
m = tf.gather(self.mask, tf.cast(step, dtype=tf.int32))
return m, 1.-m
def _format_time(self, t, tile=1):
trig_t = tf.squeeze([
tf.cos(2 * np.pi * t / self.T),
tf.sin(2 * np.pi * t / self.T),
])
return tf.tile(tf.expand_dims(trig_t, 0), (tile, 1))
def kinetic(self, v):
return 0.5 * tf.reduce_sum(tf.square(v), axis=1)
def clip_with_grad(self, u, min_u=-32., max_u=32.):
u = u - tf.stop_gradient(tf.nn.relu(u - max_u))
u = u + tf.stop_gradient(tf.nn.relu(min_u - u))
return u
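    # One augmented leapfrog step of the L2HMC operator: the momentum update and
    # the masked position updates use learned scale (S), translation (T) and
    # transformation (F) networks, and the returned log-Jacobian collects the
    # scale terms so the MH correction stays exact.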
def _forward_step(self, x, v, step, aux=None):
t = self._format_time(step, tile=tf.shape(x)[0])
grad1 = self.grad_energy(x, aux=aux)
S1 = self.VNet([x, grad1, t, aux])
sv1 = 0.5 * self.eps * S1[0]
tv1 = S1[1]
fv1 = self.eps * S1[2]
v_h = tf.multiply(v, safe_exp(sv1, name='sv1F')) + 0.5 * self.eps * (-tf.multiply(safe_exp(fv1, name='fv1F'), grad1) + tv1)
m, mb = self._get_mask(step)
# m, mb = self._gen_mask(x)
X1 = self.XNet([v_h, m * x, t, aux])
sx1 = (self.eps * X1[0])
tx1 = X1[1]
fx1 = self.eps * X1[2]
y = m * x + mb * (tf.multiply(x, safe_exp(sx1, name='sx1F')) + self.eps * (tf.multiply(safe_exp(fx1, name='fx1F'), v_h) + tx1))
X2 = self.XNet([v_h, mb * y, t, aux])
sx2 = (self.eps * X2[0])
tx2 = X2[1]
fx2 = self.eps * X2[2]
x_o = mb * y + m * (tf.multiply(y, safe_exp(sx2, name='sx2F')) + self.eps * (tf.multiply(safe_exp(fx2, name='fx2F'), v_h) + tx2))
S2 = self.VNet([x_o, self.grad_energy(x_o, aux=aux), t, aux])
sv2 = (0.5 * self.eps * S2[0])
tv2 = S2[1]
fv2 = self.eps * S2[2]
grad2 = self.grad_energy(x_o, aux=aux)
v_o = tf.multiply(v_h, safe_exp(sv2, name='sv2F')) + 0.5 * self.eps * (-tf.multiply(safe_exp(fv2, name='fv2F'), grad2) + tv2)
log_jac_contrib = tf.reduce_sum(sv1 + sv2 + mb * sx1 + m * sx2, axis=1)
return x_o, v_o, log_jac_contrib
def _backward_step(self, x_o, v_o, step, aux=None):
t = self._format_time(step, tile=tf.shape(x_o)[0])
grad1 = self.grad_energy(x_o, aux=aux)
S1 = self.VNet([x_o, grad1, t, aux])
sv2 = (-0.5 * self.eps * S1[0])
tv2 = S1[1]
fv2 = self.eps * S1[2]
v_h = tf.multiply((v_o - 0.5 * self.eps * (-tf.multiply(safe_exp(fv2, name='fv2B'), grad1) + tv2)), safe_exp(sv2, name='sv2B'))
m, mb = self._get_mask(step)
# m, mb = self._gen_mask(x_o)
X1 = self.XNet([v_h, mb * x_o, t, aux])
sx2 = (-self.eps * X1[0])
tx2 = X1[1]
fx2 = self.eps * X1[2]
y = mb * x_o + m * tf.multiply(safe_exp(sx2, name='sx2B'), (x_o - self.eps * (tf.multiply(safe_exp(fx2, name='fx2B'), v_h) + tx2)))
X2 = self.XNet([v_h, m * y, t, aux])
sx1 = (-self.eps * X2[0])
tx1 = X2[1]
fx1 = self.eps * X2[2]
x = m * y + mb * tf.multiply(safe_exp(sx1, name='sx1B'), (y - self.eps * (tf.multiply(safe_exp(fx1, name='fx1B'), v_h) + tx1)))
grad2 = self.grad_energy(x, aux=aux)
S2 = self.VNet([x, grad2, t, aux])
sv1 = (-0.5 * self.eps * S2[0])
tv1 = S2[1]
fv1 = self.eps * S2[2]
v = tf.multiply(safe_exp(sv1, name='sv1B'), (v_h - 0.5 * self.eps * (-tf.multiply(safe_exp(fv1, name='fv1B'), grad2) + tv1)))
return x, v, tf.reduce_sum(sv1 + sv2 + mb * sx1 + m * sx2, axis=1)
def energy(self, x, aux=None):
if self.use_temperature:
T = self.temperature
else:
T = tf.constant(1.0, dtype=TF_FLOAT)
if aux is not None:
return self._fn(x, aux=aux) / T
else:
return self._fn(x) / T
def hamiltonian(self, x, v, aux=None):
return self.energy(x, aux=aux) + self.kinetic(v)
def grad_energy(self, x, aux=None):
return tf.gradients(self.energy(x, aux=aux), x)[0]
def _gen_mask(self, x):
dX = x.get_shape().as_list()[1]
b = np.zeros(self.x_dim)
for i in range(self.x_dim):
if i % 2 == 0:
b[i] = 1
b = b.astype('bool')
nb = np.logical_not(b)
return b.astype(NP_FLOAT), nb.astype(NP_FLOAT)
#
# def forward(self, x, init_v=None):
# if init_v is None:
# v = tf.random_normal(tf.shape(x))
# else:
# v = init_v
#
# dN = tf.shape(x)[0]
# j = tf.zeros((dN,))
# curr_x, curr_v = x, v
# for t in range(self.T):
# curr_x, curr_v, log_j = self._forward_step(curr_x, curr_v, t)
# j += log_j
#
# return curr_x, curr_v, self.p_accept(x, v, curr_x, curr_v, j)
def forward(self, x, init_v=None, aux=None, log_path=False, log_jac=False):
if init_v is None:
v = tf.random_normal(tf.shape(x))
else:
v = init_v
dN = tf.shape(x)[0]
t = tf.constant(0., dtype=TF_FLOAT)
j = tf.zeros((dN,))
def body(x, v, t, j):
new_x, new_v, log_j = self._forward_step(x, v, t, aux=aux)
return new_x, new_v, t+1, j+log_j
def cond(x, v, t, j):
return tf.less(t, self.T)
X, V, t, log_jac_ = tf.while_loop(
cond=cond,
body=body,
loop_vars=[x, v, t, j]
)
if log_jac:
return X, V, log_jac_
return X, V, self.p_accept(x, v, X, V, log_jac_, aux=aux)
def backward(self, x, init_v=None, aux=None, log_jac=False):
if init_v is None:
v = tf.random_normal(tf.shape(x))
else:
v = init_v
dN = tf.shape(x)[0]
t = tf.constant(0., name='step_backward', dtype=TF_FLOAT)
j = tf.zeros((dN,), name='acc_jac_backward')
def body(x, v, t, j):
new_x, new_v, log_j = self._backward_step(x, v, self.T - t - 1, aux=aux)
return new_x, new_v, t+1, j+log_j
def cond(x, v, t, j):
return tf.less(t, self.T)
X, V, t, log_jac_ = tf.while_loop(
cond=cond,
body=body,
loop_vars=[x, v, t, j]
)
if log_jac:
return X, V, log_jac_
return X, V, self.p_accept(x, v, X, V, log_jac_, aux=aux)
def p_accept(self, x0, v0, x1, v1, log_jac, aux=None):
e_new = self.hamiltonian(x1, v1, aux=aux)
e_old = self.hamiltonian(x0, v0, aux=aux)
v = e_old - e_new + log_jac
p = tf.exp(tf.minimum(v, 0.0))
return tf.where(tf.is_finite(p), p, tf.zeros_like(p))
| 8,936 | 27.736334 | 135 | py |
blp | blp-master/utils.py | import torch
import logging
import models
def get_model(model, dim, rel_model, loss_fn, num_entities, num_relations,
encoder_name, regularizer):
if model == 'blp':
return models.BertEmbeddingsLP(dim, rel_model, loss_fn, num_relations,
encoder_name, regularizer)
elif model == 'bert-bow':
return models.BOW(rel_model, loss_fn, num_relations, regularizer,
encoder_name=encoder_name)
elif model == 'bert-dkrl':
return models.DKRL(dim, rel_model, loss_fn, num_relations, regularizer,
encoder_name=encoder_name)
elif model == 'glove-bow':
return models.BOW(rel_model, loss_fn, num_relations, regularizer,
embeddings='data/glove/glove.6B.300d.pt')
elif model == 'glove-dkrl':
return models.DKRL(dim, rel_model, loss_fn, num_relations, regularizer,
embeddings='data/glove/glove.6B.300d.pt')
elif model == 'transductive':
return models.TransductiveLinkPrediction(dim, rel_model, loss_fn,
num_entities, num_relations,
regularizer)
else:
raise ValueError(f'Unkown model {model}')
def make_ent2idx(entities, max_ent_id):
"""Given a tensor with entity IDs, return a tensor indexed with
an entity ID, containing the position of the entity.
Empty positions are filled with -1.
Example:
    > make_ent2idx(torch.tensor([4, 5, 0]), max_ent_id=5)
tensor([ 2, -1, -1, -1, 0, 1])
"""
idx = torch.arange(entities.shape[0])
ent2idx = torch.empty(max_ent_id + 1, dtype=torch.long).fill_(-1)
ent2idx.scatter_(0, entities, idx)
return ent2idx
def get_triple_filters(triples, graph, num_ents, ent2idx):
"""Given a set of triples, filter candidate entities that are valid
substitutes of an entity in the triple at a given position (head or tail).
For a particular triple, this allows to compute rankings for an entity of
interest, against other entities in the graph that would actually be wrong
substitutes.
Results are returned as a mask array with a value of 1.0 for filtered
entities, and 0.0 otherwise.
Args:
triples: Bx3 tensor of type torch.long, where B is the batch size,
and each row contains a triple of the form (head, tail, rel)
graph: nx.MultiDiGraph containing all edges used to filter candidates
num_ents: int, number of candidate entities
ent2idx: tensor, contains at index ent_id the index of the column for
that entity in the output mask array
"""
num_triples = triples.shape[0]
heads_filter = torch.zeros((num_triples, num_ents), dtype=torch.bool)
tails_filter = torch.zeros_like(heads_filter)
triples = triples.tolist()
for i, (head, tail, rel) in enumerate(triples):
head_edges = graph.out_edges(head, data='weight')
for (h, t, r) in head_edges:
if r == rel and t != tail:
ent_idx = ent2idx[t]
if ent_idx != -1:
tails_filter[i, ent_idx] = True
tail_edges = graph.in_edges(tail, data='weight')
for (h, t, r) in tail_edges:
if r == rel and h != head:
ent_idx = ent2idx[h]
if ent_idx != -1:
heads_filter[i, ent_idx] = True
return heads_filter, tails_filter
def get_metrics(pred_scores: torch.Tensor,
true_idx: torch.Tensor,
k_values: torch.Tensor):
"""Calculates mean number of hits@k. Higher values are ranked first.
Args:
pred_scores: (B, N) tensor of prediction values where B is batch size
and N number of classes.
        true_idx: (B, 1) tensor with index of ground truth class
k_values: (1, k) tensor containing number of top-k results to be
considered as hits.
Returns:
reciprocals: (B, 1) tensor containing reciprocals of the ranks
hits: (B, k) tensor containing the number of hits for each value of k
"""
# Based on PyKEEN's implementation
true_scores = pred_scores.gather(dim=1, index=true_idx)
best_rank = (pred_scores > true_scores).sum(dim=1, keepdim=True) + 1
worst_rank = (pred_scores >= true_scores).sum(dim=1, keepdim=True)
average_rank = (best_rank + worst_rank).float() * 0.5
reciprocals = average_rank.reciprocal()
hits = average_rank <= k_values
return reciprocals, hits
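# Illustrative only (not part of the original module): a small worked example of
# the average-rank convention above, with made-up scores. When the true class is
# tied with one other class, the rank is the mean of the optimistic and
# pessimistic ranks:
#
#   pred_scores = torch.tensor([[0.1, 0.9, 0.3, 0.9]])
#   true_idx = torch.tensor([[1]])           # true score is 0.9
#   k_values = torch.tensor([[1, 3, 10]])
#   # best_rank = 1, worst_rank = 2, average_rank = 1.5
#   # reciprocals -> tensor([[0.6667]]); hits -> tensor([[False, True, True]])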
def split_by_new_position(triples, mrr_values, new_entities):
"""Split MRR results by the position of new entity. Use to break down
results for a triple where a new entity is at the head and the tail,
at the head only, or the tail only.
    Since MRR is calculated by corrupting the head first, and then the tail,
the size of mrr_values should be twice the size of triples. The calculated
MRR is then the average of the two cases.
Args:
triples: Bx3 tensor containing (head, tail, rel).
mrr_values: 2B tensor, with first half containing MRR for corrupted
triples at the head position, and second half at the tail position.
new_entities: set, entities to be considered as new.
Returns:
mrr_by_position: tensor of 3 elements breaking down MRR by new entities
at both positions, at head, and tail.
mrr_pos_counts: tensor of 3 elements containing counts for each case.
"""
mrr_by_position = torch.zeros(3, device=mrr_values.device)
mrr_pos_counts = torch.zeros_like(mrr_by_position)
num_triples = triples.shape[0]
for i, (h, t, r) in enumerate(triples):
head, tail = h.item(), t.item()
mrr_val = (mrr_values[i] + mrr_values[i + num_triples]).item() / 2.0
if head in new_entities and tail in new_entities:
mrr_by_position[0] += mrr_val
mrr_pos_counts[0] += 1.0
elif head in new_entities:
mrr_by_position[1] += mrr_val
mrr_pos_counts[1] += 1.0
elif tail in new_entities:
mrr_by_position[2] += mrr_val
mrr_pos_counts[2] += 1.0
return mrr_by_position, mrr_pos_counts
def split_by_category(triples, mrr_values, rel_categories):
mrr_by_category = torch.zeros([2, 4], device=mrr_values.device)
mrr_cat_count = torch.zeros([1, 4], dtype=torch.float,
device=mrr_by_category.device)
num_triples = triples.shape[0]
for i, (h, t, r) in enumerate(triples):
rel_category = rel_categories[r]
mrr_val_head_pred = mrr_values[i]
mrr_by_category[0, rel_category] += mrr_val_head_pred
mrr_val_tail_pred = mrr_values[i + num_triples]
mrr_by_category[1, rel_category] += mrr_val_tail_pred
mrr_cat_count[0, rel_category] += 1
return mrr_by_category, mrr_cat_count
def get_logger():
"""Get a default logger that includes a timestamp."""
logger = logging.getLogger("")
logger.handlers = []
ch = logging.StreamHandler()
str_fmt = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
formatter = logging.Formatter(str_fmt, datefmt='%H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel('INFO')
return logger
| 7,375 | 39.306011 | 79 | py |
blp | blp-master/data.py | import os.path as osp
import torch
from torch.utils.data import Dataset
import transformers
import string
import nltk
from tqdm import tqdm
from nltk.corpus import stopwords
import logging
UNK = '[UNK]'
nltk.download('stopwords')
nltk.download('punkt')
STOP_WORDS = stopwords.words('english')
DROPPED = STOP_WORDS + list(string.punctuation)
CATEGORY_IDS = {'1-to-1': 0, '1-to-many': 1, 'many-to-1': 2, 'many-to-many': 3}
def file_to_ids(file_path):
"""Read one line per file and assign it an ID.
Args:
file_path: str, path of file to read
Returns: dict, mapping str to ID (int)
"""
str2id = dict()
with open(file_path) as file:
for i, line in enumerate(file):
str2id[line.strip()] = i
return str2id
def get_negative_sampling_indices(batch_size, num_negatives, repeats=1):
""""Obtain indices for negative sampling within a batch of entity pairs.
Indices are sampled from a reshaped array of indices. For example,
if there are 4 pairs (batch_size=4), the array of indices is
[[0, 1],
[2, 3],
[4, 5],
[6, 7]]
From this array, we corrupt either the first or second element of each row.
This yields one negative sample.
For example, if the positions with a dash are selected,
[[0, -],
[-, 3],
[4, -],
[-, 7]]
they are then replaced with a random index from a row other than the row
to which they belong:
[[0, 3],
[5, 3],
[4, 6],
[1, 7]]
The returned array has shape (batch_size, num_negatives, 2).
"""
num_ents = batch_size * 2
idx = torch.arange(num_ents).reshape(batch_size, 2)
# For each row, sample entities, assigning 0 probability to entities
# of the same row
zeros = torch.zeros(batch_size, 2)
head_weights = torch.ones(batch_size, num_ents, dtype=torch.float)
head_weights.scatter_(1, idx, zeros)
random_idx = head_weights.multinomial(num_negatives * repeats,
replacement=True)
random_idx = random_idx.t().flatten()
# Select randomly the first or the second column
row_selector = torch.arange(batch_size * num_negatives * repeats)
col_selector = torch.randint(0, 2, [batch_size * num_negatives * repeats])
# Fill the array of negative samples with the sampled random entities
# at the right positions
neg_idx = idx.repeat((num_negatives * repeats, 1))
neg_idx[row_selector, col_selector] = random_idx
neg_idx = neg_idx.reshape(-1, batch_size * repeats, 2)
neg_idx.transpose_(0, 1)
return neg_idx
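# Illustrative only (not part of the original module): a sketch of how the
# sampler above is typically consumed. The batch size and number of negatives
# are assumptions made for this example.
#
#   neg_idx = get_negative_sampling_indices(batch_size=4, num_negatives=2)
#   # neg_idx.shape == (4, 2, 2): for each of the 4 positive pairs there are
#   # 2 corrupted (head, tail) index pairs into the flattened entity batch.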
class GraphDataset(Dataset):
"""A Dataset storing the triples of a Knowledge Graph.
Args:
triples_file: str, path to the file containing triples. This is a
text file where each line contains a triple of the form
'subject predicate object'
write_maps_file: bool, if set to True, dictionaries mapping
entities and relations to IDs are saved to disk (for reuse with
other datasets).
"""
def __init__(self, triples_file, neg_samples=None, write_maps_file=False,
num_devices=1):
directory = osp.dirname(triples_file)
maps_path = osp.join(directory, 'maps.pt')
# Create or load maps from entity and relation strings to unique IDs
if not write_maps_file:
if not osp.exists(maps_path):
raise ValueError('Maps file not found.')
maps = torch.load(maps_path)
ent_ids, rel_ids = maps['ent_ids'], maps['rel_ids']
else:
ents_file = osp.join(directory, 'entities.txt')
rels_file = osp.join(directory, 'relations.txt')
ent_ids = file_to_ids(ents_file)
rel_ids = file_to_ids(rels_file)
entities = set()
relations = set()
# Read triples and store as ints in tensor
file = open(triples_file)
triples = []
for i, line in enumerate(file):
values = line.split()
# FB13 and WN11 have duplicate triples for classification,
# here we keep the correct triple
if len(values) > 3 and values[3] == '-1':
continue
head, rel, tail = line.split()[:3]
entities.update([head, tail])
relations.add(rel)
triples.append([ent_ids[head], ent_ids[tail], rel_ids[rel]])
self.triples = torch.tensor(triples, dtype=torch.long)
self.rel_categories = torch.zeros(len(rel_ids), dtype=torch.long)
rel_categories_file = osp.join(directory, 'relations-cat.txt')
self.has_rel_categories = False
if osp.exists(rel_categories_file):
with open(rel_categories_file) as f:
for line in f:
rel, cat = line.strip().split()
self.rel_categories[rel_ids[rel]] = CATEGORY_IDS[cat]
self.has_rel_categories = True
# Save maps for reuse
torch.save({'ent_ids': ent_ids, 'rel_ids': rel_ids}, maps_path)
self.num_ents = len(entities)
self.num_rels = len(relations)
self.entities = torch.tensor([ent_ids[ent] for ent in entities])
self.num_triples = self.triples.shape[0]
self.directory = directory
self.maps_path = maps_path
self.neg_samples = neg_samples
self.num_devices = num_devices
def __getitem__(self, index):
return self.triples[index]
def __len__(self):
return self.num_triples
def collate_fn(self, data_list):
"""Given a batch of triples, return it together with a batch of
        corrupted triples where either the subject or object is replaced
by a random entity. Use as a collate_fn for a DataLoader.
"""
batch_size = len(data_list)
pos_pairs, rels = torch.stack(data_list).split(2, dim=1)
neg_idx = get_negative_sampling_indices(batch_size, self.neg_samples)
return pos_pairs, rels, neg_idx
class TextGraphDataset(GraphDataset):
"""A dataset storing a graph, and textual descriptions of its entities.
Args:
triples_file: str, path to the file containing triples. This is a
text file where each line contains a triple of the form
'subject predicate object'
max_len: int, maximum number of tokens to read per description.
neg_samples: int, number of negative samples to get per triple
tokenizer: transformers.PreTrainedTokenizer or GloVeTokenizer, used
to tokenize the text.
drop_stopwords: bool, if set to True, punctuation and stopwords are
dropped from entity descriptions.
write_maps_file: bool, if set to True, dictionaries mapping
entities and relations to IDs are saved to disk (for reuse with
other datasets).
"""
def __init__(self, triples_file, neg_samples, max_len, tokenizer,
drop_stopwords, write_maps_file=False, use_cached_text=False,
num_devices=1):
super().__init__(triples_file, neg_samples, write_maps_file,
num_devices)
maps = torch.load(self.maps_path)
ent_ids = maps['ent_ids']
if max_len is None:
max_len = tokenizer.max_len
cached_text_path = osp.join(self.directory, 'text_data.pt')
need_to_load_text = True
if use_cached_text:
logger = logging.getLogger()
if osp.exists(cached_text_path):
self.text_data = torch.load(cached_text_path)
logger.info(f'Loaded cached text data for'
f' {self.text_data.shape[0]} entities,'
f' and maximum length {self.text_data.shape[1]}.')
need_to_load_text = False
else:
logger.info(f'Cached text data not found.')
if need_to_load_text:
self.text_data = torch.zeros((len(ent_ids), max_len + 1),
dtype=torch.long)
read_entities = set()
progress = tqdm(desc='Reading entity descriptions',
total=len(ent_ids), mininterval=5)
for text_file in ('entity2textlong.txt', 'entity2text.txt'):
file_path = osp.join(self.directory, text_file)
if not osp.exists(file_path):
continue
with open(file_path) as f:
for line in f:
values = line.strip().split('\t')
entity = values[0]
text = ' '.join(values[1:])
if entity not in ent_ids:
continue
if entity in read_entities:
continue
read_entities.add(entity)
ent_id = ent_ids[entity]
if drop_stopwords:
tokens = nltk.word_tokenize(text)
text = ' '.join([t for t in tokens if
t.lower() not in DROPPED])
text_tokens = tokenizer.encode(text,
max_length=max_len,
return_tensors='pt')
text_len = text_tokens.shape[1]
# Starting slice of row contains token IDs
self.text_data[ent_id, :text_len] = text_tokens
# Last cell contains sequence length
self.text_data[ent_id, -1] = text_len
progress.update()
progress.close()
if len(read_entities) != len(ent_ids):
raise ValueError(f'Read {len(read_entities):,} descriptions,'
f' but {len(ent_ids):,} were expected.')
if self.text_data[:, -1].min().item() < 1:
raise ValueError(f'Some entries in text_data contain'
f' length-0 descriptions.')
torch.save(self.text_data,
osp.join(self.directory, 'text_data.pt'))
def get_entity_description(self, ent_ids):
"""Get entity descriptions for a tensor of entity IDs."""
text_data = self.text_data[ent_ids]
text_end_idx = text_data.shape[-1] - 1
# Separate tokens from lengths
text_tok, text_len = text_data.split(text_end_idx, dim=-1)
max_batch_len = text_len.max()
# Truncate batch
text_tok = text_tok[..., :max_batch_len]
text_mask = (text_tok > 0).float()
return text_tok, text_mask, text_len
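    # Illustrative only (not part of the original class): assuming a dataset
    # with cached text for entities 0 and 7, the call below returns token IDs
    # truncated to the longest description in the batch, a matching 0/1 float
    # mask, and the stored lengths. 'dataset' stands for any TextGraphDataset
    # instance.
    #
    #   text_tok, text_mask, text_len = dataset.get_entity_description(
    #       torch.tensor([0, 7]))
    #   # text_tok: (2, max_batch_len) long; text_mask: same shape, float;
    #   # text_len: (2, 1) long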
def collate_fn(self, data_list):
"""Given a batch of triples, return it in the form of
entity descriptions, and the relation types between them.
Use as a collate_fn for a DataLoader.
"""
batch_size = len(data_list) // self.num_devices
if batch_size <= 1:
            raise ValueError('collate_fn can only work with batch sizes'
' larger than 1.')
pos_pairs, rels = torch.stack(data_list).split(2, dim=1)
text_tok, text_mask, text_len = self.get_entity_description(pos_pairs)
neg_idx = get_negative_sampling_indices(batch_size, self.neg_samples,
repeats=self.num_devices)
return text_tok, text_mask, rels, neg_idx
class GloVeTokenizer:
def __init__(self, vocab_dict_file, uncased=True):
self.word2idx = torch.load(vocab_dict_file)
self.uncased = uncased
def encode(self, text, max_length, return_tensors):
if self.uncased:
text = text.lower()
tokens = nltk.word_tokenize(text)
encoded = [self.word2idx.get(t, self.word2idx[UNK]) for t in tokens]
encoded = [encoded[:max_length]]
if return_tensors:
encoded = torch.tensor(encoded)
return encoded
def batch_encode_plus(self, batch, max_length, **kwargs):
batch_tokens = []
for text in batch:
tokens = self.encode(text, max_length, return_tensors=False)[0]
if len(tokens) < max_length:
tokens += [0] * (max_length - len(tokens))
batch_tokens.append(tokens)
batch_tokens = torch.tensor(batch_tokens, dtype=torch.long)
batch_masks = (batch_tokens > 0).float()
tokenized_data = {'input_ids': batch_tokens,
'attention_mask': batch_masks}
return tokenized_data
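# Illustrative only (not part of the original module): a minimal usage sketch of
# the tokenizer above. The vocabulary file path is the one used elsewhere in
# this repository; the sentences are made up for the example.
#
#   tokenizer = GloVeTokenizer('data/glove/glove.6B.300d-maps.pt')
#   batch = tokenizer.batch_encode_plus(['a short description',
#                                        'another entity description'],
#                                       max_length=16)
#   # batch['input_ids']: (2, 16) long tensor, zero-padded
#   # batch['attention_mask']: (2, 16) float tensor of 0/1 values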
def test_text_graph_dataset():
from torch.utils.data import DataLoader
tok = transformers.AlbertTokenizer.from_pretrained('albert-base-v2')
gtr = TextGraphDataset('data/wikifb15k237/train-triples.txt', max_len=32,
neg_samples=32, tokenizer=tok, drop_stopwords=False)
loader = DataLoader(gtr, batch_size=8, collate_fn=gtr.collate_fn)
data = next(iter(loader))
print('Done')
if __name__ == '__main__':
test_text_graph_dataset()
| 13,290 | 36.866097 | 79 | py |
blp | blp-master/retrieval.py | import os
import os.path as osp
from collections import defaultdict
import torch
import torch.nn.functional as F
from tqdm import tqdm
from transformers import BertTokenizer
from logging import Logger
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.run import Run
import json
import pytrec_eval
import numpy as np
import scipy.stats
import nltk
from data import DROPPED
from data import GloVeTokenizer
import utils
OUT_PATH = 'output/'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ex = Experiment()
ex.logger = utils.get_logger()
# Set up database logs
uri = os.environ.get('DB_URI')
database = os.environ.get('DB_NAME')
if all([uri, database]):
ex.observers.append(MongoObserver(uri, database))
def remove_stopwords(text):
tokens = nltk.word_tokenize(text)
text = ' '.join([t for t in tokens if
t.lower() not in DROPPED])
return text
@ex.config
def config():
dim = 128
model = 'bert-dkrl'
rel_model = 'transe'
max_len = 64
emb_batch_size = 512
checkpoint = 'output/model-348.pt'
run_file = 'data/DBpedia-Entity/runs/v2/bm25f-ca_v2.run'
queries_file = 'data/DBpedia-Entity/collection/v2/queries-v2_stopped.txt'
descriptions_file = 'data/DBpedia-Entity/runs/v2/' \
'bm25f-ca_v2-descriptions.txt'
qrels_file = 'data/DBpedia-Entity/collection/v2/qrels-v2.txt'
folds_file = 'data/DBpedia-Entity/collection/v2/folds/all_queries.json'
@ex.capture
def embed_entities(dim, model, rel_model, max_len, emb_batch_size, checkpoint,
run_file, descriptions_file, drop_stopwords, _log: Logger):
def encode_batch(batch):
tokenized_data = tokenizer.batch_encode_plus(batch,
max_length=max_len,
pad_to_max_length=True,
return_token_type_ids=False,
return_tensors='pt')
tokens = tokenized_data['input_ids'].to(device)
masks = tokenized_data['attention_mask'].float().to(device)
return encoder.encode(tokens.to(device), masks.to(device))
if model.startswith('bert') or model == 'blp':
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
else:
tokenizer = GloVeTokenizer('data/glove/glove.6B.300d-maps.pt')
encoder = utils.get_model(model, dim, rel_model,
encoder_name='bert-base-cased',
loss_fn='margin', num_entities=0,
num_relations=1, regularizer=0.0).to(device)
encoder = torch.nn.DataParallel(encoder)
state_dict = torch.load(checkpoint, map_location=device)
# We don't need relation embeddings for this task
state_dict.pop('module.rel_emb.weight', None)
encoder.load_state_dict(state_dict, strict=False)
encoder = encoder.module
for param in encoder.parameters():
param.requires_grad = False
# Encode entity descriptions
run_file_name = osp.splitext(osp.basename(run_file))[0]
get_entity_embeddings = True
qent_checkpoint = osp.join(osp.dirname(checkpoint),
f'{run_file_name}-qent-{osp.basename(checkpoint)}')
if osp.exists(qent_checkpoint):
_log.info(f'Loading entity embeddings from {qent_checkpoint}')
ent_embeddings = torch.load(qent_checkpoint, map_location=device)
get_entity_embeddings = False
else:
ent_embeddings = []
entity2idx = dict()
descriptions_batch = []
progress = tqdm(desc='Encoding entity descriptions',
disable=not get_entity_embeddings)
with open(descriptions_file) as f:
for i, line in enumerate(f):
values = line.strip().split('\t')
entity = values[0]
entity2idx[entity] = i
if get_entity_embeddings:
text = ' '.join(values[1:])
if drop_stopwords:
text = remove_stopwords(text)
descriptions_batch.append(text)
if len(descriptions_batch) == emb_batch_size:
embedding = encode_batch(descriptions_batch)
ent_embeddings.append(embedding)
descriptions_batch = []
progress.update(emb_batch_size)
if get_entity_embeddings:
if len(descriptions_batch) > 0:
embedding = encode_batch(descriptions_batch)
ent_embeddings.append(embedding)
ent_embeddings = torch.cat(ent_embeddings)
torch.save(ent_embeddings, qent_checkpoint)
_log.info(f'Saved entity embeddings to {qent_checkpoint}')
progress.close()
return ent_embeddings, entity2idx, encoder, tokenizer
def rerank_on_fold(fold, qrels, baseline_run, id2query, tokenizer, encoder,
entity2idx, ent_embeddings, alpha, drop_stopwords):
train_run = dict()
qrel_run = dict()
for query_id in fold:
results = baseline_run[query_id]
# Encode query
query = id2query[query_id]
if drop_stopwords:
query = remove_stopwords(query)
query_tokens = tokenizer.encode(query, return_tensors='pt',
max_length=64)
query_embedding = encoder.encode(query_tokens.to(device),
text_mask=None)
# Get embeddings of entities to rerank for this query
ent_ids_to_rerank = []
original_scores = []
selected_results = []
missing_results = []
missing_scores = []
for entity, orig_score in results.items():
if entity in entity2idx:
ent_ids_to_rerank.append(entity2idx[entity])
original_scores.append(orig_score)
selected_results.append(entity)
else:
missing_results.append(entity)
missing_scores.append(orig_score)
candidate_embeddings = ent_embeddings[ent_ids_to_rerank]
candidate_embeddings = F.normalize(candidate_embeddings, dim=-1)
query_embedding = F.normalize(query_embedding, dim=-1)
# Compute relevance
scores = candidate_embeddings @ query_embedding.t()
scores = scores.flatten().cpu().tolist() + [0] * len(missing_scores)
results_scores = zip(selected_results + missing_results,
scores,
original_scores + missing_scores)
results_scores = [[result, alpha * s1 + (1 - alpha) * s2] for
result, s1, s2 in results_scores]
train_run[query_id] = {r: s for r, s in results_scores}
qrel_run[query_id] = qrels[query_id]
evaluator = pytrec_eval.RelevanceEvaluator(qrel_run, {'ndcg_cut_100'})
train_results = evaluator.evaluate(train_run)
mean = np.mean([res['ndcg_cut_100'] for res in train_results.values()])
return mean, train_run
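# Illustrative only (not part of the original module): the reranking score used
# above is a linear interpolation between the embedding similarity and the
# baseline retrieval score. With assumed values alpha = 0.3, an embedding
# similarity of 0.8 and a baseline score of 0.5, the combined score would be
# 0.3 * 0.8 + 0.7 * 0.5 = 0.59. alpha itself is selected per fold on the
# training queries in the rerank command below.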
@ex.automain
def rerank(model, rel_model, run_file, queries_file, qrels_file, folds_file,
_run: Run, _log: Logger):
drop_stopwords = model in {'bert-bow', 'bert-dkrl',
'glove-bow', 'glove-dkrl'}
ent_embeddings, entity2idx, encoder, tokenizer = embed_entities(
drop_stopwords=drop_stopwords)
# Read queries
id2query = dict()
with open(queries_file) as f:
for line in f:
values = line.strip().split('\t')
query_id = values[0]
query = ' '.join(values[1:])
id2query[query_id] = query
# Read baseline and ground truth rankings
baseline_run = defaultdict(dict)
qrels = defaultdict(dict)
for query_dict, file in ((baseline_run, run_file),
(qrels, qrels_file)):
with open(file) as f:
for line in f:
values = line.strip().split()
if len(values) >= 6:
query_id, q0, entity, rank, score, *_ = values
score = float(score)
else:
query_id, q0, entity, score = values
score = int(score)
query_dict[query_id][entity] = score
# Read query folds
with open(folds_file) as f:
folds = json.load(f)
# Keep only query type of interest
new_baseline_run = {}
new_qrels = {}
for f in folds.values():
relevant_queries = f['testing']
for query_id in relevant_queries:
new_baseline_run.update({query_id: baseline_run[query_id]})
new_qrels.update({query_id: qrels[query_id]})
baseline_run = new_baseline_run
qrels = new_qrels
# Choose best reranking on training set
alpha_choices = np.linspace(0, 1, 20)
test_run = dict()
for i, (idx, fold) in enumerate(folds.items()):
train_queries = fold['training']
best_result = 0.0
best_alpha = alpha_choices[0]
for alpha in alpha_choices:
result, _ = rerank_on_fold(train_queries, qrels,
baseline_run, id2query, tokenizer,
encoder, entity2idx, ent_embeddings,
alpha, drop_stopwords)
if result > best_result:
best_result = result
best_alpha = alpha
_log.info(f'[Fold {i + 1}/{len(folds)}]'
f' Best training result: {best_result:.3f}'
f' with alpha={best_alpha:.3}')
test_queries = fold['testing']
fold_mean, fold_run = rerank_on_fold(test_queries, qrels,
baseline_run, id2query,
tokenizer, encoder, entity2idx,
ent_embeddings, best_alpha,
drop_stopwords)
_log.info(f'Test fold result: {fold_mean:.3f}')
test_run.update(fold_run)
_log.info(f'Finished hyperparameter search')
_log.info(f'Saving run file')
output_run_path = osp.join(OUT_PATH, f'{_run._id}.run')
with open(output_run_path, 'w') as f:
for query, results in test_run.items():
ranking = sorted(results.items(), key=lambda x: x[1], reverse=True)
for i, (entity, score) in enumerate(ranking):
f.write(
f'{query} Q0 {entity} {i + 1} {score} {model}-{rel_model}\n')
metrics = {'ndcg_cut_10', 'ndcg_cut_100'}
evaluator = pytrec_eval.RelevanceEvaluator(qrels, metrics)
baseline_results = evaluator.evaluate(baseline_run)
# This shouldn't be necessary, but there seems to be a bug that requires
# to instantiate the evaluator again, otherwise only one metric is obtained
# See https://github.com/cvangysel/pytrec_eval/issues/22
evaluator = pytrec_eval.RelevanceEvaluator(qrels, metrics)
test_results = evaluator.evaluate(test_run)
for metric in metrics:
baseline_mean = np.mean(
[res[metric] for res in baseline_results.values()])
test_mean = np.mean([res[metric] for res in test_results.values()])
_log.info(f'Metric: {metric}')
_log.info(f'Baseline result: {baseline_mean:.3f}')
_log.info(f'Test result: {test_mean:.3f}')
first_scores = [baseline_results[query_id][metric] for query_id in
baseline_results]
second_scores = [test_results[query_id][metric] for query_id in
baseline_results]
_log.info(scipy.stats.ttest_rel(first_scores, second_scores))
| 11,801 | 37.194175 | 82 | py |
blp | blp-master/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel
class LinkPrediction(nn.Module):
"""A general link prediction model with a lookup table for relation
embeddings."""
def __init__(self, dim, rel_model, loss_fn, num_relations, regularizer):
super().__init__()
self.dim = dim
self.normalize_embs = False
self.regularizer = regularizer
if rel_model == 'transe':
self.score_fn = transe_score
self.normalize_embs = True
elif rel_model == 'distmult':
self.score_fn = distmult_score
elif rel_model == 'complex':
self.score_fn = complex_score
elif rel_model == 'simple':
self.score_fn = simple_score
else:
raise ValueError(f'Unknown relational model {rel_model}.')
self.rel_emb = nn.Embedding(num_relations, self.dim)
nn.init.xavier_uniform_(self.rel_emb.weight.data)
if loss_fn == 'margin':
self.loss_fn = margin_loss
elif loss_fn == 'nll':
self.loss_fn = nll_loss
else:
            raise ValueError(f'Unknown loss function {loss_fn}')
def encode(self, *args, **kwargs):
ent_emb = self._encode_entity(*args, **kwargs)
if self.normalize_embs:
ent_emb = F.normalize(ent_emb, dim=-1)
return ent_emb
def _encode_entity(self, *args, **kwargs):
raise NotImplementedError
def forward(self, *args, **kwargs):
raise NotImplementedError
def compute_loss(self, ent_embs, rels, neg_idx):
batch_size = ent_embs.shape[0]
# Scores for positive samples
rels = self.rel_emb(rels)
heads, tails = torch.chunk(ent_embs, chunks=2, dim=1)
pos_scores = self.score_fn(heads, tails, rels)
if self.regularizer > 0:
reg_loss = self.regularizer * l2_regularization(heads, tails, rels)
else:
reg_loss = 0
# Scores for negative samples
neg_embs = ent_embs.view(batch_size * 2, -1)[neg_idx]
heads, tails = torch.chunk(neg_embs, chunks=2, dim=2)
neg_scores = self.score_fn(heads.squeeze(), tails.squeeze(), rels)
model_loss = self.loss_fn(pos_scores, neg_scores)
return model_loss + reg_loss
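# Illustrative only (not part of the original class): the tensor shapes assumed
# by compute_loss above, spelled out for a hypothetical batch of B triples with
# N negatives per triple:
#
#   ent_embs: (B, 2, dim)  -- head and tail embeddings of each positive pair
#   rels:     (B, 1)       -- relation IDs, looked up in self.rel_emb
#   neg_idx:  (B, N, 2)    -- indices into the flattened (2*B, dim) entity batch
#
# which matches the output of get_negative_sampling_indices in data.py.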
class InductiveLinkPrediction(LinkPrediction):
"""Description-based Link Prediction (DLP)."""
def _encode_entity(self, text_tok, text_mask):
raise NotImplementedError
def forward(self, text_tok, text_mask, rels=None, neg_idx=None):
batch_size, _, num_text_tokens = text_tok.shape
# Encode text into an entity representation from its description
ent_embs = self.encode(text_tok.view(-1, num_text_tokens),
text_mask.view(-1, num_text_tokens))
if rels is None and neg_idx is None:
# Forward is being used to compute entity embeddings only
out = ent_embs
else:
# Forward is being used to compute link prediction loss
ent_embs = ent_embs.view(batch_size, 2, -1)
out = self.compute_loss(ent_embs, rels, neg_idx)
return out
class BertEmbeddingsLP(InductiveLinkPrediction):
"""BERT for Link Prediction (BLP)."""
def __init__(self, dim, rel_model, loss_fn, num_relations, encoder_name,
regularizer):
super().__init__(dim, rel_model, loss_fn, num_relations, regularizer)
self.encoder = BertModel.from_pretrained(encoder_name,
output_attentions=False,
output_hidden_states=False)
hidden_size = self.encoder.config.hidden_size
self.enc_linear = nn.Linear(hidden_size, self.dim, bias=False)
def _encode_entity(self, text_tok, text_mask):
# Extract BERT representation of [CLS] token
embs = self.encoder(text_tok, text_mask)[0][:, 0]
embs = self.enc_linear(embs)
return embs
class WordEmbeddingsLP(InductiveLinkPrediction):
"""Description encoder with pretrained embeddings, obtained from BERT or a
specified tensor file.
"""
def __init__(self, rel_model, loss_fn, num_relations, regularizer,
dim=None, encoder_name=None, embeddings=None):
if not encoder_name and not embeddings:
            raise ValueError('Must provide one of encoder_name or embeddings')
if encoder_name is not None:
encoder = BertModel.from_pretrained(encoder_name)
embeddings = encoder.embeddings.word_embeddings
else:
emb_tensor = torch.load(embeddings)
num_embeddings, embedding_dim = emb_tensor.shape
embeddings = nn.Embedding(num_embeddings, embedding_dim)
embeddings.weight.data = emb_tensor
if dim is None:
dim = embeddings.embedding_dim
super().__init__(dim, rel_model, loss_fn, num_relations, regularizer)
self.embeddings = embeddings
def _encode_entity(self, text_tok, text_mask):
raise NotImplementedError
class BOW(WordEmbeddingsLP):
"""Bag-of-words (BOW) description encoder, with BERT low-level embeddings.
"""
def _encode_entity(self, text_tok, text_mask=None):
if text_mask is None:
text_mask = torch.ones_like(text_tok, dtype=torch.float)
# Extract average of word embeddings
embs = self.embeddings(text_tok)
lengths = torch.sum(text_mask, dim=-1, keepdim=True)
embs = torch.sum(text_mask.unsqueeze(dim=-1) * embs, dim=1)
embs = embs / lengths
return embs
class DKRL(WordEmbeddingsLP):
"""Description-Embodied Knowledge Representation Learning (DKRL) with CNN
encoder, after
Zuo, Yukun, et al. "Representation learning of knowledge graphs with
entity attributes and multimedia descriptions."
"""
def __init__(self, dim, rel_model, loss_fn, num_relations, regularizer,
encoder_name=None, embeddings=None):
super().__init__(rel_model, loss_fn, num_relations, regularizer,
dim, encoder_name, embeddings)
emb_dim = self.embeddings.embedding_dim
self.conv1 = nn.Conv1d(emb_dim, self.dim, kernel_size=2)
self.conv2 = nn.Conv1d(self.dim, self.dim, kernel_size=2)
def _encode_entity(self, text_tok, text_mask):
if text_mask is None:
text_mask = torch.ones_like(text_tok, dtype=torch.float)
# Extract word embeddings and mask padding
embs = self.embeddings(text_tok) * text_mask.unsqueeze(dim=-1)
# Reshape to (N, C, L)
embs = embs.transpose(1, 2)
text_mask = text_mask.unsqueeze(1)
# Pass through CNN, adding padding for valid convolutions
# and masking outputs due to padding
embs = F.pad(embs, [0, 1])
embs = self.conv1(embs)
embs = embs * text_mask
if embs.shape[2] >= 4:
kernel_size = 4
elif embs.shape[2] == 1:
kernel_size = 1
else:
kernel_size = 2
embs = F.max_pool1d(embs, kernel_size=kernel_size)
text_mask = F.max_pool1d(text_mask, kernel_size=kernel_size)
embs = torch.tanh(embs)
embs = F.pad(embs, [0, 1])
embs = self.conv2(embs)
lengths = torch.sum(text_mask, dim=-1)
embs = torch.sum(embs * text_mask, dim=-1) / lengths
embs = torch.tanh(embs)
return embs
class TransductiveLinkPrediction(LinkPrediction):
def __init__(self, dim, rel_model, loss_fn, num_entities, num_relations,
regularizer):
super().__init__(dim, rel_model, loss_fn, num_relations, regularizer)
self.ent_emb = nn.Embedding(num_entities, dim)
nn.init.xavier_uniform_(self.ent_emb.weight.data)
def _encode_entity(self, entities):
return self.ent_emb(entities)
def forward(self, pos_pairs, rels, neg_idx):
embs = self.encode(pos_pairs)
return self.compute_loss(embs, rels, neg_idx)
def transe_score(heads, tails, rels):
return -torch.norm(heads + rels - tails, dim=-1, p=1)
def distmult_score(heads, tails, rels):
return torch.sum(heads * rels * tails, dim=-1)
def complex_score(heads, tails, rels):
heads_re, heads_im = torch.chunk(heads, chunks=2, dim=-1)
tails_re, tails_im = torch.chunk(tails, chunks=2, dim=-1)
rels_re, rels_im = torch.chunk(rels, chunks=2, dim=-1)
return torch.sum(rels_re * heads_re * tails_re +
rels_re * heads_im * tails_im +
rels_im * heads_re * tails_im -
rels_im * heads_im * tails_re,
dim=-1)
def simple_score(heads, tails, rels):
heads_h, heads_t = torch.chunk(heads, chunks=2, dim=-1)
tails_h, tails_t = torch.chunk(tails, chunks=2, dim=-1)
rel_a, rel_b = torch.chunk(rels, chunks=2, dim=-1)
return torch.sum(heads_h * rel_a * tails_t +
tails_h * rel_b * heads_t, dim=-1) / 2
def margin_loss(pos_scores, neg_scores):
loss = 1 - pos_scores + neg_scores
loss[loss < 0] = 0
return loss.mean()
def nll_loss(pos_scores, neg_scores):
return (F.softplus(-pos_scores).mean() + F.softplus(neg_scores).mean()) / 2
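# Illustrative only (not part of the original module): a tiny worked example of
# the margin loss above, with made-up scores for one positive triple and three
# negatives:
#
#   pos_scores = torch.tensor([[2.0]])
#   neg_scores = torch.tensor([[0.5, 1.5, 2.5]])
#   # per-negative hinge terms: max(0, 1 - 2.0 + neg) = [0.0, 0.5, 1.5]
#   # margin_loss -> their mean, 0.6667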
def l2_regularization(heads, tails, rels):
reg_loss = 0.0
for tensor in (heads, tails, rels):
reg_loss += torch.mean(tensor ** 2)
return reg_loss / 3.0
| 9,514 | 34.636704 | 79 | py |
blp | blp-master/train.py | import os
import os.path as osp
import networkx as nx
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from sacred.run import Run
from logging import Logger
from sacred import Experiment
from sacred.observers import MongoObserver
from transformers import BertTokenizer, get_linear_schedule_with_warmup
from collections import defaultdict
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, balanced_accuracy_score
import joblib
from data import CATEGORY_IDS
from data import GraphDataset, TextGraphDataset, GloVeTokenizer
import models
import utils
OUT_PATH = 'output/'
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
ex = Experiment()
ex.logger = utils.get_logger()
# Set up database logs
uri = os.environ.get('DB_URI')
database = os.environ.get('DB_NAME')
if all([uri, database]):
ex.observers.append(MongoObserver(uri, database))
@ex.config
def config():
dataset = 'umls'
inductive = True
dim = 128
model = 'blp'
rel_model = 'transe'
loss_fn = 'margin'
encoder_name = 'bert-base-cased'
regularizer = 0
max_len = 32
num_negatives = 64
lr = 2e-5
use_scheduler = True
batch_size = 64
emb_batch_size = 512
eval_batch_size = 64
max_epochs = 40
checkpoint = None
use_cached_text = False
@ex.capture
@torch.no_grad()
def eval_link_prediction(model, triples_loader, text_dataset, entities,
epoch, emb_batch_size, _run: Run, _log: Logger,
prefix='', max_num_batches=None,
filtering_graph=None, new_entities=None,
return_embeddings=False):
compute_filtered = filtering_graph is not None
mrr_by_position = torch.zeros(3, dtype=torch.float).to(device)
mrr_pos_counts = torch.zeros_like(mrr_by_position)
rel_categories = triples_loader.dataset.rel_categories.to(device)
mrr_by_category = torch.zeros([2, 4], dtype=torch.float).to(device)
mrr_cat_count = torch.zeros([1, 4], dtype=torch.float).to(device)
hit_positions = [1, 3, 10]
k_values = torch.tensor([hit_positions], device=device)
hits_at_k = {pos: 0.0 for pos in hit_positions}
mrr = 0.0
mrr_filt = 0.0
hits_at_k_filt = {pos: 0.0 for pos in hit_positions}
if device != torch.device('cpu'):
model = model.module
if isinstance(model, models.InductiveLinkPrediction):
num_entities = entities.shape[0]
if compute_filtered:
max_ent_id = max(filtering_graph.nodes)
else:
max_ent_id = entities.max()
ent2idx = utils.make_ent2idx(entities, max_ent_id)
else:
# Transductive models have a lookup table of embeddings
num_entities = model.ent_emb.num_embeddings
ent2idx = torch.arange(num_entities)
entities = ent2idx
# Create embedding lookup table for evaluation
ent_emb = torch.zeros((num_entities, model.dim), dtype=torch.float,
device=device)
idx = 0
num_iters = np.ceil(num_entities / emb_batch_size)
iters_count = 0
while idx < num_entities:
# Get a batch of entity IDs and encode them
batch_ents = entities[idx:idx + emb_batch_size]
if isinstance(model, models.InductiveLinkPrediction):
# Encode with entity descriptions
data = text_dataset.get_entity_description(batch_ents)
text_tok, text_mask, text_len = data
batch_emb = model(text_tok.unsqueeze(1).to(device),
text_mask.unsqueeze(1).to(device))
else:
# Encode from lookup table
batch_emb = model(batch_ents)
ent_emb[idx:idx + batch_ents.shape[0]] = batch_emb
iters_count += 1
if iters_count % np.ceil(0.2 * num_iters) == 0:
_log.info(f'[{idx + batch_ents.shape[0]:,}/{num_entities:,}]')
idx += emb_batch_size
ent_emb = ent_emb.unsqueeze(0)
num_predictions = 0
_log.info('Computing metrics on set of triples')
total = len(triples_loader) if max_num_batches is None else max_num_batches
for i, triples in enumerate(triples_loader):
if max_num_batches is not None and i == max_num_batches:
break
heads, tails, rels = torch.chunk(triples, chunks=3, dim=1)
# Map entity IDs to positions in ent_emb
heads = ent2idx[heads].to(device)
tails = ent2idx[tails].to(device)
assert heads.min() >= 0
assert tails.min() >= 0
# Embed triple
head_embs = ent_emb.squeeze()[heads]
tail_embs = ent_emb.squeeze()[tails]
rel_embs = model.rel_emb(rels.to(device))
# Score all possible heads and tails
heads_predictions = model.score_fn(ent_emb, tail_embs, rel_embs)
tails_predictions = model.score_fn(head_embs, ent_emb, rel_embs)
pred_ents = torch.cat((heads_predictions, tails_predictions))
true_ents = torch.cat((heads, tails))
num_predictions += pred_ents.shape[0]
reciprocals, hits = utils.get_metrics(pred_ents, true_ents, k_values)
mrr += reciprocals.sum().item()
hits_sum = hits.sum(dim=0)
for j, k in enumerate(hit_positions):
hits_at_k[k] += hits_sum[j].item()
if compute_filtered:
filters = utils.get_triple_filters(triples, filtering_graph,
num_entities, ent2idx)
heads_filter, tails_filter = filters
# Filter entities by assigning them the lowest score in the batch
filter_mask = torch.cat((heads_filter, tails_filter)).to(device)
pred_ents[filter_mask] = pred_ents.min() - 1.0
reciprocals, hits = utils.get_metrics(pred_ents, true_ents, k_values)
mrr_filt += reciprocals.sum().item()
hits_sum = hits.sum(dim=0)
for j, k in enumerate(hit_positions):
hits_at_k_filt[k] += hits_sum[j].item()
reciprocals = reciprocals.squeeze()
if new_entities is not None:
by_position = utils.split_by_new_position(triples,
reciprocals,
new_entities)
batch_mrr_by_position, batch_mrr_pos_counts = by_position
mrr_by_position += batch_mrr_by_position
mrr_pos_counts += batch_mrr_pos_counts
if triples_loader.dataset.has_rel_categories:
by_category = utils.split_by_category(triples,
reciprocals,
rel_categories)
batch_mrr_by_cat, batch_mrr_cat_count = by_category
mrr_by_category += batch_mrr_by_cat
mrr_cat_count += batch_mrr_cat_count
if (i + 1) % int(0.2 * total) == 0:
_log.info(f'[{i + 1:,}/{total:,}]')
_log.info(f'The total number of predictions is {num_predictions:,}')
for hits_dict in (hits_at_k, hits_at_k_filt):
for k in hits_dict:
hits_dict[k] /= num_predictions
mrr = mrr / num_predictions
mrr_filt = mrr_filt / num_predictions
log_str = f'{prefix} mrr: {mrr:.4f} '
_run.log_scalar(f'{prefix}_mrr', mrr, epoch)
for k, value in hits_at_k.items():
log_str += f'hits@{k}: {value:.4f} '
_run.log_scalar(f'{prefix}_hits@{k}', value, epoch)
if compute_filtered:
log_str += f'mrr_filt: {mrr_filt:.4f} '
_run.log_scalar(f'{prefix}_mrr_filt', mrr_filt, epoch)
for k, value in hits_at_k_filt.items():
log_str += f'hits@{k}_filt: {value:.4f} '
_run.log_scalar(f'{prefix}_hits@{k}_filt', value, epoch)
_log.info(log_str)
if new_entities is not None and compute_filtered:
mrr_pos_counts[mrr_pos_counts < 1.0] = 1.0
mrr_by_position = mrr_by_position / mrr_pos_counts
log_str = ''
for i, t in enumerate((f'{prefix}_mrr_filt_both_new',
f'{prefix}_mrr_filt_head_new',
f'{prefix}_mrr_filt_tail_new')):
value = mrr_by_position[i].item()
log_str += f'{t}: {value:.4f} '
_run.log_scalar(t, value, epoch)
_log.info(log_str)
if compute_filtered and triples_loader.dataset.has_rel_categories:
mrr_cat_count[mrr_cat_count < 1.0] = 1.0
mrr_by_category = mrr_by_category / mrr_cat_count
for i, case in enumerate(['pred_head', 'pred_tail']):
log_str = f'{case} '
for cat, cat_id in CATEGORY_IDS.items():
log_str += f'{cat}_mrr: {mrr_by_category[i, cat_id]:.4f} '
_log.info(log_str)
if return_embeddings:
out = (mrr, ent_emb)
else:
out = (mrr, None)
return out
@ex.command
def link_prediction(dataset, inductive, dim, model, rel_model, loss_fn,
encoder_name, regularizer, max_len, num_negatives, lr,
use_scheduler, batch_size, emb_batch_size, eval_batch_size,
max_epochs, checkpoint, use_cached_text,
_run: Run, _log: Logger):
drop_stopwords = model in {'bert-bow', 'bert-dkrl',
'glove-bow', 'glove-dkrl'}
prefix = 'ind-' if inductive and model != 'transductive' else ''
triples_file = f'data/{dataset}/{prefix}train.tsv'
if device != torch.device('cpu'):
num_devices = torch.cuda.device_count()
if batch_size % num_devices != 0:
raise ValueError(f'Batch size ({batch_size}) must be a multiple of'
f' the number of CUDA devices ({num_devices})')
_log.info(f'CUDA devices used: {num_devices}')
else:
num_devices = 1
_log.info('Training on CPU')
if model == 'transductive':
train_data = GraphDataset(triples_file, num_negatives,
write_maps_file=True,
num_devices=num_devices)
else:
if model.startswith('bert') or model == 'blp':
tokenizer = BertTokenizer.from_pretrained(encoder_name)
else:
tokenizer = GloVeTokenizer('data/glove/glove.6B.300d-maps.pt')
train_data = TextGraphDataset(triples_file, num_negatives,
max_len, tokenizer, drop_stopwords,
write_maps_file=True,
use_cached_text=use_cached_text,
num_devices=num_devices)
train_loader = DataLoader(train_data, batch_size, shuffle=True,
collate_fn=train_data.collate_fn,
num_workers=0, drop_last=True)
train_eval_loader = DataLoader(train_data, eval_batch_size)
valid_data = GraphDataset(f'data/{dataset}/{prefix}dev.tsv')
valid_loader = DataLoader(valid_data, eval_batch_size)
test_data = GraphDataset(f'data/{dataset}/{prefix}test.tsv')
test_loader = DataLoader(test_data, eval_batch_size)
# Build graph with all triples to compute filtered metrics
if dataset != 'Wikidata5M':
graph = nx.MultiDiGraph()
all_triples = torch.cat((train_data.triples,
valid_data.triples,
test_data.triples))
graph.add_weighted_edges_from(all_triples.tolist())
train_ent = set(train_data.entities.tolist())
train_val_ent = set(valid_data.entities.tolist()).union(train_ent)
train_val_test_ent = set(test_data.entities.tolist()).union(train_val_ent)
val_new_ents = train_val_ent.difference(train_ent)
test_new_ents = train_val_test_ent.difference(train_val_ent)
else:
graph = None
train_ent = set(train_data.entities.tolist())
train_val_ent = set(valid_data.entities.tolist())
train_val_test_ent = set(test_data.entities.tolist())
val_new_ents = test_new_ents = None
_run.log_scalar('num_train_entities', len(train_ent))
train_ent = torch.tensor(list(train_ent))
train_val_ent = torch.tensor(list(train_val_ent))
train_val_test_ent = torch.tensor(list(train_val_test_ent))
model = utils.get_model(model, dim, rel_model, loss_fn,
len(train_val_test_ent), train_data.num_rels,
encoder_name, regularizer)
if checkpoint is not None:
model.load_state_dict(torch.load(checkpoint, map_location='cpu'))
if device != torch.device('cpu'):
model = torch.nn.DataParallel(model).to(device)
optimizer = Adam(model.parameters(), lr=lr)
total_steps = len(train_loader) * max_epochs
if use_scheduler:
warmup = int(0.2 * total_steps)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=warmup,
num_training_steps=total_steps)
best_valid_mrr = 0.0
checkpoint_file = osp.join(OUT_PATH, f'model-{_run._id}.pt')
for epoch in range(1, max_epochs + 1):
train_loss = 0
for step, data in enumerate(train_loader):
loss = model(*data).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if use_scheduler:
scheduler.step()
train_loss += loss.item()
if step % int(0.05 * len(train_loader)) == 0:
_log.info(f'Epoch {epoch}/{max_epochs} '
f'[{step}/{len(train_loader)}]: {loss.item():.6f}')
_run.log_scalar('batch_loss', loss.item())
_run.log_scalar('train_loss', train_loss / len(train_loader), epoch)
if dataset != 'Wikidata5M':
_log.info('Evaluating on sample of training set')
eval_link_prediction(model, train_eval_loader, train_data, train_ent,
epoch, emb_batch_size, prefix='train',
max_num_batches=len(valid_loader))
_log.info('Evaluating on validation set')
val_mrr, _ = eval_link_prediction(model, valid_loader, train_data,
train_val_ent, epoch,
emb_batch_size, prefix='valid')
# Keep checkpoint of best performing model (based on raw MRR)
if val_mrr > best_valid_mrr:
best_valid_mrr = val_mrr
torch.save(model.state_dict(), checkpoint_file)
# Evaluate with best performing checkpoint
if max_epochs > 0:
model.load_state_dict(torch.load(checkpoint_file))
if dataset == 'Wikidata5M':
graph = nx.MultiDiGraph()
graph.add_weighted_edges_from(valid_data.triples.tolist())
_log.info('Evaluating on validation set (with filtering)')
eval_link_prediction(model, valid_loader, train_data, train_val_ent,
max_epochs + 1, emb_batch_size, prefix='valid',
filtering_graph=graph,
new_entities=val_new_ents)
if dataset == 'Wikidata5M':
graph = nx.MultiDiGraph()
graph.add_weighted_edges_from(test_data.triples.tolist())
_log.info('Evaluating on test set')
_, ent_emb = eval_link_prediction(model, test_loader, train_data,
train_val_test_ent, max_epochs + 1,
emb_batch_size, prefix='test',
filtering_graph=graph,
new_entities=test_new_ents,
return_embeddings=True)
# Save final entity embeddings obtained with trained encoder
torch.save(ent_emb, osp.join(OUT_PATH, f'ent_emb-{_run._id}.pt'))
torch.save(train_val_test_ent, osp.join(OUT_PATH, f'ents-{_run._id}.pt'))
@ex.command
def node_classification(dataset, checkpoint, _run: Run, _log: Logger):
ent_emb = torch.load(f'output/ent_emb-{checkpoint}.pt', map_location='cpu')
if isinstance(ent_emb, tuple):
ent_emb = ent_emb[0]
ent_emb = ent_emb.squeeze().numpy()
num_embs, emb_dim = ent_emb.shape
_log.info(f'Loaded {num_embs} embeddings with dim={emb_dim}')
emb_ids = torch.load(f'output/ents-{checkpoint}.pt', map_location='cpu')
ent2idx = utils.make_ent2idx(emb_ids, max_ent_id=emb_ids.max()).numpy()
maps = torch.load(f'data/{dataset}/maps.pt')
ent_ids = maps['ent_ids']
class2label = defaultdict(lambda: len(class2label))
splits = ['train', 'dev', 'test']
split_2data = dict()
for split in splits:
with open(f'data/{dataset}/{split}-ents-class.txt') as f:
idx = []
labels = []
for line in f:
entity, ent_class = line.strip().split()
entity_id = ent_ids[entity]
entity_idx = ent2idx[entity_id]
idx.append(entity_idx)
labels.append(class2label[ent_class])
x = ent_emb[idx]
y = np.array(labels)
split_2data[split] = (x, y)
x_train, y_train = split_2data['train']
x_dev, y_dev = split_2data['dev']
x_test, y_test = split_2data['test']
best_dev_metric = 0.0
best_c = 0
for k in range(-4, 2):
c = 10 ** -k
model = LogisticRegression(C=c, multi_class='multinomial',
max_iter=1000)
model.fit(x_train, y_train)
dev_preds = model.predict(x_dev)
dev_acc = accuracy_score(y_dev, dev_preds)
_log.info(f'{c:.3f} - {dev_acc:.3f}')
if dev_acc > best_dev_metric:
best_dev_metric = dev_acc
best_c = c
_log.info(f'Best regularization coefficient: {best_c:.4f}')
model = LogisticRegression(C=best_c, multi_class='multinomial',
max_iter=1000)
x_train_all = np.concatenate((x_train, x_dev))
y_train_all = np.concatenate((y_train, y_dev))
model.fit(x_train_all, y_train_all)
for metric_fn in (accuracy_score, balanced_accuracy_score):
train_preds = model.predict(x_train_all)
train_metric = metric_fn(y_train_all, train_preds)
test_preds = model.predict(x_test)
test_metric = metric_fn(y_test, test_preds)
_log.info(f'Train {metric_fn.__name__}: {train_metric:.3f}')
_log.info(f'Test {metric_fn.__name__}: {test_metric:.3f}')
id_to_class = {v: k for k, v in class2label.items()}
joblib.dump({'model': model,
'id_to_class': id_to_class},
osp.join('output', f'classifier-{checkpoint}.joblib'))
ex.run_commandline()
| 18,945 | 38.063918 | 83 | py |
blp | blp-master/data/utils.py | import sys
from tqdm import tqdm
from argparse import ArgumentParser
import networkx as nx
import random
import os.path as osp
from collections import Counter, defaultdict
import torch
import rdflib
def parse_triples(triples_file):
"""Read a file containing triples, with head, relation, and tail
separated by space. Returns list of lists."""
triples = []
rel_counts = Counter()
file = open(triples_file)
for line in file:
head, rel, tail = line.split()
triples.append([head, tail, rel])
rel_counts[rel] += 1
return triples, rel_counts
def read_entity_types(entity2type_file):
type2entities = defaultdict(set)
with open(entity2type_file) as f:
for line in f:
entity, label = line.strip().split()
type2entities[label].add(entity)
return dict(type2entities)
def get_safely_removed_edges(graph, node, rel_counts, min_edges_left=100):
"""Get counts of edge removed by type, after safely removing a given node.
Safely removing a node entails checking that no nodes are left
disconnected, and not removing edge types with count less than
a given amount.
"""
neighbors = set(nx.all_neighbors(graph, node))
removed_rel_counts = Counter()
removed_edges = []
for m in neighbors:
# Check if m has more than 2 neighbors (node, and potentially itself)
# before continuing
m_neighborhood = set(nx.all_neighbors(graph, m))
if len(m_neighborhood) > 2:
# Check edges in both directions between node and m
pair = [node, m]
for i in range(2):
edge_dict = graph.get_edge_data(*pair)
if edge_dict is not None:
# Check that removing the edges between node and m
# does not leave less than min_edges_left
edges = edge_dict.values()
for edge in edges:
rel = edge['weight']
edges_left = rel_counts[rel] - removed_rel_counts[rel]
if edges_left >= min_edges_left:
removed_rel_counts[rel] += 1
head, tail = pair
removed_edges.append((head, tail, rel))
else:
return None
# Don't count self-loops twice
if node == m:
break
pair = list(reversed(pair))
else:
return None
return removed_edges, removed_rel_counts
def drop_entities(triples_file, train_size=0.8, valid_size=0.1, test_size=0.1,
seed=0, types_file=None):
"""Drop entities from a graph, to create training, validation and test
splits.
Entities are dropped so that no disconnected nodes are left in the training
graph. Dropped entities are distributed between disjoint validation
and test sets.
"""
splits_sum = train_size + valid_size + test_size
    if splits_sum <= 0 or splits_sum > 1:
        raise ValueError('Sum of split sizes must be greater than 0'
                         ' and less than or equal to 1.')
use_types = types_file is not None
if use_types:
type2entities = read_entity_types(types_file)
types = list(type2entities.keys())
random.seed(seed)
graph = nx.MultiDiGraph()
triples, rel_counts = parse_triples(triples_file)
graph.add_weighted_edges_from(triples)
original_num_edges = graph.number_of_edges()
original_num_nodes = graph.number_of_nodes()
print(f'Loaded graph with {graph.number_of_nodes():,} entities '
f'and {graph.number_of_edges():,} edges')
dropped_entities = []
dropped_edges = dict()
num_to_drop = int(original_num_nodes * (1 - train_size))
num_val = int(original_num_nodes * valid_size)
num_test = int(original_num_nodes * test_size)
print(f'Removing {num_to_drop:,} entities...')
progress = tqdm(total=num_to_drop, file=sys.stdout)
while len(dropped_entities) < num_to_drop:
if use_types:
# Sample an entity with probability proportional to its type count
# (minus 1 to keep at least one entity of any type)
weights = [len(type2entities[t]) - 1 for t in types]
rand_type = random.choices(types, weights, k=1)[0]
rand_ent = random.choice(list(type2entities[rand_type]))
else:
# Sample an entity uniformly at random
rand_ent = random.choice(list(graph.nodes))
removed_tuple = get_safely_removed_edges(graph, rand_ent, rel_counts)
if removed_tuple is not None:
removed_edges, removed_counts = removed_tuple
dropped_edges[rand_ent] = removed_edges
graph.remove_node(rand_ent)
dropped_entities.append(rand_ent)
rel_counts.subtract(removed_counts)
if use_types:
type2entities[rand_type].remove(rand_ent)
progress.update(1)
progress.close()
# Are there indeed no disconnected nodes?
assert len(list(nx.isolates(graph))) == 0
# Did we keep track of removed edges correctly?
num_removed_edges = sum(map(len, dropped_edges.values()))
assert num_removed_edges + graph.number_of_edges() == original_num_edges
# Test entities MUST come from first slice! This guarantees that
# validation entities don't have edges with them (because nodes were
# removed in sequence)
test_ents = set(dropped_entities[:num_test])
val_ents = set(dropped_entities[num_test:num_test + num_val])
train_ents = set(graph.nodes())
# Check that entity sets are disjoint
assert len(train_ents.intersection(val_ents)) == 0
assert len(train_ents.intersection(test_ents)) == 0
assert len(val_ents.intersection(test_ents)) == 0
# Check that validation graph does not contain test entities
val_graph = nx.MultiDiGraph()
val_edges = []
for entity in val_ents:
val_edges += dropped_edges[entity]
val_graph.add_weighted_edges_from(val_edges)
assert len(set(val_graph.nodes()).intersection(test_ents)) == 0
names = ('train', 'dev', 'test')
dirname = osp.dirname(triples_file)
prefix = 'ind-'
for entity_set, set_name in zip((train_ents, val_ents, test_ents), names):
# Save file with entities for set
with open(osp.join(dirname, f'{set_name}-ents.txt'), 'w') as file:
file.writelines('\n'.join(entity_set))
if set_name == 'train':
# Triples for train split are saved later
continue
# Save file with triples for entities in set
with open(osp.join(dirname, f'{prefix}{set_name}.tsv'), 'w') as file:
for entity in entity_set:
triples = dropped_edges[entity]
for head, tail, rel in triples:
file.write(f'{head}\t{rel}\t{tail}\n')
with open(osp.join(dirname, f'{prefix}train.tsv'), 'w') as train_file:
for head, tail, rel in graph.edges(data=True):
train_file.write(f'{head}\t{rel["weight"]}\t{tail}\n')
print(f'Dropped {len(val_ents):,} entities for validation'
f' and {len(test_ents):,} for test.')
print(f'{graph.number_of_nodes():,} entities are left for training.')
print(f'Saved output files to {dirname}/')
def load_embeddings(embs_file):
"""Read a file containing a word followed by its embedding, as float values
separated by whitespace.
Args:
embs_file: str, path to file
Returns:
tensor of shape (vocabulary, embedding_dimension), type torch.float
dict, mapping words (str) to id (int).
"""
filename, _ = osp.splitext(embs_file)
word2idx = {}
word_embeddings = []
progress = tqdm()
with open(embs_file) as file:
for i, line in enumerate(file):
word, *embedding = line.split(' ')
word2idx[word] = i
word_embeddings.append([float(e) for e in embedding])
progress.update(1)
progress.close()
word_embeddings = torch.tensor(word_embeddings)
# Add embedding for out-of-vocabulary words
unk_emb = torch.mean(word_embeddings, dim=0, keepdim=True)
word_embeddings = torch.cat((word_embeddings, unk_emb))
word2idx['[UNK]'] = len(word2idx)
torch.save(word_embeddings, f'{filename}.pt')
torch.save(word2idx, f'{filename}-maps.pt')
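# Illustrative only (not part of the original module): given a GloVe text file
# such as glove.6B.300d.txt, the call
#
#   load_embeddings('data/glove/glove.6B.300d.txt')
#
# writes glove.6B.300d.pt (the embedding matrix, with an extra averaged row for
# '[UNK]') and glove.6B.300d-maps.pt (the word-to-index dict) next to it, which
# are the files expected by GloVeTokenizer and the glove-* models.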
def categorize_relations(triples_file):
"""Given a set of triples, assign a category to a relation from the
following:
1 to 1
1 to many
many to 1
many to many
Results are saved back to disk.
Args:
triples_file: str, file containing triples of the form
head relation tail
"""
graph = nx.MultiDiGraph()
triples, rel_counts = parse_triples(triples_file)
graph.add_weighted_edges_from(triples)
rel2heads_count = defaultdict(list)
rel2tails_count = defaultdict(list)
for entity in graph.nodes:
rel2heads_entity_count = Counter()
# Fix entity as tail, and check all heads
in_edges = graph.in_edges(entity, data=True)
for u, v, edge in in_edges:
rel2heads_entity_count[edge['weight']] += 1
for rel, counts in rel2heads_entity_count.items():
rel2heads_count[rel].append(counts)
rel2tails_entity_count = Counter()
# Fix entity as head, and check all tails
out_edges = graph.out_edges(entity, data=True)
for u, v, edge in out_edges:
rel2tails_entity_count[edge['weight']] += 1
for rel, counts in rel2tails_entity_count.items():
rel2tails_count[rel].append(counts)
rel2category = dict()
for rel in rel2heads_count:
head_counts = rel2heads_count[rel]
tail_counts = rel2tails_count[rel]
head_avg = sum(head_counts)/len(head_counts)
tail_avg = sum(tail_counts)/len(tail_counts)
head_category = '1' if head_avg < 1.5 else 'many'
tail_category = '1' if tail_avg < 1.5 else 'many'
rel2category[rel] = f'{head_category}-to-{tail_category}'
print('Relation category statistics:')
cat_counts = Counter(rel2category.values())
for category, count in cat_counts.items():
proportion = 100 * count/len(rel2category)
print(f'{category:13} {count:3} {proportion:4.1f}%')
dirname = osp.dirname(triples_file)
output_path = osp.join(dirname, 'relations-cat.txt')
with open(output_path, 'w') as f:
for relation, category in rel2category.items():
f.write(f'{relation}\t{category}\n')
print(f'Saved relation categories to {output_path}')
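# Illustrative only (not part of the original module): a sketch of the rule used
# above for a single relation, with made-up counts. If each tail of a relation
# is reached from 1.1 heads on average, and each head links to 3.2 tails on
# average, then with the 1.5 threshold the relation is labeled '1-to-many'.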
def get_ranking_descriptions(run_file, dbpedia_file, redirects_file):
# Read run file and get unique set of entities
print('Reading unique entities from run file...')
entities = set()
with open(run_file) as f:
for line in f:
values = line.strip().split()
entities.add(values[2])
basename = osp.splitext(osp.basename(run_file))[0]
output_file = osp.join(osp.dirname(run_file),
basename + '-descriptions.txt')
missing_file = osp.join(osp.dirname(run_file), basename + '-missing.txt')
dbpedia_ns = 'http://dbpedia.org/resource/'
dbpedia_prefix = 'dbpedia:'
print('Reading redirects...')
redir2entities = defaultdict(set)
with open(redirects_file) as f:
for line in f:
values = line.strip().split()
norm_uri = values[0].replace(dbpedia_ns, dbpedia_prefix, 1)
redirect = values[2]
if norm_uri in entities:
redir2entities[redirect].add(norm_uri)
# Iterate over DBpedia dump and keep required descriptions
print('Retrieving descriptions of entities...')
read_entities = set()
progress = tqdm(file=sys.stdout)
with open(dbpedia_file) as f, open(output_file, 'w') as out:
for line in f:
g = rdflib.Graph().parse(data=line, format='n3')
for (page, rel, description) in g:
norm_uri = f'<{page.replace(dbpedia_ns, dbpedia_prefix, 1)}>'
if norm_uri in entities and norm_uri not in read_entities:
read_entities.add(norm_uri)
out.write(f'{norm_uri}\t{description.value}\n')
page_n3_format = page.n3()
if page_n3_format in redir2entities:
for entity in redir2entities[page_n3_format]:
if entity not in read_entities:
read_entities.add(entity)
out.write(f'{entity}\t{description.value}\n')
if len(read_entities) == len(entities):
break
progress.update()
progress.close()
with open(missing_file, 'w') as f:
for entity in entities.difference(read_entities):
f.write(f'{entity}\n')
print(f'Retrieved {len(read_entities):,} descriptions, out of'
f' {len(entities):,} entities.')
print(f'Descriptions saved in {output_file}')
print(f'Entities with missing descriptions saved in {missing_file}')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('command', choices=['drop_entities', 'load_embs',
'categorize',
'get_ranking_descriptions'])
parser.add_argument('--file', help='Input file')
parser.add_argument('--dbp_file', help='DBpedia ttl file with rdf:comment'
' field for entities')
parser.add_argument('--redirects_file', help='File redirecting entities')
parser.add_argument('--types_file', help='Tab-separated file of entities'
' and their type', default=None)
parser.add_argument('--train_size', help='Fraction of entities used for'
' training.', default=0.8, type=float)
parser.add_argument('--seed', help='Random seed', default=0)
args = parser.parse_args()
if args.command == 'drop_entities':
drop_entities(args.file, train_size=args.train_size, seed=args.seed,
types_file=args.types_file)
elif args.command == 'load_embs':
load_embeddings(args.file)
elif args.command == 'categorize':
categorize_relations(args.file)
elif args.command == 'get_ranking_descriptions':
if args.file is None or args.dbp_file is None:
raise ValueError('--file and --dbp_file must be provided to'
' get_ranking_descriptions')
get_ranking_descriptions(args.file, args.dbp_file, args.redirects_file)
| 14,869 | 36.455919 | 79 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/bin/normalize_sketch.py | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is sample Python 2.7 code that uses the tiltbrush.tilt module
to scale, rotate, and translate the sketch so that resetting the
transform will bring you back to the initial size, orientation, and
position. But the environment size, orientation, and position will also
be reset."""
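# Example invocation (illustrative; 'MySketch.tilt' is a placeholder file name;
# the script writes 'MySketchNormalized.tilt' alongside the original):
#   python normalize_sketch.py MySketch.tilt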
import os
import shutil
import sys
try:
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))), 'Python'))
from tiltbrush.tilt import Tilt
except ImportError:
print >>sys.stderr, "Please put the 'Python' directory in your PYTHONPATH"
sys.exit(1)
def _quaternion_multiply_quaternion(q0, q1):
x0, y0, z0, w0 = q0
x1, y1, z1, w1 = q1
return [
w0*x1 + x0*w1 + y0*z1 - z0*y1,
w0*y1 + y0*w1 + z0*x1 - x0*z1,
w0*z1 + z0*w1 + x0*y1 - y0*x1,
w0*w1 - x0*x1 - y0*y1 - z0*z1]
def _quaternion_conjugate(q):
x, y, z, w = q
return [-x, -y, -z, w]
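# Rotates a 3-vector v by quaternion q via q * (v, 0) * conjugate(q); the
# rotated vector is the first three components of the product (standard
# quaternion math, noted here for clarity).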
def _quaternion_multiply_vector(q, v):
qv = v + [0]
return _quaternion_multiply_quaternion(_quaternion_multiply_quaternion(q, qv), _quaternion_conjugate(q))[:3]
def _transform_point(scene_translation, scene_rotation, scene_scale, pos):
pos = [scene_scale * i for i in pos]
pos = _quaternion_multiply_vector(scene_rotation, pos)
pos = [i+j for i,j in zip(scene_translation, pos)]
return pos
def _adjust_guide(scene_translation, scene_rotation, scene_scale, guide):
guide[u'Extents'] = [scene_scale * b for b in guide[u'Extents']]
_adjust_transform(scene_translation, scene_rotation, scene_scale, guide[u'Transform'])
def _adjust_transform(scene_translation, scene_rotation, scene_scale, transform):
scaledTranslation = [scene_scale * b for b in transform[0]]
rotatedTranslation = _quaternion_multiply_vector(scene_rotation, scaledTranslation)
translatedTranslation = [b+a for a,b in zip(scene_translation, rotatedTranslation)]
transform[0] = translatedTranslation
transform[1] = _quaternion_multiply_quaternion(scene_rotation, transform[1])
transform[2] = scene_scale * transform[2]
def normalize_tilt_file(tilt_file):
scene_translation = tilt_file.metadata[u'SceneTransformInRoomSpace'][0]
scene_rotation = tilt_file.metadata[u'SceneTransformInRoomSpace'][1]
scene_scale = tilt_file.metadata[u'SceneTransformInRoomSpace'][2]
# Normalize strokes
for stroke in tilt_file.sketch.strokes:
if stroke.has_stroke_extension('scale'):
stroke.scale *= scene_scale
else:
stroke.scale = scene_scale
for cp in stroke.controlpoints:
pos = cp.position
pos = _transform_point(scene_translation, scene_rotation, scene_scale, pos)
cp.position = pos
cp.orientation = _quaternion_multiply_quaternion(scene_rotation, cp.orientation)
with tilt_file.mutable_metadata() as metadata:
# Reset scene transform to be identity.
metadata[u'SceneTransformInRoomSpace'][0] = [0., 0., 0.]
metadata[u'SceneTransformInRoomSpace'][1] = [0., 0., 0., 1.]
metadata[u'SceneTransformInRoomSpace'][2] = 1.
# Adjust guide transforms to match.
if u'GuideIndex' in metadata:
for guide_type in metadata[u'GuideIndex']:
for guide in guide_type[u'States']:
_adjust_guide(scene_translation, scene_rotation, scene_scale, guide)
# Adjust model transforms to match.
if u'ModelIndex' in metadata:
for model_type in metadata[u'ModelIndex']:
for transform in model_type[u'Transforms']:
_adjust_transform(scene_translation, scene_rotation, scene_scale, transform)
# Adjust image transforms to match.
if u'ImageIndex' in metadata:
for image_type in metadata[u'ImageIndex']:
for transform in image_type[u'Transforms']:
_adjust_transform(scene_translation, scene_rotation, scene_scale, transform)
# Adjust lights to match.
if u'Lights' in metadata:
metadata[u'Lights'][u'Shadow'][u'Orientation'] = _quaternion_multiply_quaternion(scene_rotation, metadata[u'Lights'][u'Shadow'][u'Orientation'])
metadata[u'Lights'][u'NoShadow'][u'Orientation'] = _quaternion_multiply_quaternion(scene_rotation, metadata[u'Lights'][u'NoShadow'][u'Orientation'])
# Adjust environment to match.
if u'Environment' in metadata:
metadata[u'Environment'][u'FogDensity'] /= scene_scale
metadata[u'Environment'][u'GradientSkew'] = _quaternion_multiply_quaternion(scene_rotation, metadata[u'Environment'][u'GradientSkew'])
# u'Mirror' and u'ThumbnailCameraTransformInRoomSpace' are in room space so don't need to be normalized.
def main():
import argparse
  parser = argparse.ArgumentParser(description=
    "Create a normalized version of the sketch (with 'Normalized' appended to"
    " the file name) which is scaled, rotated, and translated so that resetting"
    " the transform will bring you back to the initial size, orientation, and"
    " position. But the environment size, orientation, and position will also"
    " be reset.")
parser.add_argument('files', type=str, nargs='+', help="Sketches to normalize")
args = parser.parse_args()
for filename in args.files:
name, ext = os.path.splitext(filename)
filename_normalized = name + 'Normalized' + ext
shutil.copy(filename, filename_normalized)
tilt_file = Tilt(filename_normalized)
normalize_tilt_file(tilt_file)
tilt_file.write_sketch()
print 'WARNING: Environment position has changed in ' + filename + '.'
if __name__ == '__main__':
main()
| 6,027 | 39.72973 | 154 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/bin/dump_tilt.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is sample Python 2.7 code that uses the tiltbrush.tilt module
to view raw Tilt Brush data."""
import os
import pprint
import sys
try:
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))), 'Python'))
from tiltbrush.tilt import Tilt
except ImportError:
print >>sys.stderr, "Please put the 'Python' directory in your PYTHONPATH"
sys.exit(1)
def dump_sketch(sketch):
"""Prints out some rough information about the strokes.
Pass a tiltbrush.tilt.Sketch instance."""
cooky, version, unused = sketch.header[0:3]
print 'Cooky:0x%08x Version:%s Unused:%s Extra:(%d bytes)' % (
cooky, version, unused, len(sketch.additional_header))
# Create dicts that are the union of all the stroke-extension and
# control-point-extension # lookup tables.
union_stroke_extension = {}
union_cp_extension = {}
for stroke in sketch.strokes:
union_stroke_extension.update(stroke.stroke_ext_lookup)
union_cp_extension.update(stroke.cp_ext_lookup)
print "Stroke Ext: %s" % ', '.join(union_stroke_extension.keys())
print "CPoint Ext: %s" % ', '.join(union_cp_extension.keys())
for (i, stroke) in enumerate(sketch.strokes):
print "%3d: " % i,
dump_stroke(stroke)
def dump_stroke(stroke):
"""Prints out some information about the stroke."""
if len(stroke.controlpoints) and 'timestamp' in stroke.cp_ext_lookup:
cp = stroke.controlpoints[0]
timestamp = stroke.cp_ext_lookup['timestamp']
start_ts = ' t:%6.1f' % (cp.extension[timestamp] * .001)
else:
start_ts = ''
try:
scale = stroke.extension[stroke.stroke_ext_lookup['scale']]
except KeyError:
scale = 1
if 'group' in stroke.stroke_ext_lookup:
group = stroke.extension[stroke.stroke_ext_lookup['group']]
else: group = '--'
if 'seed' in stroke.stroke_ext_lookup:
seed = '%08x' % stroke.extension[stroke.stroke_ext_lookup['seed']]
else: seed = '-none-'
print "B:%2d S:%.3f C:#%02X%02X%02X g:%2s s:%8s %s [%4d]" % (
stroke.brush_idx, stroke.brush_size * scale,
int(stroke.brush_color[0] * 255),
int(stroke.brush_color[1] * 255),
int(stroke.brush_color[2] * 255),
#stroke.brush_color[3],
group, seed,
start_ts,
len(stroke.controlpoints))
def main():
import argparse
parser = argparse.ArgumentParser(description="View information about a .tilt")
parser.add_argument('--strokes', action='store_true', help="Dump the sketch strokes")
parser.add_argument('--metadata', action='store_true', help="Dump the metadata")
parser.add_argument('files', type=str, nargs='+', help="Files to examine")
args = parser.parse_args()
if not (args.strokes or args.metadata):
print "You should pass at least one of --strokes or --metadata"
for filename in args.files:
t = Tilt(filename)
if args.strokes:
dump_sketch(t.sketch)
if args.metadata:
pprint.pprint(t.metadata)
if __name__ == '__main__':
main()
| 3,576 | 31.816514 | 87 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/bin/tilt_to_strokes_dae.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import collections
import xml.etree.ElementTree as ET
try:
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))), 'Python'))
from tiltbrush.tilt import Tilt
except ImportError:
print >>sys.stderr, "Please put the 'Python' directory in your PYTHONPATH"
sys.exit(1)
def Element(tag, children=None, text=None, **attribs):
"""Wrapper around ET.Element that makes adding children and text easier"""
child = ET.Element(tag, **attribs)
if text is not None:
child.text = text
if children is not None:
child.extend(children)
return child
def _indent(elem, level=0):
"""Pretty-print indent an ElementTree.Element instance"""
i = "\n" + level*"\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class ColladaFile(object):
def __init__(self):
self.next_ids = collections.defaultdict(int)
self.root = ET.Element(
'COLLADA',
xmlns="http://www.collada.org/2008/03/COLLADASchema",
version="1.5.0")
self.tree = ET.ElementTree(self.root)
self._init_asset()
self.library_effects = ET.SubElement(self.root, 'library_effects')
self.library_materials = ET.SubElement(self.root, 'library_materials')
self.library_geometries = ET.SubElement(self.root, 'library_geometries')
self.library_visual_scenes = ET.SubElement(self.root, 'library_visual_scenes')
self.material = self._init_material()
self.visual_scene = self._init_scene()
def _init_asset(self):
import datetime
now = datetime.datetime.now()
self.root.append(
Element('asset', children=[
Element('contributor', children=[
Element('authoring_tool', text='Tilt Brush COLLADA stroke converter')
]),
Element('created', text=now.isoformat()),
Element('modified', text=now.isoformat()),
Element('unit', meter='.1', name='decimeter'),
Element('up_axis', text='Y_UP')
])
)
def _init_material(self):
effect = ET.SubElement(self.library_effects, 'effect', id=self.make_id('effect_'))
effect.append(
Element('profile_COMMON', children=[
Element('technique', sid='COMMON', children=[
Element('blinn', children=[
Element('diffuse', children=[
Element('color', text='0.8 0.8 0.8 1'),
]),
Element('specular', children=[
Element('color', text='0.2 0.2 0.2 1'),
]),
Element('shininess', children=[Element('float', text='0.5')])
])
])
])
)
material = ET.SubElement(
self.library_materials, 'material', id=self.make_id('material_'),
name="Mat")
ET.SubElement(material, 'instance_effect', url='#' + effect.get('id'))
return material
def _init_scene(self):
visual_scene = ET.SubElement(self.library_visual_scenes, 'visual_scene',
id=self.make_id('scene_'))
self.root.append(
Element('scene', children=[
Element('instance_visual_scene', url='#' + visual_scene.get('id'))
])
)
return visual_scene
def make_id(self, prefix='ID'):
val = self.next_ids[prefix]
self.next_ids[prefix] += 1
new_id = prefix + str(val)
return new_id
def write(self, filename):
header = '<?xml version="1.0" encoding="UTF-8"?>\n'
_indent(self.root)
with file(filename, 'wb') as outf:
outf.write(header)
self.tree.write(outf)
def add_stroke(self, stroke):
geometry = self._add_stroke_geometry(stroke)
self._add_stroke_node(geometry)
def _add_stroke_geometry(self, stroke):
def flatten(lst):
for elt in lst:
for subelt in elt:
yield subelt
def get_rh_positions(stroke):
for cp in stroke.controlpoints:
yield (-cp.position[0], cp.position[1], cp.position[2])
def iter_positions(stroke):
for cp in stroke.controlpoints:
# Switch from left-handed (unity) to right-handed
yield -cp.position[0]
yield cp.position[1]
yield cp.position[2]
raw_floats = list(flatten(get_rh_positions(stroke)))
assert len(raw_floats) % 3 == 0
geom_id = self.make_id('stroke_')
source_id = geom_id + '_src'
floats_id = geom_id + '_fs'
verts_id = geom_id + '_vs'
geometry = ET.SubElement(self.library_geometries, 'geometry', id=geom_id)
geometry.append(
Element('mesh', children=[
Element('source', id=source_id, children=[
Element('float_array', id=floats_id,
count=str(len(raw_floats)),
text=' '.join(map(str, raw_floats))),
Element('technique_common', children=[
Element('accessor',
count=str(len(raw_floats)/3), stride='3',
source='#' + floats_id,
children=[
Element('param', name='X', type='float'),
Element('param', name='Y', type='float'),
Element('param', name='Z', type='float')
])
])
]),
Element('vertices', id=verts_id, children=[
Element('input', semantic='POSITION', source='#' + source_id)
]),
Element('linestrips', count='1', material='Material1', children=[
Element('input', offset='0', semantic='VERTEX', set='0', source='#' + verts_id),
Element('p', text=' '.join(map(str, xrange(len(raw_floats) / 3))))
])
])
)
return geometry
def _add_stroke_node(self, geometry):
name = 'Spline.' + geometry.get('id')
self.visual_scene.append(
Element('node', id=self.make_id('node_'), name=name, children=[
Element('instance_geometry', url='#' + geometry.get('id'), children=[
Element('bind_material', children=[
Element('technique_common', children=[
Element('instance_material', symbol='Material1',
target='#' + self.material.get('id'),
children=[
Element('bind_vertex_input',
semantic='UVSET0',
input_semantic='TEXCOORD',
input_set='0')
])
])
])
])
])
)
def main(args):
import argparse
parser = argparse.ArgumentParser(description="Converts .tilt files to a Collada .dae containing spline data.")
parser.add_argument('files', type=str, nargs='*', help="Files to convert to dae")
args = parser.parse_args(args)
for filename in args.files:
t = Tilt(filename)
outf_name = os.path.splitext(os.path.basename(filename))[0] + '.dae'
dae = ColladaFile()
for stroke in t.sketch.strokes:
dae.add_stroke(stroke)
dae.write(outf_name)
print 'Wrote', outf_name
if __name__ == '__main__':
main(sys.argv[1:])
| 7,840 | 31.945378 | 112 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/bin/concatenate_tilt.py | #!/usr/bin/env python
import os
import pprint
import shutil
import sys
from tiltbrush import tilt
def destroy(filename):
try:
os.unlink(filename)
except OSError:
pass
def increment_timestamp(stroke, increment):
"""Adds *increment* to all control points in stroke."""
timestamp_idx = stroke.cp_ext_lookup['timestamp']
for cp in stroke.controlpoints:
cp.extension[timestamp_idx] += increment
def merge_metadata_from_tilt(tilt_dest, tilt_source):
"""Merges data from tilt_source into tilt_dest:
- BrushIndex
- ModelIndex
- ImageIndex"""
with tilt_dest.mutable_metadata() as md:
to_append = set(tilt_source.metadata['BrushIndex']) - set(md['BrushIndex'])
md['BrushIndex'].extend(sorted(to_append))
    if 'ImageIndex' in tilt_source.metadata:
      md['ImageIndex'] = md.get('ImageIndex', []) + \
                         tilt_source.metadata['ImageIndex']
    if 'ModelIndex' in tilt_source.metadata:
      md['ModelIndex'] = md.get('ModelIndex', []) + \
                         tilt_source.metadata['ModelIndex']
def concatenate(file_1, file_2, file_out):
"""Concatenate two .tilt files.
file_out may be the same as one of the input files."""
file_tmp = file_out + "__tmp"
destroy(file_tmp)
shutil.copyfile(file_1, file_tmp)
tilt_out = tilt.Tilt(file_tmp)
tilt_2 = tilt.Tilt(file_2)
merge_metadata_from_tilt(tilt_out, tilt_2)
tilt_out._guid_to_idx = dict(
(guid, index)
for (index, guid) in enumerate(tilt_out.metadata['BrushIndex']))
final_stroke = tilt_out.sketch.strokes[-1]
final_timestamp = final_stroke.get_cp_extension(final_stroke.controlpoints[-1], 'timestamp')
timestamp_offset = final_timestamp + .03
for stroke in tilt_2.sketch.strokes:
copy = stroke.clone()
# Convert brush index to one that works for tilt_out
stroke_guid = tilt_2.metadata['BrushIndex'][stroke.brush_idx]
copy.brush_idx = tilt_out._guid_to_idx[stroke_guid]
tilt_out.sketch.strokes.append(copy)
    # Adjust timestamps on the appended copy to keep stroke times from overlapping.
    increment_timestamp(copy, timestamp_offset)
tilt_out.write_sketch()
destroy(file_out)
os.rename(file_tmp, file_out)
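# Example programmatic use (illustrative; file names are placeholders),
# mirroring what main() below does for three or more inputs:
#   concatenate('a.tilt', 'b.tilt', 'out.tilt')
#   concatenate('out.tilt', 'c.tilt', 'out.tilt')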
def main():
import argparse
parser = argparse.ArgumentParser(
usage='%(prog)s -f FILE1 -f FILE2 ... -o OUTPUT_FILE'
)
parser.add_argument('-f', dest='files', metavar='FILE', action='append',
required=True,
help='A file to concatenate. May pass multiple times')
parser.add_argument('-o', metavar='OUTPUT_FILE', dest='output_file',
required=True,
help='The name of the output file')
args = parser.parse_args()
if len(args.files) < 2:
parser.error("Pass at least two files")
concatenate(args.files[0], args.files[1], args.output_file)
for filename in args.files[2:]:
concatenate(args.output_file, filename, args.output_file)
print "Wrote", args.output_file
if __name__ == '__main__':
main()
| 3,076 | 29.77 | 94 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/bin/geometry_json_to_obj.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Historical sample code that converts Tilt Brush '.json' exports to .obj.
# This script is superseded by Tilt Brush native .fbx exports.
#
# There are various possible ways you might want the .obj file converted:
#
# - Should the entire sketch be converted to a single mesh? Or all
# strokes that use the same brush? Or maybe one mesh per stroke?
# - Should backfaces be kept or removed?
# - Should vertices be welded? How aggressively?
#
# This sample keeps backfaces, merges all strokes into a single mesh,
# and does no vertex welding. It can also be easily customized to do any
# of the above.
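# Example invocation (illustrative; 'sketch.json' is a placeholder for a Tilt
# Brush geometry export):
#   python geometry_json_to_obj.py sketch.json -o sketch.obj
#   python geometry_json_to_obj.py --color sketch.json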
import argparse
import os
import sys
try:
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))), 'Python'))
from tiltbrush.export import iter_meshes, TiltBrushMesh, SINGLE_SIDED_FLAT_BRUSH
except ImportError:
print >>sys.stderr, "Please put the 'Python' directory in your PYTHONPATH"
sys.exit(1)
def write_obj(mesh, outf_name, use_color):
"""Emits a TiltBrushMesh as a .obj file.
If use_color, emit vertex color as a non-standard .obj extension."""
from cStringIO import StringIO
tmpf = StringIO()
if use_color:
for v, c32 in zip(mesh.v, mesh.c):
r = ( (c32 >> 0) & 0xff ) / 255.0
g = ( (c32 >> 8) & 0xff ) / 255.0
b = ( (c32 >>16) & 0xff ) / 255.0
tmpf.write("v %f %f %f %f %f %f\n" % (v[0], v[1], v[2], r, g, b))
tmpf.write("vc %f %f %f\n" % (r, g, b))
else:
for v in mesh.v:
tmpf.write("v %f %f %f\n" % v)
has_uv = any(uv is not None for uv in mesh.uv0)
if has_uv:
for uv in mesh.uv0:
if uv is not None:
tmpf.write("vt %f %f\n" % (uv[0], uv[1]))
else:
tmpf.write("vt 0 0\n")
has_n = any(n is not None for n in mesh.n)
if has_n:
for n in mesh.n:
if n is not None:
tmpf.write("vn %f %f %f\n" % n)
else:
tmpf.write("vn 0 0 0\n")
if has_n and has_uv:
for (t1, t2, t3) in mesh.tri:
t1 += 1; t2 += 1; t3 += 1
tmpf.write("f %d/%d/%d %d/%d/%d %d/%d/%d\n" % (t1,t1,t1, t2,t2,t2, t3,t3,t3))
elif has_n:
for (t1, t2, t3) in mesh.tri:
t1 += 1; t2 += 1; t3 += 1
tmpf.write("f %d//%d %d//%d %d//%d\n" % (t1,t1, t2,t2, t3,t3))
elif has_uv:
for (t1, t2, t3) in mesh.tri:
t1 += 1; t2 += 1; t3 += 1
tmpf.write("f %d/%d %d/%d %d/%d\n" % (t1,t1, t2,t2, t3,t3))
else:
for (t1, t2, t3) in mesh.tri:
t1 += 1; t2 += 1; t3 += 1
tmpf.write("f %d %d %d\n" % (t1, t2, t3))
with file(outf_name, 'wb') as outf:
outf.write(tmpf.getvalue())
def main():
import argparse
parser = argparse.ArgumentParser(description="Converts Tilt Brush '.json' exports to .obj.")
parser.add_argument('filename', help="Exported .json files to convert to obj")
parser.add_argument('--cooked', action='store_true', dest='cooked', default=True,
help="(default) Strip geometry of normals, weld verts, and give single-sided triangles corresponding backfaces.")
parser.add_argument('--color', action='store_true',
help="Add vertex color to 'v' and 'vc' elements. WARNING: May produce incompatible .obj files.")
parser.add_argument('--raw', action='store_false', dest='cooked',
help="Emit geometry just as it comes from Tilt Brush. Depending on the brush, triangles may not have backfaces, adjacent triangles will mostly not share verts.")
parser.add_argument('-o', dest='output_filename', metavar='FILE',
help="Name of output file; defaults to <filename>.obj")
args = parser.parse_args()
if args.output_filename is None:
args.output_filename = os.path.splitext(args.filename)[0] + '.obj'
meshes = list(iter_meshes(args.filename))
for mesh in meshes:
mesh.remove_degenerate()
if args.cooked:
for mesh in meshes:
if mesh.brush_guid in SINGLE_SIDED_FLAT_BRUSH:
mesh.add_backfaces()
mesh = TiltBrushMesh.from_meshes(meshes)
mesh.collapse_verts(ignore=('uv0', 'uv1', 'c', 't'))
mesh.remove_degenerate()
else:
mesh = TiltBrushMesh.from_meshes(meshes)
write_obj(mesh, args.output_filename, args.color)
print "Wrote", args.output_filename
if __name__ == '__main__':
main()
| 4,888 | 35.214815 | 183 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/bin/unpack_tilt.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
try:
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))), 'Python'))
import tiltbrush.unpack
except ImportError:
print >>sys.stderr, "Please put the 'Python' directory in your PYTHONPATH"
sys.exit(1)
def convert(in_name, compress):
if os.path.isdir(in_name):
tiltbrush.unpack.convert_dir_to_zip(in_name, compress)
print "Converted %s to zip format" % in_name
elif os.path.isfile(in_name):
tiltbrush.unpack.convert_zip_to_dir(in_name)
print "Converted %s to directory format" % in_name
else:
raise tiltbrush.unpack.ConversionError("%s doesn't exist" % in_name)
def main():
import argparse
parser = argparse.ArgumentParser(description="Converts .tilt files from packed format (zip) to unpacked format (directory), optionally applying compression.")
parser.add_argument('files', type=str, nargs='+',
help="Files to convert to the other format")
parser.add_argument('--compress', action='store_true',
help="Use compression (default: off)")
args = parser.parse_args()
for arg in args.files:
try:
convert(arg, args.compress)
except tiltbrush.unpack.ConversionError as e:
print "ERROR: %s" % e
if __name__ == '__main__':
main()
| 1,925 | 33.392857 | 160 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/bin/geometry_json_to_fbx.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Historical sample code that converts Tilt Brush '.json' exports to .fbx.
# This script is superseded by Tilt Brush native .fbx exports.
#
# There are command-line options to fine-tune the fbx creation.
# The defaults are:
#
# - Weld vertices
# - Join strokes using the same brush into a single mesh
# - Don't create backface geometry for single-sided brushes
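# Example invocation (illustrative; 'sketch.json' is a placeholder for a Tilt
# Brush geometry export):
#   python geometry_json_to_fbx.py sketch.json --merge-stroke -o sketch.fbx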
import argparse
from itertools import groupby
import os
import platform
import sys
try:
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))), 'Python'))
from tiltbrush.export import iter_meshes, TiltBrushMesh, SINGLE_SIDED_FLAT_BRUSH
except ImportError:
print >>sys.stderr, "Please put the 'Python' directory in your PYTHONPATH"
sys.exit(1)
arch = 'x64' if '64' in platform.architecture()[0] else 'x86'
dir = 'c:/Program Files/Autodesk/FBX/FBX Python SDK'
versions = sorted(os.listdir(dir), reverse=True)
found = False
for version in versions:
path = '{0}/{1}/lib/Python27_{2}'.format(dir, version, arch)
if os.path.exists(path):
sys.path.append(path)
try:
from fbx import *
found = True
except ImportError:
print >>sys.stderr, "Failed trying to import fbx from {0}".format(path)
sys.exit(1)
break
if not found:
  print >>sys.stderr, "Please install the Python FBX SDK: http://www.autodesk.com/products/fbx/"
  sys.exit(1)
# ----------------------------------------------------------------------
# Utils
# ----------------------------------------------------------------------
def as_fvec4(tup, scale=1):
if len(tup) == 3:
return FbxVector4(tup[0]*scale, tup[1]*scale, tup[2]*scale)
else:
return FbxVector4(tup[0]*scale, tup[1]*scale, tup[2]*scale, tup[3]*scale)
def as_fvec2(tup):
return FbxVector2(tup[0], tup[1])
def as_fcolor(abgr_int, memo={}):
try:
return memo[abgr_int]
except KeyError:
a = (abgr_int >> 24) & 0xff
b = (abgr_int >> 16) & 0xff
g = (abgr_int >> 8) & 0xff
r = (abgr_int ) & 0xff
scale = 1.0 / 255.0
memo[abgr_int] = val = FbxColor(r * scale, g * scale, b * scale, a * scale)
return val
# ----------------------------------------------------------------------
# Export
# ----------------------------------------------------------------------
def write_fbx_meshes(meshes, outf_name):
"""Emit a TiltBrushMesh as a .fbx file"""
import FbxCommon
(sdk, scene) = FbxCommon.InitializeSdkObjects()
docInfo = FbxDocumentInfo.Create(sdk, 'DocInfo')
docInfo.Original_ApplicationVendor.Set('Google')
docInfo.Original_ApplicationName.Set('Tilt Brush')
docInfo.LastSaved_ApplicationVendor.Set('Google')
docInfo.LastSaved_ApplicationName.Set('Tilt Brush')
scene.SetDocumentInfo(docInfo)
for mesh in meshes:
add_mesh_to_scene(sdk, scene, mesh)
FbxCommon.SaveScene(sdk, scene, outf_name)
def create_fbx_layer(fbx_mesh, data, converter_fn, layer_class,
allow_index=False, allow_allsame=False):
"""Returns an instance of layer_class populated with the passed data,
or None if the passed data is empty/nonexistent.
fbx_mesh FbxMesh
data list of Python data
converter_fn Function converting data -> FBX data
layer_class FbxLayerElementXxx class
allow_index Allow the use of eIndexToDirect mode. Useful if the data
has many repeated values. Unity3D doesn't seem to like it
when this is used for vertex colors, though.
allow_allsame Allow the use of eAllSame mode. Useful if the data might
be entirely identical. This allows passing an empty data list,
in which case FBX will use a default value."""
# No elements, or all missing data.
if not allow_allsame and (len(data) == 0 or data[0] == None):
return None
layer_elt = layer_class.Create(fbx_mesh, "")
direct = layer_elt.GetDirectArray()
index = layer_elt.GetIndexArray()
if allow_allsame or allow_index:
unique_data = sorted(set(data))
# Something about this eIndexToDirect code isn't working for vertex colors and UVs.
# Do it the long-winded way for now, I guess.
allow_index = False
if allow_allsame and len(unique_data) <= 1:
layer_elt.SetMappingMode(FbxLayerElement.eAllSame)
layer_elt.SetReferenceMode(FbxLayerElement.eDirect)
if len(unique_data) == 1:
direct.Add(converter_fn(unique_data[0]))
elif allow_index and len(unique_data) <= len(data) * .7:
layer_elt.SetMappingMode(FbxLayerElement.eByControlPoint)
layer_elt.SetReferenceMode(FbxLayerElement.eIndexToDirect)
for datum in unique_data:
direct.Add(converter_fn(datum))
for i in range(len(data)-len(unique_data)-5):
direct.Add(converter_fn(unique_data[0]))
data_to_index = dict((d, i) for (i, d) in enumerate(unique_data))
for i,datum in enumerate(data):
#index.Add(data_to_index[datum])
index.Add(data_to_index[datum])
else:
layer_elt.SetMappingMode(FbxLayerElement.eByControlPoint)
layer_elt.SetReferenceMode(FbxLayerElement.eDirect)
for datum in data:
direct.Add(converter_fn(datum))
return layer_elt
def add_mesh_to_scene(sdk, scene, mesh):
"""Emit a TiltBrushMesh as a .fbx file"""
name = mesh.name or 'Tilt Brush'
# Todo: pass scene instead?
fbx_mesh = FbxMesh.Create(sdk, name)
fbx_mesh.CreateLayer()
layer0 = fbx_mesh.GetLayer(0)
# Verts
fbx_mesh.InitControlPoints(len(mesh.v))
for i, v in enumerate(mesh.v):
fbx_mesh.SetControlPointAt(as_fvec4(v, scale=100), i)
layer_elt = create_fbx_layer(
fbx_mesh, mesh.n, as_fvec4, FbxLayerElementNormal)
if layer_elt is not None:
layer0.SetNormals(layer_elt)
layer_elt = create_fbx_layer(
fbx_mesh, mesh.c, as_fcolor, FbxLayerElementVertexColor,
allow_index = True,
allow_allsame = True)
if layer_elt is not None:
layer0.SetVertexColors(layer_elt)
# Tilt Brush may have 3- or 4-element UV channels, and may have multiple
# UV channels. This only handles the standard case of 2-component UVs
layer_elt = create_fbx_layer(
fbx_mesh, mesh.uv0, as_fvec2, FbxLayerElementUV,
allow_index = True)
if layer_elt is not None:
layer0.SetUVs(layer_elt, FbxLayerElement.eTextureDiffuse)
pass
layer_elt = create_fbx_layer(
fbx_mesh, mesh.t, as_fvec4, FbxLayerElementTangent,
allow_index = True)
if layer_elt is not None:
layer0.SetTangents(layer_elt)
# Unity's FBX import requires Binormals to be present in order to import the
# tangents but doesn't actually use them, so we just output some dummy data.
layer_elt = create_fbx_layer(
fbx_mesh, ((0, 0, 0, 0),), as_fvec4, FbxLayerElementBinormal,
allow_allsame = True)
if layer_elt is not None:
layer0.SetBinormals(layer_elt)
layer_elt = create_fbx_layer(
fbx_mesh, (), lambda x: x, FbxLayerElementMaterial, allow_allsame = True)
if layer_elt is not None:
layer0.SetMaterials(layer_elt)
# Polygons
for triplet in mesh.tri:
fbx_mesh.BeginPolygon(-1, -1, False)
fbx_mesh.AddPolygon(triplet[0])
fbx_mesh.AddPolygon(triplet[1])
fbx_mesh.AddPolygon(triplet[2])
fbx_mesh.EndPolygon()
material = FbxSurfaceLambert.Create(sdk, mesh.brush_name)
# Node tree
root = scene.GetRootNode()
node = FbxNode.Create(sdk, name)
node.SetNodeAttribute(fbx_mesh)
node.AddMaterial(material)
node.SetShadingMode(FbxNode.eTextureShading) # Hmm
root.AddChild(node)
# ----------------------------------------------------------------------
# main
# ----------------------------------------------------------------------
def main():
import argparse
parser = argparse.ArgumentParser(description="""Converts Tilt Brush '.json' exports to .fbx.""")
parser.add_argument('filename', help="Exported .json files to convert to fbx")
grp = parser.add_argument_group(description="Merging and optimization")
grp.add_argument('--merge-stroke', action='store_true',
help="Merge all strokes into a single mesh")
grp.add_argument('--merge-brush', action='store_true',
help="(default) Merge strokes that use the same brush into a single mesh")
grp.add_argument('--no-merge-brush', action='store_false', dest='merge_brush',
help="Turn off --merge-brush")
grp.add_argument('--weld-verts', action='store_true',
help="(default) Weld vertices")
grp.add_argument('--no-weld-verts', action='store_false', dest='weld_verts',
help="Turn off --weld-verts")
parser.add_argument('--add-backface', action='store_true',
help="Add backfaces to strokes that don't have them")
parser.add_argument('-o', dest='output_filename', metavar='FILE',
help="Name of output file; defaults to <filename>.fbx")
parser.set_defaults(merge_brush=True, weld_verts=True)
args = parser.parse_args()
if args.output_filename is None:
args.output_filename = os.path.splitext(args.filename)[0] + '.fbx'
meshes = list(iter_meshes(args.filename))
for mesh in meshes:
mesh.remove_degenerate()
if args.add_backface and mesh.brush_guid in SINGLE_SIDED_FLAT_BRUSH:
      mesh.add_backfaces()
if args.merge_stroke:
meshes = [ TiltBrushMesh.from_meshes(meshes, name='strokes') ]
elif args.merge_brush:
def by_guid(m): return (m.brush_guid, m.brush_name)
meshes = [ TiltBrushMesh.from_meshes(list(group), name='All %s' % (key[1], ))
for (key, group) in groupby(sorted(meshes, key=by_guid), key=by_guid) ]
if args.weld_verts:
for mesh in meshes:
# We don't write out tangents, so it's safe to ignore them when welding
mesh.collapse_verts(ignore=('t',))
mesh.remove_degenerate()
write_fbx_meshes(meshes, args.output_filename)
print "Wrote", args.output_filename
if __name__ == '__main__':
main()
| 10,455 | 34.686007 | 98 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/tests/test_tilt.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import shutil
import unittest
from tiltbrush.tilt import Tilt
@contextlib.contextmanager
def copy_of_tilt(tilt_file='data/sketch1.tilt', as_filename=False):
"""Returns a mutate-able copy of tilt_file, and removes it when done."""
base = os.path.abspath(os.path.dirname(__file__))
full_filename = os.path.join(base, tilt_file)
tmp_filename = os.path.splitext(full_filename)[0] + '_tmp.tilt'
shutil.copy(src=full_filename, dst=tmp_filename)
try:
if as_filename:
yield tmp_filename
else:
yield Tilt(tmp_filename)
finally:
if os.path.exists(tmp_filename):
os.unlink(tmp_filename)
def as_float32(f):
import struct
return struct.unpack('f', struct.pack('f', f))[0]
class TestTiltMutations(unittest.TestCase):
def test_as_directory(self):
# Test Tilt.as_directory
with copy_of_tilt(as_filename=True) as tilt_filename:
with Tilt.as_directory(tilt_filename):
self.assertTrue(os.path.isdir(tilt_filename))
self.assertTrue(os.path.exists(os.path.join(tilt_filename, 'metadata.json')))
def test_can_mutate_metadata(self):
import uuid
random_guid = str(uuid.uuid4())
with copy_of_tilt() as tilt:
with tilt.mutable_metadata() as dct:
# Check that they are different references
dct['EnvironmentPreset'] = random_guid
self.assertNotEqual(
tilt.metadata['EnvironmentPreset'], dct['EnvironmentPreset'])
# Check that it's copied back on exit from mutable_metadata
self.assertEqual(tilt.metadata['EnvironmentPreset'], random_guid)
# Check that the mutations persist
tilt2 = Tilt(tilt.filename)
self.assertEqual(tilt2.metadata['EnvironmentPreset'], random_guid)
def test_can_del_sketch(self):
# Test that "del tilt.sketch" forces it to re-load from disk
with copy_of_tilt() as tilt:
stroke = tilt.sketch.strokes[0]
del tilt.sketch
stroke2 = tilt.sketch.strokes[0]
assert stroke is not stroke2
def test_mutate_control_point(self):
# Test that control point mutations are saved
with copy_of_tilt() as tilt:
stroke = tilt.sketch.strokes[0]
new_y = as_float32(stroke.controlpoints[0].position[1] + 3)
stroke.controlpoints[0].position[1] = new_y
tilt.write_sketch()
del tilt.sketch
self.assertEqual(tilt.sketch.strokes[0].controlpoints[0].position[1], new_y)
def test_stroke_extension(self):
# Test that control point extensions can be added and removed
with copy_of_tilt() as tilt:
stroke = tilt.sketch.strokes[0]
# This sketch was made before stroke scale was a thing
self.assertEqual(stroke.flags, 0)
self.assertRaises(AttributeError, (lambda: stroke.scale))
# Test adding some extension data
stroke.scale = 1.25
self.assertEqual(stroke.scale, 1.25)
# Test removing extension data
del stroke.flags
      self.assertRaises(AttributeError, lambda: stroke.flags)
# Test that the changes survive a save+load
tilt.write_sketch()
stroke2 = Tilt(tilt.filename).sketch.strokes[0]
self.assertEqual(stroke2.scale, 1.25)
      self.assertRaises(AttributeError, lambda: stroke2.flags)
if __name__ == '__main__':
unittest.main()
| 3,871 | 34.851852 | 85 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/Python/tiltbrush/export.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for parsing Tilt Brush's json-based geometry export format.
Typically you should prefer the .fbx exported straight out of Tilt Brush.
See:
  iter_meshes()
class TiltBrushMesh"""
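# Minimal usage sketch (illustrative, not part of the original module;
# 'export.json' is a placeholder for a Tilt Brush .json export):
#   meshes = list(iter_meshes('export.json'))
#   combined = TiltBrushMesh.from_meshes(meshes, name='all strokes')
#   combined.remove_degenerate()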
import base64
from itertools import izip_longest
import json
import struct
from uuid import UUID
SINGLE_SIDED_FLAT_BRUSH = set([
UUID("cb92b597-94ca-4255-b017-0e3f42f12f9e"), # Fire
UUID("cf019139-d41c-4eb0-a1d0-5cf54b0a42f3"), # Highlighter
UUID("e8ef32b1-baa8-460a-9c2c-9cf8506794f5"), # Hypercolor
UUID("2241cd32-8ba2-48a5-9ee7-2caef7e9ed62"), # Light
UUID("c33714d1-b2f9-412e-bd50-1884c9d46336"), # Plasma
UUID("ad1ad437-76e2-450d-a23a-e17f8310b960"), # Rainbow
UUID("44bb800a-fbc3-4592-8426-94ecb05ddec3"), # Streamers
UUID("d229d335-c334-495a-a801-660ac8a87360"), # Velvet Ink
])
def _grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def iter_meshes(filename):
"""Given a Tilt Brush .json export, yields TiltBrushMesh instances."""
obj = json.load(file(filename, 'rb'))
lookup = obj['brushes']
for dct in lookup:
dct['guid'] = UUID(dct['guid'])
for json_stroke in obj['strokes']:
yield TiltBrushMesh._from_json(json_stroke, lookup)
class TiltBrushMesh(object):
"""Geometry for a single stroke/mesh.
Public attributes:
.brush_name Roughly analagous to a material
.brush_guid
.v list of positions (3-tuples)
.n list of normals (3-tuples, or None if missing)
.uv0 list of uv0 (2-, 3-, 4-tuples, or None if missing)
.uv1 see uv0
.c list of colors, as a uint32. abgr little-endian, rgba big-endian
.t list of tangents (4-tuples, or None if missing)
.tri list of triangles (3-tuples of ints)
"""
VERTEX_ATTRIBUTES = [
# Attribute name, type code
('v', 'f', None),
('n', 'f', 3),
('uv0','f', None),
('uv1','f', None),
('c', 'I', 1),
('t', 'f', 4),
]
@classmethod
def _from_json(cls, obj, brush_lookup):
"""Factory method: For use by iter_meshes."""
empty = None
stroke = TiltBrushMesh()
brush = brush_lookup[obj['brush']]
stroke.brush_name = brush['name']
stroke.brush_guid = UUID(str(brush['guid']))
# Vertex attributes
# If stroke is non-empty, 'v' is always present, and always comes first
num_verts = 0
for attr, typechar, expected_stride in cls.VERTEX_ATTRIBUTES:
if attr in obj:
data_bytes = base64.b64decode(obj[attr])
if len(data_bytes) == 0:
data_grouped = []
else:
fmt = "<%d%c" % (len(data_bytes) / 4, typechar)
data_words = struct.unpack(fmt, data_bytes)
if attr == 'v':
num_verts = len(data_words) / 3
assert (len(data_words) % num_verts) == 0
stride_words = len(data_words) / num_verts
assert (expected_stride is None) or (stride_words == expected_stride)
if stride_words > 1:
data_grouped = list(_grouper(stride_words, data_words))
else:
data_grouped = list(data_words)
setattr(stroke, attr, data_grouped)
else:
# For convenience, fill in with an empty array
if empty is None:
empty = [None,] * num_verts
setattr(stroke, attr, empty)
# Triangle indices. 'tri' might not exist, if empty
if 'tri' in obj:
data_bytes = base64.b64decode(obj['tri'])
data_words = struct.unpack("<%dI" % (len(data_bytes) / 4), data_bytes)
assert len(data_words) % 3 == 0
stroke.tri = list(_grouper(3, data_words))
else:
stroke.tri = []
return stroke
@classmethod
def from_meshes(cls, strokes, name=None):
"""Collapses multiple TiltBrushMesh instances into one.
Pass an iterable of at least 1 stroke.
Uses the brush from the first stroke."""
stroke_list = list(strokes)
dest = TiltBrushMesh()
dest.name = name
dest.brush_name = stroke_list[0].brush_name
dest.brush_guid = stroke_list[0].brush_guid
dest.v = []
dest.n = []
dest.uv0 = []
dest.uv1 = []
dest.c = []
dest.t = []
dest.tri = []
for stroke in stroke_list:
offset = len(dest.v)
dest.v.extend(stroke.v)
dest.n.extend(stroke.n)
dest.uv0.extend(stroke.uv0)
dest.uv1.extend(stroke.uv1)
dest.c.extend(stroke.c)
dest.t.extend(stroke.t)
dest.tri.extend([ (t[0] + offset, t[1] + offset, t[2] + offset)
for t in stroke.tri ])
return dest
def __init__(self):
self.name = None
self.brush_name = self.brush_guid = None
self.v = self.n = self.uv0 = self.uv1 = self.c = self.t = None
self.tri = None
def collapse_verts(self, ignore=None):
"""Collapse verts with identical data.
Put triangle indices into a canonical order, with lowest index first.
*ignore* is a list of attribute names to ignore when comparing."""
# Convert from SOA to AOS
compare = set(('n', 'uv0', 'uv1', 'c', 't'))
if ignore is not None:
compare -= set(ignore)
compare = sorted(compare)
compare.insert(0, 'v')
struct_of_arrays = []
for attr_name in sorted(compare):
struct_of_arrays.append(getattr(self, attr_name))
vert_structs = zip(*struct_of_arrays)
vert_struct_to_new_index = {}
old_index_to_new_index = []
new_index_to_old_index = []
for i_old, v in enumerate(vert_structs):
i_next = len(vert_struct_to_new_index)
i_new = vert_struct_to_new_index.setdefault(v, i_next)
if i_next == i_new:
# New vertex seen
new_index_to_old_index.append(i_old)
old_index_to_new_index.append(i_new)
def permute(old_lst, new_to_old=new_index_to_old_index):
# Returns content of old_lst in a new order
return [old_lst[i_old] for (i_new, i_old) in enumerate(new_to_old)]
def remap_tri((t0, t1, t2), old_to_new=old_index_to_new_index):
# Remaps triangle indices; remapped triangle indices will be
# rotated so that the lowest vert index comes first.
t0 = old_to_new[t0]
t1 = old_to_new[t1]
t2 = old_to_new[t2]
if t0 <= t1 and t0 <= t2:
return (t0, t1, t2)
elif t1 <= t2:
return (t1, t2, t0)
else:
return (t2, t0, t1)
self.v = permute(self.v)
self.n = permute(self.n)
self.uv0 = permute(self.uv0)
self.uv1 = permute(self.uv1)
self.c = permute(self.c)
self.t = permute(self.t)
self.tri = map(remap_tri, self.tri)
def add_backfaces(self):
"""Double the number of triangles by adding an oppositely-wound
triangle for every existing triangle."""
num_verts = len(self.v)
def flip_vec3(val):
if val is None: return None
return (-val[0], -val[1], -val[2])
# Duplicate vert data, flipping normals
# This is safe because the values are tuples (and immutable)
self.v *= 2
self.n += map(flip_vec3, self.n)
self.uv0 *= 2
self.uv1 *= 2
self.c *= 2
self.t *= 2
more_tris = []
for tri in self.tri:
more_tris.append((num_verts + tri[0],
num_verts + tri[2],
num_verts + tri[1]))
self.tri += more_tris
def remove_backfaces(self):
"""Remove backfaces, defined as any triangle that follows
an oppositely-wound triangle using the same indices.
Assumes triangle indices are in canonical order."""
# (also removes duplicates, if any exist)
seen = set()
new_tri = []
for tri in self.tri:
# Since triangle indices are in a canonical order, the reverse
# winding will always be t[0], t[2], t[1]
if tri in seen or (tri[0], tri[2], tri[1]) in seen:
pass
else:
seen.add(tri)
new_tri.append(tri)
self.tri = new_tri
def remove_degenerate(self):
"""Removes degenerate triangles."""
def is_degenerate((t0, t1, t2)):
return t0==t1 or t1==t2 or t2==t0
self.tri = [t for t in self.tri if not is_degenerate(t)]
def add_backfaces_if_necessary(self):
"""Try to detect geometry that is missing backface geometry"""
def recenter(self):
a0 = sum(v[0] for v in self.v) / len(self.v)
a1 = sum(v[1] for v in self.v) / len(self.v)
a2 = sum(v[2] for v in self.v) / len(self.v)
for i,v in enumerate(self.v):
self.v[i] = (v[0]-a0, v[1]-a1, v[2]-a2)
def dump(self, verbose=False):
print " Brush: %s, %d verts, %d tris" % (self.brush_guid, len(self.v), len(self.tri)/3)
if verbose:
print ' v'
for v in self.v:
print ' ',v
print ' t'
for t in self.tri:
print ' ',t
| 9,329 | 31.968198 | 92 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/Python/tiltbrush/tilt.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reads and writes .tilt files. The main export is 'class Tilt'."""
import os
import math
import json
import uuid
import struct
import contextlib
from collections import defaultdict
from cStringIO import StringIO
__all__ = ('Tilt', 'Sketch', 'Stroke', 'ControlPoint',
'BadTilt', 'BadMetadata', 'MissingKey')
# Format characters are as for struct.pack/unpack, with the addition of
# '@' which is a 4-byte-length-prefixed data blob.
STROKE_EXTENSION_BITS = {
0x1: ('flags', 'I'),
0x2: ('scale', 'f'),
0x4: ('group', 'I'),
0x8: ('seed', 'I'),
'unknown': lambda bit: ('stroke_ext_%d' % math.log(bit, 2),
'I' if (bit & 0xffff) else '@')
}
STROKE_EXTENSION_BY_NAME = dict(
(info[0], (bit, info[1]))
for (bit, info) in STROKE_EXTENSION_BITS.iteritems()
if bit != 'unknown'
)
CONTROLPOINT_EXTENSION_BITS = {
0x1: ('pressure', 'f'),
0x2: ('timestamp', 'I'),
'unknown': lambda bit: ('cp_ext_%d' % math.log(bit, 2), 'I')
}
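# Worked example of the bitmask scheme (illustrative): a control-point mask of
# 0x3 selects both extensions above, so every control point carries
# [pressure (float), timestamp (uint32)] after its position and orientation;
# a stroke mask of 0x2 selects only the per-stroke 'scale' float.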
#
# Internal utils
#
class memoized_property(object):
"""Modeled after @property, but runs the getter exactly once"""
def __init__(self, fget):
self.fget = fget
self.name = fget.__name__
def __get__(self, instance, owner):
if instance is None:
return None
value = self.fget(instance)
# Since this isn't a data descriptor (no __set__ method),
# instance attributes take precedence over the descriptor.
setattr(instance, self.name, value)
return value
class binfile(object):
# Helper for parsing
def __init__(self, inf):
self.inf = inf
def read(self, n):
return self.inf.read(n)
def write(self, data):
return self.inf.write(data)
def read_length_prefixed(self):
n, = self.unpack("<I")
return self.inf.read(n)
def write_length_prefixed(self, data):
self.pack("<I", len(data))
self.inf.write(data)
def unpack(self, fmt):
n = struct.calcsize(fmt)
data = self.inf.read(n)
return struct.unpack(fmt, data)
def pack(self, fmt, *args):
data = struct.pack(fmt, *args)
return self.inf.write(data)
class BadTilt(Exception): pass
class BadMetadata(BadTilt): pass
class MissingKey(BadMetadata): pass
def validate_metadata(dct):
def lookup((path, parent), key):
child_path = '%s.%s' % (path, key)
if key not in parent:
raise MissingKey('Missing %s' % child_path)
return (child_path, parent[key])
def check_string((path, val)):
if not isinstance(val, (str, unicode)):
raise BadMetadata('Not string: %s' % path)
def check_float((path, val)):
if not isinstance(val, (float, int, long)):
raise BadMetadata('Not number: %s' % path)
def check_array((path, val), desired_len=None, typecheck=None):
if not isinstance(val, (list, tuple)):
raise BadMetadata('Not array: %s' % path)
if desired_len and len(val) != desired_len:
raise BadMetadata('Not length %d: %s' % (desired_len, path))
if typecheck is not None:
for i, child_val in enumerate(val):
child_path = '%s[%s]' % (path, i)
typecheck((child_path, child_val))
def check_guid((path, val)):
try:
uuid.UUID(val)
except Exception as e:
raise BadMetadata('Not UUID: %s %s' % (path, e))
def check_xform(pathval):
check_array(lookup(pathval, 'position'), 3, check_float)
check_array(lookup(pathval, 'orientation'), 4, check_float)
root = ('metadata', dct)
try: check_xform(lookup(root, 'ThumbnailCameraTransformInRoomSpace'))
except MissingKey: pass
try: check_xform(lookup(root, 'SceneTransformInRoomSpace'))
except MissingKey: pass
try: check_xform(lookup(root, 'CanvasTransformInSceneSpace'))
except MissingKey: pass
check_array(lookup(root, 'BrushIndex'), None, check_guid)
check_guid(lookup(root, 'EnvironmentPreset'))
if 'Authors' in dct:
check_array(lookup(root, 'Authors'), None, check_string)
#
# External
#
class Tilt(object):
"""Class representing a .tilt file. Attributes:
.sketch A tilt.Sketch instance. NOTE: this is read lazily.
.metadata A dictionary of data.
  To modify the sketch, mutate .sketch and call write_sketch().
To modify the metadata, see mutable_metadata()."""
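  # Typical read/modify flow (illustrative; 'Example.tilt' is a placeholder):
  #   t = Tilt('Example.tilt')
  #   with t.mutable_metadata() as md:
  #     md['Authors'] = ['someone']
  #   for stroke in t.sketch.strokes:
  #     stroke.brush_size *= 0.5
  #   t.write_sketch()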
@staticmethod
@contextlib.contextmanager
def as_directory(tilt_file):
"""Temporarily convert *tilt_file* to directory format."""
if os.path.isdir(tilt_file):
yield Tilt(tilt_file)
else:
import tiltbrush.unpack as unpack
compressed = unpack.convert_zip_to_dir(tilt_file)
try:
yield Tilt(tilt_file)
finally:
unpack.convert_dir_to_zip(tilt_file, compressed)
@staticmethod
def iter(directory):
for r,ds,fs in os.walk(directory):
for f in ds+fs:
if f.endswith('.tilt'):
try:
yield Tilt(os.path.join(r,f))
except BadTilt:
pass
def __init__(self, filename):
self.filename = filename
self._sketch = None # lazily-loaded
with self.subfile_reader('metadata.json') as inf:
self.metadata = json.load(inf)
try:
validate_metadata(self.metadata)
except BadMetadata as e:
print 'WARNING: %s' % e
def write_sketch(self):
if False:
# Recreate BrushIndex. Not tested and not strictly necessary, so not enabled
old_index_to_brush = list(self.metadata['BrushIndex'])
old_brushes = set( old_index_to_brush )
new_brushes = set( old_index_to_brush[s.brush_idx] for s in self.sketch.strokes )
if old_brushes != new_brushes:
new_index_to_brush = sorted(new_brushes)
brush_to_new_index = dict( (b, i) for (i, b) in enumerate(new_index_to_brush) )
old_index_to_new_index = map(brush_to_new_index.get, old_index_to_brush)
for stroke in self.sketch.strokes:
stroke.brush_idx = brush_to_new_index[old_index_to_brush[stroke.brush_idx]]
with self.mutable_metadata() as dct:
dct['BrushIndex'] = new_index_to_brush
self.sketch.write(self)
@contextlib.contextmanager
def subfile_reader(self, subfile):
if os.path.isdir(self.filename):
with file(os.path.join(self.filename, subfile), 'rb') as inf:
yield inf
else:
from zipfile import ZipFile
with ZipFile(self.filename, 'r') as inzip:
with inzip.open(subfile) as inf:
yield inf
@contextlib.contextmanager
def subfile_writer(self, subfile):
# Kind of a large hammer, but it works
if os.path.isdir(self.filename):
with file(os.path.join(self.filename, subfile), 'wb') as outf:
yield outf
else:
with Tilt.as_directory(self.filename) as tilt2:
with tilt2.subfile_writer(subfile) as outf:
yield outf
@contextlib.contextmanager
def mutable_metadata(self):
"""Return a mutable copy of the metadata.
When the context manager exits, the updated metadata will
    be validated and written to disk."""
import copy
mutable_dct = copy.deepcopy(self.metadata)
yield mutable_dct
validate_metadata(mutable_dct)
if self.metadata != mutable_dct:
# Copy into self.metadata, preserving topmost reference
for k in list(self.metadata.keys()):
del self.metadata[k]
for k,v in mutable_dct.iteritems():
self.metadata[k] = copy.deepcopy(v)
new_contents = json.dumps(
mutable_dct, ensure_ascii=True, allow_nan=False,
indent=2, sort_keys=True, separators=(',', ': '))
with self.subfile_writer('metadata.json') as outf:
outf.write(new_contents)
@memoized_property
def sketch(self):
# Would be slightly more consistent semantics to do the data read
# in __init__, and parse it here; but this is probably good enough.
return Sketch(self)
def _make_ext_reader(ext_bits, ext_mask):
"""Helper for Stroke and ControlPoint parsing.
Returns:
- function reader(file) -> list<extension values>
- function writer(file, values)
- dict mapping extension_name -> extension_index
"""
infos = []
while ext_mask:
bit = ext_mask & ~(ext_mask-1)
ext_mask = ext_mask ^ bit
try: info = ext_bits[bit]
except KeyError: info = ext_bits['unknown'](bit)
infos.append(info)
if len(infos) == 0:
return (lambda f: [], lambda f,vs: None, {})
fmt = '<' + ''.join(info[1] for info in infos)
names = [info[0] for info in infos]
if '@' in fmt:
# struct.unpack isn't general enough to do the job
print fmt, names, infos
fmts = ['<'+info[1] for info in infos]
def reader(f, fmts=fmts):
values = [None] * len(fmts)
for i,fmt in enumerate(fmts):
if fmt == '<@':
nbytes, = struct.unpack('<I', f.read(4))
values[i] = f.read(nbytes)
else:
          values[i], = struct.unpack(fmt, f.read(4))
      return values
else:
def reader(f, fmt=fmt, nbytes=len(infos)*4):
values = list(struct.unpack(fmt, f.read(nbytes)))
return values
def writer(f, values, fmt=fmt):
return f.write(struct.pack(fmt, *values))
lookup = dict( (name,i) for (i,name) in enumerate(names) )
return reader, writer, lookup
def _make_stroke_ext_reader(ext_mask, memo={}):
try:
ret = memo[ext_mask]
except KeyError:
ret = memo[ext_mask] = _make_ext_reader(STROKE_EXTENSION_BITS, ext_mask)
return ret
def _make_cp_ext_reader(ext_mask, memo={}):
try:
ret = memo[ext_mask]
except KeyError:
ret = memo[ext_mask] = _make_ext_reader(CONTROLPOINT_EXTENSION_BITS, ext_mask)
return ret
class Sketch(object):
"""Stroke data from a .tilt file. Attributes:
.strokes List of tilt.Stroke instances
.filename Filename if loaded from file, but usually None
.header Opaque header data"""
def __init__(self, source):
"""source is either a file name, a file-like instance, or a Tilt instance."""
if isinstance(source, Tilt):
with source.subfile_reader('data.sketch') as inf:
self.filename = None
self._parse(binfile(inf))
elif hasattr(source, 'read'):
self.filename = None
self._parse(binfile(source))
else:
self.filename = source
with file(source, 'rb') as inf:
self._parse(binfile(inf))
def write(self, destination):
"""destination is either a file name, a file-like instance, or a Tilt instance."""
tmpf = StringIO()
self._write(binfile(tmpf))
data = tmpf.getvalue()
if isinstance(destination, Tilt):
with destination.subfile_writer('data.sketch') as outf:
outf.write(data)
elif hasattr(destination, 'write'):
destination.write(data)
else:
with file(destination, 'wb') as outf:
outf.write(data)
def _parse(self, b):
# b is a binfile instance
# mutates self
self.header = list(b.unpack("<3I"))
self.additional_header = b.read_length_prefixed()
(num_strokes, ) = b.unpack("<i")
assert 0 <= num_strokes < 300000, num_strokes
self.strokes = [Stroke.from_file(b) for i in xrange(num_strokes)]
def _write(self, b):
# b is a binfile instance.
b.pack("<3I", *self.header)
b.write_length_prefixed(self.additional_header)
b.pack("<i", len(self.strokes))
for stroke in self.strokes:
stroke._write(b)
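# Illustrative, commented-out use of Sketch; the file names are hypothetical.
#
#   sketch = Sketch('data.sketch')           # or Sketch(tilt_instance), per __init__
#   print len(sketch.strokes)
#   sketch.write('data_copy.sketch')         # round trip of the parsed data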
class Stroke(object):
"""Data for a single stroke from a .tilt file. Attributes:
.brush_idx Index into Tilt.metadata['BrushIndex']; tells you the brush GUID
.brush_color RGBA color, as 4 floats in the range [0, 1]
.brush_size Brush size, in decimeters, as a float. Multiply by
get_stroke_extension('scale') to get a true size.
.controlpoints List of tilt.ControlPoint instances.
.flags Wrapper around get/set_stroke_extension('flags')
.scale Wrapper around get/set_stroke_extension('scale')
Also see has_stroke_extension(), get_stroke_extension(), set_stroke_extension()."""
# Stroke extension data:
# self.extension is a list of optional per-stroke data.
# self.stroke_ext_lookup maps the name of an extension field
# (eg, "flags") to an index in that list.
#
# Control point extension data:
# ControlPoint.extension is a list of optional per-CP data.
# The layout of this list is guaranteed to be identical to the
# layout of all other control points in the stroke.
#
# Because of this homogeneity, the lookup table is stored in
# stroke.cp_ext_lookup, not in the control point.
@classmethod
def from_file(cls, b):
inst = cls()
inst._parse(b)
return inst
def clone(self):
"""Returns a deep copy of the stroke."""
inst = self.shallow_clone()
inst.controlpoints = map(ControlPoint.clone, inst.controlpoints)
return inst
def __getattr__(self, name):
if name in STROKE_EXTENSION_BY_NAME:
try:
return self.get_stroke_extension(name)
except LookupError:
raise AttributeError("%s (extension attribute)" % name)
raise AttributeError(name)
def __setattr__(self, name, value):
if name in STROKE_EXTENSION_BY_NAME:
return self.set_stroke_extension(name, value)
return super(Stroke, self).__setattr__(name, value)
def __delattr__(self, name):
if name in STROKE_EXTENSION_BY_NAME:
try:
self.delete_stroke_extension(name)
return
except LookupError:
raise AttributeError("%s (extension attribute)" % name)
raise AttributeError(name)
def shallow_clone(self):
"""Clone everything but the control points themselves."""
inst = self.__class__()
for attr in ('brush_idx', 'brush_color', 'brush_size', 'stroke_mask', 'cp_mask',
'stroke_ext_writer', 'stroke_ext_lookup', 'cp_ext_writer', 'cp_ext_lookup'):
setattr(inst, attr, getattr(self, attr))
inst.extension = list(self.extension)
inst.controlpoints = list(self.controlpoints)
return inst
def _parse(self, b):
# b is a binfile instance
(self.brush_idx, ) = b.unpack("<i")
self.brush_color = b.unpack("<4f")
(self.brush_size, self.stroke_mask, self.cp_mask) = b.unpack("<fII")
stroke_ext_reader, self.stroke_ext_writer, self.stroke_ext_lookup = \
_make_stroke_ext_reader(self.stroke_mask)
self.extension = stroke_ext_reader(b)
cp_ext_reader, self.cp_ext_writer, self.cp_ext_lookup = \
_make_cp_ext_reader(self.cp_mask)
(num_cp, ) = b.unpack("<i")
assert num_cp < 10000, num_cp
# Read the raw data up front, but parse it lazily
bytes_per_cp = 4 * (3 + 4 + len(self.cp_ext_lookup))
self._controlpoints = (cp_ext_reader, num_cp, b.inf.read(num_cp * bytes_per_cp))
@memoized_property
def controlpoints(self):
(cp_ext_reader, num_cp, raw_data) = self.__dict__.pop('_controlpoints')
b = binfile(StringIO(raw_data))
return [ControlPoint.from_file(b, cp_ext_reader) for i in xrange(num_cp)]
def has_stroke_extension(self, name):
"""Returns true if this stroke has the requested extension data.
The current stroke extensions are:
scale Non-negative float. The size of the player when making this stroke.
Multiply this by the brush size to get a true stroke size."""
return name in self.stroke_ext_lookup
def get_stroke_extension(self, name):
"""Returns the requested extension stroke data.
Raises LookupError if it doesn't exist."""
idx = self.stroke_ext_lookup[name]
return self.extension[idx]
def set_stroke_extension(self, name, value):
"""Sets stroke extension data.
This method can be used to add extension data."""
idx = self.stroke_ext_lookup.get(name, None)
if idx is not None:
self.extension[idx] = value
else:
# Convert from idx->value to name->value
name_to_value = dict( (name, self.extension[idx])
for (name, idx) in self.stroke_ext_lookup.iteritems() )
name_to_value[name] = value
bit, exttype = STROKE_EXTENSION_BY_NAME[name]
self.stroke_mask |= bit
_, self.stroke_ext_writer, self.stroke_ext_lookup = \
_make_stroke_ext_reader(self.stroke_mask)
# Convert back to idx->value
self.extension = [None] * len(self.stroke_ext_lookup)
for (name, idx) in self.stroke_ext_lookup.iteritems():
self.extension[idx] = name_to_value[name]
def delete_stroke_extension(self, name):
"""Remove stroke extension data.
Raises LookupError if it doesn't exist."""
idx = self.stroke_ext_lookup[name]
# Convert from idx->value to name->value
name_to_value = dict( (name, self.extension[idx])
for (name, idx) in self.stroke_ext_lookup.iteritems() )
del name_to_value[name]
bit, exttype = STROKE_EXTENSION_BY_NAME[name]
self.stroke_mask &= ~bit
_, self.stroke_ext_writer, self.stroke_ext_lookup = \
_make_stroke_ext_reader(self.stroke_mask)
# Convert back to idx->value
self.extension = [None] * len(self.stroke_ext_lookup)
for (name, idx) in self.stroke_ext_lookup.iteritems():
self.extension[idx] = name_to_value[name]
def has_cp_extension(self, name):
"""Returns true if control points in this stroke have the requested extension data.
All control points in a stroke are guaranteed to use the same set of extensions.
The current control point extensions are:
timestamp In seconds
pressure From 0 to 1"""
return name in self.cp_ext_lookup
def get_cp_extension(self, cp, name):
"""Returns the requested extension data, or raises LookupError if it doesn't exist."""
idx = self.cp_ext_lookup[name]
return cp.extension[idx]
def set_cp_extension(self, cp, name, value):
"""Sets the requested extension data, or raises LookupError if it doesn't exist."""
idx = self.cp_ext_lookup[name]
cp.extension[idx] = value
def _write(self, b):
b.pack("<i", self.brush_idx)
b.pack("<4f", *self.brush_color)
b.pack("<fII", self.brush_size, self.stroke_mask, self.cp_mask)
self.stroke_ext_writer(b, self.extension)
b.pack("<i", len(self.controlpoints))
for cp in self.controlpoints:
cp._write(b, self.cp_ext_writer)
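# Illustrative, commented-out use of the stroke-extension accessors above; 'scale'
# is the documented extension, the variable names are hypothetical.
#
#   for stroke in sketch.strokes:
#       if stroke.has_stroke_extension('scale'):
#           true_size = stroke.brush_size * stroke.get_stroke_extension('scale')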
class ControlPoint(object):
"""Data for a single control point from a stroke. Attributes:
.position Position as 3 floats. Units are decimeters.
.orientation Orientation of controller as a quaternion (x, y, z, w)."""
@classmethod
def from_file(cls, b, cp_ext_reader):
# b is a binfile instance
# reader reads controlpoint extension data from the binfile
inst = cls()
inst.position = list(b.unpack("<3f"))
inst.orientation = list(b.unpack("<4f"))
inst.extension = cp_ext_reader(b)
return inst
def clone(self):
inst = self.__class__()
for attr in ('position', 'orientation', 'extension'):
setattr(inst, attr, list(getattr(self, attr)))
return inst
def _write(self, b, cp_ext_writer):
p = self.position; o = self.orientation
b.pack("<7f", p[0], p[1], p[2], o[0], o[1], o[2], o[3])
cp_ext_writer(b, self.extension)
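# Illustrative, commented-out control-point example; assumes 'stroke' is a parsed
# Stroke as above.
#
#   for cp in stroke.controlpoints:
#       if stroke.has_cp_extension('pressure'):
#           pressure = stroke.get_cp_extension(cp, 'pressure')   # 0 to 1 per the docstring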
| 19,616 | 33.235602 | 93 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/Python/tiltbrush/__init__.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 600 | 39.066667 | 74 | py |
tilt-brush-toolkit | tilt-brush-toolkit-master/Python/tiltbrush/unpack.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a .tilt file from packed format to unpacked format,
and vice versa. Applies sanity checks when packing."""
from cStringIO import StringIO
import os
import sys
import struct
import zipfile
__all__ = ('ConversionError', 'convert_zip_to_dir', 'convert_dir_to_zip')
HEADER_FMT = '<4sHH'
HEADER_V1_FMT = HEADER_FMT + 'II'
STANDARD_FILE_ORDER = [
'header.bin',
'thumbnail.png',
'metadata.json',
'main.json',
'data.sketch'
]
STANDARD_FILE_ORDER = dict( (n,i) for (i,n) in enumerate(STANDARD_FILE_ORDER) )
class ConversionError(Exception):
"""An error occurred in the zip <-> directory conversion process"""
pass
def _destroy(file_or_dir):
"""Ensure that *file_or_dir* does not exist in the filesystem,
deleting it if necessary."""
import stat
if os.path.isfile(file_or_dir):
os.chmod(file_or_dir, stat.S_IWRITE)
os.unlink(file_or_dir)
elif os.path.isdir(file_or_dir):
for r,ds,fs in os.walk(file_or_dir, topdown=False):
for f in fs:
os.chmod(os.path.join(r, f), stat.S_IWRITE)
os.unlink(os.path.join(r, f))
for d in ds:
os.rmdir(os.path.join(r, d))
os.rmdir(file_or_dir)
if os.path.exists(file_or_dir):
raise Exception("'%s' is not empty" % file_or_dir)
def _read_and_check_header(inf):
"""Returns header bytes, or raise ConversionError if header looks invalid."""
base_bytes = inf.read(struct.calcsize(HEADER_FMT))
try:
(sentinel, headerSize, headerVersion) = struct.unpack(HEADER_FMT, base_bytes)
except struct.error as e:
raise ConversionError("Unexpected header error: %s" % (e,))
if sentinel != 'tilT':
raise ConversionError("Sentinel looks weird: %r" % sentinel)
more = headerSize - len(base_bytes)
if more < 0:
raise ConversionError("Strange header size %s" % headerSize)
more_bytes = inf.read(more)
if len(more_bytes) < more:
raise ConversionError("Bad header size (claim %s, actual %s)" % (more, len(more_bytes)))
zip_sentinel = inf.read(4)
if zip_sentinel != '' and zip_sentinel != 'PK\x03\x04':
raise ConversionError("Don't see zip sentinel after header: %r" % (zip_sentinel,))
if headerVersion != 1:
raise ConversionError("Bogus version %s" % headerVersion)
return base_bytes + more_bytes
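# For reference, the smallest valid v1 header accepted above is 16 bytes, e.g.:
#   struct.pack(HEADER_V1_FMT, 'tilT', 16, 1, 0, 0)
# i.e. the 'tilT' sentinel, uint16 headerSize, uint16 headerVersion, and two
# further uint32 fields (zero in the default header written below).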
def convert_zip_to_dir(in_name):
"""Returns True if compression was used"""
with file(in_name, 'rb') as inf:
header_bytes = _read_and_check_header(inf)
compression = False
out_name = in_name + '._part'
if os.path.exists(out_name):
raise ConversionError("Remove %s first" % out_name)
try:
os.makedirs(out_name)
with zipfile.ZipFile(in_name) as zf:
for member in zf.infolist():
if member.compress_size != member.file_size:
compression = True
zf.extract(member, out_name)
with file(os.path.join(out_name, 'header.bin'), 'wb') as outf:
outf.write(header_bytes)
tmp = in_name + '._prev'
os.rename(in_name, tmp)
os.rename(out_name, in_name)
_destroy(tmp)
return compression
finally:
_destroy(out_name)
def convert_dir_to_zip(in_name, compress):
in_name = os.path.normpath(in_name) # remove trailing '/' if any
out_name = in_name + '.part'
if os.path.exists(out_name):
raise ConversionError("Remove %s first" % out_name)
def by_standard_order(filename):
lfile = filename.lower()
try:
idx = STANDARD_FILE_ORDER[lfile]
except KeyError:
raise ConversionError("Unknown file %s; this is probably not a .tilt" % filename)
return (idx, lfile)
# Make sure metadata.json looks like valid utf-8 (rather than latin-1
# or something else that will cause mojibake)
try:
with file(os.path.join(in_name, 'metadata.json')) as inf:
import json
json.load(inf)
except IOError as e:
raise ConversionError("Cannot validate metadata.json: %s" % e)
except UnicodeDecodeError as e:
raise ConversionError("metadata.json is not valid utf-8: %s" % e)
except ValueError as e:
raise ConversionError("metadata.json is not valid json: %s" % e)
compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
try:
header_bytes = None
zipf = StringIO()
with zipfile.ZipFile(zipf, 'a', compression, False) as zf:
for (r, ds, fs) in os.walk(in_name):
fs.sort(key=by_standard_order)
for f in fs:
fullf = os.path.join(r, f)
if f == 'header.bin':
header_bytes = file(fullf).read()
continue
arcname = fullf[len(in_name)+1:]
zf.write(fullf, arcname, compression)
if header_bytes is None:
print "Missing header; using default"
header_bytes = struct.pack(HEADER_V1_FMT, 'tilT', struct.calcsize(HEADER_V1_FMT), 1, 0, 0)
if not _read_and_check_header(StringIO(header_bytes)):
raise ConversionError("Invalid header.bin")
with file(out_name, 'wb') as outf:
outf.write(header_bytes)
outf.write(zipf.getvalue())
tmp = in_name + '._prev'
os.rename(in_name, tmp)
os.rename(out_name, in_name)
_destroy(tmp)
finally:
_destroy(out_name)
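# Illustrative, commented-out round trip with the converters above; the path is
# hypothetical and both calls rename in place as described.
#
#   had_compression = convert_zip_to_dir('Example.tilt')            # .tilt file -> directory
#   convert_dir_to_zip('Example.tilt', compress=had_compression)    # directory -> .tilt file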
| 5,727 | 30.300546 | 96 | py |
wikitables | wikitables-master/test.py | import json
import unittest
import mwparserfromhell as mwp
from wikitables import ftag, WikiTable
class TestWikiTables(unittest.TestCase):
def _load(self, source, lang='en'):
raw_tables = mwp.parse(source).filter_tags(matches=ftag('table'))
return WikiTable("Test Table", raw_tables[0], lang)
def _compare(self, table, expected):
self.assertEqual(len(table.rows), len(expected))
self.assertSetEqual(set(table.head), set(expected[0].keys()))
rowsdata = json.loads(table.json())
for expected_value, row_data in zip(expected, rowsdata):
self.assertDictEqual(expected_value, row_data)
def test_simple_table(self):
source = """
{| class="wikitable"
|-
! Column 1 header !! Column 2 header
|-
| Row 1 Column 1 || Row 1 Column 2
|-
| Row 2 Column 1 || Row 2 Column 1
|}
"""
expected = [
{
"Column 1 header": "Row 1 Column 1",
"Column 2 header": "Row 1 Column 2"
},
{
"Column 1 header": "Row 2 Column 1",
"Column 2 header": "Row 2 Column 1"
}
]
table = self._load(source)
self._compare(table, expected)
def test_complex_table(self):
source = """
{| class="wikitable sortable"
! 2018<br>rank
! [[Municipalities of Brazil|City]]
! [[States of Brazil|State]]
! 2018<br>Estimate
! 2010<br>Census
! Change
|-
! 1
|'''''[[São Paulo]]'''''
| {{flag|São Paulo}}
| {{change|invert=on|12176866|10659386}}
|-
! 2
| '''''[[Rio de Janeiro]]'''''
| {{flag|Rio de Janeiro}}
| {{change|invert=on|6688927|5940224}}
|}
"""
expected = [
{
"2018rank": 1,
"City": "São Paulo",
"State": "São Paulo",
"2018Estimate": 12176866,
"2010Census": 10659386,
"Change": 14.236092022561152
},
{
"2018rank": 2,
"City": "Rio de Janeiro",
"State": "Rio de Janeiro",
"2018Estimate": 6688927,
"2010Census": 5940224,
"Change": 12.603952308869172
}
]
table = self._load(source)
self._compare(table, expected)
def test_flag_template(self):
source = """
{| class="wikitable"
! Year
! Name
! Nationality
! Citation
|-
| 1978
| [[Carl Djerassi]]
| {{AUT}} / {{USA}}
| for his work in bioorganic chemistry.
|-
| 1980
| [[Henry Eyring (chemist)|Henry Eyring]]
| {{MEX}} / {{USA}}
| for his development of absolute rate theory.
|}
"""
expected = [
{
"Year": 1978,
"Name": "Carl Djerassi",
"Nationality": "Austria / United States",
"Citation": "for his work in bioorganic chemistry."
},
{
"Year": 1980,
"Name": "Henry Eyring",
"Nationality": "Mexico / United States",
"Citation": "for his development of absolute rate theory."
}
]
table = self._load(source)
self._compare(table, expected)
def test_flag_template_other_language(self):
source = """
{| class="wikitable"
! Year
! Name
! Nationality
! Citation
|-
| 1978
| [[Carl Djerassi]]
| {{AUT}} / {{USA}}
| for his work in bioorganic chemistry.
|-
| 1980
| [[Henry Eyring (chemist)|Henry Eyring]]
| {{MEX}} / {{USA}}
| for his development of absolute rate theory.
|}
"""
expected = [
{
"Year": 1978,
"Name": "Carl Djerassi",
"Nationality": "Österreich / Vereinigte Staaten",
"Citation": "for his work in bioorganic chemistry."
},
{
"Year": 1980,
"Name": "Henry Eyring",
"Nationality": "Mexiko / Vereinigte Staaten",
"Citation": "for his development of absolute rate theory."
}
]
table = self._load(source, 'de')
self._compare(table, expected)
def test_empty_fields(self):
source = """
{| class="wikitable sortable" border="1" style="font-size:85%;"
! Archi-<br>tecture
! Bits
! Version
! Intro-<br>duced
! Max #<br>[[operand]]s
! Type
! Design <!-- Design Strategy/Philosophy -->
! [[Processor register|Registers]]<br>(excluding FP/vector)
! Instruction encoding
! [[Branch (computer science)|Branch]] evaluation
! [[Endianness|Endian-<br>ness]]
! Extensions
! Open
! Royalty<br>free
|-
| [[MOS Technology 6502|6502]]
| 8
|
| 1975
| 1
| Register Memory
| CISC
| 3
| Variable <small>(8- to 32-bit)</small>
| Condition register
| Little
|
|
|
|-
| 68000 / [[Motorola 68000 series|680x0]]
| 32
|
| 1979
| 2
| Register Memory
| [[Complex instruction set computer|CISC]]
| 8 data and 8 address
| Variable
| Condition register
| Big
|
|
|
|}
"""
expected = [
{
"Archi-tecture": 6502,
"Bits": 8,
"Branch evaluation": "Condition register",
"Design": "CISC",
"Endian-ness": "Little",
"Extensions": "",
"Instruction encoding": "Variable (8- to 32-bit)",
"Intro-duced": 1975,
"Max #operands": 1,
"Open": "",
"Registers(excluding FP/vector)": 3,
"Royaltyfree": "",
"Type": "Register Memory",
"Version": ""
},
{
"Archi-tecture": "68000 / 680x0",
"Bits": 32,
"Branch evaluation": "Condition register",
"Design": "CISC",
"Endian-ness": "Big",
"Extensions": "",
"Instruction encoding": "Variable",
"Intro-duced": 1979,
"Max #operands": 2,
"Open": "",
"Registers(excluding FP/vector)": "8 data and 8 address",
"Royaltyfree": "",
"Type": "Register Memory",
"Version": ""
}
]
table = self._load(source)
self._compare(table, expected)
if __name__ == '__main__':
unittest.main()
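# Illustrative, commented-out standalone use of the same parsing path the tests
# exercise via _load(); the wikitext string is hypothetical.
#
#   raw = mwp.parse(wikitext).filter_tags(matches=ftag('table'))
#   table = WikiTable("My Table", raw[0], 'en')
#   print(table.json())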
| 6,273 | 23.412451 | 74 | py |
wikitables | wikitables-master/setup.py | from setuptools import setup
exec(open('wikitables/version.py').read())
setup(name='wikitables',
version=version,
packages=['wikitables'],
description='Import tables from any Wikipedia article',
author='Bradley Cicenas',
author_email='[email protected]',
url='https://github.com/bcicen/wikitables',
install_requires=[
'mwparserfromhell>=0.4.3',
'requests>=2.9.1',
'pycountry>=20.7.3'
],
license='http://opensource.org/licenses/MIT',
classifiers=(
'Natural Language :: English',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
),
keywords='wikipedia data cli commandline',
entry_points={'console_scripts': ['wikitables = wikitables.cli:main']}
)
| 957 | 32.034483 | 76 | py |