'''
25 2D-Gaussian Simulation
Compare different Sampling methods and DRE methods
1. DRE method
1.1. By NN
DR models: MLP
Loss functions: uLISF, DSKL, BARR, SP (ours)
lambda for SP is selected by maximizing the average density ratio on the validation set
1.2. GAN property
2. Data Generation:
(1). Target Distribution p_r: a mixture of 25 2-D Gaussians
25 means given by all pairs (x, y) with x, y in {-2, -1, 0, 1, 2}
Each Gaussian has a covariance matrix sigma*diag(2), sigma=0.05
(2). Proposal Distribution p_g: GAN
NTRAIN = 50000, NVALID=50000, NTEST=10000
3. Sampling from GAN by None/RS/MH/SIR
4. Evaluation on a held-out test set
Prop. of good samples (within 4 sd) and Prop. of recovered modes
'''
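#--------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): the two evaluation metrics
# described in the docstring above, written as a standalone helper. It mirrors the
# computation performed later inside the nSim loop; the function name and the plain
# numpy formulation are assumptions.
import numpy as np

def _sketch_eval_metrics(samples, means, threshold):
    """samples: (N, 2) fake samples; means: (25, 2) mixture means; threshold: 4*sigma."""
    # pairwise distances from every sample to every mode: shape (N, 25)
    dists = np.sqrt(((samples[:, None, :] - means[None, :, :]) ** 2).sum(axis=2))
    min_dist = dists.min(axis=1)
    closest_mode = dists.argmin(axis=1)
    prop_good = (min_dist < threshold).mean() * 100             # % of samples within 4 sd of a mode
    prop_modes = len(set(closest_mode)) / means.shape[0] * 100  # % of modes recovered
    return prop_good, prop_modes
#--------------------------------------------------------------------------------------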
wd = './Simulation'
import os
os.chdir(wd)
import timeit
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn import functional as F
import random
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch import autograd
from torchvision.utils import save_image
from tqdm import tqdm
import gc
from itertools import groupby
import argparse
from sklearn.linear_model import LogisticRegression
import multiprocessing
from scipy.stats import multivariate_normal
from scipy.stats import ks_2samp
import pickle
import csv
from sklearn.model_selection import GridSearchCV
from sklearn import mixture
from utils import *
from models import *
from Train_DRE import train_DRE_GAN
from Train_GAN import *
#######################################################################################
''' Settings '''
#######################################################################################
parser = argparse.ArgumentParser(description='Simulation')
'''Overall Settings'''
parser.add_argument('--NSIM', type=int, default=1,
help = "How many times does this experiment need to be repeated?")
parser.add_argument('--DIM', type=int, default=2,
help = "Dimension of the Euclidean space of our interest")
parser.add_argument('--n_comp_tar', type=int, default=25,
help = "Number of mixture components in the target distribution")
parser.add_argument('--DRE', type=str, default='DRE_SP',
choices=['None', 'GT', 'DRE_uLSIF', 'DRE_DSKL', 'DRE_BARR', 'DRE_SP', 'disc', 'disc_MHcal', 'disc_KeepTrain'], #GT is ground truth
help='Density ratio estimation method; None means randomly sample from the proposal distribution or the trained GAN')
parser.add_argument('--Sampling', type=str, default='RS',
help='Sampling method; Candidates: None, RS, MH, SIR') #RS: rejection sampling, MH: Metropolis-Hastings; SIR: Sampling-Importance Resampling
parser.add_argument('--seed', type=int, default=2019, metavar='S',
help='random seed (default: 2019)')
parser.add_argument('--show_reference', action='store_true', default=False,
help='Assign 1 as density ratios to all samples and compute errors')
parser.add_argument('--show_visualization', action='store_true', default=False,
help='Plot fake samples in 2D coordinates')
''' Data Generation '''
parser.add_argument('--NTRAIN', type=int, default=50000)
parser.add_argument('--NTEST', type=int, default=10000)
''' GAN settings '''
parser.add_argument('--epoch_gan', type=int, default=50) #default 50
parser.add_argument('--lr_gan', type=float, default=1e-3,
help='learning rate')
parser.add_argument('--dim_gan', type=int, default=2,
help='Latent dimension of GAN')
parser.add_argument('--batch_size_gan', type=int, default=512, metavar='N',
help='input batch size for training GAN')
parser.add_argument('--resumeTrain_gan', type=int, default=0)
parser.add_argument('--compute_disc_err', action='store_true', default=False,
help='Compute the distance between the discriminator and its optimality')
'''DRE Settings'''
parser.add_argument('--DR_Net', type=str, default='MLP5',
choices=['MLP3', 'MLP5', 'MLP7', 'MLP9',
'CNN5'],
help='DR Model') # DR models
parser.add_argument('--epoch_DRE', type=int, default=200)
parser.add_argument('--base_lr_DRE', type=float, default=1e-5,
help='learning rate')
parser.add_argument('--decay_lr_DRE', action='store_true', default=False,
help='decay learning rate')
parser.add_argument('--lr_decay_epochs_DRE', type=int, default=400)
parser.add_argument('--batch_size_DRE', type=int, default=512, metavar='N',
help='input batch size for training DRE')
parser.add_argument('--lambda_DRE', type=float, default=0.0, #BARR: lambda=10
help='penalty in DRE')
parser.add_argument('--weightdecay_DRE', type=float, default=0.0,
help='weight decay in DRE')
parser.add_argument('--resumeTrain_DRE', type=int, default=0)
parser.add_argument('--DR_final_ActFn', type=str, default='ReLU',
help='Final layer of the Density-ratio model; Candidates: Softplus or ReLU')
parser.add_argument('--TrainPreNetDRE', action='store_true', default=False,
help='Pre-trained MLP for DRE in Feature Space')
parser.add_argument('--DRE_save_at_epoch', nargs='+', type=int)
parser.add_argument('--epoch_KeepTrain', type=int, default=20)
parser.add_argument('--compute_dre_err', action='store_true', default=False,
help='Compare the DRE method with the ground truth')
''' Mixture Gaussian (for density estimation) Settings '''
parser.add_argument('--gmm_nfake', type=int, default=100000)
# parser.add_argument('--gmm_ncomp', type=int, default=0) #gmm_ncomp is non-positive, then we do ncomp selection
parser.add_argument('--gmm_ncomp_nsim', nargs='+', type=int) #A list of ncomp for NSIM rounds. If gmm_ncomp is None, then we do ncomp selection
parser.add_argument('--gmm_ncomp_grid', nargs='+', type=int)
parser.add_argument('--gmm_ncomp_grid_lb', type=int, default=1)
parser.add_argument('--gmm_ncomp_grid_ub', type=int, default=100)
parser.add_argument('--gmm_ncomp_grid_step', type=int, default=1)
args = parser.parse_args()
#--------------------------------
# system
assert torch.cuda.is_available()
NGPU = torch.cuda.device_count()
device = torch.device("cuda" if NGPU>0 else "cpu")
cores= multiprocessing.cpu_count()
#--------------------------------
# Extra Data Generation Settings
n_comp_tar = args.n_comp_tar
n_features = args.DIM
mean_grid_tar = [-2, -1, 0, 1, 2]
sigma_tar = 0.05
n_classes = n_comp_tar
quality_threshold = sigma_tar*4 #good samples are within 4 standard deviations
#--------------------------------
# GAN Settings
epoch_GAN = args.epoch_gan
lr_GAN = args.lr_gan
batch_size_GAN = args.batch_size_gan
dim_GAN = args.dim_gan
plot_in_train = True
gan_Adam_beta1 = 0.5
gan_Adam_beta2 = 0.999
#--------------------------------
# Extra DRE Settings
DRE_Adam_beta1 = 0.5
DRE_Adam_beta2 = 0.999
comp_ratio_bs = 1000 #batch size for computing density ratios
base_lr_PreNetDRE = 1e-1
epoch_PreNetDRE = 100
DRE_save_at_epoch = args.DRE_save_at_epoch # save checkpoints at these epochs
# DRE_save_at_epoch = [20, 50, 100, 150, 200, 300, 400, 500, 800]
epoch_KeepTrain = args.epoch_KeepTrain #keep training for DRS
ckp_epoch_KeepTrain = [i for i in range(100) if i%5==0]
#--------------------------------
# Mixture Gaussian Setting
gmm_nfake = args.gmm_nfake
# gmm_ncomp = args.gmm_ncomp
gmm_ncomp_nsim = args.gmm_ncomp_nsim
# if gmm_ncomp_nsim is not None:
# assert len(gmm_ncomp_nsim) == args.NSIM
if args.gmm_ncomp_grid is not None:
gmm_ncomp_grid = args.gmm_ncomp_grid
else:
gmm_ncomp_grid = np.arange(args.gmm_ncomp_grid_lb, args.gmm_ncomp_grid_ub+args.gmm_ncomp_grid_step, args.gmm_ncomp_grid_step)
#--------------------------------
# Extra Sampling Settings
NFAKE = args.NTEST
samp_batch_size = 10000
MH_K = 100
MH_mute = True #do not print sampling progress
#-------------------------------
# seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
np.random.seed(args.seed)
#-------------------------------
# output folders
save_models_folder = wd + '/Output/saved_models/'
os.makedirs(save_models_folder,exist_ok=True)
save_images_folder = wd + '/Output/saved_images/'
os.makedirs(save_images_folder,exist_ok=True)
save_traincurves_folder = wd + '/Output/Training_loss_fig/'
os.makedirs(save_traincurves_folder,exist_ok=True)
save_GANimages_InTrain_folder = wd + '/Output/saved_images/GAN_InTrain'
os.makedirs(save_GANimages_InTrain_folder,exist_ok=True)
save_objects_folder = wd + '/Output/saved_objects'
os.makedirs(save_objects_folder,exist_ok=True)
#######################################################################################
''' Start Experiment '''
#######################################################################################
#---------------------------------
# sampler for reference distribution
means_tar = np.zeros((1,n_features))
for i in mean_grid_tar:
for j in mean_grid_tar:
means_tar = np.concatenate((means_tar, np.array([i,j]).reshape(-1,n_features)), axis=0)
means_tar = means_tar[1:]
assert means_tar.shape[0] == n_comp_tar
assert means_tar.shape[1] == n_features
def generate_data_tar(nsamp):
return sampler_MixGaussian(nsamp, means_tar, sigma = sigma_tar, dim = n_features)
def p_r(samples): #pdf of the underlying distribution; samples is a n by n_features sample matrix
return pdf_MixGaussian(samples, means_tar, sigma_tar)
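# `sampler_MixGaussian` and `pdf_MixGaussian` live in utils.py, which is not shown here.
# The sketch below is only an assumption about what pdf_MixGaussian computes (an equally
# weighted, isotropic mixture whose per-coordinate standard deviation is sigma); the exact
# convention (sigma vs. sigma**2 in the covariance) is defined in utils.py. It reuses the
# numpy and scipy imports above.
def _sketch_pdf_MixGaussian(samples, means, sigma):
    n_comp, dim = means.shape
    cov = (sigma**2) * np.eye(dim)
    pdf = np.zeros(samples.shape[0])
    for mu in means:  # equal weight 1/n_comp on every component
        pdf += multivariate_normal.pdf(samples, mean=mu, cov=cov)
    return pdf / n_comp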
prop_recovered_modes = np.zeros(args.NSIM) # num of recovered modes divided by num of modes
prop_good_samples = np.zeros(args.NSIM) # num of good fake samples divided by num of all fake samples
valid_densityratios_all = [] #store density ratios for validation samples
train_densityratios_all = []
ks_test_results = np.zeros((args.NSIM,2))
dre_errors_all = np.zeros(args.NSIM) #compute density ratios on the test set (hold-out set) with each DRE method and the ground truth
dre_errors_hq = np.zeros(args.NSIM)
dre_errors_lq = np.zeros(args.NSIM)
esti_avg_densityratio = np.zeros((args.NSIM, 4)) #estimated density ratios of testing samples, NFAKE fake samples, HQ fake samples, LQ fake samples
true_avg_densityratio = np.zeros((args.NSIM, 4)) #true density ratios of testing samples, NFAKE fake samples, HQ fake samples, LQ fake samples
disc_errors_all = np.zeros(args.NSIM) #compute the distance between the discriminator and its optimality
nfake_in_train = np.zeros(args.NSIM)
print("\n Begin The Experiment. Sample from a GAN! >>>")
start = timeit.default_timer()
for nSim in range(args.NSIM):
print("Round %s" % (nSim))
np.random.seed(nSim) #set seed for current simulation
###############################################################################
# Data generation and dataloaders
###############################################################################
train_samples_tar, train_labels_tar = generate_data_tar(args.NTRAIN)
valid_samples_tar, valid_labels_tar = generate_data_tar(args.NTRAIN)
test_samples_tar, test_labels_tar = generate_data_tar(args.NTEST)
train_dataset_tar = custom_dataset(train_samples_tar, train_labels_tar)
test_dataset_tar = custom_dataset(test_samples_tar, test_labels_tar)
train_dataloader_tar = torch.utils.data.DataLoader(train_dataset_tar, batch_size=args.batch_size_DRE, shuffle=True, num_workers=0)
test_dataloader_tar = torch.utils.data.DataLoader(test_dataset_tar, batch_size=100, shuffle=False, num_workers=0)
# #compute the criterion for determining good samples through train_samples_tar
# # for each mixture component, compute the average distance of real samples from this component to the mean
# l2_dis_train_samples = np.zeros(args.NTRAIN) #l2 distance between a fake sample and a mode
# for i in range(args.NTRAIN):
# indx_mean = int(train_labels_tar[i])
# l2_dis_train_samples[i] = np.sqrt(np.sum((train_samples_tar[i]-means_tar[indx_mean])**2))
# print(l2_dis_train_samples.max())
###############################################################################
# Train a GAN model
###############################################################################
Filename_GAN = save_models_folder + '/ckpt_GAN_epoch_' + str(args.epoch_gan) + '_SEED_' + str(args.seed) + '_nSim_' + str(nSim)
print("\n Begin Training GAN:")
#model initialization
netG = generator(ngpu=NGPU, nz=dim_GAN, out_dim=n_features)
netD = discriminator(ngpu=NGPU, input_dim = n_features)
if not os.path.isfile(Filename_GAN):
criterion = nn.BCELoss()
optimizerG = torch.optim.Adam(netG.parameters(), lr=lr_GAN, betas=(gan_Adam_beta1, gan_Adam_beta2))
optimizerD = torch.optim.Adam(netD.parameters(), lr=lr_GAN, betas=(gan_Adam_beta1, gan_Adam_beta2))
# Start training
netG, netD, optimizerG, optimizerD = train_GAN(epoch_GAN, dim_GAN, train_dataloader_tar, netG, netD, optimizerG, optimizerD, criterion, save_models_folder = save_models_folder, ResumeEpoch = args.resumeTrain_gan, device=device, plot_in_train=plot_in_train, save_images_folder = save_GANimages_InTrain_folder, samples_tar = test_samples_tar)
# store model
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
}, Filename_GAN)
torch.cuda.empty_cache()
else: #load pre-trained GAN
print("\n GAN exists! Loading Pretrained Model>>>")
checkpoint = torch.load(Filename_GAN)
netG.load_state_dict(checkpoint['netG_state_dict'])
netD.load_state_dict(checkpoint['netD_state_dict'])
netG = netG.to(device)
netD = netD.to(device)
def fn_sampleGAN(nfake, batch_size=1000):
return SampGAN(netG, GAN_Latent_Length = args.dim_gan, NFAKE = nfake, batch_size = batch_size, device=device)
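# `SampGAN` comes from utils.py (not shown). A minimal sketch of its assumed behavior:
# draw latent codes z ~ N(0, I), push them through netG in batches, and stack the outputs
# into an (nfake, n_features) numpy array. The flat (batch, nz) latent shape is an
# assumption that matches an MLP generator.
def _sketch_SampGAN(gen, latent_dim, nfake, batch_size, device):
    gen.eval()
    out = []
    with torch.no_grad():
        n_got = 0
        while n_got < nfake:
            n_batch = min(batch_size, nfake - n_got)
            z = torch.randn(n_batch, latent_dim, device=device)
            out.append(gen(z).cpu().numpy())
            n_got += n_batch
    return np.concatenate(out, axis=0)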
###############################################################################
# Construct a function to compute density-ratio
###############################################################################
# Approximate DR by NN
if args.DRE in ['DRE_uLSIF', 'DRE_DSKL', 'DRE_BARR', 'DRE_SP']:
# TRAIN DRE
DRE_loss_type = args.DRE[4:] #loss type
if args.DR_Net in ['MLP3', 'MLP5', 'MLP7', 'MLP9']:
netDR = DR_MLP(args.DR_Net, init_in_dim = n_features, ngpu=NGPU, final_ActFn=args.DR_final_ActFn)
elif args.DR_Net in ['CNN5']:
netDR = DR_CNN(args.DR_Net, init_in_dim = n_features, ngpu=NGPU, final_ActFn=args.DR_final_ActFn)
optimizer = torch.optim.Adam(netDR.parameters(), lr = args.base_lr_DRE, betas=(DRE_Adam_beta1, DRE_Adam_beta2), weight_decay=args.weightdecay_DRE)
#optimizer = torch.optim.RMSprop(netDR.parameters(), lr= args.base_lr_DRE, alpha=0.99, eps=1e-08, weight_decay=args.weightdecay_DRE, momentum=0.9, centered=False)
Filename_DRE = save_models_folder + '/ckpt_' + args.DRE +'_LAMBDA_' + str(args.lambda_DRE) + '_FinalActFn_' + args.DR_final_ActFn + '_epoch_' + str(args.epoch_DRE) \
+ "_PreNetDRE_" + str(args.TrainPreNetDRE) + '_SEED_' + str(args.seed) + '_nSim_' + str(nSim) + '_epochGAN_' + str(epoch_GAN)
filename0 = save_traincurves_folder + '/TrainCurve_' + args.DRE +'_LAMBDA_' + str(args.lambda_DRE) + '_FinalActFn_' + args.DR_final_ActFn + '_epoch_' \
+ str(args.epoch_DRE) + "_PreNetDRE_" + str(args.TrainPreNetDRE) + '_SEED_' + str(args.seed) + "_nSim_" + str(nSim) + '_epochGAN_' + str(epoch_GAN) + "_TrainLoss"
plot_filename = filename0 + '.pdf'
npy_filename = filename0 + '.npy'
# Train a net to extract features for DR net
if args.TrainPreNetDRE:
print("\n Begin Training PreNetDRE Net:")
Filename_PreNetDRE = save_models_folder + '/ckpt_PreNetDRE_epochPreNetDRE_' + str(epoch_PreNetDRE) + '_SEED_' + str(args.seed) + '_nSim_' + str(nSim) + '_epochGAN_' + str(epoch_GAN)
PreNetDRE_MLP = PreNetDRE_MLP(init_in_dim = n_features, ngpu=NGPU)
if not os.path.isfile(Filename_PreNetDRE):
criterion_PreNetDRE = nn.CrossEntropyLoss()
optimizer_PreNetDRE = torch.optim.SGD(PreNetDRE_MLP.parameters(), lr = base_lr_PreNetDRE, momentum= 0.9, weight_decay=1e-4)
PreNetDRE_MLP, _ = train_PreNetDRE(epoch_PreNetDRE, train_dataloader_tar, test_dataloader_tar, PreNetDRE_MLP, base_lr_PreNetDRE, optimizer_PreNetDRE, criterion_PreNetDRE, device=device)
# save model
torch.save({
'net_state_dict': PreNetDRE_MLP.state_dict(),
}, Filename_PreNetDRE)
else:
print("\n PreNetDRE Net exits and start loading:")
checkpoint_PreNetDRE_MLP = torch.load(Filename_PreNetDRE)
PreNetDRE_MLP.load_state_dict(checkpoint_PreNetDRE_MLP['net_state_dict'])
PreNetDRE_MLP = PreNetDRE_MLP.to(device)
def extract_features(samples):
#samples: a numpy array
n_samples = samples.shape[0]
batch_size_tmp = 1000
dataset_tmp = custom_dataset(samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
data_iter = iter(dataloader_tmp)
extracted_features = np.zeros((n_samples+batch_size_tmp, n_features))
PreNetDRE_MLP.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples, _ = next(data_iter)
batch_samples = batch_samples.type(torch.float).to(device)
_, batch_features = PreNetDRE_MLP(batch_samples)
extracted_features[tmp:(tmp+batch_size_tmp)] = batch_features.cpu().detach().numpy()
tmp += batch_size_tmp
#end while
return extracted_features[0:n_samples]
test_features_tar = extract_features(test_samples_tar)
plt.switch_backend('agg')
mpl.style.use('seaborn')
plt.figure()
plt.grid(b=True)
flag0 = 0; flag1=0
colors = ['b','g','r','c','m','y','k']
marker_styles = ['.', 'o', 'v', 's']
for nc in range(n_classes):
indx = np.where(test_labels_tar == nc)[0]
plt.scatter(test_features_tar[indx, 0], test_features_tar[indx, 1], c=colors[flag0], marker=marker_styles[flag1], s=8)
flag0 += 1
if flag0 % 7 ==0 :
flag0 = 0; flag1+=1
filename0 = save_images_folder + '/test.pdf'
plt.savefig(filename0)
plt.close()
if not os.path.isfile(Filename_DRE):
# Train
print("\n Begin Training DRE NET:")
if args.TrainPreNetDRE:
netDR, optimizer, avg_train_loss = train_DRE_GAN(net=netDR, optimizer=optimizer, BASE_LR_DRE=args.base_lr_DRE, EPOCHS_DRE=args.epoch_DRE, LAMBDA=args.lambda_DRE, tar_dataloader=train_dataloader_tar, netG=netG, dim_gan=dim_GAN, PreNetDRE = PreNetDRE_MLP, decay_lr=args.decay_lr_DRE, decay_epochs=args.lr_decay_epochs_DRE, loss_type=DRE_loss_type, save_models_folder = save_models_folder, ResumeEpoch=args.resumeTrain_DRE, NGPU=NGPU, device=device, save_at_epoch = DRE_save_at_epoch, current_nsim=nSim)
else:
netDR, optimizer, avg_train_loss = train_DRE_GAN(net=netDR, optimizer=optimizer, BASE_LR_DRE=args.base_lr_DRE, EPOCHS_DRE=args.epoch_DRE, LAMBDA=args.lambda_DRE, tar_dataloader=train_dataloader_tar, netG=netG, dim_gan=dim_GAN, decay_lr=args.decay_lr_DRE, decay_epochs=args.lr_decay_epochs_DRE, loss_type=DRE_loss_type, save_models_folder = save_models_folder, ResumeEpoch=args.resumeTrain_DRE, NGPU=NGPU, device=device, save_at_epoch = DRE_save_at_epoch, current_nsim=nSim)
# Plot loss
PlotLoss(avg_train_loss, plot_filename)
np.save(npy_filename, np.array(avg_train_loss))
# save model
torch.save({
'net_state_dict': netDR.state_dict(),
}, Filename_DRE)
else: #if the DR model is already trained, load the checkpoint
print("\n DRE NET exists and start loading:")
checkpoint_netDR = torch.load(Filename_DRE)
netDR.load_state_dict(checkpoint_netDR['net_state_dict'])
netDR = netDR.to(device)
def comp_density_ratio(samples, verbose=False):
#samples: a numpy array
n_samples = samples.shape[0]
batch_size_tmp = 1000
dataset_tmp = custom_dataset(samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
data_iter = iter(dataloader_tmp)
density_ratios = np.zeros((n_samples+batch_size_tmp, 1))
netDR.eval()
if args.TrainPreNetDRE:
PreNetDRE_MLP.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples, _ = next(data_iter)
batch_samples = batch_samples.type(torch.float).to(device)
if args.TrainPreNetDRE:
_, batch_features = PreNetDRE_MLP(batch_samples)
batch_weights = netDR(batch_features)
else:
batch_weights = netDR(batch_samples)
#density_ratios[tmp:(tmp+batch_size_tmp)] = batch_weights.cpu().detach().numpy()
density_ratios[tmp:(tmp+batch_size_tmp)] = batch_weights.cpu().numpy()
tmp += batch_size_tmp
if verbose:
print(batch_weights.cpu().numpy().mean())
#end while
return density_ratios[0:n_samples]+1e-14
###################
# DRE based on GAN property
elif args.DRE in ['disc', 'disc_MHcal', 'disc_KeepTrain']:
if args.DRE == 'disc': #use GAN property to compute density ratio; ratio=D/(1-D);
# function for computing a bunch of images
# def comp_density_ratio(samples, netD):
def comp_density_ratio(samples):
#samples: a numpy array
n_samples = samples.shape[0]
batch_size_tmp = 1000
dataset_tmp = custom_dataset(samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
data_iter = iter(dataloader_tmp)
density_ratios = np.zeros((n_samples+batch_size_tmp, 1))
# print("\n Begin computing density ratio for images >>")
netD.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples, _ = next(data_iter)
batch_samples = batch_samples.type(torch.float).to(device)
disc_probs = netD(batch_samples).cpu().detach().numpy()
disc_probs = np.clip(disc_probs.astype(float), 1e-14, 1 - 1e-14)
density_ratios[tmp:(tmp+batch_size_tmp)] = np.divide(disc_probs, 1-disc_probs)
tmp += batch_size_tmp
#end while
# print("\n End computing density ratio.")
return density_ratios[0:n_samples]
#-----------------------------------
elif args.DRE == 'disc_MHcal': #use the calibration method in MH-GAN to calibrate disc
n_test = valid_samples_tar.shape[0]
batch_size_tmp = 1000
cal_labels_fake = np.zeros((n_test,1))
cal_labels_real = np.ones((n_test,1))
cal_samples_fake = fn_sampleGAN(nfake=n_test, batch_size=batch_size_tmp)
dataset_fake = custom_dataset(cal_samples_fake)
dataloader_fake = torch.utils.data.DataLoader(dataset_fake, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
dataset_real = custom_dataset(valid_samples_tar)
dataloader_real = torch.utils.data.DataLoader(dataset_real, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
del cal_samples_fake; gc.collect()
# get the output of disc before the final sigmoid layer; the \tilde{D} in Eq.(4) in "Discriminator Rejection Sampling"
# def comp_disc_scores(samples_dataloader, netD):
def comp_disc_scores(samples_dataloader):
# samples_dataloader: the data loader for images
n_samples = len(samples_dataloader.dataset)
data_iter = iter(samples_dataloader)
batch_size_tmp = samples_dataloader.batch_size
disc_scores = np.zeros((n_samples+batch_size_tmp, 1))
netD.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples, _ = next(data_iter)
batch_samples = batch_samples.type(torch.float).to(device)
disc_probs = netD(batch_samples).cpu().detach().numpy()
disc_probs = np.clip(disc_probs.astype(float), 1e-14, 1 - 1e-14)
disc_scores[tmp:(tmp+batch_size_tmp)] = np.log(np.divide(disc_probs, 1-disc_probs))
tmp += batch_size_tmp
#end while
return disc_scores[0:n_samples]
cal_disc_scores_fake = comp_disc_scores(dataloader_fake) #discriminator scores for fake images
cal_disc_scores_real = comp_disc_scores(dataloader_real) #discriminator scores for real images
# Train a logistic regression model
X_train = np.concatenate((cal_disc_scores_fake, cal_disc_scores_real),axis=0).reshape(-1,1)
y_train = np.concatenate((cal_labels_fake, cal_labels_real), axis=0).reshape(-1)
#del cal_disc_scores_fake, cal_disc_scores_real; gc.collect()
cal_logReg = LogisticRegression(solver="liblinear").fit(X_train, y_train)
# function for computing a bunch of images
# def comp_density_ratio(samples, netD):
def comp_density_ratio(samples):
#samples: a numpy array
dataset_tmp = custom_dataset(samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
disc_scores = comp_disc_scores(dataloader_tmp)
disc_probs = (cal_logReg.predict_proba(disc_scores))[:,1] #second column corresponds to the real class
disc_probs = np.clip(disc_probs.astype(float), 1e-14, 1 - 1e-14)
density_ratios = np.divide(disc_probs, 1-disc_probs)
return density_ratios.reshape(-1,1)
#---------------------------------------------
# disc_KeepTrain
elif args.DRE == "disc_KeepTrain":
batch_size_KeepTrain = 256
Filename_KeepTrain_Disc = save_models_folder + '/ckpt_KeepTrainDisc_epoch_'+str(epoch_KeepTrain)+ '_SEED_' + str(args.seed) + '_nSim_' + str(nSim) + '_epochGAN_' + str(epoch_GAN)
if not os.path.isfile(Filename_KeepTrain_Disc):
print("Resume training Discriminator for %d epochs" % epoch_KeepTrain)
# keep train the discriminator
n_heldout = valid_samples_tar.data.shape[0]
batch_size_tmp = 500
cal_labels = np.concatenate((np.zeros((n_heldout,1)), np.ones((n_heldout,1))), axis=0)
cal_imgs_fake = fn_sampleGAN(nfake=n_heldout, batch_size=batch_size_tmp)
cal_imgs = np.concatenate((cal_imgs_fake, valid_samples_tar), axis=0)
del cal_imgs_fake; gc.collect()
cal_dataset = custom_dataset(cal_imgs, cal_labels)
cal_dataloader = torch.utils.data.DataLoader(cal_dataset, batch_size=batch_size_KeepTrain, shuffle=True, num_workers=0)
criterion_KeepTrain = nn.BCELoss()
# optimizerD_KeepTrain = torch.optim.SGD(netD.parameters(), lr = 1e-3, momentum= 0.9, weight_decay=1e-4)
optimizerD_KeepTrain = torch.optim.Adam(netD.parameters(), lr=lr_GAN, betas=(gan_Adam_beta1, gan_Adam_beta2))
# optimizerD_KeepTrain = torch.optim.Adam(netD.parameters(), lr=lr_GAN/2, betas=(gan_Adam_beta1, gan_Adam_beta2))
for epoch in range(epoch_KeepTrain):
netD.train()
train_loss = 0
for batch_idx, (batch_train_samples, batch_train_labels) in enumerate(cal_dataloader):
batch_train_samples = batch_train_samples.type(torch.float).cuda()
batch_train_labels = batch_train_labels.type(torch.float).cuda()
#Forward pass
outputs = netD(batch_train_samples)
loss = criterion_KeepTrain(outputs, batch_train_labels)
#backward pass
optimizerD_KeepTrain.zero_grad()
loss.backward()
optimizerD_KeepTrain.step()
train_loss += loss.cpu().item()
#end for batch_idx
print('KeepTrain netD: [epoch %d/%d] train_loss:%.3f' % (epoch+1, epoch_KeepTrain, train_loss/(batch_idx+1)))
if epoch+1 in ckp_epoch_KeepTrain:
filename_tmp = save_models_folder + '/ckpt_KeepTrainDisc_epoch_'+str(epoch+1)+ '_SEED_' + str(args.seed) + '_nSim_' + str(nSim) + '_epochGAN_' + str(epoch_GAN)
torch.save({
'net_state_dict': netD.state_dict(),
}, filename_tmp)
#end for epoch
# save model
torch.save({
'net_state_dict': netD.state_dict(),
}, Filename_KeepTrain_Disc)
del batch_train_samples, batch_train_labels, cal_dataset, cal_dataloader; gc.collect()
torch.cuda.empty_cache()
else:
checkpoint = torch.load(Filename_KeepTrain_Disc)
netD.load_state_dict(checkpoint['net_state_dict'])
# def comp_density_ratio(imgs, netD):
def comp_density_ratio(samples):
#samples: a numpy array
n_samples = samples.shape[0]
batch_size_tmp = 1000
dataset_tmp = custom_dataset(samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
data_iter = iter(dataloader_tmp)
density_ratios = np.zeros((n_samples+batch_size_tmp, 1))
# print("\n Begin computing density ratio for images >>")
netD.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples, _ = next(data_iter)
batch_samples = batch_samples.type(torch.float).to(device)
disc_probs = netD(batch_samples).cpu().detach().numpy().reshape((-1,1))
disc_probs = np.clip(disc_probs.astype(float), 1e-14, 1 - 1e-14)
density_ratios[tmp:(tmp+batch_size_tmp)] = np.divide(disc_probs, 1-disc_probs)
tmp += batch_size_tmp
#end while
# print("\n End computing density ratio.")
return density_ratios[0:n_samples]
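# The three discriminator-based variants above all rest on the same identity: for the
# optimal discriminator D*(x) = p_r(x)/(p_r(x)+p_g(x)), the density ratio is
# D*/(1-D*) = p_r/p_g, and log(D/(1-D)) recovers the pre-sigmoid score used for the
# MH-GAN calibration. A quick numerical check of both facts (illustrative only):
def _check_disc_ratio_identities(pr=0.3, pg=0.1, score=1.7):
    d_star = pr / (pr + pg)
    assert abs(d_star / (1.0 - d_star) - pr / pg) < 1e-12      # D/(1-D) equals p_r/p_g
    d = 1.0 / (1.0 + np.exp(-score))                           # sigmoid output of the disc
    assert abs(np.log(d / (1.0 - d)) - score) < 1e-12          # logit undoes the sigmoid
    return d_star / (1.0 - d_star)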
###############################################################################
# Fitting Mixture Gaussians to estimate p_g
###############################################################################
if args.DRE=="GT" or args.compute_dre_err or args.compute_disc_err:
#---------------------------------------------------------------------------
# Select the optimal gmm_ncomp by minimizing BIC
if gmm_ncomp_nsim is None:
print("\n Select the optimal ncomp by minimizing BIC >>>")
fake_samples = fn_sampleGAN(gmm_nfake, samp_batch_size) #draw many samples from GAN directly
lowest_bic = np.infty
gmm_ncomp = gmm_ncomp_grid[0]
bic = []
for n_components in gmm_ncomp_grid:
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components)
gmm.fit(fake_samples)
bic.append(gmm.bic(fake_samples))
print("ncomp: %d; bic: %f; lowest bic: %f" % (n_components, bic[-1], lowest_bic))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
gmm_ncomp = n_components
print("\n The optimal gmm_ncomp is %d with BIC %f" % (gmm_ncomp, lowest_bic))
# fit gmm on all training data
p_g_gmm = mixture.GaussianMixture(n_components=gmm_ncomp)
p_g_gmm.fit(fake_samples)
gmm_filename = save_objects_folder + "/fitted_gmm_nfake_" + str(gmm_nfake) + "_ncomp_" + str(gmm_ncomp) + "_epochGAN_" + str(epoch_GAN) + "_nSim_" + str(nSim) + ".pkl"
with open(gmm_filename, 'wb') as f:
pickle.dump(p_g_gmm, f)
del fake_samples; gc.collect()
#---------------------------------------------------------------------------
# Fit Mixture Gaussians with specified gmm_ncomp
else:
gmm_ncomp = gmm_ncomp_nsim[nSim]
gmm_filename = save_objects_folder + "/fitted_gmm_nfake_" + str(gmm_nfake) + "_ncomp_" + str(gmm_ncomp) + "_epochGAN_" + str(epoch_GAN) + "_nSim_" + str(nSim) + ".pkl"
if not os.path.isfile(gmm_filename):
fake_samples = fn_sampleGAN(gmm_nfake, samp_batch_size) #draw many samples from GAN directly
p_g_gmm = mixture.GaussianMixture(n_components=gmm_ncomp)
p_g_gmm.fit(fake_samples)
with open(gmm_filename, 'wb') as f:
pickle.dump(p_g_gmm, f)
else:
with open(gmm_filename, 'rb') as f:
p_g_gmm = pickle.load(f)
# generate NFAKE samples from fitted gmm
filename_visual_gmm = save_images_folder + "/sample_gmm_nfake_" + str(args.NTEST) + "_ncomp_" + str(gmm_ncomp) + "_epochGAN_" + str(epoch_GAN) + "_nSim_" + str(nSim) + ".pdf"
gmm_fake_samples = p_g_gmm.sample(args.NTEST)[0]
ScatterPoints(test_samples_tar, gmm_fake_samples, filename_visual_gmm, plot_real_samples = False)
# ## DEBUG:
# fake_samples = fn_sampleGAN(10000, samp_batch_size)
# out = np.exp(p_g_gmm.score_samples(fake_samples[0:10]))
# print(out)
# function for evaluating the fitted GMM on data X
def gmm_eval(X):
return p_g_gmm.score_samples(X)
def gmm_eval_parallel(X):
pool = multiprocessing.Pool(processes=cores)
nsamples_eval = len(X)
batch_size = nsamples_eval//cores
batch_size_left = nsamples_eval-batch_size*cores
## split data into chunks
chunks = []
total_got = 0; flag=1
while total_got<nsamples_eval:
if flag<cores:
chunks.append(X[total_got:(total_got+batch_size)])
total_got += batch_size
else:
chunks.append(X[total_got:(total_got+batch_size+batch_size_left)])
total_got += batch_size+batch_size_left
flag+=1
gmm_testscores = []
for result in pool.imap(gmm_eval, chunks):
gmm_testscores.extend(list(result))
gmm_testscores = np.array(gmm_testscores)
del chunks; gc.collect()
pool.close()
return gmm_testscores
#---------------------------------------------------------------------------
# Compare each DRE method with the ground truth (p_r/p_g, where p_g is estimated by the fitted GMM);
if args.DRE not in ["None", "GT"] and args.compute_dre_err:
dre_errors_comp_truedensityratios_filename = save_objects_folder + "/dre_error_comp_truedensityratios_epochGAN_" + str(epoch_GAN) + "_nfake_" + str(NFAKE) + "_gmm_ncomp_" + str(gmm_ncomp) + "_nSim_" + str(nSim) + ".pkl"
dre_errors_comp_fakesamples_filename = save_objects_folder + "/dre_error_comp_fakesamples_epochGAN_" + str(epoch_GAN) + "_nSim_" + str(nSim) + ".pkl"
#evaluate the optimal discriminator on NFAKE fake images
if not os.path.isfile(dre_errors_comp_fakesamples_filename):
fake_samples = fn_sampleGAN(NFAKE, samp_batch_size) #draw many samples from GAN directly
with open(dre_errors_comp_fakesamples_filename, "wb") as file:
pickle.dump(fake_samples, file)
else:
with open(dre_errors_comp_fakesamples_filename, "rb") as file:
fake_samples = pickle.load(file)
if not os.path.isfile(dre_errors_comp_truedensityratios_filename):
gmm_fakescore = gmm_eval_parallel(fake_samples)
gmm_testscore = gmm_eval_parallel(test_samples_tar)
fake_densityratios_true = np.divide(p_r(fake_samples), np.exp(gmm_fakescore)+1e-20)
test_densityratios_true = np.divide(p_r(test_samples_tar), np.exp(gmm_testscore)+1e-20)
with open(dre_errors_comp_truedensityratios_filename, "wb") as file:
temp_densityratios_true = {"fake_densityratios_true":fake_densityratios_true, "test_densityratios_true":test_densityratios_true}
pickle.dump(temp_densityratios_true, file)
del temp_densityratios_true; gc.collect()
else:
with open(dre_errors_comp_truedensityratios_filename, "rb") as file:
temp_densityratios_true = pickle.load(file)
fake_densityratios_true = temp_densityratios_true["fake_densityratios_true"]
test_densityratios_true = temp_densityratios_true["test_densityratios_true"]
del temp_densityratios_true; gc.collect()
fake_densityratios_esti = comp_density_ratio(fake_samples)
test_densityratios_esti = comp_density_ratio(test_samples_tar)
# # DEBUG
# temp_filename = save_objects_folder + "/sum_dr_versus_epochs.csv"
# with open(temp_filename, 'a') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow([args.epoch_DRE, np.sum(fake_densityratios_true), np.sum(fake_densityratios_esti)])
# deno_true = np.sum(fake_densityratios_true) #sum
# deno_esti = np.sum(fake_densityratios_esti) #sum
deno_true = 1 #no standarization
deno_esti = 1
dre_errors_all[nSim] = np.sqrt(np.mean((fake_densityratios_true/deno_true - fake_densityratios_esti/deno_esti)**2))
l2_dis_fake_samples = np.zeros((NFAKE, n_comp_tar)) #l2 distance between a fake sample and a mode
for i in tqdm(range(NFAKE)):
for j in range(n_comp_tar):
l2_dis_fake_samples[i,j] = np.sqrt(np.sum((fake_samples[i]-means_tar[j])**2))
min_l2_dis_fake_samples = np.min(l2_dis_fake_samples, axis=1)
index_hq = np.where(min_l2_dis_fake_samples<quality_threshold)[0]
index_lq = np.where(min_l2_dis_fake_samples>=quality_threshold)[0]
dre_errors_hq[nSim] = np.sqrt(np.mean((fake_densityratios_true[index_hq]/deno_true - fake_densityratios_esti[index_hq]/deno_esti)**2))
dre_errors_lq[nSim] = np.sqrt(np.mean((fake_densityratios_true[index_lq]/deno_true - fake_densityratios_esti[index_lq]/deno_esti)**2))
# print("True avg. density ratios, HQ %f, LQ %f" % (np.mean(fake_densityratios_true[index_hq]), np.mean(fake_densityratios_true[index_lq])))
# print("Esti avg. density ratios, HQ %f, LQ %f" % (np.mean(fake_densityratios_esti[index_hq]), np.mean(fake_densityratios_esti[index_lq])))
## estimated/true density ratios of testing samples, NFAKE fake samples, HQ fake samples, LQ fake samples
''' mean of density ratios on 10K real/fake samples '''
esti_avg_densityratio[nSim] = np.array([np.mean(test_densityratios_esti), np.mean(fake_densityratios_esti), np.mean(fake_densityratios_esti[index_hq]), np.mean(fake_densityratios_esti[index_lq])])
true_avg_densityratio[nSim] = np.array([np.mean(test_densityratios_true), np.mean(fake_densityratios_true), np.mean(fake_densityratios_true[index_hq]), np.mean(fake_densityratios_true[index_lq])])
# deno_test_esti = np.sum(test_densityratios_esti); deno_test_true = np.sum(test_densityratios_true)
# deno_fake_esti = np.sum(fake_densityratios_esti); deno_fake_true = np.sum(fake_densityratios_true)
# esti_avg_densityratio[nSim] = np.array([np.mean(test_densityratios_esti/deno_test_esti), np.mean(fake_densityratios_esti/deno_fake_esti), np.mean(fake_densityratios_esti[index_hq]/deno_fake_esti), np.mean(fake_densityratios_esti[index_lq]/deno_fake_esti)])
# true_avg_densityratio[nSim] = np.array([np.mean(test_densityratios_true/deno_test_true), np.mean(fake_densityratios_true/deno_fake_true), np.mean(fake_densityratios_true[index_hq]/deno_fake_true), np.mean(fake_densityratios_true[index_lq]/deno_fake_true)])
#-----------------------------------------------------------------------------
# compute the distance between the disc and its optimality
if args.DRE not in ["None", "GT"] and args.compute_disc_err:
disc_errors_comp_truediscscores_filename = save_objects_folder + "/disc_error_comp_truediscscores_epochGAN_" + str(epoch_GAN) + "_nfake_" + str(NFAKE) + "_gmm_ncomp_" + str(gmm_ncomp) + "_nSim_" + str(nSim) + ".pkl"
disc_errors_comp_fakesamples_filename = save_objects_folder + "/disc_error_comp_fakesamples_epochGAN_" + str(epoch_GAN) + "_nSim_" + str(nSim) + ".pkl"
#evaluate the optimal discriminator on NFAKE fake images
if not os.path.isfile(disc_errors_comp_fakesamples_filename):
fake_samples = fn_sampleGAN(NFAKE, samp_batch_size) #draw many samples from GAN directly
with open(disc_errors_comp_fakesamples_filename, "wb") as file:
pickle.dump(fake_samples, file)
else:
with open(disc_errors_comp_fakesamples_filename, "rb") as file:
fake_samples = pickle.load(file)
if not os.path.isfile(disc_errors_comp_truediscscores_filename):
gmm_fakescore = gmm_eval_parallel(fake_samples)
fake_disc_true = np.divide(p_r(fake_samples), p_r(fake_samples) + np.exp(gmm_fakescore))
with open(disc_errors_comp_truediscscores_filename, "wb") as file:
pickle.dump(fake_disc_true, file)
else:
with open(disc_errors_comp_truediscscores_filename, "rb") as file:
fake_disc_true = pickle.load(file)
n_samples = fake_samples.shape[0]
batch_size_tmp = 1000
dataset_tmp = custom_dataset(fake_samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
data_iter = iter(dataloader_tmp)
fake_disc_esti = np.zeros((n_samples+batch_size_tmp, 1))
# print("\n Begin computing density ratio for images >>")
netD.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples, _ = next(data_iter)
batch_samples = batch_samples.type(torch.float).to(device)
disc_probs = netD(batch_samples).cpu().detach().numpy()
# disc_probs = np.clip(disc_probs.astype(np.float), 1e-14, 1 - 1e-14)
fake_disc_esti[tmp:(tmp+batch_size_tmp)] = disc_probs
tmp += batch_size_tmp
#end while
fake_disc_esti = (fake_disc_esti[0:n_samples]).reshape(-1)
disc_errors_all[nSim] = np.sqrt(np.mean((fake_disc_true - fake_disc_esti)**2))
#-----------------------------------------------------------------------------
# Ground truth DRE
if args.DRE=="GT":
def comp_density_ratio(samples):
return np.divide(p_r(samples), np.exp(gmm_eval_parallel(samples))+1e-20)
###############################################################################
# Different sampling method
###############################################################################
##########################################
# Rejection Sampling: "Discriminator Rejection Sampling"; based on https://github.com/shinseung428/DRS_Tensorflow/blob/master/config.py
if args.Sampling == "RS":
def fn_enhanceSampler(nfake, batch_size=samp_batch_size):
## Burn-in Stage
n_burnin = 50000
burnin_samples = fn_sampleGAN(n_burnin, batch_size=samp_batch_size)
burnin_densityratios = comp_density_ratio(burnin_samples)
print((burnin_densityratios.min(),np.median(burnin_densityratios),burnin_densityratios.max()))
M_bar = np.max(burnin_densityratios)
del burnin_samples, burnin_densityratios; gc.collect()
torch.cuda.empty_cache()
## Burn-in with real images
burnin_densityratios2 = comp_density_ratio(train_samples_tar)
print((burnin_densityratios2.min(),np.median(burnin_densityratios2),burnin_densityratios2.max()))
# M_bar = np.max([np.max(burnin_densityratios),M_bar])
## Rejection sampling
enhanced_samples = np.zeros((1, n_features)) #initialize
pb = SimpleProgressBar()
num_samples = 0
while num_samples < nfake:
batch_samples = fn_sampleGAN(batch_size, batch_size)
batch_ratios = comp_density_ratio(batch_samples)
M_bar = np.max([M_bar, np.max(batch_ratios)])
#threshold
if args.DRE in ["disc", "disc_MHcal","disc_KeepTrain"]:
epsilon_tmp = 1e-8
D_tilde_M = np.log(M_bar)
batch_F = np.log(batch_ratios) - D_tilde_M - np.log(1-np.exp(np.log(batch_ratios)-D_tilde_M-epsilon_tmp))
gamma_tmp = np.percentile(batch_F, 95) #95th percentile of each batch; following DRS's setting
batch_F_hat = batch_F - gamma_tmp
batch_p = 1/(1+np.exp(-batch_F_hat))
else:
batch_p = batch_ratios/M_bar
batch_psi = np.random.uniform(size=batch_size).reshape(-1,1)
indx_accept = np.where((batch_psi<=batch_p)==True)[0]
if len(indx_accept)>0:
enhanced_samples = np.concatenate((enhanced_samples, batch_samples[indx_accept]))
num_samples=len(enhanced_samples)-1
del batch_samples, batch_ratios; gc.collect()
torch.cuda.empty_cache()
pb.update(np.min([float(num_samples)*100/nfake, 100]))
return enhanced_samples[1:(nfake+1)] #remove the first all zero array
##########################################
# MCMC, Metropolis-Hastings algorithm: MH-GAN
elif args.Sampling == "MH":
# trainloader_MH = torch.utils.data.DataLoader(train_samples_tar, batch_size=samp_batch_size, shuffle=True)
trainloader_MH = torch.utils.data.DataLoader(test_samples_tar, batch_size=samp_batch_size, shuffle=True)
def fn_enhanceSampler(nfake, batch_size=samp_batch_size):
# enhanced_samples = np.zeros((1, n_features)) #initialize
enhanced_samples = np.zeros((nfake, n_features)) #initialize
num_samples = 0
while num_samples < nfake:
data_iter = iter(trainloader_MH)
batch_samples_new = next(data_iter)
batch_samples_new = batch_samples_new.cpu().detach().numpy()
batch_update_flags = np.zeros(batch_size) #if a sample in a batch is updated during MH, replace corresponding entry with 1
for k in tqdm(range(MH_K)):
if not MH_mute:
print((k, num_samples))
batch_samples_old = fn_sampleGAN(batch_size, batch_size)
batch_U = np.random.uniform(size=batch_size).reshape(-1,1)
batch_ratios_old = comp_density_ratio(batch_samples_old)
batch_ratios_new = comp_density_ratio(batch_samples_new)
# print(np.concatenate((batch_ratios_old[0:10], batch_ratios_new[0:10]), axis=1))
batch_p = batch_ratios_old/(batch_ratios_new+1e-14)
batch_p[batch_p>1]=1
indx_accept = np.where((batch_U<=batch_p)==True)[0]
if len(indx_accept)>0:
batch_samples_new[indx_accept] = batch_samples_old[indx_accept]
batch_update_flags[indx_accept] = 1 #if a sample in a batch is updated during MH, replace corresponding entry with 1
indx_updated = np.where(batch_update_flags==1)[0]
# enhanced_samples = np.concatenate((enhanced_samples, batch_samples_new[indx_updated]))
# num_samples=len(enhanced_samples)-1
n_take = min(len(indx_updated), nfake - num_samples) #do not overshoot the pre-allocated array
enhanced_samples[num_samples:(num_samples+n_take)] = batch_samples_new[indx_updated[:n_take]]
num_samples += n_take
del batch_samples_new, batch_samples_old; gc.collect()
torch.cuda.empty_cache()
# return enhanced_samples[1:(nfake+1)] #remove the first all zero array
return enhanced_samples #pre-allocated with exactly nfake rows
##########################################
# Sampling-Importance Resampling
elif args.Sampling == "SIR":
def fn_enhanceSampler(nfake, batch_size=samp_batch_size):
enhanced_samples = fn_sampleGAN(nfake*10, batch_size)
enhanced_ratios = comp_density_ratio(enhanced_samples)
weights = enhanced_ratios / np.sum(enhanced_ratios) #normalize so the weights sum to 1
resampl_indx = np.random.choice(a = np.arange(len(weights)), size = nfake, replace = True, p = weights.reshape(weights.shape[0]))
enhanced_samples = enhanced_samples[resampl_indx]
return enhanced_samples
###############################################################################
# Hyper-parameter selection
###############################################################################
#compute average density ratio on validation set; for selecting lambda
if (args.DRE !="None" or args.Sampling != "None") and args.DRE != "GT":
valid_densityratios = comp_density_ratio(valid_samples_tar)
valid_densityratios_all.extend(list(valid_densityratios))
train_densityratios = comp_density_ratio(train_samples_tar)
train_densityratios_all.extend(list(train_densityratios))
ks_test = ks_2samp(train_densityratios.reshape(-1), valid_densityratios.reshape(-1))
ks_test_results[nSim,0] = ks_test.statistic
ks_test_results[nSim,1] = ks_test.pvalue
###############################################################################
# Evaluation each subsampling method
###############################################################################
if args.DRE == "None" or args.Sampling == "None":
print("Directly sampling from GAN >>>")
fake_samples = fn_sampleGAN(NFAKE, samp_batch_size) #fake samples drawn directly from the GAN (no subsampling)
else:
print("Start enhanced sampling >>>")
fake_samples = fn_enhanceSampler(NFAKE, batch_size=samp_batch_size)
#-------------------------------------------
# Visualization
if args.show_visualization:
if args.DRE in ['DRE_BARR', 'DRE_SP']:
filename = save_images_folder + '/'+ args.DRE + "+" + args.Sampling + "_epochGAN_" + str(args.epoch_gan) + "_epochDRE_" + str(args.epoch_DRE) + "_lambda_" + str(args.lambda_DRE)+ "_PreNetDRE_" + str(args.TrainPreNetDRE) + "_" + args.DR_final_ActFn + '_nSim_' + str(nSim) +'.pdf'
elif args.DRE in ['DRE_uLSIF', 'DRE_DSKL']:
filename = save_images_folder + '/' + args.DRE + "+" + args.Sampling + "_epochGAN_" + str(args.epoch_gan) + "_epochDRE_" + str(args.epoch_DRE) + "_PreNetDRE_" + str(args.TrainPreNetDRE) + "_" + args.DR_final_ActFn + '_nSim_' + str(nSim) + '.pdf'
elif args.DRE == "GT":
filename = save_images_folder + '/' + args.DRE + "+" + args.Sampling + "_gmm_ncomp_" + str(gmm_ncomp) + "_epochGAN_" + str(args.epoch_gan) + '_nSim_' + str(nSim) + '.pdf'
else:
filename = save_images_folder + '/' + args.DRE + "+" + args.Sampling + "_epochGAN_" + str(args.epoch_gan) + '_nSim_' + str(nSim) + '.pdf'
ScatterPoints(test_samples_tar, fake_samples, filename, plot_real_samples = False)
#-------------------------------------------
# Compute number of recovered modes and number of good fake samples
l2_dis_fake_samples = np.zeros((NFAKE, n_comp_tar)) #l2 distance between a fake sample and a mode
for i in tqdm(range(NFAKE)):
for j in range(n_comp_tar):
l2_dis_fake_samples[i,j] = np.sqrt(np.sum((fake_samples[i]-means_tar[j])**2))
min_l2_dis_fake_samples = np.min(l2_dis_fake_samples, axis=1)
indx_closest_modes = np.argmin(l2_dis_fake_samples, axis=1) #index of the closest mode (among the 25) for each sample
prop_good_samples[nSim] = sum(min_l2_dis_fake_samples<quality_threshold)/NFAKE*100 #proportion of good fake samples
prop_recovered_modes[nSim] = len(list(set(indx_closest_modes)))/n_comp_tar*100
#-------------------------------------------
# Check how many fake samples actually appear in the training set
train_samples_tar = np.round(train_samples_tar,decimals=6)
fake_samples = np.round(fake_samples,decimals=6)
for i in fake_samples:
if np.any(np.all(train_samples_tar == i, axis=1)): #exact row match
nfake_in_train[nSim]+=1
# end for nSim
stop = timeit.default_timer()
print("\n Time elapses: {}s".format(stop - start))
if args.DRE != "None" or args.Sampling != "None":
train_densityratios_all = np.array(train_densityratios_all)
#print("Avg/Med/STD of density ratio on training set over %d Sims: %.3f, %.3f, %.3f" % (args.NSIM, np.median(train_densityratios_all), np.mean(train_densityratios_all), np.std(train_densityratios_all)))
valid_densityratios_all = np.array(valid_densityratios_all)
#print("Avg/Med/STD of density ratio on validation set over %d Sims: %.3f, %.3f, %.3f" % (args.NSIM, np.median(valid_densityratios_all), np.mean(valid_densityratios_all), np.std(valid_densityratios_all)))
#print("Kolmogorov-Smirnov test in %d Sims: avg. stat. %.3E, avg. pval %.3E" % (args.NSIM, np.mean(ks_test_results[:,0]), np.mean(ks_test_results[:,1])))
print("\n KS test resutls (test_stat, p_value) >>>\n")
print(ks_test_results)
if args.DRE not in ["None", "GT"] and args.compute_dre_err:
#print("\n DRE errors on the test set in %d rounds >>>" % args.NSIM)
#print(dre_errors_all)
print("\n Avg. DRE erros: %e" % np.mean(dre_errors_all))
print("\n Avg. HQ DRE erros: %e" % np.mean(dre_errors_hq))
print("\n Avg. LQ DRE erros: %e" % np.mean(dre_errors_lq))
temp_filename = save_objects_folder + "/dre_errors_comp_" + args.DRE + ".csv"
with open(temp_filename, 'a') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow([args.epoch_DRE, np.mean(dre_errors_all)])
import os
import scipy.io
import scipy.ndimage
import numpy as np
from PIL import Image
def label2d_array_nn_scaling(label2d, new_h, new_w):
"""
implement nearest neighbor scaling for 2d array
:param label2d: [H, W]
:return: label_new: [new_h, new_w]
"""
scale_h = new_h / label2d.shape[0]
scale_w = new_w / label2d.shape[1]
label_new = scipy.ndimage.zoom(label2d, zoom=[scale_h, scale_w], order=0)
return label_new
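# Usage sketch (illustrative only, not called by the pipeline): upscale a tiny 3x3 label
# map to 6x6; nearest-neighbour zoom keeps the output values inside the original label set.
def _demo_label2d_scaling():
    example_labels = np.array([[0, 1, 1], [2, 2, 0], [0, 0, 3]])
    upscaled = label2d_array_nn_scaling(example_labels, 6, 6)
    assert upscaled.shape == (6, 6)
    assert set(np.unique(upscaled)) <= set(np.unique(example_labels))
    return upscaled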
def generate_edgelist(image_id, data_type, sketchyscene_base_dir, edgelist_base_dir):
assert data_type in ['test', 'val']
drawing_base_path = os.path.join(sketchyscene_base_dir, data_type, 'DRAWING_GT')
edgelist_mat_path = os.path.join(edgelist_base_dir, data_type, 'edgelist_' + str(image_id) + '.mat')
edge_lists = scipy.io.loadmat(edgelist_mat_path)['labelededgeim']
edge_lists = np.array(edge_lists, dtype=np.float32) # (750, 750), [0, nEdges]
image_path = os.path.join(drawing_base_path, 'L0_sample' + str(image_id) + '.png')
sketch = Image.open(image_path).convert('RGB')
sketch_mask = np.array(sketch, dtype=np.uint8)[:, :, 0]
sketch_edge_list = np.zeros(sketch_mask.shape, dtype=np.float32)
for i in range(sketch_mask.shape[0]):
for j in range(sketch_mask.shape[1]):
if sketch_mask[i][j] != 0:
continue
# foreach black pixel, find the nearest edgelist label
pixel_edge_label = []
neighbor = 0
while True:
neighbor += 1
for m in range(-neighbor, neighbor + 1):
for n in range(-neighbor, neighbor + 1):
pos_y = min(max(i + m, 0), sketch_mask.shape[0] - 1)
pos_x = min(max(j + n, 0), sketch_mask.shape[1] - 1)
if abs(pos_y - i) == neighbor or abs(pos_x - j) == neighbor:
if edge_lists[pos_y][pos_x] != 0:
pixel_edge_label.append(edge_lists[pos_y][pos_x])
if len(pixel_edge_label) != 0:
break
assert len(pixel_edge_label) != 0
if len(pixel_edge_label) > 0:
if j - 1 >= 0 and sketch_edge_list[i][j - 1] in pixel_edge_label:
sketch_edge_list[i][j] = sketch_edge_list[i][j - 1]
elif i - 1 >= 0 and sketch_edge_list[i - 1][j] in pixel_edge_label:
sketch_edge_list[i][j] = sketch_edge_list[i - 1][j]
else:
sketch_edge_list[i][j] = pixel_edge_label[0]
else:
sketch_edge_list[i][j] = pixel_edge_label[0]
return sketch_edge_list
def refine_mask_with_edgelist(image_id, dataset_type, data_base_dir, edgelist_result_dir,
origin_mask, ori_boxes, pixels_percent_threshold=0.5):
"""
very slow!!
:param origin_mask: (768, 768, nRoIs)
:param ori_boxes: (nRoIs, (y1, x1, y2, x2))
:param pixels_percent_threshold: only segments with a pixels_percent above this threshold
are regarded as the same segment
:return:
"""
## first generate edgelist
edgelist_label = generate_edgelist(image_id, dataset_type, data_base_dir, edgelist_result_dir) # (750, 750)
edgelist_label = label2d_array_nn_scaling(edgelist_label,
origin_mask.shape[0], origin_mask.shape[1]) # (768, 768)
nEdgeList = int(np.max(edgelist_label))
print('nEdgeList', nEdgeList)
ori_mask = np.transpose(origin_mask, [2, 0, 1]) # (nRoIs, 768, 768)
refined_mask = []
for i in range(ori_mask.shape[0]):
## foreach ROI
single_mask = ori_mask[i]
single_mask_new = single_mask.copy()
y1, x1, y2, x2 = ori_boxes[i]
single_edgelist = edgelist_label.copy()
box_bin_mask = np.zeros(single_edgelist.shape, dtype=np.uint8)
import pytest
import numpy as np
from jellium.energy import energy
from numpy.testing import assert_array_less
from jellium.mcmc import mcmc
from jellium.bridge import bridge
from jellium.utils import d_tor, torify
def test_mcmc():
seed = 42
np.random.seed(seed)
EPS = 1e-2
size = int(2e1)
beta = 2
pair_pot = lambda x: - beta * d_tor(x, size=size)
n_iter = int(1e3)
init_val = np.random.rand(size) * size - size/2
torify = lambda x: x - size * np.floor( (x/size+.5))
propose = lambda x: torify(x + np.random.randn(size))
#energy decrease factor
decrease_factor = 2
#size of rolling window for moving averages
window = 50
trace = mcmc(pair_pot, n_iter, init_val, propose, np.random.get_state())
#correct shape
assert trace.shape == (n_iter,) + init_val.shape
#values should change
assert_array_less(EPS, trace.var(axis = 0))
#energy must decrease
energy_trace = [energy(conf, pair_pot) for conf in trace]
assert np.mean(energy_trace[:window]) - np.mean(energy_trace[-window:]) > np.std(energy_trace[-window:]) * decrease_factor
def test_mcmc_dyn():
seed = 42
np.random.seed(seed)
state = np.random.get_state()
EPS = 1e-2
size = int(2e1)
beta = 2
steps = 20
pair_pot = lambda x: - beta/(steps + 1) * d_tor(x, size=size)
n_iter = int(3e3)
var = 1
states = [state for state,_ in [[np.random.get_state(), np.random.rand()] for _ in range(int(size/2))]]
init_start_pts = np.repeat((np.random.rand(int(size/2))-.5)[np.newaxis] * size, steps + 1, 0).transpose()
init_bridges = np.array([bridge(var, steps, state) for state in states])
init_val = init_start_pts + init_bridges
torify = lambda x: x - size * np.floor( (x/size+.5))
propose = lambda x: torify(np.repeat((x[:,0]+np.random.randn(x.shape[0]))[np.newaxis], steps + 1, 0).transpose()
+np.array([bridge(var, steps) for _ in range(x.shape[0])]))
#energy decrease factor
decrease_factor = 2
#size of rolling window for moving averages
window = 50
trace = mcmc(pair_pot, n_iter, init_val, propose, state)
#correct shape
assert trace.shape == (n_iter,) + init_val.shape
#values should change
assert_array_less(EPS, trace.var(axis = 0))
#energy must decrease
energy_trace = [energy(conf, pair_pot) for conf in trace]
assert np.mean(energy_trace[:window]) - np.mean(energy_trace[-window:]) > np.std(energy_trace[-window:]) * decrease_factor
def test_mcmc_dyn_dim2():
seed = 42
np.random.seed(seed)
state = np.random.get_state()
EPS = 1e-2
size = np.array([int(2e1), 2])
n_part = int(size[0]/2)
beta = 2
steps = 20
pair_pot = lambda x: - beta/(steps + 1) * d_tor(x, size=size)
n_iter = int(2e3)
var = 1
states = [state for state,_ in [[np.random.get_state(), np.random.rand()] for _ in range(n_part)]]
init_start_pts = np.expand_dims(((np.random.rand(n_part, 2) - .5) * size), -1)
init_start_pts = np.repeat(init_start_pts, steps + 1, -1)
init_bridges = np.array([[bridge(var, steps) for _ in range(2)] for _ in range(n_part)])
init_val = init_start_pts + init_bridges
propose = lambda x: torify(np.repeat(np.expand_dims(x[:,:,0] + np.random.randn(*(x[:,:,0].shape)), -1), steps + 1, -1)
+ np.array([[bridge(var, steps) for _ in range(2)] for _ in range(x.shape[0])]), size)
#energy decrease factor
decrease_factor = 2
#size of rolling window for moving averages
window = 50
trace = mcmc(pair_pot, n_iter, init_val, propose, state)
#correct shape
assert trace.shape == (n_iter,) + init_val.shape
#values should change
assert_array_less(EPS, trace.var(axis = 0))
#energy must decrease
energy_trace = [energy(conf, pair_pot) for conf in trace]
assert np.mean(energy_trace[:window]) - np.mean(energy_trace[-window:]) > np.std(energy_trace[-window:]) * decrease_factor
"""
Sampling history for MCMC.
MCMC keeps track of a number of things during sampling.
The results may be queried as follows::
draws, generation, thinning
sample(condition) returns draws, points, logp
logp() returns draws, logp
acceptance_rate() returns draws, AR
chains() returns draws, chains, logp
R_stat() returns draws, R
CR_weight() returns draws, CR_weight
best() returns best_x, best_logp
outliers() returns outliers
show()/save(file)/load(file)
Data is stored in circular arrays, which keeps the last N generations and
throws the rest away.
draws is the total number of draws from the sampler.
generation is the total number of generations.
thinning is the number of generations per stored sample.
draws[i] is the number of draws including those required to produce the
information in the corresponding return vector. Note that draw numbers
need not be linearly spaced, since techniques like delayed rejection
will result in a varying number of samples per generation.
logp[i] is the set of log likelihoods, one for each member of the population.
The logp() method returns the complete set, and the sample() method returns
a thinned set, with one element of logp[i] for each vector point[i, :].
AR[i] is the acceptance rate at generation i, showing the proportion of
proposed points which are accepted into the population.
chains[i, :, :] is the set of points in the differential evolution population
at thinned generation i. Ideally, the thinning rate of the MCMC process
is chosen so that thinned generations i and i+1 are independent samples
from the posterior distribution, though there is a chance that this may
not be the case, and indeed, some points in generation i+1 may be identical
to those in generation i. Actual generation number is i*thinning.
points[i, :] is the ith point in a returned sample. The i is just a place
holder; there is no inherent ordering to the sample once they have been
extracted from the chains. Note that the sample may be from a marginal
distribution.
R[i] is the Gelman R statistic measuring convergence of the Markov chain.
CR_weight[i] is the set of weights used for selecting between the crossover
ratios available to the candidate generation process of differential
evolution. These will be fixed early in the sampling, even when adaptive
differential evolution is selected.
outliers[i] is a vector containing the thinned generation number at which
an outlier chain was removed, the id of the chain that was removed and
the id of the chain that replaced it. We leave it to the reader to decide
if the cloned samples, point[:generation, :, removed_id], should be included
in further analysis.
best_logp is the highest log likelihood observed during the analysis and
best_x is the corresponding point at which it was observed.
generation is the last generation number
"""
#TODO: state should be collected in files as we go
from __future__ import division, print_function
__all__ = ['MCMCDraw', 'load_state', 'save_state']
import os.path
import re
import gzip
import numpy as np
from numpy import empty, sum, asarray, inf, argmax, hstack, dstack
from numpy import savetxt, reshape
from .outliers import identify_outliers
from .util import draw, rng
#EXT = ".mc.gz"
#CREATE = gzip.open
EXT = ".mc"
CREATE = open
# CRUFT: python 2.x needs to convert unicode to bytes when writing to file
try:
# python 2.x
unicode
def write(fid, s):
fid.write(s)
except NameError:
# python 3.x
def write(fid, s):
fid.write(s.encode('utf-8') if isinstance(s, str) else s)
class NoTrace:
def write(self, data):
pass
def flush(self):
pass
def close(self):
pass
def save_state(state, filename):
trace = NoTrace()
#trace = open(filename+"-trace.mc", "w")
write(trace, "starting trace\n")
# Build 2-D data structures
write(trace, "extracting draws, logp\n")
draws, logp = state.logp(full=True)
write(trace, "extracting acceptance rate\n")
_, AR = state.acceptance_rate()
write(trace, "building chain from draws, AR and logp\n")
chain = hstack((draws[:, None], AR[:, None], logp))
write(trace, "extracting point, logp\n")
_, point, logp = state.chains()
Nthin, Npop, Nvar = point.shape
write(trace, "shape is %d,%d,%d\n" % (Nthin, Npop, Nvar))
write(trace, "adding logp to point\n")
point = dstack((logp[:, :, None], point))
write(trace, "collapsing to draws x point\n")
point = reshape(point, (point.shape[0]*point.shape[1], point.shape[2]))
write(trace, "extracting R_stat\n")
draws, R_stat = state.R_stat()
write(trace, "extracting CR_weight\n")
_, CR_weight = state.CR_weight()
_, Ncr = CR_weight.shape
write(trace, "building stats\n")
stats = hstack((draws[:, None], R_stat, CR_weight))
#TODO: missing _outliers from save_state
# Write convergence info
write(trace, "writing chain\n")
fid = CREATE(filename+'-chain'+EXT, 'wb')
write(fid, '# draws acceptance_rate %d*logp\n' % Npop)
savetxt(fid, chain)
fid.close()
# Write point info
write(trace, "writing point\n")
fid = CREATE(filename+'-point'+EXT, 'wb')
write(fid, '# logp point (Nthin x Npop x Nvar = [%d,%d,%d])\n'
% (Nthin, Npop, Nvar))
savetxt(fid, point)
fid.close()
# Write stats
write(trace, "writing stats\n")
fid = CREATE(filename+'-stats'+EXT, 'wb')
write(fid, '# draws %d*R-stat %d*CR_weight\n' % (Nvar, Ncr))
savetxt(fid, stats)
fid.close()
write(trace, "done state save\n")
trace.close()
IND_PAT = re.compile('-1#IND')
INF_PAT = re.compile('1#INF')
def loadtxt(file, report=0):
"""
Like numpy loadtxt, but adapted for windows non-finite numbers.
"""
if not hasattr(file, 'readline'):
if file.endswith('.gz'):
#print("opening with gzip")
fh = gzip.open(file, 'r')
else:
fh = open(file, 'r')
else:
fh = file
res = []
section = 0
lineno = 0
for line in fh:
lineno += 1
if report and lineno%report == 0:
print("read", section*report)
section += 1
        line = IND_PAT.sub('nan', line)
        line = INF_PAT.sub('inf', line)
line = line.split('#')[0].strip()
values = line.split()
if len(values) > 0:
try:
res.append([float(v) for v in values])
except ValueError:
print("Parse error:", values)
if fh != file:
fh.close()
return asarray(res)
def path_contains_saved_state(filename):
chain_file = filename + '-chain' + EXT
return os.path.exists(chain_file)
def load_state(filename, skip=0, report=0, derived_vars=0):
# Read chain file
chain = loadtxt(filename+'-chain'+EXT)
# Read point file
fid = open(filename+'-point'+EXT, 'r')
line = fid.readline()
point_dims = line[line.find('[')+1:line.find(']')]
Nthin, Npop, Nvar = eval(point_dims)
for _ in range(skip*Npop):
fid.readline()
point = loadtxt(fid, report=report*Npop)
fid.close()
# Read stats file
stats = loadtxt(filename+'-stats'+EXT)
# Guess dimensions
Ngen = chain.shape[0]
thinning = 1
Nthin -= skip
Nupdate = stats.shape[0]
#Ncr = stats.shape[1] - Nvar - 1
# Create empty draw and fill it with loaded data
state = MCMCDraw(0, 0, 0, 0, 0, 0, thinning)
#print("gen, var, pop", Ngen, Nvar, Npop)
state.draws = Ngen * Npop
state.generation = Ngen
state._gen_index = 0
state._gen_draws = chain[:, 0]
state._gen_acceptance_rate = chain[:, 1]
state._gen_logp = chain[:, 2:]
state.thinning = thinning
state._thin_count = Ngen//thinning
state._thin_index = 0
state._thin_draws = state._gen_draws[(skip+1)*thinning-1::thinning]
state._thin_logp = point[:, 0].reshape((Nthin, Npop))
state._thin_point = reshape(point[:, 1:Nvar+1-derived_vars], (Nthin, Npop, -1))
state._gen_current = state._thin_point[-1].copy()
state._update_count = Nupdate
state._update_index = 0
state._update_draws = stats[:, 0]
state._update_R_stat = stats[:, 1:Nvar+1-derived_vars]
state._update_CR_weight = stats[:, Nvar+1-derived_vars:]
state._outliers = []
bestidx = np.argmax(point[:, 0])
state._best_logp = point[bestidx, 0]
state._best_x = point[bestidx, 1:Nvar+1-derived_vars]
return state
class MCMCDraw(object):
"""
"""
_labels = None
_integer_vars = None # boolean array of integer variables, or None
title = None
def __init__(self, Ngen, Nthin, Nupdate, Nvar, Npop, Ncr, thinning):
# Total number of draws so far
self.draws = 0
# Maximum observed likelihood
self._best_x = None
self._best_logp = -inf
# Per generation iteration
self.generation = 0
self._gen_index = 0
self._gen_draws = empty(Ngen, 'i')
self._gen_logp = empty((Ngen, Npop))
self._gen_acceptance_rate = empty(Ngen)
# If we are thinning, we need to keep the current generation
# separately. [Note: don't remember why we need both the _gen_*
# and _thin_*] [Note: the caller x vector is assigned to
# _gen_current; this may lead to unexpected behaviour if x is
        # changed by the caller.]
self._gen_current = None
# Per thinned generation iteration
self.thinning = thinning
self._thin_index = 0
self._thin_count = 0
self._thin_timer = 0
self._thin_draws = empty(Nthin, 'i')
self._thin_point = empty((Nthin, Npop, Nvar))
self._thin_logp = empty((Nthin, Npop))
# Per update iteration
self._update_index = 0
self._update_count = 0
self._update_draws = | empty(Nupdate, 'i') | numpy.empty |
# -*- coding: utf-8 -*-
"""
This is the element routine file in which the Jacobian, the strain-displacement
matrix, the elemental stiffness and the elemental F_int are computed.
The material routine is called in the F_e_int() function, which returns the
stress, the overstress and the material tangent stiffness.
@author: Danush
"""
import numpy as np
from material_routine import *
##############################################################################
# N- shape functions for the given problem
si = 0
N = np.array([(1/2)*(1-si),(1/2)*(1+si)]).reshape(2,1)
#computation of jacobian
def jacobian_of_elements(r_elements):
"""This function returns the Jacobian for the problem.
    Input---> r_elements, depending on the mesh generator
Output--> Jacobian"""
J = (r_elements[0,1] - r_elements[0,0])/2
return J
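# Hedged usage example (added for illustration, not part of the original routine):
# for a two-node element spanning r = [1.0, 3.0], the Jacobian is (3.0 - 1.0)/2 = 1.0.
def _jacobian_example():
    r_elem = np.array([[1.0, 3.0]])
    return jacobian_of_elements(r_elem)  # -> 1.0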
# B strain displacement matrix
def strain_disp_mat(r_elements):
"""This function returns the strain displacement matrix.
Input----> r_elements,N-shape functions
Output---> B-strain displacement matrix(2x2)"""
jacobian = jacobian_of_elements(r_elements)
B = np.array([-1/(2*jacobian),1/(2*jacobian),
N[0]/(N[0]*r_elements[0,0] + N[1]*r_elements[0,1]),
N[1]/(N[0]*r_elements[0,0] + N[1]*r_elements[0,1])]).reshape(2,2)
return B
# The no. of Gauss points for the problem is 1, so the quadrature weight w is assigned the value 2
# gauss points = 1
w = 2
# Elemental stiffness matrix
def k_e_mat(B,r,C_t,jacobian,w):
"""This function returns the elemental stiffness matrix
which is called inside F_e_int function.
Input----> B-strain disp.Mat,r-radius from mesh generator,
C_t-Tangent stiffness Mat.,jacobian,w
Output---> K_elemental"""
k = np.array([w *(B.transpose().dot(C_t.dot(B)))*(N.transpose().dot(r.transpose()))
* jacobian]).reshape(2,2)
return k
#Elemental Assignment matrix
def a_e_mat(n_e):
"""This function returns the elemental assignment matrix
for the required no. of elements.
Input-----> n_e no. of elements
Output----> Elemental Assignment matrix"""
B = | np.zeros((2,n_e+1)) | numpy.zeros |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############
# GPathFinder: Identification of ligand pathways by a multi-objective
# genetic algorithm
#
# https://github.com/insilichem/gpathfinder
#
# Copyright 2019 <NAME>, <NAME>,
# <NAME>, <NAME>,
# <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############
"""
Coherent Point Drift (affine and rigid) Python2/3 implementation,
adapted from `kwohlfahrt's <https://github.com/kwohlfahrt/coherent-point-drift>`_.
Only 3D points are supported in this version.
Depends on:
- Python 2.7, 3.4+
- Numpy
- Matplotlib (plotting only)
"""
from __future__ import division
import numpy as np
import math
from itertools import product, repeat, islice
from functools import reduce
def coherent_point_drift(X, Y, w=0.0, B=None, guess_steps=5, max_iterations=20,
method='affine'):
register, transform = {'affine': (affine_cpd, affine_xform),
'rigid': (rigid_cpd, rigid_xform)}[method]
best_xforms, best_transformed_Y, best_rmsd = None, None, 1000
for rotation in spaced_rotations(guess_steps):
RB = rotation_matrix(*rotation)
xforms = last(islice(register(X, Y, w, RB), max_iterations))
transformed_Y = transform(Y, *xforms)
rmsd = RMSD(X, transformed_Y)
if rmsd < best_rmsd:
best_rmsd = rmsd
best_xforms = xforms
best_transformed_Y = transformed_Y
return best_transformed_Y, best_xforms, best_rmsd
def rigid_cpd(X, Y, w=0.0, R=None):
D = X.shape[1]
N, M = len(X), len(Y)
R = np.eye(D) if R is None else R
s = std(X) / std(rigid_xform(Y, R=R))
t = X.mean(axis=0) - rigid_xform(Y, R=R, s=s).mean(axis=0)
sigma_sq = pairwise_sqdist(rigid_xform(Y, R, t, s), X).sum() / (D * M * N)
old_exceptions = np.seterr(divide='ignore', over='ignore', under='ignore',
invalid='raise')
while True:
try:
transformed_Y = rigid_xform(Y, R, t, s)
P, N_p, mu_x, mu_y, X_hat, Y_hat = common_steps(X, Y, transformed_Y,
w, sigma_sq)
except FloatingPointError:
np.seterr(**old_exceptions)
break
A = X_hat.T.dot(P.T).dot(Y_hat)
U, _, VT = np.linalg.svd(A)
C = np.eye(D)
C[-1, -1] = np.linalg.det(U.dot(VT))
R = U.dot(C).dot(VT)
s = np.trace(A.T.dot(R)) / np.trace(Y_hat.T.dot(np.diag(P.sum(axis=1))).dot(Y_hat))
t = mu_x - s * R.dot(mu_y)
sigma_sq = (np.trace(X_hat.T.dot(np.diag(P.T.sum(axis=1))).dot(X_hat))
- s * np.trace(A.T.dot(R))) / (N_p * D)
yield R, t, s
def affine_cpd(X, Y, w=0.0, B=None):
D = X.shape[1]
N = len(X)
M = len(Y)
B = np.eye(D) if B is None else B
s = (std(X) / std(affine_xform(Y, B=B))) # scale
B = s * B
t = X.mean(axis=0) - affine_xform(Y, B=B).mean(axis=0)
sigma_sq = pairwise_sqdist(affine_xform(Y, B, t), X).sum() / (D * M * N)
old_exceptions = np.seterr(divide='ignore', over='ignore', under='ignore',
invalid='raise')
while True:
try:
transformed_Y = affine_xform(Y, B, t)
P, N_p, mu_x, mu_y, X_hat, Y_hat = common_steps(X, Y, transformed_Y, w,
sigma_sq)
except FloatingPointError:
np.seterr(**old_exceptions)
break
B = (X_hat.T.dot(P.T).dot(Y_hat)
.dot(np.linalg.inv(Y_hat.T.dot(np.diag(P.sum(axis=1))).dot(Y_hat))))
t = mu_x - B.dot(mu_y)
sigma_sq = (np.trace(X_hat.T.dot(np.diag(P.T.sum(axis=1))).dot(X_hat))
- np.trace(X_hat.T.dot(P.T).dot(Y_hat).dot(B.T))) / (N_p * D)
yield B, t
def common_steps(X, Y, Y_, w, sigma_sq):
# E step
D = X.shape[1]
N = len(X)
M = len(Y_)
dist = pairwise_sqdist(Y_, X)
overlap = np.exp(-dist / (2 * sigma_sq))
# The original algorithm expects unit variance,
    # so normalize by (2*pi*sigma_sq)**(D/2) to compensate
P = overlap / (overlap.sum(axis=0)
+ (2 * math.pi * sigma_sq) ** (D / 2)
* w / (1-w) * M / N / std(X) ** D)
# M-step
N_p = P.sum()
mu_x = 1 / N_p * X.T.dot(P.T.sum(axis=1))
mu_y = 1 / N_p * Y.T.dot(P.sum(axis=1))
X_hat = X - mu_x.T
Y_hat = Y - mu_y.T
return P, N_p, mu_x, mu_y, X_hat, Y_hat
def last(sequence):
return reduce(lambda acc, x: x, sequence)
def std(x):
return np.sqrt(np.var(x, axis=0).mean())
def rigid_xform(X, R=np.array(1), t=0.0, s=1.0):
return s * R.dot(X.T).T + t
def affine_xform(X, B=np.array(1), t=0):
return B.dot(X.T).T + t
def spaced_rotations(N):
# <NAME>
# Graphics Gems III, pp 124-132
for x_theta in product(frange(0, 1, 1/N),
*repeat(frange(0, 2*math.pi, 2*math.pi/N), 2)):
X, theta = x_theta[0], x_theta[1:]
R0, R1 = math.sqrt(1-X), math.sqrt(X)
yield Quaternion(math.sin(theta[0]) * R0, math.cos(theta[0]) * R0,
math.sin(theta[1]) * R1, math.cos(theta[1]) * R1).axis_angle
def rotation_matrix(*angles):
theta, (x, y, z) = angles
c = math.cos(theta)
s = math.sin(theta)
return | np.array([[c+x*x*(1-c), x*y*(1-c)-z*s, (1-c)*x*z+y*s],
[y*x*(1-c)+z*s, c+y*y*(1-c), y*z*(1-c)-x*s],
[z*x*(1-c)-y*s, z*y*(1-c)+x*s, c+z*z*(1-c)]])
def pairwise_sqdist(X, Y) | numpy.array |
'''
This script contains helper functions used by networks that evaluate images.
If this script is run directly, it returns the object_dc score of each segmented object in the prediction with respect to the ground truth.
'''
import os
import skimage
import scipy
import numpy as np
import matplotlib.pyplot as plt
#####################################
# Plotting functions #
#####################################
def plot_img_and_hist(image, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
Source: https://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_equalize.html#sphx-glr-auto-examples-color-exposure-plot-equalize-py
"""
image = skimage.img_as_float(image)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(image, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = skimage.exposure.cumulative_distribution(image, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
def plot_img_and_segmentations(imgs_dict, names_list, color_list):
fig, axs = plt.subplots(1, len(names_list), figsize=(5 * len(names_list),5))
#plt.title('Visualization of data and prediction')
for ax, img_name, colormap in zip(axs, names_list, color_list):
pic = imgs_dict[img_name]
ax.imshow(pic, cmap=colormap)
ax.axis('off')
ax.set_title(img_name.capitalize())
plt.show()
return
def plot_img_and_segm_overlayed(img, msks_dict, msk_names_list, color_list, change_bg_color_list):
fig, axs = plt.subplots(len(msk_names_list), 1, figsize=(15, 15 * len(msk_names_list)))
for ax, msk_name, colormap, change_bg in zip(axs, msk_names_list, color_list, change_bg_color_list):
ax.imshow(img)
if change_bg:
overlay_mask = msks_dict[msk_name]
else:
overlay_mask = np.ma.masked_array(msks_dict[msk_name], msks_dict[msk_name] == 0)
ax.imshow(overlay_mask, colormap, alpha=0.5)
ax.axis('off')
ax.set_title(msk_name.capitalize())
plt.show()
def plot_segmentations_dice(imgs_dict, names_list, label_list):
fig, axs = plt.subplots(1, len(names_list), figsize=(len(names_list) * 10, 10))
handles = label_list
# plt.title('Visualization of data and prediction')
for ax, msk_name, in zip(axs, names_list):
pic = imgs_dict[msk_name]
ax.imshow(pic * 255)
ax.axis('off')
subtitle = msk_name + " comparison"
ax.set_title(subtitle.capitalize())
ax.legend(handles=handles)
plt.show()
return
####################################
# Metric, Micron extraction #
####################################
def dice_coeff_numpy(y_true, y_pred):
intersection = np.sum(y_true * y_pred)
score = (2 * intersection + 1.) / (y_true.sum() + y_pred.sum() + 1.)
return score
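# Hedged usage sketch (added for illustration, not part of the original script):
# the smoothed Dice score of two tiny binary masks. With intersection = 1,
# y_true.sum() = 2 and y_pred.sum() = 1, the score is (2*1 + 1)/(2 + 1 + 1) = 0.75.
def _dice_example():
    y_true = np.array([[1, 1], [0, 0]], dtype=np.float64)
    y_pred = np.array([[1, 0], [0, 0]], dtype=np.float64)
    return dice_coeff_numpy(y_true, y_pred)  # -> 0.75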
def get_micron_info(pathtofile, filename):
"""
Returns the pixel per micron ratio for x and y.
Works with .tif images from ImageJ
Parameters:
-----------
pathtofile: string
path of the folder where the file is in
filename: string
name of the file
Returns:
--------
(pix mic x, pix mic y) tuple
Tuple with the pixel per micron ratio for x and y
"""
# Load microns unit
with skimage.external.tifffile.TiffFile(os.path.join(pathtofile, filename)) as tif:
metadata = tif.info()
# Find info about pixels per micron
x_pos = metadata.find("* 282 x_resolution")
y_pos = metadata.find("* 283 y_resolution")
pixel_per_micron_x = float(metadata[x_pos + 25: x_pos + 32]) * 0.000001
pixel_per_micron_y = float(metadata[y_pos + 25: y_pos + 32]) * 0.000001
if pixel_per_micron_x != pixel_per_micron_y:
print("Error. The resolution in micron in x and y are different. ",
"Please check the image. If there is no error in the image, this has to be implemented!",
"get_micron_info will return nothing.")
return
return (pixel_per_micron_x, pixel_per_micron_y)
####################################
# Area analyis of images #
####################################
def get_zero_area_in_img(image, area_threshold=0.1):
"""
Finds the sliced away area in an image
Parameters:
-----------
image: array
with shape e.g. (1024, 1024, 3)
values in [0,1]
area_threshold: float
values in [0,1]
percentage of zero_area size necessary to define it as cropped_img_area
Returns:
--------
cropped_img_area: array
with same shape as image
values: True or False
"""
# Reduce image to grayscale image
grayscale_image = skimage.color.rgb2gray(image)
# Set all values which are 0 to 1 in a new array
cropped_img_area = np.zeros(grayscale_image.shape)
cropped_img_area[grayscale_image == 0] = 1
# Find connected components
labelled_image, count_image = scipy.ndimage.label(cropped_img_area)
refined_cropped_img_area = cropped_img_area.copy()
# Filter out all connected components with size smaller or equal area_threshold
for label in range(1, count_image + 1):
if len(refined_cropped_img_area[labelled_image == label]) <= area_threshold * cropped_img_area.size:
refined_cropped_img_area[labelled_image == label] = 0
# count_refined_mask -= 1
# Return a boolean array
final_cropped_img_area = np.array(refined_cropped_img_area > 0)
# Debug:
if np.max(final_cropped_img_area) > 0:
print("zero area in image detected")
print("Percentage of cropped area:", np.sum(final_cropped_img_area) / final_cropped_img_area.size)
return final_cropped_img_area
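# Hedged usage sketch (illustrative, not part of the original script): an RGB
# image whose left half is exactly zero is reported as cropped area, because the
# connected zero region exceeds the default 10% area threshold.
def _zero_area_example():
    img = np.ones((8, 8, 3))
    img[:, :4, :] = 0.0
    return get_zero_area_in_img(img)  # boolean mask, True on the left half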
def get_count_and_area(mask, filter_th, keep_only_largest_label=False, verbose=False):
labelled_mask, count_mask = scipy.ndimage.label(mask)
# Keep only the biggest connected component
if keep_only_largest_label:
refined_mask = mask.copy()
len_largest_label = 0
id_largest_label = 0
for label in range(1, count_mask + 1):
if len(refined_mask[labelled_mask == label]) > len_largest_label:
len_largest_label = len(refined_mask[labelled_mask == label])
id_largest_label = label
refined_mask[:] = 0
refined_mask[labelled_mask == id_largest_label] = 1
count_mask = 1
if verbose:
print(refined_mask.shape, refined_mask.min(), refined_mask.max())
print("Kept only the largest region and set count_mask to 1.")
else:
# count_refined_mask = count_mask
refined_mask = mask.copy()
# Filter out all connected components with size smaller or equal filter_th
for label in range(1, count_mask + 1):
if len(refined_mask[labelled_mask == label]) <= filter_th:
refined_mask[labelled_mask == label] = 0
# count_refined_mask -= 1
# refined_mask has to be relabeled now.
relabelled_mask, recounted_mask = scipy.ndimage.label(refined_mask)
if recounted_mask < count_mask and verbose:
print("Removed ", count_mask - recounted_mask, " regions because they are smaller or equal ", filter_th,
" pixels.")
filtered_mask = np.array(relabelled_mask > 0)
return relabelled_mask, recounted_mask, filtered_mask
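# Hedged usage sketch (illustrative, not part of the original script): a mask
# with a 4-pixel blob and an isolated pixel; with filter_th=1 the single pixel
# is filtered out and exactly one labelled region remains.
def _count_and_area_example():
    mask = np.zeros((5, 5))
    mask[0:2, 0:2] = 1
    mask[4, 4] = 1
    _, count, _ = get_count_and_area(mask, filter_th=1)
    return count  # -> 1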
def get_count_and_area_rmv_podo_outside(cfg, mask, filter_mask, filter_th, verbose=False):
# Outputs the labelled_mask, the mask_count and the filtered_mask
# The mask is labeled, then cropped by the filter_mask
# Afterwards, all labels which are contained in the mask are not removed in the labelled_mask
labelled_mask, count_mask = scipy.ndimage.label(mask)
if cfg.GLOM_POSTPROCESSING_KEEP_ONLY_LARGEST is True:
labeled_filter_mask, dataset_filter_mask_count, filtered_filter_mask = get_count_and_area\
(filter_mask, cfg.FILTER_CLASSES[0], keep_only_largest_label=True, verbose=verbose)
else:
labeled_filter_mask, dataset_filter_mask_count, filtered_filter_mask = get_count_and_area\
(filter_mask, cfg.FILTER_CLASSES[0], verbose=verbose)
labelled_mask_copy = labelled_mask.copy()
labelled_mask_copy2 = labelled_mask.copy()
labelled_mask_copy[filtered_filter_mask == 0] = 0
if verbose:
print(labelled_mask_copy.max(), labelled_mask_copy.min())
labels_not_cropped = np.unique(labelled_mask_copy)
labels_not_cropped = np.trim_zeros(labels_not_cropped)
if verbose:
print(labels_not_cropped)
final_mask = np.isin(labelled_mask_copy2, labels_not_cropped)
if verbose:
print(final_mask.max(), final_mask.min())
return get_count_and_area(final_mask, filter_th, verbose=verbose)
def image_to_label_image(img):
label, count = scipy.ndimage.label(img)
return label, count
def coregistrate_and_get_object_dc_score(label_pred, count_pred, label_mask, count_mask, verbose=0):
def dice_coeff_with_intersect_matrix(matrix, tensor):
intersection_matrices = matrix * tensor
intersection_sum_array = np.sum(intersection_matrices, axis=(1,2))
score_array = (2 * intersection_sum_array + 1.) / (np.sum(matrix) + np.sum(tensor, axis=(1,2)) + 1.)
return score_array, intersection_sum_array
def get_true_positives_and_false_negatives_all_cells():
true_positives = []
false_negatives = []
array_dim = label_pred.shape
prediction_array = np.empty((count_pred, array_dim[0], array_dim[1]))
score_arrays = np.zeros((count_mask, count_pred))
for i in range(count_pred):
prediction_array[i,:,:] = | np.array([label_pred == i+1]) | numpy.array |
import os
import random
import math
import numpy as np
import torch
import yaml
from tqdm import tqdm
from src.metric.bleu_scorer import SacreBLEUScorer
from src.decoding.beam_search import beam_search
from src.data.data_iterator import DataIterator
from src.data.dataset import TextLineDataset, ZipDataset
from src.data.vocabulary import Vocabulary, BOS, EOS, PAD
from src.models import build_model
from src.utils.bleu_util import count_gram
from src.utils.common_utils import *
from src.utils.logging import *
def set_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def load_model_parameters(path, map_location):
state_dict = torch.load(path, map_location=map_location)
if "model" in state_dict:
return state_dict["model"]
return state_dict
def split_shard(*inputs, split_size=1):
if split_size <= 1:
yield inputs
else:
        lengths = [len(s) for s in inputs[-1]]  # lengths of the last input stream
sorted_indices = np.argsort(lengths)
# sorting inputs
inputs = [
[inp[ii] for ii in sorted_indices]
for inp in inputs
]
# split shards
total_batch = sorted_indices.shape[0] # total number of batches
if split_size >= total_batch:
yield inputs
else:
shard_size = total_batch // split_size
_indices = list(range(total_batch))[::shard_size] + [total_batch]
for beg, end in zip(_indices[:-1], _indices[1:]):
yield (inp[beg:end] for inp in inputs)
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
"""
    Args:
        seqs_x: list of source token-id sequences to pad and batch.
        seqs_y: optional list of target token-id sequences.
        cuda ('bool'): whether to place the prepared batch on the GPU.
        batch_first ('bool'): if False, the batch dimension comes second.
    Returns:
        The padded batch built from seqs_x (and from seqs_y when it is given).
"""
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if batch_first is False:
x_np = | np.transpose(x_np, [1, 0]) | numpy.transpose |
'''
Unit-tests for SuffStatBag
'''
from bnpy.suffstats.SuffStatBag import SuffStatBag
import numpy as np
import unittest
class TestSuffStatBag(unittest.TestCase):
def shortDescription(self):
return None
def test_copy(self, K=2, D=2):
SS = self.makeSuffStatBagAndFillWithOnes(K,D)
self.addELBOtoSuffStatBag(SS,K)
SS2 = SS.copy()
assert SS2.s == SS.s
assert np.all(SS2.N == SS.N)
assert np.all(SS2.getELBOTerm('Elogz') == SS.getELBOTerm('Elogz'))
assert id(SS2.s) != id(SS.s)
assert id(SS2.N) != id(SS.N)
def test_ampFactor(self, K=2, D=2):
SS = self.makeSuffStatBagAndFillWithOnes(K,D)
SS.applyAmpFactor(3.0)
assert np.allclose(SS.s, 3.0)
assert np.allclose(SS.N, 3.0*np.ones(K))
assert np.allclose(SS.x, 3.0*np.ones((K,D)))
assert np.allclose(SS.xxT, 3.0*np.ones((K,D,D)))
assert SS.hasAmpFactor()
assert SS.ampF == 3.0
def makeSuffStatBagAndFillWithOnes(self, K, D):
SS = SuffStatBag(K=K, D=D)
s = 1.0
N = np.ones(K)
x = np.ones((K,D))
xxT = np.ones((K,D,D))
SS.setField('s', s)
SS.setField('N', N, dims='K')
SS.setField('x', x, dims=('K','D'))
SS.setField('xxT', xxT, dims=('K','D','D'))
return SS
def addELBOtoSuffStatBag(self, SS, K):
SS.setELBOTerm('Elogz', np.ones(K), dims='K')
SS.setELBOTerm('Econst', 1.0, dims=None)
SS.setMergeTerm('Elogz', 2*np.ones((K,K)), dims=('K','K'))
return SS
def getExpectedMergedFields(self, K, D, kA=0):
s = 1.0
N = np.ones( K-1)
N[kA] = 2
x = np.ones((K-1, D))
x[kA] = 2
xxT = np.ones((K-1,D,D))
xxT[kA] = 2
return s, N, x, xxT
def test_mergeComps_K2_D3_noELBO(self, K=2, D=3):
SS = self.makeSuffStatBagAndFillWithOnes(K=K, D=D)
SS.mergeComps(0, 1)
assert SS.K == K - 1
assert np.allclose(SS.s, 1)
assert np.allclose(SS.N, 2)
assert np.allclose(SS.x, 2*np.ones(D))
assert np.allclose(SS.xxT, 2*np.ones((D,D)))
def test_mergeComps_K2_D3_withELBO(self, K=2, D=3):
SS = self.makeSuffStatBagAndFillWithOnes(K, D)
self.addELBOtoSuffStatBag(SS, K)
SS.mergeComps(0, 1)
assert SS.K == K - 1
assert np.allclose(SS.s, 1)
assert np.allclose(SS.N, 2)
assert np.allclose(SS.x, 2*np.ones(D))
assert np.allclose(SS.xxT, 2*np.ones((D,D)))
assert np.allclose(SS.getELBOTerm('Elogz'), 2.0)
assert np.allclose(SS.getELBOTerm('Econst'), 1.0)
assert SS._ELBOTerms.K == K - 1
assert SS._MergeTerms.K == K - 1
def test_mergeComps_K5_D3_withELBO_kA0(self, K=5, D=3):
SS = self.makeSuffStatBagAndFillWithOnes(K, D)
self.addELBOtoSuffStatBag(SS, K)
SS.mergeComps(0, 1)
s, N, x, xxT = self.getExpectedMergedFields(K, D)
assert SS.K == K - 1
assert SS._ELBOTerms.K == K - 1
assert SS._MergeTerms.K == K - 1
assert np.allclose(SS.s, s)
assert | np.allclose(SS.N, N) | numpy.allclose |
import numpy as np
import lsst.geom
from lsst.afw.math import ChebyshevBoundedField, ChebyshevBoundedFieldControl
def get_approx_psf_size_and_shape(bbox, psf, wcs, ra, dec, nx=20, ny=20, orderx=2, ordery=2):
pts = [lsst.geom.SpherePoint(r*lsst.geom.degrees, d*lsst.geom.degrees) for
r, d in zip(ra, dec)]
pixels = wcs.skyToPixel(pts)
ctrl = ChebyshevBoundedFieldControl()
ctrl.orderX = orderx
ctrl.orderY = ordery
ctrl.triangular = False
xSteps = np.linspace(bbox.getMinX(), bbox.getMaxX(), nx)
ySteps = np.linspace(bbox.getMinY(), bbox.getMaxY(), ny)
x = np.tile(xSteps, nx)
y = np.repeat(ySteps, ny)
psf_size = np.zeros(x.size)
psf_e1 = np.zeros(x.size)
psf_e2 = np.zeros(x.size)
for i in range(x.size):
shape = psf.computeShape(lsst.geom.Point2D(x[i], y[i]))
psf_size[i] = shape.getDeterminantRadius()
ixx = shape.getIxx()
iyy = shape.getIyy()
ixy = shape.getIxy()
psf_e1[i] = (ixx - iyy)/(ixx + iyy + 2.*psf_size[i]**2.)
psf_e2[i] = (2.*ixy)/(ixx + iyy + 2.*psf_size[i]**2.)
pixel_x = np.array([pix.getX() for pix in pixels])
pixel_y = np.array([pix.getY() for pix in pixels])
cheb_size = ChebyshevBoundedField.fit(lsst.geom.Box2I(bbox), x, y, psf_size, ctrl)
psf_size_pts = cheb_size.evaluate(pixel_x, pixel_y)
cheb_e1 = ChebyshevBoundedField.fit(lsst.geom.Box2I(bbox), x, y, psf_e1, ctrl)
psf_e1_pts = cheb_e1.evaluate(pixel_x, pixel_y)
cheb_e2 = ChebyshevBoundedField.fit(lsst.geom.Box2I(bbox), x, y, psf_e2, ctrl)
psf_e2_pts = cheb_e2.evaluate(pixel_x, pixel_y)
return psf_size_pts, psf_e1_pts, psf_e2_pts
def get_psf_area_center(bbox, psf):
im = psf.computeKernelImage(bbox.getCenter())
return np.sum(im.array)/ | np.sum(im.array**2.) | numpy.sum |
# <NAME>,2020
# https://stackoverflow.com/a/62931912/238978
import cv2
import numpy as np
from PIL import Image
def stroke(origin_image, threshold, stroke_size, color):
img = cv2.cvtColor(np.array(origin_image), cv2.COLOR_RGBA2BGRA)
h, w, _ = img.shape
padding = stroke_size
alpha = img[:, :, 3]
rgb_img = img[:, :, 0:3]
bigger_img = cv2.copyMakeBorder(
rgb_img,
padding,
padding,
padding,
padding,
cv2.BORDER_CONSTANT,
value=(0, 0, 0, 0),
)
alpha = cv2.copyMakeBorder(
alpha, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=0
)
bigger_img = cv2.merge((bigger_img, alpha))
h, w, _ = bigger_img.shape
_, alpha_without_shadow = cv2.threshold(
alpha, threshold, 255, cv2.THRESH_BINARY
) # threshold=0 in photoshop
alpha_without_shadow = 255 - alpha_without_shadow
dist = cv2.distanceTransform(
alpha_without_shadow, cv2.DIST_L2, cv2.DIST_MASK_3
) # dist l1 : L1 , dist l2 : l2
stroked = change_matrix(dist, stroke_size)
stroke_alpha = (stroked * 255).astype(np.uint8)
stroke_b = | np.full((h, w), color[2], np.uint8) | numpy.full |
#!/usr/bin/env python
# coding: utf-8
from dask.distributed import Client
from glob import glob
import dask.dataframe as dd
import json
import numpy as np
import pandas as pd
import os
import os.path
import geopandas
from shapely.geometry import Point
csv_schema = """trip_duration,start_time,stop_time,start_station_id,
start_station_name,start_station_latitude,start_station_longitude,
end_station_id,end_station_name,end_station_latitude,
end_station_longitude,bike_id,user_type,birth_year,gender""".split(',')
csv_schema = [x.strip() for x in csv_schema]
dtype_list = {
'trip_duration': np.int32,
'start_station_id': np.int32,
'start_station_name': object,
'start_station_latitude': np.float64,
'start_station_longitude': np.float64,
'end_station_id': np.int32,
'end_station_name': object,
'end_station_latitude': np.float64,
'end_station_longitude': np.float64,
'bike_id': np.int32,
'user_type': object,
'birth_year': np.float64,
'gender': np.int32,
}
with open('config.json', 'r') as fh:
config = json.load(fh)
def assign_taxi_zones(df, lon_var, lat_var, locid_var):
"""Joins DataFrame with Taxi Zones shapefile.
This function takes longitude values provided by `lon_var`, and latitude
values provided by `lat_var` in DataFrame `df`, and performs a spatial join
with the NYC taxi_zones shapefile.
The shapefile is hard coded in, as this function makes a hard assumption of
latitude and longitude coordinates. It also assumes latitude=0 and
longitude=0 is not a datapoint that can exist in your dataset. Which is
reasonable for a dataset of New York, but bad for a global dataset.
Only rows where `df.lon_var`, `df.lat_var` are reasonably near New York,
and `df.locid_var` is set to np.nan are updated.
Parameters
----------
df : pandas.DataFrame or dask.DataFrame
DataFrame containing latitudes, longitudes, and location_id columns.
lon_var : string
Name of column in `df` containing longitude values. Invalid values
should be np.nan.
lat_var : string
Name of column in `df` containing latitude values. Invalid values
should be np.nan
locid_var : string
Name of column in `df` containing taxi_zone location ids. Rows with
valid, nonzero values are not overwritten.
"""
import geopandas
from shapely.geometry import Point
localdf = df[[lon_var, lat_var, locid_var]].copy()
# localdf = localdf.reset_index()
localdf[lon_var] = localdf[lon_var].fillna(value=0.)
localdf[lat_var] = localdf[lat_var].fillna(value=0.)
localdf['replace_locid'] = (localdf[locid_var].isnull()
& (localdf[lon_var] != 0.)
& (localdf[lat_var] != 0.))
if ( | np.any(localdf['replace_locid']) | numpy.any |
#%%
import imp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import anthro.viz
import anthro.tessellation as tess
import shapely
import scipy.spatial
from shapely.geometry import LineString, MultiLineString, MultiPoint, Point
from shapely.geometry import Polygon, box, MultiPolygon
from shapely.ops import nearest_points, linemerge, unary_union, polygonize
from shapely import affinity
import tqdm
imp.reload(tess)
# Choose random starting values
values = np.array([30, 5, 1, 8])
target = values / values.sum()
N = len(target)
S = 5 * tess.disc_uniform_pick(N)
W = .8 * | np.random.random(N) | numpy.random.random |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Contact: <EMAIL>
# Date: 18/12/2018
# This code generates train/test splits of edges from input graphs for evaluating graph embeddings
# on link prediction. It also provides false train and test edge sets of the required sizes.
# The train/test sets are efficiently generated by: i) obtaining a spanning tree of the input graph
# selected uniformly at random. ii) adding more edges to the spanning tree until the required amount
# of train edges is reached.
from __future__ import division
from __future__ import print_function
import os
import random
import warnings
import networkx as nx
import numpy as np
import scipy as sp
from scipy.sparse import triu
from scipy.sparse import tril
from scipy.sparse.csgraph import depth_first_tree
from sklearn.externals.joblib import Parallel, delayed
def _sanity_check(G):
r"""
Helper function that checks if the input graphs contains a single connected component. Raises an error if not.
Parameters
----------
G : graph
A NetworkX graph
Raises
------
ValueError
If the graph has more than one (weakly) connected component.
"""
# Compute the number of connected components
if G.is_directed():
num_ccs = nx.number_weakly_connected_components(G)
else:
num_ccs = nx.number_connected_components(G)
# Rise an error if more than one CC exists
if num_ccs != 1:
raise ValueError("Input graph should contain one (weakly) connected component. "
"This graph contains: " + str(num_ccs))
def broder_alg(G, E):
r"""
Runs Andrei Broder's algorithm to select uniformly at random a spanning tree of the input
graph.The direction of the edges included in train_E is taken from E which respects the
edge directions in the original graph, thus, the results are still valid for directed graphs.
For pairs of nodes in the original digraphs which have edges in both directions, we randomly
select the direction of the edge included in the ST.
Parameters
----------
G : graph
A NetworkX graph
E : set
A set of directed or undirected edges constituting the graph G.
Returns
-------
train_E : set
A set of edges of G describing the random spanning tree
References
----------
.. [1] <NAME>, "Generating Random Spanning Trees", Proc. of the 30th Annual Symposium
on Foundations of Computer Science, pp. 442--447, 1989.
"""
# Create two partitions, S and T. Initially store all nodes in S.
S = set(G.nodes)
T = set()
# Pick a random node as the "current node" and mark it as visited.
current_node = random.sample(S, 1).pop()
S.remove(current_node)
T.add(current_node)
# Perform random walk on the graph
train_E = set()
while S:
if G.is_directed():
neighbour_node = random.sample(list(G.successors(current_node)) + list(G.predecessors(current_node)), 1).pop()
else:
neighbour_node = random.sample(list(G.neighbors(current_node)), 1).pop()
if neighbour_node not in T:
S.remove(neighbour_node)
T.add(neighbour_node)
if random.random() < 0.5:
if (current_node, neighbour_node) in E:
train_E.add((current_node, neighbour_node))
else:
train_E.add((neighbour_node, current_node))
else:
if (neighbour_node, current_node) in E:
train_E.add((neighbour_node, current_node))
else:
train_E.add((current_node, neighbour_node))
current_node = neighbour_node
# Return the set of edges constituting the spanning tree
return train_E
def wilson_alg(G, E):
r"""
Runs Willson's algorithm also known as loop erasing random walk to select uniformly at random
a spanning tree of the input graph. A set E contains the original direction of edges in graph G,
and train_E will only include edges which exist in E, thus, the results are still valid for
digraphs. For pairs of nodes in the original digraphs, which have edges in both directions,
we select the direction of the edge in the ST at random.
Parameters
----------
G : graph
A NetworkX graph
E : set
A set of directed or undirected edges constituting the graph G.
Returns
-------
train_E : set
A set of edges of G describing the random spanning tree
References
----------
.. [1] <NAME>, "Generating Random Spanning Trees More Quickly than the Cover Time",
In Proceedings of STOC, pp. 296--303, 1996.
.. [2] <NAME> and <NAME>, "How to Get a Perfectly Random Sample from a Generic
Markov Chain and Generate a Random Spanning Tree of a Directed Graph",
Journal of Algorithms 27, pp. 170--217, 1998.
"""
# Stores the nodes which are part of the trees created by the LERW.
intree = set()
# A dictionary which works as a linked list and stores the spanning tree
tree = dict()
# Pick a random node as the root of the spanning tree and add it to intree
# For undirected graphs this is the correct approach
r = random.sample(G.nodes, 1).pop()
intree.add(r)
for node in G.nodes:
i = node
while i not in intree:
# This random successor works for weighted and unweighted graphs because we just
# want to select a bunch of edges from the graph, no matter what the weights are.
if G.is_directed():
tree[i] = random.sample(list(G.successors(i)) + list(G.predecessors(i)), 1).pop()
else:
tree[i] = random.sample(list(G.neighbors(i)), 1).pop()
i = tree[i]
i = node
while i not in intree:
intree.add(i)
i = tree[i]
# Create a set to store the train edges
train_E = set()
# This is only relevant for directed graphs to make the selection of edge direction equiprobable
for e in set(zip(tree.keys(), tree.values())):
if random.random() < 0.5:
if e in E:
train_E.add(e)
else:
train_E.add(e[::-1])
else:
if e[::-1] in E:
train_E.add(e[::-1])
else:
train_E.add(e)
# Return the edges of the random spanning tree
return train_E
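# Hedged usage sketch (illustrative, not part of the original module): a uniform
# spanning tree of a 6-node cycle graph spans all nodes with exactly 5 edges.
def _wilson_example():
    G = nx.cycle_graph(6)
    spanning_tree_edges = wilson_alg(G, set(G.edges))
    return len(spanning_tree_edges)  # -> 5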
def _compute_one_split(G, output_path, owa=True, train_frac=0.51, num_fe_train=None, num_fe_test=None, split_id=0):
r"""
Computes one split of train/test edges as well as non-edges from an input graph and writes the data to files.
The train sets are always connected / weakly connected and span all nodes of the input graph.
Input graphs (digraphs) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
output_path : string
Indicates the path where data will be stored. Can include a name for all splits to share.
owa : bool, optional
Encodes the belief that the network respects or not the open world assumption. Default is True.
If OWA=True, false train edges can be true test edges. False edges sampled from train graph.
If OWA=False, closed world is assumed so false train edges are known to be false (not in G)
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
The number of test false edges to generate. Default is same number as true test edges.
split_id : int, optional
The ID of train/test split. Default is 0.
"""
# Generate train and test edge splits
train_E, test_E = split_train_test(G, train_frac)
# Generate the train/test false edges
if owa:
train_E_false, test_E_false = generate_false_edges_owa(G, train_E, test_E, num_fe_train, num_fe_test)
else:
train_E_false, test_E_false = generate_false_edges_cwa(G, train_E, test_E, num_fe_train, num_fe_test)
# Write the computed split to a file
store_train_test_splits(output_path, train_E, train_E_false, test_E, test_E_false, split_id)
def compute_splits_parallel(G, output_path, owa=True, train_frac=0.51, num_fe_train=None, num_fe_test=None,
num_splits=10):
r"""
Computes in parallel the required number of train/test splits of edges and non-edges from an input graph
and writes the data to files. The train sets are always connected / weakly connected and span all nodes
of the input graph. Input graphs (digraphs) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
output_path : string
Indicates the path where data will be stored. Can include a name for all splits to share.
owa : bool, optional
Encodes the belief that the network respects or not the open world assumption. Default is True.
If OWA=True, false train edges can be true test edges. False edges sampled from train graph.
If OWA=False, closed world is assumed so false train edges are known to be false (not in G)
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
The number of test false edges to generate. Default is same number as true test edges.
num_splits : int, optional
The number of train/test splits to generate. Default is 10.
"""
# Compute the splits sequentially or in parallel
backend = 'multiprocessing'
path_func = delayed(_compute_one_split)
Parallel(n_jobs=num_splits, verbose=True, backend=backend)(
path_func(G, output_path, owa, train_frac, num_fe_train, num_fe_test, split) for split in range(num_splits))
def split_train_test(G, train_frac=0.51, st_alg='wilson'):
r"""
Computes one train/test split of edges from an input graph and returns the results.
The train set will be (weakly) connected and span all nodes of the input graph (digraph).
Input graph (digraph) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
st_alg : basestring, optional
The algorithm to use for generating the spanning tree constituting the backbone of the train set.
Options are: 'wilson' and 'broder'. The first option, 'wilson', also known as LERW is much faster in most cases.
Default is 'wilson'.
Returns
-------
train_E : set
The set of train edges
test_E : set
The set of test edges
Raises
------
ValueError
If the train_frac parameter is not in range (0, 1].
If the input graph G has more than one (weakly) connected component.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
if train_frac <= 0.0 or train_frac > 1.0:
raise ValueError('The train_frac parameter needs to be in range: (0.0, 1.0]')
if train_frac == 1.0:
return set(G.edges()), set()
# Create a set of all edges in G
E = set(G.edges)
if st_alg == 'broder':
# Compute a random spanning tree using broder's algorithm
train_E = broder_alg(G, E)
else:
# Compute a random spanning tree using wilson's algorithm
train_E = wilson_alg(G, E)
# Fill test edge set as all edges not in the spanning tree
test_E = E - train_E
# Compute num train edges
num_E = len(E)
num_train_E = np.ceil(train_frac * num_E)
# Check if the num edges in the spanning tree is already greater than the num train edges
num_toadd = int(num_train_E - len(train_E))
if num_toadd <= 0:
print("WARNING: In order to return a connected train set the train_frac parameter needs to be higher!")
print("In this case, the provided train set constitutes a random spanning tree of the input graph.")
print("The train_frac value used is: {}".format(len(train_E) / num_E))
print("Edges requested: train = {}, test = {}".format(num_train_E, num_E - num_train_E))
print("Edges returned: train = {}, test = {}".format(len(train_E), num_E - len(train_E)))
else:
# Add more edges to train set from test set until it has desired size
edges = set(random.sample(test_E, num_toadd))
test_E = test_E - edges
train_E = train_E | edges
# Perform some simple checks
assert E == (test_E | train_E)
assert len(E) == len(test_E) + len(train_E)
if num_toadd > 0:
assert num_train_E == len(train_E)
# Return the sets of edges
return train_E, test_E
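# Hedged usage sketch (illustrative, not part of the original module): split the
# karate-club graph into a connected train edge set and the remaining test edges.
def _split_example():
    G = nx.karate_club_graph()
    train_E, test_E = split_train_test(G, train_frac=0.8)
    return len(train_E), len(test_E)  # roughly an 80/20 split of the 78 edges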
def rand_split_train_test(G, train_frac=0.51):
r"""
Computes one train/test split of edges from an input graph and returns the results.
The train/test split is computed by randomly removing 1-train_frac edges from the graph.
From the remaining edges, those in the mainCC constitute the train edges. From the set
of removed edges, those whose nodes are in the train set, are considered part or the
test set. The proportion of train/test edges returned might not be the required one.
The train set will be (weakly) connected and span all nodes of the input graph.
Input graph (digraph) can contain one or many (weakly) connected components.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
Returns
-------
train_E : set
The set of train edges
test_E : set
The set of test edges
Raises
------
ValueError
If the train_frac parameter is not in range (0, 1].
"""
if train_frac <= 0.0 or train_frac > 1.0:
raise ValueError('The train_frac parameter needs to be in range: (0.0, 1.0]')
if train_frac == 1.0:
return set(G.edges()), set()
# Create a set of all edges in G
E = set(G.edges)
num_E = len(E)
# Compute the potential number of train and test edges which corresponds to the fraction given
num_train_E = int(np.ceil(train_frac * num_E))
num_test_E = int(num_E - num_train_E)
# Randomly remove 1-train_frac edges from the graph and store them as potential test edges
pte_edges = set(random.sample(E, num_test_E))
# The remaining edges are potential train edges
ptr_edges = E - pte_edges
# Create a graph containing all ptr_edges and compute the mainCC
if G.is_directed():
H = nx.DiGraph()
H.add_edges_from(ptr_edges)
maincc = max(nx.weakly_connected_component_subgraphs(H), key=len)
else:
H = nx.Graph()
H.add_edges_from(ptr_edges)
maincc = max(nx.connected_component_subgraphs(H), key=len)
# The edges in the mainCC graph are the actual train edges
train_E = set(maincc.edges)
# Remove potential test edges for which the end nodes do not exist in the train_E
test_E = set()
for (src, dst) in pte_edges:
if src in maincc.nodes and dst in maincc.nodes:
test_E.add((src, dst))
# Return the sets of edges
return train_E, test_E
def naive_split_train_test(G, train_frac=0.51):
r"""
Computes one train/test split of edges from an input graph and returns the results.
The sets are computed using the naive approach that checks connectivity of the graph
for each removed edge. If graph gets disconnected, that edges is not removed.
The train set will be (weakly) connected and span all nodes of the input graph.
Input graph (digraph) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
Returns
-------
train_E : set
The set of train edges
test_E : set
The set of test edges
Raises
------
ValueError
If the train_frac parameter is not in range (0, 1].
If the input graph G has more than one (weakly) connected component.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
if train_frac <= 0.0 or train_frac > 1.0:
raise ValueError('The train_frac parameter needs to be in range: (0.0, 1.0]')
if train_frac == 1.0:
return set(G.edges()), set()
# Is directed
directed = G.is_directed()
G = G.copy()
# Create a set of all edges in G
aux = | np.array(G.edges) | numpy.array |
import copy
import logging
import math
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import random
import time
import torch
import torch.multiprocessing as tmp
import torch.nn.functional as F
import torch.tensor as tt
from torchvision.utils import save_image
from dist import Master, Worker
from net import CAModel
from pool import CustomPool
from utils import load_emoji, to_rgb, visualize_batch, append_file, write_file, export_model, dmg
from weight_updates import hebbian_update
HIDDEN_SIZE = None
class EvolutionStrategy:
"""Master class for performing an evolution.
Keeps track of hyperparameters, weights/coeffs.
    Contains methods for running the environment, evaluating performance and updating parameters.
"""
def __init__(self, args):
self.iterations = args.iter
self.learning_rate = args.lr
self.sigma = args.sigma
self.pop_size = args.pop_size
self.fire_rate = args.fire_rate
self.target_size = args.size
self.target_padding = args.pad
self.new_size = self.target_size + 2 * self.target_padding
self.channel_n = args.channels
self.hidden_size = args.hidden_size
HIDDEN_SIZE = self.hidden_size
self.target_img = load_emoji(args.emoji, self.target_size)
self.use_hebb = args.hebb
self.use_pool = args.pool
self.damage = args.damage
self.damageChannels = args.damageChannels
self.use_mp = args.use_mp
self.decay_state = 0
self.log_main_every = 10
self.hit_goal = False
self.cross_machine = args.cross_machine
self.is_master = args.master
self.nodes = args.nodes
if self.damage > 0:
            if not self.use_pool or not self.damage <= 3:
raise ValueError("use_pool needs to be true and damage_bottom_n < 4.")
if self.cross_machine:
if self.is_master:
self.master = Master(nodes=args.nodes)
else:
self.worker = Worker(run_id=0)
p = self.target_padding
self.pad_target = F.pad(tt(self.target_img), (0, 0, p, p, p, p))
h, w = self.pad_target.shape[:2]
self.seed = np.zeros([h, w, self.channel_n], np.float64)
self.seed[h // 2, w // 2, 3:] = 1.0
if self.use_pool:
self.pool_size = 1024
self.batch_size = 4
self.pool = CustomPool(self.seed, self.pool_size)
else:
self.batch_size = 1
if self.use_hebb:
self.coefficients_per_synapse = 5
plastic_weights = 3 * self.channel_n * self.hidden_size + self.hidden_size * self.channel_n
self.coeffs_start_interval = 0.001
self.coeffs = np.random.uniform(-self.coeffs_start_interval, self.coeffs_start_interval,
(plastic_weights, self.coefficients_per_synapse))
self.net = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size, use_hebb=True)
else:
self.net = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size)
self.parameters_shape = [tuple(w.shape) for w in self.net.parameters()]
self.log_folder = args.log_folder
logging.basicConfig(filename=self.log_folder + "/logging.txt", format='%(message)s', filemode="w",
level=logging.INFO)
if args.pre_trained != "":
if self.use_hebb:
self.coeffs = np.load(args.pre_trained)
else:
self.load_model(args.pre_trained)
logging.info("lr/(pop*sigma) at start: " + str(self.learning_rate / (self.pop_size * self.sigma)))
# For logging
self.x_range = []
self.y_lin = []
self.avg = []
self.avg_iter = []
self.losses_main = []
self.iter_main = []
t_rgb = to_rgb(self.pad_target).permute(2, 0, 1)
save_image(t_rgb, self.log_folder + "/target_image.png")
def load_model(self, path):
"""Load a PyTorch model from path."""
self.net.load_state_dict(torch.load(path))
self.net.double()
def fitness_shaping(self, x):
"""Sort x and and map x to linear values between -0.5 and 0.5
Return standard score of x
"""
shaped = np.zeros(len(x))
shaped[x.argsort()] = np.arange(len(x), dtype=np.float64)
shaped /= (len(x) - 1)
shaped -= 0.5
shaped = (shaped - shaped.mean()) / shaped.std()
return shaped
def update_coeffs(self, fitnesses, epsilons):
"""Update parent Hebbian coefficients using evaluated mutants and fitness."""
fitnesses = self.fitness_shaping(fitnesses)
for index, c in enumerate(self.coeffs):
layer_population = np.array([p[index] for p in epsilons])
update_factor = self.learning_rate / (self.pop_size * self.sigma)
self.coeffs[index] = c + update_factor * np.dot(layer_population.T, fitnesses).T
def update_parameters(self, fitnesses, epsilons):
"""Update parent network weights using evaluated mutants and fitness."""
fitnesses = self.fitness_shaping(fitnesses)
for i, e in enumerate(epsilons):
for j, w in enumerate(self.net.parameters()):
w.data += self.learning_rate * 1 / (self.pop_size * self.sigma) * fitnesses[i] * e[j]
def get_population(self, use_seed=None):
"""Return an array with values sampled from N(0, sigma).
        The shape of the array is (pop_size, (layer1_size, layer2_size)) when using ES and (pop_size, plastic_weights, 5) when using Hebbian coefficients.
"""
if use_seed is not None:
| np.random.seed(use_seed) | numpy.random.seed |
"""Module for performing optimization over the stiefel manifold."""
import numpy as np
from scipy import linalg as linalg
import neuropy.temp as tmp
def optimize(ObjFn):
"""Perform optimization over the Stiefel manifold."""
# Parameters
max_iter = 1000
max_ls_iter = 500 # Maximum number of line search iterations
n_restarts = 5 # Number of random restarts to use
    delta_b = 0.9  # step size reduction factor for the line search
eps_f = 1e-10
# Get size of space -- it would be good to get these from the obj. fn.
x_dim = ObjFn.x_dim # Size of the data (high-d)
m_dim = ObjFn.m_dim # Size of the desired orthonormal space
# Initialize results
S = [] # List to store results for each random restart
for i in range(n_restarts):
# Initialize M (randomly)
A = np.random.randn(x_dim, m_dim)
M = linalg.orth(A)
# Run gradient descent
J = [] # Objective function value
J_terms = [] # Value of objective function terms
converged_flag = False
J_iter_prev = np.inf
for grad_iter in range(max_iter):
b = 0.1 # Reset step size
# Step 1: Calculate free gradient
Z = ObjFn.gradient(M)
# Step 2: Compute the search direction
Z = search_dir(-Z, M)
# Step 3: Line search with retraction
df = []
for ls_iter in range(max_ls_iter):
# Adjust B
b = b * delta_b
# Evaluate step
fM, _ = ObjFn.evaluate(M)
fR, _ = ObjFn.evaluate(retract(b*Z, M))
df_iter = fM - fR
df.append(df_iter)
# Check for convergence
if df_iter >= 0:
break # Break out of line search
# Step 4: Update estimate of M
M = retract(b*Z, M)
# Step 5: Check for convergence
J_iter, J_terms_iter = ObjFn.evaluate(M)
J.append(J_iter)
J_terms.append(J_terms_iter)
dJ = J_iter - J_iter_prev
J_iter_prev = J_iter
# Print convergence status
if grad_iter % 10 == 0:
print('Restart {}, Iter {}: J = {:0.3e}, dJ = {:0.3e}'.format(
i, grad_iter, J_iter, dJ))
if abs(dJ) < eps_f:
converged_flag = True
break # Break out of gradient descent
# Save results for current random restart
S.append({
'M': M,
'J': J,
'J_terms': np.concatenate(J_terms),
'J_final': J[-1],
'converged': converged_flag,
'n_iter': grad_iter
})
# Find the random restart with the smallest objective function
J = [s['J_final'] for s in S] # Final objective function values
min_idx = np.argmin(J)
S_final = S[min_idx]
return S_final
def search_dir(Z, M):
"""Compute Stiefel optimization search direction."""
x_dim = M.shape[0]
SK = (1/2) * (M.T @ Z - Z.T @ M)
Z = M @ SK + (np.eye(x_dim) - M @ M.T) @ Z
return Z
def retract(Z, M):
"""Retract onto Stiefel manifold.
See section A.3 of Cunningham and Ghahramani, 2015
"""
    # Note that this requires computing the inverse matrix square root S^(-1/2),
    # where the square root X = S^(1/2) is defined as the solution of X @ X = S.
S = np.eye(M.shape[1]) + Z.T @ Z
d, Q = np.linalg.eig(S)
D = np.diag(d**(-1/2))
S_root_inv = Q @ D @ Q.T # S^(-1/2)
# Calculate final retraction
Z = (M + Z) @ S_root_inv
return Z
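# Hedged sanity check (illustrative, not part of the original module): retracting
# a scaled tangent direction from a random Stiefel point returns a matrix whose
# columns are again orthonormal.
def _retract_example():
    M = linalg.orth(np.random.randn(10, 2))    # random point on the manifold
    Z = search_dir(np.random.randn(10, 2), M)  # project a step onto the tangent space
    R = retract(0.1 * Z, M)
    return np.allclose(R.T @ R, np.eye(2))     # -> True (up to round-off)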
class ObjFn:
"""Objective function class.
This serves as the base class for objective functions used in the stiefel
optimization-based approach for finding specific orthogonal projections of
neural activity.
"""
def __init__(self, data, params=None):
"""Initialization function."""
self.params = params
self.data = data
self.x_dim = None
self.m_dim = None
def evaluate(self, M):
"""Evaluate objective function."""
J = 0
J_terms = [0]
return J, J_terms
def gradient(self, M):
"""Evaluate gradient of objective function."""
dJ = 0
return dJ
class AsymmetryStandard(ObjFn):
"""Standard (default) asymmetry-defining objective function.
This class implements a weighted version of the 'standard' objective
function used for the energy landscape experiments. It seeks to find a
projection showing a strong trajectory asymmetry, where the midpoint of the
A->B and B->A trajectories are maximally different along one axis of the
projection.
The weighting parameters (w_mid, w_var, w_start) are used to weight the
various terms in the objective function. This was needed b/c the variance
term can dominate the objective, particularly because it is a second-order
term and the others are first-order.
Note that a similar objective function can instead use squared distance to
minimize this issue. The 'AsymmetrySquared' class implements this
objective function.
"""
def __init__(self, data, params=None):
"""Initialization function."""
# Set default parameters
if params is None:
params = {
'w_mid': 1,
'w_var': 1,
'w_start': 1
}
# Call super method -- this adds the params and data to the object
super().__init__(data, params)
# TODO: check parameters structure here
self.term_str = [
'midpoint distance',
'midpoint variance',
'start distance'
]
# Get size of data
self.x_dim = self.data['mu_start'][0].shape[0]
self.m_dim = 2 # Hard-coded based on obj. fn.
def evaluate(self, M):
"""Evaluate objective function."""
# Unpack parameters (for clarity)
w_mid = self.params['w_mid']
w_var = self.params['w_var']
w_start = self.params['w_start']
# Unpack data
mu_A = self.data['mu_start'][0]
mu_B = self.data['mu_start'][1]
mu_AB = self.data['mu_center'][0]
mu_BA = self.data['mu_center'][1]
sig_AB = self.data['cov_center'][0]
sig_BA = self.data['cov_center'][1]
# Unpack orthonormal projection
p_1 = M[:, [0]]
p_2 = M[:, [1]]
# --- Compute individual terms in the objective function ---
# Term 1 -- distance between the centers of the midpoints. This is
# positive b/c we want this quantity to be large (meaning that there is
# a strong asymmetry).
term_1 = w_mid * p_1.T @ (mu_AB - mu_BA)
        # Term 2 -- sum of variance along p_1 at the midpoint of the
# trajectories. This is negative b/c we want this quantity to be small
# (meaning that the trajectories are consistent at the midpoint).
term_2 = -w_var * p_1.T @ (sig_AB + sig_BA) @ p_1
# Term 3 -- distance between the starting points. This is positive b/c
# we want this quantity to be large (meaning that the distance
        # between the starting positions is as large as possible).
term_3 = w_start * p_2.T @ (mu_A - mu_B)
# Compute overall objective -- this is negative b/c we want to minimize
J = - (term_1 + term_2 + term_3)
J = J[0, 0] # Convert from np array to a scalar
J_terms = np.concatenate([-term_1, -term_2, -term_3], axis=1)
return J, J_terms
def gradient(self, M):
"""Calculate gradient."""
# Unpack parameters (for clarity)
w_mid = self.params['w_mid']
w_var = self.params['w_var']
w_start = self.params['w_start']
# Unpack data
mu_A = self.data['mu_start'][0]
mu_B = self.data['mu_start'][1]
mu_AB = self.data['mu_center'][0]
mu_BA = self.data['mu_center'][1]
sig_AB = self.data['cov_center'][0]
sig_BA = self.data['cov_center'][1]
# Unpack orthonormal projection
p_1 = M[:, [0]]
p_2 = M[:, [1]] # NOTE -- this is not used
# --- Compute derivatives of terms in the objective function ---
term_1 = w_mid * (mu_AB - mu_BA)
term_2 = -w_var * 2 * (sig_AB + sig_BA) @ p_1
term_3 = w_start * (mu_A - mu_B)
# Combine terms
d_p_1 = - (term_1 + term_2)
d_p_2 = -term_3
        dJ = np.concatenate([d_p_1, d_p_2], axis=1)
        return dJ
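# Hedged usage sketch for AsymmetryStandard: build a synthetic data dict with
# the shapes implied by evaluate()/gradient() (column-vector means, full
# covariances) and evaluate the objective at a random orthonormal projection.
# All names and numbers below are illustrative, not part of the original code.
def _demo_asymmetry_objective(x_dim=5, seed=2):
    import numpy as np
    from scipy import linalg
    rng = np.random.default_rng(seed)
    data = {
        'mu_start': [rng.standard_normal((x_dim, 1)) for _ in range(2)],    # A, B starts
        'mu_center': [rng.standard_normal((x_dim, 1)) for _ in range(2)],   # A->B, B->A midpoints
        'cov_center': [np.eye(x_dim) for _ in range(2)],                    # midpoint covariances
    }
    obj = AsymmetryStandard(data)                                # default unit weights
    M = linalg.orth(rng.standard_normal((x_dim, obj.m_dim)))     # random 2-D projection
    J, J_terms = obj.evaluate(M)                                 # scalar, (1, 3) term values
    dJ = obj.gradient(M)                                         # (x_dim, 2) Euclidean gradient
    return J, J_terms, dJ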
import numpy as np
from image_denoising.image_denoising.ConverterModel.FastModel.FastModel_init_utils import generate_pswf_quad, parameters_for_forward
from image_denoising.image_denoising.ConverterModel.FastModel.FastModel_forward_utils import forward
class FastModel:
def __init__(self, resolution, truncation, beta, pswf2d, even):
# find max alpha for each N
max_ns = []
a = np.square(float(beta * resolution) / 2)
m = 0
alpha_all = []
while True:
alpha = pswf2d.alpha_all[m]
            lambda_var = a * np.square(np.absolute(alpha))
#! /usr/bin/env python
from __future__ import division, print_function
import argparse
import collections
import logging
import os
import random
import threading
import numpy as np
import pandas as pd
from itertools import cycle, islice
import keras
from keras import backend as K
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, TensorBoard
from keras.utils import get_custom_objects
from keras.utils.vis_utils import plot_model
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from scipy.stats.stats import pearsonr
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import combo
import p1_common
# import p1_common_keras
from solr_keras import CandleRemoteMonitor, compute_trainable_params, TerminateOnTimeOut
# import argparser
# from datasets import NCI60
import NCI60
import combo
logger = logging.getLogger(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def set_seed(seed):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
if K.backend() == 'tensorflow':
import tensorflow as tf
tf.set_random_seed(seed)
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
# Uncommit when running on an optimized tensorflow where NUM_INTER_THREADS and
# NUM_INTRA_THREADS env vars are set.
# session_conf = tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
# intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS']))
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
def verify_path(path):
folder = os.path.dirname(path)
if folder and not os.path.exists(folder):
os.makedirs(folder)
def set_up_logger(logfile, verbose):
verify_path(logfile)
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.addHandler(sh)
def extension_from_parameters(args):
"""Construct string for saving model with annotation of parameters"""
ext = ''
ext += '.A={}'.format(args.activation)
ext += '.B={}'.format(args.batch_size)
ext += '.E={}'.format(args.epochs)
ext += '.O={}'.format(args.optimizer)
# ext += '.LEN={}'.format(args.maxlen)
ext += '.LR={}'.format(args.learning_rate)
ext += '.CF={}'.format(''.join([x[0] for x in sorted(args.cell_features)]))
ext += '.DF={}'.format(''.join([x[0] for x in sorted(args.drug_features)]))
if args.feature_subsample > 0:
ext += '.FS={}'.format(args.feature_subsample)
if args.drop > 0:
ext += '.DR={}'.format(args.drop)
if args.warmup_lr:
ext += '.wu_lr'
if args.reduce_lr:
ext += '.re_lr'
if args.residual:
ext += '.res'
if args.use_landmark_genes:
ext += '.L1000'
if args.gen:
ext += '.gen'
if args.use_combo_score:
ext += '.scr'
for i, n in enumerate(args.dense):
if n > 0:
ext += '.D{}={}'.format(i+1, n)
if args.dense_feature_layers != args.dense:
for i, n in enumerate(args.dense):
if n > 0:
ext += '.FD{}={}'.format(i+1, n)
return ext
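# Hedged illustration of extension_from_parameters(): a minimal argparse-style
# namespace carrying just the attributes the function reads.  The values are
# made up and only meant to show the shape of the resulting suffix, roughly
# '.A=relu.B=32.E=10.O=adam.LR=0.001.CF=e.DF=d.D1=1000.D2=1000'.
def _demo_extension_from_parameters():
    from argparse import Namespace
    args = Namespace(activation='relu', batch_size=32, epochs=10, optimizer='adam',
                     learning_rate=0.001, cell_features=['expression'],
                     drug_features=['descriptors'], feature_subsample=0, drop=0,
                     warmup_lr=False, reduce_lr=False, residual=False,
                     use_landmark_genes=False, gen=False, use_combo_score=False,
                     dense=[1000, 1000], dense_feature_layers=[1000, 1000])
    return extension_from_parameters(args)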
def discretize(y, bins=5):
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
return classes
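# Hedged usage sketch for discretize(): with the default bins=5 the thresholds
# are the 20th/40th/60th/80th percentiles, so a uniform ramp is split into
# roughly equally populated classes 0..4 (illustrative only).
def _demo_discretize():
    y = np.arange(100)
    classes = discretize(y, bins=5)
    return np.bincount(classes)          # expected: array([20, 20, 20, 20, 20])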
class ComboDataLoader(object):
"""Load merged drug response, drug descriptors and cell line essay data
"""
def __init__(self, seed, val_split=0.2, shuffle=True,
cell_features=['expression'], drug_features=['descriptors'],
use_landmark_genes=False, use_combo_score=False,
preprocess_rnaseq=None, exclude_cells=[], exclude_drugs=[],
feature_subsample=None, scaling='std', scramble=False,
cv_partition='overlapping', cv=0):
"""Initialize data merging drug response, drug descriptors and cell line essay.
Shuffle and split training and validation set
Parameters
----------
seed: integer
seed for random generation
val_split : float, optional (default 0.2)
fraction of data to use in validation
cell_features: list of strings from 'expression', 'expression_5platform', 'mirna', 'proteome', 'all', 'categorical' (default ['expression'])
use one or more cell line feature sets: gene expression, microRNA, proteome
use 'all' for ['expression', 'mirna', 'proteome']
use 'categorical' for one-hot encoded cell lines
drug_features: list of strings from 'descriptors', 'latent', 'all', 'categorical', 'noise' (default ['descriptors'])
use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder
trained on NSC drugs, or both; use random features if set to noise
use 'categorical' for one-hot encoded drugs
shuffle : True or False, optional (default True)
if True shuffles the merged data before splitting training and validation sets
scramble: True or False, optional (default False)
if True randomly shuffle dose response data as a control
feature_subsample: None or integer (default None)
number of feature columns to use from cellline expressions and drug descriptors
use_landmark_genes: True or False
only use LINCS1000 landmark genes
use_combo_score: bool (default False)
use combination score in place of percent growth (stored in 'GROWTH' column)
scaling: None, 'std', 'minmax' or 'maxabs' (default 'std')
            type of feature scaling: 'minmax' to [0, 1], 'maxabs' to [-1, 1], 'std' for standard normalization
"""
self.cv_partition = cv_partition
        np.random.seed(seed)
from scannerpy import Database, DeviceType
from scannerpy.stdlib import NetDescriptor, parsers, bboxes
import scipy.misc
import numpy as np
import cv2
import sys
import random
import json
import time
import os
import os.path
import struct
def write_dmb_file(path, image):
with open(path, 'wb') as f:
# type
f.write(struct.pack('i', 1)) # type
# height
f.write(struct.pack('i', image.shape[0]))
# width
f.write(struct.pack('i', image.shape[1]))
# channels
if len(image.shape) > 2:
f.write(struct.pack('i', image.shape[2]))
else:
f.write(struct.pack('i', 1))
f.write(image.tobytes())
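# Hedged counterpart to write_dmb_file(): read back the simple .dmb layout
# written above (type, height, width, channels header, then the raw payload).
# This helper is an assumption for illustration -- it presumes a float32
# payload, which matches the Gipuma depth/normal maps handled below.
def read_dmb_file(path):
    with open(path, 'rb') as f:
        _dmb_type, height, width, channels = struct.unpack('iiii', f.read(16))
        data = np.frombuffer(f.read(), dtype=np.float32)
    return data.reshape(height, width, channels)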
def make_p_matrices(calib):
cameras = calib['cameras']
p_matrices = {}
for cam in cameras:
K = np.array(cam['K'])
R = np.array(cam['R'])
t = np.array(cam['t'])
p = K.dot(np.hstack((R, t)))
p_matrices[(cam['panel'], cam['node'])] = p
return p_matrices
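# Hedged example of using a 3x4 projection matrix produced by make_p_matrices():
# project a 3-D world point into pixel coordinates via P @ [X, Y, Z, 1] followed
# by the perspective divide.  The helper name and sample usage are illustrative.
def project_point(p_matrix, point_3d):
    homog = p_matrix.dot(np.append(np.asarray(point_3d).ravel(), 1.0))
    return homog[:2] / homog[2]          # (u, v) pixel coordinates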
def main():
with open('/n/scanner/apoms/panoptic/160422_mafia2/calibration_160422_mafia2.json', 'r') as f:
calib = json.load(f)
p_matrices = make_p_matrices(calib)
dataset = '160422_haggling1'
template_path = '/n/scanner/apoms/panoptic/' + dataset + '/vgaVideos/vga_{:02d}_{:02d}.mp4'
i = 0
video_paths = []
table_idx = {}
for p in range(1, 21):
for c in range(1, 25):
video_paths.append(template_path.format(p, c))
table_idx[(p, c)] = i
i += 1
with Database(debug=True) as db:
# Ingest
if False:
#collection, _ = db.ingest_video_collection(dataset, video_paths,
# force=True)
collection = db.collection(dataset)
# Setup tables with calibration data
calibration_table_names = []
columns = ['P']
for p in range(1, 21):
for c in range(1, 25):
table_name = 'calibration_{:02d}_{:02d}'.format(p, c)
num_rows = collection.tables(len(calibration_table_names)).num_rows()
cam = db.protobufs.Camera()
if (p == 14 and c == 18) or num_rows == 0:
rows = [[cam.SerializeToString()]]
db.new_table(table_name, columns, rows, force=True)
calibration_table_names.append(table_name)
continue
P = p_matrices[(p, c)]
for i in range(3):
for j in range(4):
cam.p.append(P[i, j])
rows = []
for i in range(num_rows):
rows.append([cam.SerializeToString()])
print(table_name)
db.new_table(table_name, columns, rows, force=True)
calibration_table_names.append(table_name)
calib_collection = db.new_collection(dataset + '_calibration',
calibration_table_names,
force=True)
collection = db.collection(dataset)
calib_collection = db.collection(dataset + '_calibration')
gipuma_args = db.protobufs.GipumaArgs()
gipuma_args.min_disparity = 0
gipuma_args.max_disparity = 384
gipuma_args.min_depth = 30
gipuma_args.max_depth = 500
gipuma_args.iterations = 8
gipuma_args.kernel_width = 19
gipuma_args.kernel_height = 19
columns = []
camera_groups_length = 4
for i in range(camera_groups_length):
columns += ["frame" + str(i), "fi" + str(i), "calib" + str(i)]
input_op = db.ops.Input(["index"] + columns)
op = db.ops.Gipuma(
inputs=[(input_op, columns)],
args=gipuma_args, device=DeviceType.GPU)
tasks = []
start_frame = 4300
end_frame = 4302
item_size = 64
sampler_args = db.protobufs.StridedRangeSamplerArgs()
sampler_args.stride = 1
start = start_frame
end = end_frame
while start < end:
sampler_args.warmup_starts.append(start)
sampler_args.starts.append(start)
sampler_args.ends.append(min(start + item_size, end))
start += item_size
camera_groups = [
[(1, 1), (1, 2), (5, 1), (16, 13)],
# [(3, 1), (3, 3), (5, 3), (1, 6)],
# [(4, 2), (1, 3), (5, 3), (3, 3)],
# [(7, 4), (7, 8), (6, 3), (8, 3)],
# [(10, 4), (9, 3), (10, 3), (11, 3)],
# [(13, 8), (13, 10), (12, 8), (14, 20)],
# [(16, 4), (16, 16), (15, 2), (16, 8)],
]
for group in camera_groups:
first_idx = table_idx[group[0]]
print(first_idx)
first_table = collection.tables(first_idx)
first_calib_table = calib_collection.tables(first_idx)
task = db.protobufs.Task()
task.output_table_name = 'disparity_{:02d}_{:02d}'.format(
group[0][0], group[0][1])
column_names = [c.name() for c in first_table.columns()]
# Load frames
sample = task.samples.add()
sample.table_name = first_table.name()
sample.column_names.extend(column_names)
sample.sampling_function = "StridedRange"
sample.sampling_args = sampler_args.SerializeToString()
# Load calibration
sample = task.samples.add()
sample.table_name = first_calib_table.name()
sample.column_names.extend(['P'])
sample.sampling_function = "StridedRange"
sample.sampling_args = sampler_args.SerializeToString()
for c, p in group[1:]:
idx = table_idx[(c, p)]
print(idx)
table = collection.tables(idx)
calib_table = calib_collection.tables(idx)
sample = task.samples.add()
sample.table_name = table.name()
sample.column_names.extend(["frame", "frame_info"])
sample.sampling_function = "StridedRange"
sample.sampling_args = sampler_args.SerializeToString()
sample = task.samples.add()
sample.table_name = calib_table.name()
sample.column_names.extend(['P'])
sample.sampling_function = "StridedRange"
sample.sampling_args = sampler_args.SerializeToString()
tasks.append(task)
# Output data for fusibile
top_folder = 'gipuma_results/'
frame_folder = top_folder + '{:08d}/'
images_folder = frame_folder + 'images/'
image_name = '{:03d}.png'
image_path = images_folder + image_name
krt_path = images_folder + 'cam.txt'
results_folder = frame_folder + 'results/'
cam_results_folder = results_folder + '2hat_cam_{:03d}/'
normals_path = cam_results_folder + 'normals.dmb'
depth_path = cam_results_folder + 'disp.dmb'
output_tables = db.run(tasks, op, pipeline_instances_per_node=4, force=True)
# Export data directory corresponding to image files
# for i, table in enumerate(collection.tables()):
# for fi, tup in table.load(['frame'], rows=range(start_frame,
# end_frame)):
# if not os.path.exists(images_folder.format(fi)):
# os.makedirs(images_folder.format(fi))
# img = tup[0]
# cv2.imwrite(image_path.format(fi, i), img)
# Export camera calibration params file (krt_file)
for fi in range(end_frame - start_frame):
with open(krt_path.format(fi), 'w') as f:
f.write(str(479) + '\n')
i = -1
offset = 0
cameras = calib['cameras']
for p in range(1, 21):
for c in range(1, 25):
i += 1
if p == 14 and c == 18:
continue
f.write(image_name.format(i) + ' ')
cam = cameras[offset]
K = cam['K']
for n in [item for sublist in K for item in sublist]:
f.write(str(n) + ' ')
R = cam['R']
for n in [item for sublist in R for item in sublist]:
f.write(str(n) + ' ')
t = cam['t']
for n in [item for sublist in t for item in sublist]:
f.write(str(n) + ' ')
f.write('\n')
offset += 1
# Export normals and depth dmb files
for i, table in enumerate(output_tables):
for fi, tup in table.load(['points', 'cost']):
if not os.path.exists(cam_results_folder.format(fi, i)):
os.makedirs(cam_results_folder.format(fi, i))
points = np.frombuffer(tup[0], dtype=np.float32).reshape(480, 640, 4)
cost = np.frombuffer(tup[1], dtype=np.float32).reshape(480, 640, 1)
                avg = np.median(cost[:])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-07-24 15:18:47
# @Author : <NAME> (<EMAIL>)
# @Link : https://blog.csdn.net/taifengzikai/
# @Version : $1.0$
import json
import pickle
import numpy as np
import os.path as osp
class kittidataset(object):
def __init__(self, detection_file):
self.detection_file =detection_file
self.fp = open(self.detection_file,"rb+")
self.detection = pickle.load(self.fp)
"dict_keys(['name', 'truncated', 'occluded', 'alpha', 'bbox', " \
"'dimensions', 'location', 'rotation_y', 'score', 'metadata'])"
# def analysis_detection(self):
# self.data = self.detection
def __len__(self):
return len(self.detection)
# def save_kitti(self, kitti_infos_test_video):
# # file = "kitti_infos_train.pkl"
# filename = './results/000002/test/output_test_video_kalman.pkl'#self.output_path
# print(f"Kitti info test video file is saved to {filename}")
# with open('./results/000002/test/output_test_video_kalman.pkl', "wb") as f:
# pickle.dump(kitti_infos_test_video, f)
def get_label_anno(self, tracking_results):
annotations = {}
annotations.update(
{
"name": [],
"truncated": [],
"occluded": [],
"alpha": [],
"bbox": [],
"dimensions": [],
"location": [],
"rotation_y": [],
"score": [],
}
)
ind = 0
for gt in tracking_results:
name = gt['tracking_name']
truncated = -1
occluded = -1
alpha = -1
bbox = [-1, -1, -1, -1]
dimensions = np.array([float(dim) for dim in gt['size']])
location = np.array([float(loc) for loc in gt['translation']])
rotation_y = float(gt['rotation'])
score = float(gt['tracking_score'])
annotations['name'].append(name)
annotations['truncated'].append(truncated)
annotations['occluded'].append(occluded)
annotations['alpha'].append(alpha)
annotations['bbox'].append(bbox)
annotations['dimensions'].append(dimensions)
annotations['location'].append(location)
annotations['rotation_y'].append(rotation_y)
annotations['score'].append(score)
annotations['name'] = np.array(annotations['name'])
        annotations['truncated'] = np.array(annotations['truncated'])
#!/usr/bin/python
import sys
import textadapter
import unittest
from .generate import (generate_dataset, IntIter,
MissingValuesIter, FixedWidthIter)
import numpy as np
from numpy.testing import assert_array_equal
import gzip
import os
import io
from six import StringIO
class TestTextAdapter(unittest.TestCase):
num_records = 100000
def assert_equality(self, left, right):
try:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
self.assert_array_equal(left, right)
else:
self.assertTrue(left == right)
except AssertionError:
raise AssertionError('FAIL: {0} != {1}'.format(left, right))
# Basic parsing tests
def test_string_parsing(self):
data = StringIO('1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
data = io.StringIO(u'1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
data = io.BytesIO(b'1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
# basic utf_8 tests
def test_utf8_parsing(self):
# test single byte character
data = io.BytesIO(u'1,2,\u0033'.encode('utf_8'))
adapter = textadapter.text_adapter(data, field_names=False)
expected = np.array([('1', '2', '3')], dtype='u8,u8,u8')
assert_array_equal(adapter[:], expected)
# test multibyte character
data = io.BytesIO(u'1,2,\u2092'.encode('utf_8'))
adapter = textadapter.text_adapter(data, field_names=False)
expected = np.array([('1', '2', u'\u2092')], dtype='u8,u8,O')
assert_array_equal(adapter[:], expected)
def test_no_whitespace_stripping(self):
data = StringIO('1 ,2 ,3 \n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('1 ', '2 ', '3 ')], dtype='S3,S3,S3'))
data = StringIO(' 1, 2, 3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([(' 1', ' 2', ' 3')], dtype='S3,S3,S3'))
data = StringIO(' 1 , 2 , 3 \n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([(' 1 ', ' 2 ', ' 3 ')], dtype='S5,S5,S5'))
data = StringIO('\t1\t,\t2\t,\t3\t\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('\t1\t', '\t2\t', '\t3\t')], dtype='S3,S3,S3'))
def test_quoted_whitespace(self):
data = StringIO('"1 ","2 ","3 "\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('1 ', '2 ', '3 ')], dtype='S3,S3,S3'))
data = StringIO('"\t1\t"\t"\t2\t"\t"\t3\t"\n')
adapter = textadapter.text_adapter(data, field_names=False, delimiter='\t')
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('\t1\t', '\t2\t', '\t3\t')], dtype='S3,S3,S3'))
def test_fixed_simple(self):
# TODO: fix this test on 32-bit and on Windows
if tuple.__itemsize__ == 4:
# This test does not work on 32-bit, so we skip it
return
if sys.platform == 'win32':
# This test does not work on Windows
return
data = StringIO(" 1 2 3\n 4 5 67\n890123 4")
adapter = textadapter.FixedWidthTextAdapter(data, 3, infer_types=False, field_names=False)
adapter.set_field_types({0:'i', 1:'i', 2:'i'})
control = np.array([(1, 2, 3), (4, 5, 67), (890, 123, 4)], dtype='i,i,i')
assert_array_equal(adapter[:], control)
def test_spaces_around_numeric_values(self):
data = StringIO(' 1 , -2 , 3.3 , -4.4 \n 5 , -6 , 7.7 , -8.8 ')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'u4', 1:'i8', 2:'f4', 3:'f8'})
array = adapter[:]
control = np.array([(1,-2,3.3,-4.4), (5,-6,7.7,-8.8)], dtype='u4,i8,f4,f8')
assert_array_equal(array, control)
def test_slicing(self):
data = StringIO()
generate_dataset(data, IntIter(), ',', self.num_records)
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
assert_array_equal(adapter[0], np.array([(0, 1, 2, 3, 4)], dtype='u4,u4,u4,u4,u4'))
expected_values = [((self.num_records-1)*5)+x for x in range(5)]
self.assert_equality(adapter[self.num_records-1].item(), tuple(expected_values))
#adapter.create_index()
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
self.assert_equality(adapter['f0'][0].item(), (0,))
self.assert_equality(adapter['f4'][1].item(), (9,))
#self.assert_equality(adapter[self.num_records-1]['f4'], (self.num_records*5)-1)
array = adapter[:]
record = [x for x in range(0, 5)]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[:-1]
record = [x for x in range(0, 5)]
self.assert_equality(array.size, self.num_records-1)
for i in range(0, self.num_records-1):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[0:10]
self.assert_equality(array.size, 10)
record = [x for x in range(0, 5)]
for i in range(0, 10):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[1:]
self.assert_equality(array.size, self.num_records-1)
record = [x for x in range(5, 10)]
for i in range(0, self.num_records-1):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[0:10:2]
self.assert_equality(array.size, 5)
record = [x for x in range(0, 5)]
for i in range(0, 5):
self.assert_equality(array[i].item(), tuple(record))
record = [x+10 for x in record]
array = adapter[['f0', 'f4']][:]
record = [0, 4]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
adapter.field_filter = [0, 'f4']
array = adapter[:]
record = [0, 4]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
adapter.field_filter = None
array = adapter[:]
record = [0, 1, 2, 3, 4]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
try:
adapter[self.num_records]
except textadapter.AdapterIndexError:
pass
else:
self.fail('AdaperIndexError not thrown')
try:
adapter[0:self.num_records+1]
except textadapter.AdapterIndexError:
pass
else:
self.fail('AdaperIndexError not thrown')
def test_converters(self):
data = StringIO()
generate_dataset(data, IntIter(), ',', self.num_records)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False)
#adapter.set_field_types({0:'u4', 1:'u4', 2:'u4', 3:'u4', 4:'u4'})
def increment(input_str):
return int(input_str) + 1
def double(input_str):
return int(input_str) + int(input_str)
if sys.platform == 'win32' and tuple.__itemsize__ == 8:
            # TODO: there are problems below on 64-bit Windows, I get
            # OverflowError: can't convert negative value to unsigned PY_LONG_LONG
return
adapter.set_converter(0, increment)
adapter.set_converter('f1', double)
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [1, 2, 2, 3, 4]
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record[0] += 5
record[1] = (10 * (i+1)) + 2
record[2] += 5
record[3] += 5
record[4] += 5
def test_missing_fill_values(self):
data = StringIO()
generate_dataset(data, MissingValuesIter(), ',', self.num_records)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
adapter.set_field_types({'f0':'u4', 1:'u4', 2:'u4', 3:'u4', 'f4':'u4'})
adapter.set_missing_values({0:['NA', 'NaN'], 'f4':['xx','inf']})
adapter.set_fill_values({0:99, 4:999})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [x for x in range(0, 5)]
for i in range(0, self.num_records):
if i % 4 == 0 or i % 4 == 1:
record[0] = 99
record[4] = 999
else:
record[0] = record[1] - 1
record[4] = record[3] + 1
self.assert_equality(array[i].item(), tuple(record))
record[1] += 5
record[2] += 5
record[3] += 5
data.seek(0)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=True)
adapter.set_missing_values({0:['NA', 'NaN'], 4:['xx','inf']})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [x for x in range(0, 5)]
for i in range(0, self.num_records):
if i % 4 == 0 or i % 4 == 1:
record[0] = 0
record[4] = 0
else:
record[0] = record[1] - 1
record[4] = record[3] + 1
self.assert_equality(array[i].item(), tuple(record))
record[1] += 5
record[2] += 5
record[3] += 5
# Test missing field
data = StringIO('1,2,3\n4,5\n7,8,9')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.field_types = {0:'O', 1:'O', 2:'O'}
adapter.set_fill_values({0:np.nan, 1:np.nan, 2:np.nan})
array = adapter[:]
# NumPy assert_array_equal no longer supports mixed O/nan types
expected = [('1','2','3'),('4','5',np.nan),('7','8','9')]
self.assert_equality(array.tolist(), expected)
def test_fixed_width(self):
data = StringIO()
generate_dataset(data, FixedWidthIter(), '', self.num_records)
adapter = textadapter.FixedWidthTextAdapter(data, [2,3,4,5,6], field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [0, 0, 0, 0, 0]
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+1 for x in record]
if record[0] == 100:
record[0] = 0
if record[1] == 1000:
record[1] = 0
if record[2] == 10000:
record[2] = 0
if record[3] == 100000:
record[3] = 0
if record[4] == 1000000:
record[4] = 0
# Test skipping blank lines
data = StringIO(' 1 2 3\n\n 4 5 6')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test comment lines
data = StringIO('# 1 2 3\n 1 2 3\n# foo\n 4 5 6')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test field names line
data = StringIO(' a b c\n 1 2 3')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test field names line as comment line
data = StringIO('# a b c\n 1 2 3')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test incomplete field names line
data = StringIO(' a\n 1 2 3')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3)],
dtype=[('a','<u8'),('f1','<u8'),('f2','<u8')]))
def test_regex(self):
data = StringIO()
generate_dataset(data, IntIter(), ',', self.num_records)
adapter = textadapter.RegexTextAdapter(data, '([0-9]*),([0-9]*),([0-9]*),([0-9]*),([0-9]*)\n', field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [x for x in range(0, 5)]
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
# Test skipping blank lines
data = StringIO('1 2 3\n\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test comment lines
data = StringIO('#1 2 3\n1 2 3\n# foo\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test field names line
data = StringIO('a b c\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(4,5,6)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test field names line as comment line
data = StringIO('#a b c\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(4,5,6)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test incomplete field names line
data = StringIO('a b\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(4,5,6)],
dtype=[('a','<u8'),('b','<u8'),('f2','<u8')]))
# Test field names line that doesn't match regex
data = StringIO('a b c\n1 2 3 4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9\s]+) ([0-9\s]+) ([0-9\s]+)', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([('1 2', '3 4', '5 6')],
dtype=[('a','O'),('b','O'),('c','O')]))
def test_index(self):
if sys.platform == 'win32':
# TODO: this test fails on Windows because of file lock problems
return
num_records = 100000
expected_values = [((num_records-1)*5) + x for x in range(5)]
data = StringIO()
generate_dataset(data, IntIter(), ',', num_records)
# test explicit index building
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.create_index()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test implicitly creating disk index on the fly
if os.path.exists('test.idx'):
os.remove('test.idx')
data.seek(0)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.to_array()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
adapter.close()
# test loading disk index
data.seek(0)
adapter2 = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
adapter2.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
self.assert_equality(adapter2[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter2[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter2[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter2[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter2[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter2[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter2[-1].item(), tuple(expected_values))
adapter.close()
os.remove('test.idx')
def test_gzip_index(self):
num_records = 1000000
data = StringIO()
generate_dataset(data, IntIter(), ',', num_records)
#if sys.version > '3':
if True:
dataz = io.BytesIO()
else:
dataz = StringIO()
gzip_output = gzip.GzipFile(fileobj=dataz, mode='wb')
#if sys.version > '3':
if True:
gzip_output.write(data.getvalue().encode('utf8'))
else:
gzip_output.write(data.getvalue())
gzip_output.close()
dataz.seek(0)
# test explicit index building
adapter = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.create_index()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[100000].item(), tuple([(100000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test 'trouble' records that have caused crashes in the past
self.assert_equality(adapter[290000].item(), tuple([(290000*5) + x for x in range(5)]))
self.assert_equality(adapter[818000].item(), tuple([(818000*5) + x for x in range(5)]))
# test implicitly creating disk index on the fly
# JNB: not implemented yet
'''adapter = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False, indexing=True, index_filename='test.idx')
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.to_array()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[100000].item(), tuple([(100000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test 'trouble' records that have caused crashes in the past
self.assert_equality(adapter[290000].item(), tuple([(290000*5) + x for x in range(5)]))
self.assert_equality(adapter[818000].item(), tuple([(818000*5) + x for x in range(5)]))
# test loading disk index
adapter2 = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False, indexing=True, index_filename='test.idx')
adapter2.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
self.assert_equality(adapter2[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter2[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter2[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter2[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter2[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter2[100000].item(), tuple([(100000*5) + x for x in range(5)]))
self.assert_equality(adapter2[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test 'trouble' records that have caused crashes in the past
self.assert_equality(adapter2[290000].item(), tuple([(290000*5) + x for x in range(5)]))
self.assert_equality(adapter2[818000].item(), tuple([(818000*5) + x for x in range(5)]))
os.remove('test.idx')'''
def test_header_footer(self):
data = StringIO('0,1,2,3,4\n5,6,7,8,9\n10,11,12,13,14')
adapter = textadapter.text_adapter(data, header=1, field_names=False)
adapter.field_types = dict(zip(range(5), ['u4']*5))
assert_array_equal(adapter[:], np.array([(5,6,7,8,9), (10,11,12,13,14)],
dtype='u4,u4,u4,u4,u4'))
data.seek(0)
adapter = textadapter.text_adapter(data, header=2, field_names=False)
adapter.field_types = dict(zip(range(5), ['u4']*5))
assert_array_equal(adapter[:], np.array([(10,11,12,13,14)],
dtype='u4,u4,u4,u4,u4'))
data.seek(0)
adapter = textadapter.text_adapter(data, header=1, field_names=True)
adapter.field_types = dict(zip(range(5), ['u4']*5))
assert_array_equal(adapter[:], np.array([(10,11,12,13,14)],
dtype=[('5','u4'),('6','u4'),('7','u4'),('8','u4'),('9','u4')]))
def test_delimiter(self):
data = StringIO('1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
data = StringIO('1 2 3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
data = StringIO('1\t2\t3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
data = StringIO('1x2x3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
# Test no delimiter in single field csv data
data = StringIO('aaa\nbbb\nccc')
array = textadapter.text_adapter(data, field_names=False, delimiter=None)[:]
assert_array_equal(array, np.array([('aaa',), ('bbb',), ('ccc',)], dtype=[('f0', 'O')]))
def test_auto_type_inference(self):
data = StringIO('0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19')
adapter = textadapter.text_adapter(data, field_names=False, infer_types=True)
array = adapter.to_array()
self.assert_equality(array.dtype.fields['f0'][0], np.dtype('float64'))
self.assert_equality(array.dtype.fields['f1'][0], np.dtype('uint64'))
self.assert_equality(array.dtype.fields['f2'][0], np.dtype('O'))
self.assert_equality(array.dtype.fields['f3'][0], np.dtype('uint64'))
        self.assert_equality(array.dtype.fields['f4'][0], np.dtype('O'))
import numpy as np
from neuron import neuron
import random
from recep_field import rf
from spike_train import encode
from parameters import param as par
from weight_initialization import learned_weights_x, learned_weights_o, learned_weights_synapse
import imageio
#Parameters
global time, T, dt, t_back, t_fore, w_min
time = np.arange(1, par.T+1, 1)
layer2 = []
# creating the hidden layer of neurons
for i in range(par.n):
a = neuron()
layer2.append(a)
#synapse matrix
synapse = np.zeros((par.n,par.m))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# NAME: RMpeakfit_3D.py #
# #
# PURPOSE: Fit peak of RM spectra, for every pixel in 3D FDF cube. #
#
# Initial version: <NAME>, Dec 2020
"""
import sys
import os
import numpy as np
import astropy.io.fits as pf
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_misc import interp_images,progress
from RMutils.util_RM import fits_make_lin_axis
from RMtools_3D.do_RMsynth_3D import readFitsCube, readFreqFile
C = 2.997924538e8 # Speed of light [m/s]
def pixelwise_peak_fitting(FDF, phiArr, fwhmRMSF,lamSqArr_m2, lam0Sq,
product_list,noiseArr=None,stokesIcube=None):
"""
Performs the 1D FDF peak fitting used in RMsynth/RMclean_1D, pixelwise on
all pixels in a 3D FDF cube.
Inputs:
FDF: FDF cube (3D array). This is assumed to be in astropy axis ordering
(Phi, dec, ra)
phiArr: (1D) array of phi values
fwhmRMSF: 2D array of RMSF FWHM values
lamSqArr_m2: 1D array of channel lambda^2 values.
lam0Sq: scalar value for lambda^2_0, the reference wavelength squared.
product_list: list containing the names of the fitting products to save.
    noiseArr: 2D array of theoretical noise values. If not supplied, the
peak fitting will default to using the measured noise.
Outputs: dictionary of 2D maps, 1 per fit output
"""
#FDF: output by synth3d or clean3d
#phiArr: can be generated from FDF cube
#fwhm: 2D map produced synth3D
#dFDFth: not currently produced (default mode not to input noise!)
# If not present, measure_FDF_parms uses the corMAD noise.
#
#lamSqArr is only needed for computing errors in derotated angles
# This could be compressed to a map or single value from RMsynth?
#lam0Sq is necessary for de-rotation
map_size=FDF.shape[1:]
#Create pixel location arrays:
xarr,yarr=np.meshgrid(range(map_size[0]),range(map_size[1]))
xarr=xarr.ravel()
yarr=yarr.ravel()
#Create empty maps:
map_dict={}
for parameter in product_list:
map_dict[parameter]=np.zeros(map_size)
freqArr_Hz=C/np.sqrt(lamSqArr_m2)
    freq0_Hz=C/np.sqrt(lam0Sq)
import bpy
import numpy as np
from PIL import Image
class CarModelViewToImage():
# def __init__:
# self.camera_ = None
# self.image_folder_ = None
# self.car_width_ = 0
# self.car_length_ = 0
# self.viewport_width_ = 0
# self.viewport_height_ = 0
# self.stride_ = 0
# self.stride_radians_ = 0
# self.car_ = None
# self.scene_length_ = 0
# self.scene_height_ = 0
# self.light_ctr_ = None
def init(self, info):
"""
info: {
"car_width" : float,
"car_length": float,
"viewport_width" : float,
"viewport_height" : float,
"image_folder" : string
}
"""
# get base information
self.car_width_ = info["car_width"]
self.car_length_ = info["car_length"]
self.viewport_width_ = info["viewport_width"]
self.viewport_height_ = info["viewport_height"]
self.image_folder_ = info["image_folder"]
self.scene_length_ = self.car_length_ * 2
self.scene_height_ = self.car_length_
bpy.context.scene.render.resolution_x = self.viewport_width_
bpy.context.scene.render.resolution_y = self.viewport_height_
bpy.context.scene.render.filepath = self.image_folder_
# resize model and light
# save model dimensions and location
self.car_ = bpy.data.objects["car"]
# save light location
self.light_ctr_ = [bpy.data.objects["left_light"],
bpy.data.objects["right_light"], bpy.data.objects["top_light"]]
# move model and light
offset = self.car_.location.copy()
self.car_.location -= offset
for l in self.light_ctr_:
l.location -= offset
# calculate prop from length and resize
car_length_now = max(self.car_.dimensions)
scale_size = self.car_length_ / car_length_now
self.car_.scale *= scale_size
for l in self.light_ctr_:
l.location *= scale_size
l.scale *= scale_size
# set camera
bpy.ops.object.camera_add()
self.camera_ = bpy.data.objects["Camera"]
# set camera base info
self.camera_.data.lens_unit = "FOV"
self.camera_.data.angle = np.radians(90)
self.camera_.data.clip_start = 0.1
self.camera_.data.clip_end = self.scene_length_ * 2
# set camera constraint
bpy.ops.object.constraint_add(type="TRACK_TO")
bpy.context.object.constraints["Track To"].up_axis = 'UP_Y'
bpy.context.object.constraints["Track To"].track_axis = 'TRACK_NEGATIVE_Z'
bpy.context.object.constraints["Track To"].target = self.car_
bpy.context.object.constraints["Track To"].use_target_z = True
# set render Node
self.scene_ = bpy.context.scene
self.scene_.use_nodes = True
self.tree_ = self.scene_.node_tree
self.links_ = self.tree_.links
# clear default nodes
for n in self.tree_.nodes:
self.tree_.nodes.remove(n)
self.render_layer_ = self.tree_.nodes.new('CompositorNodeRLayers')
self.viewer_image_ = self.tree_.nodes.new('CompositorNodeViewer')
self.viewer_image_.use_alpha = False
def set_camera_pos(self, x, y, z=None):
        # Compute the real coordinate (clamped to [-1, 1])
        real_x = np.clip(x, -1, 1)
"""
Run experiments with several segmentation techniques for instance segmentation
Require installation of Morph. Snakes - https://github.com/Borda/morph-snakes ::
pip install --user git+https://github.com/Borda/morph-snakes.git
Sample usage::
python run_ovary_egg-segmentation.py \
-list data_images/drosophila_ovary_slice/list_imgs-segm-center-points.csv \
-out results -n ovary_slices --nb_workers 1 \
-m ellipse_moments \
ellipse_ransac_mmt \
ellipse_ransac_crit \
GC_pixels-large \
GC_pixels-shape \
GC_slic-shape \
rg2sp_greedy-mixture \
rg2sp_GC-mixture \
watershed_morph
Copyright (C) 2016-2017 <NAME> <<EMAIL>>
"""
import os
import sys
import time
import argparse
import logging
import pickle
from functools import partial
import matplotlib
if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
print('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage import segmentation, morphology
from skimage import measure, draw
# from sklearn.externals import joblib
# from sklearn import metrics, cross_validation
from skimage.measure.fit import EllipseModel
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
import imsegm.utilities.data_io as tl_data
import imsegm.utilities.experiments as tl_expt
import imsegm.utilities.drawing as tl_visu
import imsegm.superpixels as seg_spx
import imsegm.region_growing as seg_rg
import imsegm.ellipse_fitting as ell_fit
from morphsnakes import morphsnakes, multi_snakes
# from libs import chanvese
NB_WORKERS = tl_expt.nb_workers(0.8)
NAME_EXPERIMENT = 'experiment_egg-segment'
TYPE_LOAD_IMAGE = '2d_struct'
DIR_VISUAL_POSIX = '___visu'
DIR_CENTRE_POSIX = '___centres'
DIR_DEBUG_POSIX = '___debug'
# setting default file names
NAME_FIG_LABEL_HISTO = 'fig_histo_annot_segments.png'
NAME_CSV_SEGM_STAT_SLIC_ANNOT = 'statistic_segm_slic_annot.csv'
NAME_CSV_SEGM_STAT_RESULT = 'statistic_segm_results.csv'
NAME_CSV_SEGM_STAT_RESULT_GC = 'statistic_segm_results_gc.csv'
EACH_UNIQUE_EXPERIMENT = False
INIT_MASK_BORDER = 50.
# minimal diameter for estimating ellipse
MIN_ELLIPSE_DAIM = 25.
# subfigure size for experting images
MAX_FIGURE_SIZE = 14
# threshold if two segmentation overlap more, keep just one of them
SEGM_OVERLAP = 0.5
# paramters for SLIC segmentation
SLIC_SIZE = 40
SLIC_REGUL = 0.3
# Region Growing configuration
DEBUG_EXPORT = False
RG2SP_THRESHOLDS = { # thresholds for updating between iterations
'centre': 20,
'shift': 10,
'volume': 0.05,
'centre_init': 50
}
COLUMNS_ELLIPSE = ('xc', 'yc', 'a', 'b', 'theta')
PATH_DATA = tl_data.update_path('data_images', absolute=True)
PATH_IMAGES = os.path.join(PATH_DATA, 'drosophila_ovary_slice')
# sample segmentation methods
LIST_SAMPLE_METHODS = (
'ellipse_moments', 'ellipse_ransac_mmt', 'ellipse_ransac_crit',
'GC_pixels-large', 'GC_pixels-shape', 'GC_slic-large', 'GC_slic-shape',
'rg2sp_greedy-mixture', 'rg2sp_GC-mixture',
'watershed_morph'
)
# default segmentation configuration
SEGM_PARAMS = {
# ovary labels: background, funicular cells, nurse cells, cytoplasm
'tab-proba_ellipse': [0.01, 0.95, 0.95, 0.85],
'tab-proba_graphcut': [0.01, 0.6, 0.99, 0.75],
'tab-proba_RG2SP': [0.01, 0.6, 0.95, 0.75],
'path_single-model': os.path.join(PATH_DATA, 'RG2SP_eggs_single-model.pkl'),
'path_multi-models': os.path.join(PATH_DATA, 'RG2SP_eggs_mixture-model.pkl'),
'gc-pixel_regul': 3.,
'gc-slic_regul': 2.,
'RG2SP-shape': 5.,
'RG2SP-pairwise': 3.,
'RG2SP-swap': True,
'label_trans': [0.1, 0.03],
'overlap_theshold': SEGM_OVERLAP,
'RG2SP_theshold': RG2SP_THRESHOLDS,
'slic_size': SLIC_SIZE,
'slic_regul': SLIC_REGUL,
'path_list': os.path.join(PATH_IMAGES,
'list_imgs-segm-center-points_short.csv'),
'path_out': tl_data.update_path('results', absolute=True)
}
def arg_parse_params(params):
"""
SEE: https://docs.python.org/3/library/argparse.html
:return {str: str}:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-list', '--path_list', type=str, required=False,
help='path to the list of image',
default=params['path_list'])
parser.add_argument('-out', '--path_out', type=str, required=False,
help='path to the output directory',
default=params['path_out'])
parser.add_argument('-n', '--name', type=str, required=False,
help='name of the experiment', default='ovary')
parser.add_argument('-cfg', '--path_config', type=str, required=False,
help='path to the configuration', default=None)
parser.add_argument('--nb_workers', type=int, required=False, default=NB_WORKERS,
help='number of processes in parallel')
parser.add_argument('-m', '--methods', type=str, required=False, nargs='+',
help='list of segment. methods', default=None)
arg_params = vars(parser.parse_args())
params.update(arg_params)
if not isinstance(arg_params['path_config'], str) \
or arg_params['path_config'].lower() == 'none':
params['path_config'] = ''
else:
params['path_config'] = tl_data.update_path(params['path_config'])
assert os.path.isfile(params['path_config']), \
'missing file: %s' % params['path_config']
ext = os.path.splitext(params['path_config'])[-1]
assert (ext == '.yaml' or ext == '.yml'), \
'"%s" should be YAML file' % os.path.basename(params['path_config'])
data = tl_expt.load_config_yaml(params['path_config'])
params.update(data)
params.update(arg_params)
for k in (k for k in arg_params if 'path' in k):
if not arg_params[k]:
continue
params[k] = tl_data.update_path(arg_params[k], absolute=True)
assert os.path.exists(params[k]), 'missing: %s' % params[k]
# load saved configuration
logging.info('ARG PARAMETERS: \n %r', params)
return params
def load_image(path_img, img_type=TYPE_LOAD_IMAGE):
""" load image from given path according specification
:param str path_img:
:param str img_type:
:return ndarray:
"""
path_img = os.path.abspath(os.path.expanduser(path_img))
assert os.path.isfile(path_img), 'missing: "%s"' % path_img
if img_type == 'segm':
img = tl_data.io_imread(path_img)
elif img_type == '2d_struct':
img, _ = tl_data.load_img_double_band_split(path_img)
assert img.ndim == 2, 'image can be only single color'
else:
logging.error('not supported loading img_type: %s', img_type)
img = tl_data.io_imread(path_img)
logging.debug('image shape: %r, value range %f - %f', img.shape,
img.min(), img.max())
return img
def path_out_img(params, dir_name, name):
return os.path.join(params['path_exp'], dir_name, name + '.png')
def export_draw_image_segm(path_fig, img, segm=None, segm_obj=None, centers=None):
""" draw and export visualisation of image and segmentation
:param str path_fig: path to the exported figure
:param ndarray img:
:param ndarray segm:
:param ndarray segm_obj:
:param ndarray centers:
"""
size = np.array(img.shape[:2][::-1], dtype=float)
fig, ax = plt.subplots(figsize=(size / size.max() * MAX_FIGURE_SIZE))
ax.imshow(img, alpha=1., cmap=plt.cm.Greys)
if segm is not None:
ax.contour(segm)
if segm_obj is not None:
ax.imshow(segm_obj, alpha=0.1)
assert len(np.unique(segm_obj)) < 1e2, \
'too many labeled objects - %i' % len(np.unique(segm_obj))
ax.contour(segm_obj, levels=np.unique(segm_obj).tolist(),
cmap=plt.cm.jet_r, linewidths=(10, ))
if centers is not None:
ax.plot(np.array(centers)[:, 1], np.array(centers)[:, 0], 'o', color='r')
fig = tl_visu.figure_image_adjustment(fig, img.shape)
fig.savefig(path_fig)
plt.close(fig)
def segment_watershed(seg, centers, post_morph=False):
""" perform watershed segmentation on input imsegm
and optionally run some postprocessing using morphological operations
:param ndarray seg: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:param bool post_morph: apply morphological postprocessing
:return ndarray, [[int, int]]: resulting segmentation, updated centres
"""
logging.debug('segment: watershed...')
seg_binary = (seg > 0)
seg_binary = ndimage.morphology.binary_fill_holes(seg_binary)
# thr_area = int(0.05 * np.sum(seg_binary))
# seg_binary = morphology.remove_small_holes(seg_binary, min_size=thr_area)
distance = ndimage.distance_transform_edt(seg_binary)
markers = np.zeros_like(seg)
for i, pos in enumerate(centers):
markers[int(pos[0]), int(pos[1])] = i + 1
segm = morphology.watershed(-distance, markers, mask=seg_binary)
# if morphological postprocessing was not selected, ends here
if not post_morph:
return segm, centers, None
segm_clean = np.zeros_like(segm)
for lb in range(1, np.max(segm) + 1):
seg_lb = (segm == lb)
# some morphology operartion for cleaning
seg_lb = morphology.binary_closing(seg_lb, selem=morphology.disk(5))
seg_lb = ndimage.morphology.binary_fill_holes(seg_lb)
# thr_area = int(0.15 * np.sum(seg_lb))
# seg_lb = morphology.remove_small_holes(seg_lb, min_size=thr_area)
seg_lb = morphology.binary_opening(seg_lb, selem=morphology.disk(15))
segm_clean[seg_lb] = lb
return segm_clean, centers, None
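# Hedged toy example for segment_watershed(): two overlapping discs are split
# into separate labels by seeding one centre in each.  Uses only numpy and
# skimage.draw, which are already imported above; the geometry is synthetic.
def _demo_segment_watershed():
    seg = np.zeros((60, 100), dtype=int)
    rr1, cc1 = draw.circle(30, 35, 20, shape=seg.shape)
    rr2, cc2 = draw.circle(30, 65, 20, shape=seg.shape)
    seg[rr1, cc1] = 1
    seg[rr2, cc2] = 1
    segm, _, _ = segment_watershed(seg, [(30, 35), (30, 65)], post_morph=False)
    return np.unique(segm)               # expected: array([0, 1, 2])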
def create_circle_center(img_shape, centers, radius=10):
""" create initialisation from centres as small circles
:param img_shape:
:param [[int, int]] centers:
:param int radius:
:return:
"""
mask_circle = np.zeros(img_shape, dtype=int)
mask_perimeter = np.zeros(img_shape, dtype=int)
center_circles = list()
for i, pos in enumerate(centers):
rr, cc = draw.circle(int(pos[0]), int(pos[1]), radius,
shape=img_shape[:2])
mask_circle[rr, cc] = i + 1
rr, cc = draw.circle_perimeter(int(pos[0]), int(pos[1]), radius,
shape=img_shape[:2])
mask_perimeter[rr, cc] = i + 1
center_circles.append(np.array([rr, cc]).transpose())
return center_circles, mask_circle, mask_perimeter
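# Hedged illustration of create_circle_center(): one labelled disc mask and one
# labelled perimeter mask per centre, plus the perimeter coordinates themselves.
def _demo_create_circle_center():
    circles, mask_circle, mask_perim = create_circle_center((50, 50), [(25, 25)], radius=10)
    return len(circles), mask_circle.max(), mask_perim.max()    # (1, 1, 1)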
def segment_active_contour(img, centers):
""" segmentation using acive contours
:param ndarray img: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
logging.debug('segment: active_contour...')
# http://scikit-image.org/docs/dev/auto_examples/edges/plot_active_contours.html
segm = np.zeros(img.shape[:2])
img_smooth = ndimage.filters.gaussian_filter(img, 5)
center_circles, _, _ = create_circle_center(img.shape[:2], centers)
for i, snake in enumerate(center_circles):
snake = segmentation.active_contour(img_smooth, snake.astype(float),
alpha=0.015, beta=10, gamma=0.001,
w_line=0.0, w_edge=1.0,
max_px_move=1.0,
max_iterations=2500,
convergence=0.2)
        seg = np.zeros(segm.shape, dtype=bool)
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
from dask.utils_test import hlg_layer
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if not SKIP_PYARROW_LE:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
fn, engine=write_engine, compute=False, write_metadata_file=False
).compute()
files = os.listdir(fn)
assert "_metadata" not in files
# Fastparquet doesn't currently handle a directory without "_metadata"
read_df = dd.read_parquet(
os.path.join(fn, "*.parquet"),
index=["a"],
engine=read_engine,
gather_statistics=True,
)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"),
engine=read_engine,
index="myindex", # Must specify index without _metadata
gather_statistics=True,
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_gather_statistics_false(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(
tmp_path,
engine=read_engine,
index=False,
gather_statistics=False,
)
assert_eq(ddf, ddf2, check_index=False, check_divisions=False)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
if write_engine == read_engine == "fastparquet" and os.name == "nt":
# fastparquet or dask is not normalizing filepaths correctly on
# windows.
pytest.skip("filepath bug.")
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted(
(
os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith("_metadata")
),
key=natural_sort_key,
)
ddf2 = dd.read_parquet(
files, engine=read_engine, index="myindex", gather_statistics=True
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_columns_auto_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# XFAIL, auto index selection no longer supported (for simplicity)
# ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=["x"], engine=read_engine), ddf[["x"]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=["x"], engine=read_engine, gather_statistics=False),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
@write_read_engines()
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
# ### Empty columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, index="myindex"), ddf[[]]
)
# No divisions
assert_eq(
dd.read_parquet(
fn, columns=[], engine=read_engine, index="myindex", gather_statistics=False
),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x"], engine=read_engine),
ddf[["x"]],
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x"],
engine=read_engine,
gather_statistics=False,
),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x", "y"], engine=read_engine),
ddf,
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x", "y"],
engine=read_engine,
gather_statistics=False,
),
ddf.clear_divisions(),
check_divisions=True,
)
def test_nonsense_column(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
with pytest.raises((ValueError, KeyError)):
dd.read_parquet(fn, columns=["nonesense"], engine=engine)
with pytest.raises((Exception, KeyError)):
dd.read_parquet(fn, columns=["nonesense"] + list(ddf.columns), engine=engine)
@write_read_engines()
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(
dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),
ddf2,
check_index=False,
check_divisions=True,
)
# Two columns, none as index
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["x", "y"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["x", "y"]],
check_index=False,
check_divisions=True,
)
# One column and one index, all as columns
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["myindex", "x"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["myindex", "x"]],
check_index=False,
check_divisions=True,
)
@write_read_engines()
def test_gather_statistics_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
df = dd.read_parquet(fn, engine=read_engine, index=False)
assert df.index.name is None
assert not df.known_divisions
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), "test.parquet")
index = pd.MultiIndex.from_arrays(
[np.arange(10), np.arange(10) + 1], names=["x0", "x1"]
)
df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"], index=index)
df2 = df.reset_index(drop=False)
if engine == "fastparquet":
fastparquet.write(fn, df.reset_index(), write_index=False)
else:
pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)
ddf = dd.read_parquet(fn, engine=engine, index=index.names)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns="a", engine=engine, index=index.names)
assert_eq(d, df["a"])
d = dd.read_parquet(fn, index=["a", "b"], columns=["x0", "x1"], engine=engine)
assert_eq(d, df2.set_index(["a", "b"])[["x0", "x1"]])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, columns=["b"], index=["a"], engine=engine)
assert_eq(d, df2.set_index("a")[["b"]])
d = dd.read_parquet(fn, columns=["a", "b"], index=["x0"], engine=engine)
assert_eq(d, df2.set_index("x0")[["a", "b"]])
# Just columns
d = dd.read_parquet(fn, columns=["x0", "a"], index=["x1"], engine=engine)
assert_eq(d, df2.set_index("x1")[["x0", "a"]])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=["x0", "b"], engine=engine)
assert_eq(d, df2[["x0", "b"]])
for index in ["x1", "b"]:
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
assert_eq(d, df2.set_index(index)[["x0", "a"]])
# Columns and index intersect
for index in ["a", "x0"]:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
# Series output
for ind, col, sol_df in [
("x1", "x0", df2.set_index("x1")),
(False, "b", df2),
(False, "x0", df2[["x0"]]),
("a", "x0", df2.set_index("a")[["x0"]]),
("a", "b", df2.set_index("a")),
]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines()
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=["x"], index="myindex", engine=engine)
assert_eq(ddf[["x"]], ddf2)
ddf2 = dd.read_parquet(fn, columns="x", index="myindex", engine=engine)
assert_eq(ddf.x, ddf2)
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert set(read(fn).dask) == set(read(fn).dask)
assert set(read(fn).dask) != set(read(fn, columns=["x"]).dask)
assert set(read(fn, columns=("x",)).dask) == set(read(fn, columns=["x"]).dask)
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join("test.parquet"))
dfp = df.copy()
dfp.index.name = "index"
dfp.to_parquet(
fn, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf = dd.read_parquet(fn, index="index", engine=read_engine)
assert_eq(dfp, ddf)
@write_read_engines()
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({"x": ["a", "b", "c"] * 100}, dtype="category")
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories="x", engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2 = dd.read_parquet(tmp, categories=["x"], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
# autocat
if read_engine == "fastparquet":
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2.loc[:1000].compute()
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x.compute()).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
ddf2.to_parquet(tmp, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp_path = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp_path, append=True, engine=engine)
ddf2.to_parquet(tmp_path, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir, engine):
tmp = str(tmpdir)
df0 = pd.DataFrame(
{
"lat": np.arange(0, 10, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(100, 110, dtype="int64"),
}
)
df0.index.name = "index"
df1 = pd.DataFrame(
{
"lat": np.arange(10, 20, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(120, 130, dtype="int64"),
}
)
df1.index.name = "index"
# Check that nullable dtypes work
# (see: https://github.com/dask/dask/issues/8373)
df0["lat"] = df0["lat"].astype("Int64")
df1["lat"].iloc[0] = np.nan
df1["lat"] = df1["lat"].astype("Int64")
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=["lon"], engine=engine)
dd.to_parquet(
dd_df1,
tmp,
partition_on=["lon"],
append=True,
ignore_divisions=True,
engine=engine,
)
out = dd.read_parquet(
tmp, engine=engine, index="index", gather_statistics=True
).compute()
# convert categorical to plain int just to pass assert
out["lon"] = out.lon.astype("int64")
# sort required since partitioning breaks index order
assert_eq(
out.sort_values("value"), pd.concat([df0, df1])[out.columns], check_index=False
)
def test_partition_on_cats(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
@PYARROW_MARK
@pytest.mark.parametrize("meta", [False, True])
@pytest.mark.parametrize("stats", [False, True])
def test_partition_on_cats_pyarrow(tmpdir, stats, meta):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine="pyarrow", write_metadata_file=meta)
df = dd.read_parquet(tmp, engine="pyarrow", gather_statistics=stats)
assert set(df.b.cat.categories) == {"x", "y", "z"}
def test_partition_on_cats_2(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b", "c"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
assert set(df.c.cat.categories) == {"x", "y", "z"}
df = dd.read_parquet(tmp, columns=["a", "c"], engine=engine)
assert set(df.c.cat.categories) == {"x", "y", "z"}
assert "b" not in df.columns
assert_eq(df, df.compute())
df = dd.read_parquet(tmp, index="c", engine=engine)
assert set(df.index.categories) == {"x", "y", "z"}
assert "c" not in df.columns
# series
df = dd.read_parquet(tmp, columns="b", engine=engine)
assert set(df.cat.categories) == {"x", "y", "z"}
def test_append_wo_index(tmpdir, engine):
"""Test append with write_index=False."""
tmp = str(tmpdir.join("tmp1.parquet"))
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
assert "Appended columns" in str(excinfo.value)
tmp = str(tmpdir.join("tmp2.parquet"))
ddf1.to_parquet(tmp, write_index=False, engine=engine)
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, index="f", engine=engine)
assert_eq(df.set_index("f"), ddf3)
def test_append_overlapping_divisions(tmpdir, engine):
"""Test raising of error when divisions overlapping."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended divisions" in str(excinfo.value)
ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir, engine):
"""Test raising of error when non equal columns."""
tmp = str(tmpdir)
df1 = pd.DataFrame({"i32": np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({"i64": np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({"i32": np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended columns" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, engine=engine, append=True)
assert "Appended dtypes" in str(excinfo.value)
def test_append_dict_column(tmpdir, engine):
# See: https://github.com/dask/dask/issues/7492
if engine == "fastparquet":
pytest.xfail("Fastparquet engine is missing dict-column support")
elif pa_version < parse_version("1.0.1"):
pytest.skip("PyArrow 1.0.1+ required for dict-column support.")
tmp = str(tmpdir)
dts = pd.date_range("2020-01-01", "2021-01-01")
df = pd.DataFrame(
{"value": [{"x": x} for x in range(len(dts))]},
index=dts,
)
ddf1 = dd.from_pandas(df, npartitions=1)
# Write ddf1 to tmp, and then append it again
ddf1.to_parquet(tmp, append=True, engine=engine)
ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)
# Read back all data (ddf1 + ddf1)
ddf2 = dd.read_parquet(tmp, engine=engine)
# Check computed result
expect = pd.concat([df, df])
result = ddf2.compute()
assert_eq(expect, result)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 20, 30], "c": [100, 200, 300]},
index=pd.Index([-1, -2, -3], name="myindex"),
columns=["c", "a", "b"],
)
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == "fastparquet":
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ["myindex", "c", "a", "b"]
ddf2 = dd.read_parquet(tmp, index="myindex", engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{"i32": np.arange(1000, dtype=np.int32), "f": np.arange(1000, dtype=np.float64)}
)
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp, engine=engine)
df2 = dd.read_parquet(tmp, columns=["i32", "f"], engine=engine)
assert_eq(df[["i32", "f"]], df2, check_index=False)
fns = glob.glob(os.path.join(tmp, "*.parquet"))
df2 = dd.read_parquet(fns, columns=["i32"], engine=engine).compute()
df2.sort_values("i32", inplace=True)
assert_eq(df[["i32"]], df2, check_index=False, check_divisions=False)
df3 = dd.read_parquet(tmp, columns=["f", "i32"], engine=engine)
assert_eq(df[["f", "i32"]], df3, check_index=False)
@pytest.mark.parametrize(
"df,write_kwargs,read_kwargs",
[
(pd.DataFrame({"x": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": ["c", "a", "b"]}), {}, {}),
(pd.DataFrame({"x": ["cc", "a", "bbb"]}), {}, {}),
(pd.DataFrame({"x": [b"a", b"b", b"c"]}), {"object_encoding": "bytes"}, {}),
(
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])}),
{},
{"categories": ["x"]},
),
(pd.DataFrame({"x": pd.Categorical([1, 2, 1])}), {}, {"categories": ["x"]}),
(pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"), {}, {}),
pytest.param(
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ns]"),
{},
{},
),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, UTC]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, CET]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("float32"), {}, {}),
(pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),
(pd.DataFrame({"x": [3, 1, 5]}, index=pd.Index([1, 2, 3], name="foo")), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]), {}, {}),
(pd.DataFrame({"0": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [3, 2, None]}), {}, {}),
(pd.DataFrame({"-": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({".": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({" ": [3.0, 2.0, None]}), {}, {}),
],
)
def test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):
if "x" in df and df.x.dtype == "M8[ns]" and "arrow" in engine:
pytest.xfail(reason="Parquet pyarrow v1 doesn't support nanosecond precision")
if (
"x" in df
and df.x.dtype == "M8[ns]"
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail(reason="fastparquet doesn't support nanosecond precision yet")
if (
PANDAS_GT_130
and read_kwargs.get("categories", None)
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail("https://github.com/dask/fastparquet/issues/577")
tmp = str(tmpdir)
if df.index.name is None:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
oe = write_kwargs.pop("object_encoding", None)
if oe and engine == "fastparquet":
dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)
else:
dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)
ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)
if str(ddf2.dtypes.get("x")) == "UInt16" and engine == "fastparquet":
        # fastparquet chooses to use masked type to be able to get true repr of
# 16-bit int
assert_eq(ddf.astype("UInt16"), ddf2)
else:
assert_eq(ddf, ddf2)
def test_categories(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": list("caaab")})
ddf = dd.from_pandas(df, npartitions=2)
ddf["y"] = ddf.y.astype("category")
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, categories=["y"], engine=engine)
# Shouldn't need to specify categories explicitly
ddf3 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf3, ddf2)
with pytest.raises(NotImplementedError):
ddf2.y.cat.categories
assert set(ddf2.y.compute().cat.categories) == {"a", "b", "c"}
cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()
assert cats_set.tolist() == ["a", "c", "a", "b"]
if engine == "fastparquet":
assert_eq(ddf.y, ddf2.y, check_names=False)
with pytest.raises(TypeError):
        # attempt to load as category a column that is not so encoded
ddf2 = dd.read_parquet(fn, categories=["x"], engine=engine).compute()
with pytest.raises((ValueError, FutureWarning)):
        # attempt to load an unknown column as category
ddf2 = dd.read_parquet(fn, categories=["foo"], engine=engine)
def test_categories_unnamed_index(tmpdir, engine):
# Check that we can handle an unnamed categorical index
# https://github.com/dask/dask/issues/6885
tmpdir = str(tmpdir)
df = pd.DataFrame(
data={"A": [1, 2, 3], "B": ["a", "a", "b"]}, index=["x", "y", "y"]
)
ddf = dd.from_pandas(df, npartitions=1)
ddf = ddf.categorize(columns=["B"])
ddf.to_parquet(tmpdir, engine=engine)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf.index, ddf2.index, check_divisions=False)
def test_empty_partition(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": range(10), "b": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf[ddf.a <= 5]
ddf2.to_parquet(fn, engine=engine)
ddf3 = dd.read_parquet(fn, engine=engine)
assert ddf3.npartitions < 5
sol = ddf2.compute()
assert_eq(sol, ddf3, check_names=False, check_index=False)
def test_timestamp_index(tmpdir, engine):
fn = str(tmpdir)
df = dd._compat.makeTimeDataFrame()
df.index.name = "foo"
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
@PYARROW_MARK
def test_to_parquet_default_writes_nulls(tmpdir):
fn = str(tmpdir.join("test.parquet"))
df = pd.DataFrame({"c1": [1.0, np.nan, 2, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet(fn)
table = pq.read_table(fn)
assert table[1].null_count == 2
@PYARROW_LE_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):
df = pd.DataFrame(
{"partition_column": [0, 0, 1, 1], "strings": ["a", "b", None, None]}
)
ddf = dd.from_pandas(df, npartitions=2)
# In order to allow pyarrow to write an inconsistent schema,
# we need to avoid writing the _metadata file (will fail >0.17.1)
# and need to avoid schema inference (i.e. use `schema=None`)
ddf.to_parquet(
str(tmpdir),
engine="pyarrow",
partition_on=["partition_column"],
write_metadata_file=False,
schema=None,
)
# Test that schema is not validated by default
# (shouldn't raise error with legacy dataset)
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
).compute()
# Test that read fails when validate_schema=True
# Note: This fails differently for pyarrow.dataset api
with pytest.raises(ValueError) as e_info:
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
dataset={"validate_schema": True},
).compute()
    assert "Schema in partition" in str(e_info.value)
    assert "was different" in str(e_info.value)
@PYARROW_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(
tmpdir,
):
# Data types to test: strings, arrays, ints, timezone aware timestamps
in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]
out_arrays = [[0, 1, 2], [3, 4], None, None]
in_strings = ["a", "b", np.nan, np.nan]
out_strings = ["a", "b", None, None]
tstamp = pd.Timestamp(1513393355, unit="s")
in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]
out_tstamps = [
# Timestamps come out in numpy.datetime64 format
tstamp.to_datetime64(),
tstamp.to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
timezone = "US/Eastern"
tz_tstamp = pd.Timestamp(1513393355, unit="s", tz=timezone)
in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]
out_tz_tstamps = [
# Timezones do not make it through a write-read cycle.
tz_tstamp.tz_convert(None).to_datetime64(),
tz_tstamp.tz_convert(None).to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
df = pd.DataFrame(
{
"partition_column": [0, 0, 1, 1],
"arrays": in_arrays,
"strings": in_strings,
"tstamps": in_tstamps,
"tz_tstamps": in_tz_tstamps,
}
)
ddf = dd.from_pandas(df, npartitions=2)
schema = pa.schema(
[
("arrays", pa.list_(pa.int64())),
("strings", pa.string()),
("tstamps", pa.timestamp("ns")),
("tz_tstamps", pa.timestamp("ns", timezone)),
("partition_column", pa.int64()),
]
)
ddf.to_parquet(
str(tmpdir), engine="pyarrow", partition_on="partition_column", schema=schema
)
ddf_after_write = (
dd.read_parquet(str(tmpdir), engine="pyarrow", gather_statistics=False)
.compute()
.reset_index(drop=True)
)
# Check array support
arrays_after_write = ddf_after_write.arrays.values
for i in range(len(df)):
assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])
# Check datetime support
tstamps_after_write = ddf_after_write.tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tstamps_after_write[i]):
assert np.isnat(out_tstamps[i])
else:
assert tstamps_after_write[i] == out_tstamps[i]
# Check timezone aware datetime support
tz_tstamps_after_write = ddf_after_write.tz_tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tz_tstamps_after_write[i]):
assert np.isnat(out_tz_tstamps[i])
else:
assert tz_tstamps_after_write[i] == out_tz_tstamps[i]
# Check string support
assert np.array_equal(ddf_after_write.strings.values, out_strings)
# Check partition column
assert np.array_equal(ddf_after_write.partition_column, df.partition_column)
@PYARROW_MARK
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("schema", ["infer", "complex"])
def test_pyarrow_schema_inference(tmpdir, index, engine, schema):
if schema == "complex":
schema = {"index": pa.string(), "amount": pa.int64()}
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"index": ["1", "2", "3", "2", "3", "1", "4"],
"date": pd.to_datetime(
[
"2017-01-01",
"2017-01-01",
"2017-01-01",
"2017-01-02",
"2017-01-02",
"2017-01-06",
"2017-01-09",
]
),
"amount": [100, 200, 300, 400, 500, 600, 700],
},
index=range(7, 14),
)
if index:
df = dd.from_pandas(df, npartitions=2).set_index("index")
else:
df = dd.from_pandas(df, npartitions=2)
df.to_parquet(tmpdir, engine="pyarrow", schema=schema)
df_out = dd.read_parquet(tmpdir, engine=engine)
df_out.compute()
if index and engine == "fastparquet":
# Fastparquet fails to detect int64 from _metadata
df_out["amount"] = df_out["amount"].astype("int64")
# Fastparquet not handling divisions for
# pyarrow-written dataset with string index
assert_eq(df, df_out, check_divisions=False)
else:
assert_eq(df, df_out)
def test_partition_on(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
"d": np.arange(0, 100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
# Note #1: Cross-engine functionality is missing
# Note #2: The index is not preserved in pyarrow when partition_on is used
out = dd.read_parquet(
tmpdir, engine=engine, index=False, gather_statistics=False
).compute()
for val in df.a1.unique():
assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])
# Now specify the columns and allow auto-index detection
out = dd.read_parquet(tmpdir, engine=engine, columns=["d", "a2"]).compute()
for val in df.a2.unique():
assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])
def test_partition_on_duplicates(tmpdir, engine):
# https://github.com/dask/dask/issues/6445
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"data": np.random.random(size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
for _ in range(2):
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
out = dd.read_parquet(tmpdir, engine=engine).compute()
assert len(df) == len(out)
for root, dirs, files in os.walk(tmpdir):
for file in files:
assert file in (
"part.0.parquet",
"part.1.parquet",
"_common_metadata",
"_metadata",
)
@PYARROW_MARK
@pytest.mark.parametrize("partition_on", ["aa", ["aa"]])
def test_partition_on_string(tmpdir, partition_on):
tmpdir = str(tmpdir)
with dask.config.set(scheduler="single-threaded"):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"aa": np.random.choice(["A", "B", "C"], size=100),
"bb": np.random.random(size=100),
"cc": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(
tmpdir, partition_on=partition_on, write_index=False, engine="pyarrow"
)
out = dd.read_parquet(
tmpdir, index=False, gather_statistics=False, engine="pyarrow"
)
out = out.compute()
for val in df.aa.unique():
assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])
@write_read_engines()
def test_filters_categorical(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
cats = ["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]
dftest = pd.DataFrame(
{
"dummy": [1, 1, 1, 1],
"DatePart": pd.Categorical(cats, categories=cats, ordered=True),
}
)
ddftest = dd.from_pandas(dftest, npartitions=4).set_index("dummy")
ddftest.to_parquet(tmpdir, partition_on="DatePart", engine=write_engine)
ddftest_read = dd.read_parquet(
tmpdir,
index="dummy",
engine=read_engine,
filters=[(("DatePart", "<=", "2018-01-02"))],
)
assert len(ddftest_read) == 2
@write_read_engines()
def test_filters(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine=write_engine)
a = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "==", "c")])
assert b.npartitions == 1
assert (b.y == "c").all().compute()
c = dd.read_parquet(
tmp_path, engine=read_engine, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
assert_eq(c, c)
d = dd.read_parquet(
tmp_path,
engine=read_engine,
filters=[
# Select two overlapping ranges
[("x", ">", 1), ("x", "<", 6)],
[("x", ">", 3), ("x", "<", 8)],
],
)
assert d.npartitions == 3
assert ((d.x > 1) & (d.x < 8)).all().compute()
e = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", "in", (0, 9))])
assert e.npartitions == 2
assert ((e.x < 2) | (e.x > 7)).all().compute()
f = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "=", "c")])
assert f.npartitions == 1
assert len(f)
assert (f.y == "c").all().compute()
@write_read_engines()
def test_filters_v0(tmpdir, write_engine, read_engine):
if write_engine == "fastparquet" or read_engine == "fastparquet":
pytest.importorskip("fastparquet", minversion="0.3.1")
# Recent versions of pyarrow support full row-wise filtering
# (fastparquet and older pyarrow versions do not)
pyarrow_row_filtering = read_engine == "pyarrow-dataset"
fn = str(tmpdir)
df = pd.DataFrame({"at": ["ab", "aa", "ba", "da", "bb"]})
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(
fn, write_index=False, engine=write_engine
)
ddf2 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
if pyarrow_row_filtering:
assert_eq(ddf2, ddf[ddf["at"] == "aa"], check_index=False)
assert_eq(ddf3, ddf[ddf["at"] == "aa"], check_index=False)
else:
assert_eq(ddf2, ddf)
assert_eq(ddf3, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
if read_engine == "fastparquet":
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "==", "aa")])
df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "=", "aa")])
assert len(df2) > 0
assert len(df3) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
assert len(ddf2) > 0
assert len(ddf3) > 0
assert_eq(ddf2, ddf3)
def test_filtering_pyarrow_dataset(tmpdir, engine):
pytest.importorskip("pyarrow", minversion="1.0.0")
fn = str(tmpdir)
df = pd.DataFrame({"aa": range(100), "bb": ["cat", "dog"] * 50})
ddf = dd.from_pandas(df, npartitions=10)
ddf.to_parquet(fn, write_index=False, engine=engine)
# Filtered read
aa_lim = 40
bb_val = "dog"
filters = [[("aa", "<", aa_lim), ("bb", "==", bb_val)]]
ddf2 = dd.read_parquet(fn, index=False, engine="pyarrow-dataset", filters=filters)
    # Check that partitions are filtered for "aa" filter
nonempty = 0
for part in ddf[ddf["aa"] < aa_lim].partitions:
nonempty += int(len(part.compute()) > 0)
assert ddf2.npartitions == nonempty
# Check that rows are filtered for "aa" and "bb" filters
df = df[df["aa"] < aa_lim]
df = df[df["bb"] == bb_val]
assert_eq(df, ddf2.compute(), check_index=False)
def test_fiters_file_list(tmpdir, engine):
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(str(tmpdir), engine=engine)
fils = str(tmpdir.join("*.parquet"))
ddf_out = dd.read_parquet(
fils, gather_statistics=True, engine=engine, filters=[("x", ">", 3)]
)
assert ddf_out.npartitions == 3
assert_eq(df[df["x"] > 3], ddf_out.compute(), check_index=False)
    # Check that first partition gets filtered for single-path input
ddf2 = dd.read_parquet(
str(tmpdir.join("part.0.parquet")),
gather_statistics=True,
engine=engine,
filters=[("x", ">", 3)],
)
assert len(ddf2) == 0
def test_pyarrow_filter_divisions(tmpdir):
pytest.importorskip("pyarrow")
# Write simple dataset with an index that will only
# have a sorted index if certain row-groups are filtered out.
# In this case, we filter "a" <= 3 to get a sorted
# index. Otherwise, "a" is NOT monotonically increasing.
df = pd.DataFrame({"a": [0, 1, 10, 12, 2, 3, 8, 9], "b": range(8)}).set_index("a")
df.iloc[:4].to_parquet(
str(tmpdir.join("file.0.parquet")), engine="pyarrow", row_group_size=2
)
df.iloc[4:].to_parquet(
str(tmpdir.join("file.1.parquet")), engine="pyarrow", row_group_size=2
)
# Only works for ArrowDatasetEngine.
# Legacy code will not apply filters on individual row-groups
# when `split_row_groups=False`.
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=False,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=True,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
def test_divisions_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
size = 100
categoricals = []
for value in ["a", "b", "c", "d"]:
categoricals += [value] * int(size / 4)
df = pd.DataFrame(
{
"a": categoricals,
"b": np.random.random(size=size),
"c": np.random.randint(1, 5, size=size),
}
)
d = dd.from_pandas(df, npartitions=4)
# save it
d.to_parquet(tmpdir, write_index=True, partition_on=["a"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("a", "==", "b")])
# test it
expected_divisions = (25, 49)
assert out.divisions == expected_divisions
def test_divisions_are_known_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
df = pd.DataFrame(
{
"unique": [0, 0, 1, 1, 2, 2, 3, 3],
"id": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
},
index=[0, 0, 1, 1, 2, 2, 3, 3],
)
d = dd.from_pandas(df, npartitions=2)
# save it
d.to_parquet(tmpdir, partition_on=["id"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("id", "==", "id1")])
# test it
assert out.known_divisions
expected_divisions = (0, 2, 3)
assert out.divisions == expected_divisions
@FASTPARQUET_MARK
@pytest.mark.xfail(reason="No longer accept ParquetFile objects")
def test_read_from_fastparquet_parquetfile(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=["a"], engine="fastparquet")
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[("a", "==", "B")]).compute()
assert set(df.b[df.a == "B"]) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine="pyarrow")
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.0, 2.0, 3.0, 4.0]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, "dask")
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_timestamp96(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame({"a": [pd.to_datetime("now", utc=True)]})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times="int96")
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn, index=False).compute()
assert_eq(out, df)
@FASTPARQUET_MARK
def test_drill_scheme(tmpdir):
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
files = []
for d in ["test_data1", "test_data2"]:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, "data1.parq"))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert "dir0" in df.columns
out = df.compute()
assert "dir0" in out
assert (np.unique(out.dir0) == ["test_data1", "test_data2"]).all()
def test_parquet_select_cats(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame(
{
"categories": pd.Series(
| np.random.choice(["a", "b", "c", "d", "e", "f"], size=100) | numpy.random.choice |
import os
import numpy as np
from bmtk.builder.networks import NetworkBuilder
# helper functions
def generate_positions(N, x0=0.0, x1=300.0, y0=0.0, y1=100.0):
X = np.random.uniform(x0, x1, N)
Y = np.random.uniform(y0, y1, N)
return np.column_stack((X, Y))
# returns the stack in 3D space
def generate_positions_3D(N, x0=0.0, x1=300.0, z0=0.0, z1=300, y0=0.0, y1=300.0):
X = np.random.uniform(x0, x1, N)
Y = np.random.uniform(y0, y1, N)
Z = np.random.uniform(z0, z1, N)
    result = np.column_stack((X, Y, Z))
    return result
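# --- Illustrative check (editorial addition, not part of the original script) ---
# The helpers above draw node coordinates uniformly inside a box; a quick shape check
# makes the intended outputs explicit (the node count below is arbitrary).
def _demo_positions():
    xy = generate_positions(5)        # expected shape (5, 2)
    xyz = generate_positions_3D(5)    # expected shape (5, 3)
    return xy.shape, xyz.shape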
"""
The TensorProductState class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import functools as _functools
import itertools as _itertools
import numpy as _np
from pygsti.modelmembers.states.state import State as _State
from pygsti.modelmembers import modelmember as _modelmember, term as _term
from pygsti.baseobjs import statespace as _statespace
from pygsti.tools import listtools as _lt
from pygsti.tools import matrixtools as _mt
class TensorProductState(_State):
"""
A state vector that is a tensor-product of other state vectors.
Parameters
----------
factors : list of States
a list of the component states to take the tensor product of.
state_space : StateSpace, optional
The state space for this operation.
"""
def __init__(self, factors, state_space):
assert(len(factors) > 0), "Must have at least one factor!"
self.factors = factors # do *not* copy - needs to reference common objects
evotype = self.factors[0]._evotype
rep = evotype.create_tensorproduct_state_rep([f._rep for f in factors], state_space)
_State.__init__(self, rep, evotype)
self.init_gpindices() # initialize our gpindices based on sub-members
self._update_rep() # initializes rep data
#Note: no to_memoized_dict needed, as ModelMember version does all we need.
@classmethod
def _from_memoized_dict(cls, mm_dict, serial_memo):
state_space = _statespace.StateSpace.from_nice_serialization(mm_dict['state_space'])
factors = [serial_memo[i] for i in mm_dict['submembers']]
return cls(factors, state_space)
def submembers(self):
"""
Get the ModelMember-derived objects contained in this one.
Returns
-------
list
"""
return self.factors # factor POVM object
def _update_rep(self):
self._rep.reps_have_changed()
@property
def parameter_labels(self):
"""
An array of labels (usually strings) describing this model member's parameters.
"""
vl = _np.empty(self.num_params, dtype=object)
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
vl[factor_local_inds] = factor_state.parameter_labels
return vl
def to_dense(self, on_space='minimal', scratch=None):
"""
Return this state vector as a (dense) numpy array.
The memory in `scratch` maybe used when it is not-None.
Parameters
----------
on_space : {'minimal', 'Hilbert', 'HilbertSchmidt'}
The space that the returned dense operation acts upon. For unitary matrices and bra/ket vectors,
use `'Hilbert'`. For superoperator matrices and super-bra/super-ket vectors use `'HilbertSchmidt'`.
`'minimal'` means that `'Hilbert'` is used if possible given this operator's evolution type, and
otherwise `'HilbertSchmidt'` is used.
scratch : numpy.ndarray, optional
scratch space available for use.
Returns
-------
numpy.ndarray
"""
return self._rep.to_dense(on_space)
def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False):
"""
Get the `order`-th order Taylor-expansion terms of this state vector.
This function either constructs or returns a cached list of the terms at
the given order. Each term is "rank-1", meaning that it is a state
preparation followed by or POVM effect preceded by actions on a
density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
State's parameters, where the polynomial's variable indices index the
*global* parameters of the State's parent (usually a :class:`Model`)
, not the State's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
max_polynomial_vars : int, optional
maximum number of variables the created polynomials can have.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
terms = []
fnq = [int(round(_np.log2(f.dim))) // 2 for f in self.factors] # num of qubits per factor
# assumes density matrix evolution
total_nQ = sum(fnq) # total number of qubits
for p in _lt.partition_into(order, len(self.factors)):
factor_lists = [self.factors[i].taylor_order_terms(pi, max_polynomial_vars) for i, pi in enumerate(p)]
# When possible, create COLLAPSED factor_lists so each factor has just a single
# (State) pre & post op, which can be formed into the new terms'
# TensorProdState ops.
# - DON'T collapse stabilizer states & clifford ops - can't for POVMs
collapsible = False # bool(self._evotype =="svterm") # need to use reps for collapsing now... TODO?
if collapsible:
factor_lists = [[t.collapse_vec() for t in fterms] for fterms in factor_lists]
for factors in _itertools.product(*factor_lists):
# create a term with a TensorProdState - Note we always create
# "prep"-mode vectors, since even when self._prep_or_effect == "effect" these
# vectors are created with factor (prep- or effect-type) States not factor POVMs
# we workaround this by still allowing such "prep"-mode
# TensorProdStates to be represented as effects (i.e. in torep('effect'...) works)
coeff = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors])
pre_rep = self._evotype.create_tensorproduct_state_rep(
[f.pre_state for f in factors if (f.pre_state is not None)], self.state_space)
post_rep = self._evotype.create_tensorproduct_state_rep(
[f.post_state for f in factors if (f.post_state is not None)], self.state_space)
term = _term.RankOnePolynomialPrepTerm.create_from(coeff, pre_rep, post_rep,
self._evotype, self.state_space)
if not collapsible: # then may need to add more ops. Assume factor ops are clifford gates
# Embed each factors ops according to their target qubit(s) and just daisy chain them
ss = _statespace.QubitSpace(total_nQ); curQ = 0
for f, nq in zip(factors, fnq):
targetLabels = tuple(range(curQ, curQ + nq)); curQ += nq
term._rep.pre_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op)
for op in f.pre_ops]) # embed and add ops
term._rep.post_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op)
for op in f.post_ops]) # embed and add ops
terms.append(term)
if return_coeff_polys:
def _decompose_indices(x):
return tuple(_modelmember._decompose_gpindices(
self.gpindices, _np.array(x, _np.int64)))
poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices
tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs]
if len(tapes) > 0:
vtape = _np.concatenate([t[0] for t in tapes])
ctape = _np.concatenate([t[1] for t in tapes])
else:
vtape = _np.empty(0, _np.int64)
ctape = _np.empty(0, complex)
coeffs_as_compact_polys = (vtape, ctape)
#self.local_term_poly_coeffs[order] = coeffs_as_compact_polys #FUTURE?
return terms, coeffs_as_compact_polys
else:
return terms # Cache terms in FUTURE?
@property
def num_params(self):
"""
Get the number of independent parameters which specify this state vector.
Returns
-------
int
the number of independent parameters.
"""
return len(self.gpindices_as_array())
def to_vector(self):
"""
Get the state vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
v = _np.empty(self.num_params, 'd')
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
v[factor_local_inds] = factor_state.to_vector()
return v
def from_vector(self, v, close=False, dirty_value=True):
"""
Initialize the state vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of state vector parameters. Length
must == num_params()
close : bool, optional
Whether `v` is close to this state vector's current
set of parameters. Under some circumstances, when this
is true this call can be completed more quickly.
dirty_value : bool, optional
The value to set this object's "dirty flag" to before exiting this
call. This is passed as an argument so it can be updated *recursively*.
Leave this set to `True` unless you know what you're doing.
Returns
-------
None
"""
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
factor_state.from_vector(v[factor_local_inds], close, dirty_value)
#Update representation, which may be a dense matrix or
# just fast-kron arrays or a stabilizer state.
self._update_rep() # TODO - how does this apply to state reps??
def deriv_wrt_params(self, wrt_filter=None):
"""
The element-wise derivative this state vector.
Construct a matrix whose columns are the derivatives of the state vector
with respect to a single param. Thus, each column is of length
dimension and there is one column per state vector parameter.
An empty 2D array in the StaticState case (num_params == 0).
Parameters
----------
wrt_filter : list or numpy.ndarray
List of parameter indices to take derivative with respect to.
            (None means to use all of this operation's parameters.)
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
typ = self.factors[0].to_dense(on_space='minimal').dtype if len(self.factors) > 0 else 'd'
#HACK to deal with fact that output of to_dense is really what is differentiated
# but this may not match self.dim == self.state_space.dim, e.g. for pure state vecs.
dims = [len(fct.to_dense(on_space='minimal')) for fct in self.factors]
dim = int(_np.product(dims))
derivMx = _np.zeros((dim, self.num_params), typ)
#Product rule to compute jacobian
# loop over the spamvec/povm we differentiate wrt:
for i, (fct, fct_local_inds, fct_dim) in enumerate(zip(self.factors, self._submember_rpindices, dims)):
vec = fct
if vec.num_params == 0: continue # no contribution
deriv = vec.deriv_wrt_params(None) # TODO: use filter?? / make relative to this gate...
deriv.shape = (fct_dim, vec.num_params)
if i > 0: # factors before ith
pre = self.factors[0].to_dense(on_space='minimal')
for vecA in self.factors[1:i]:
pre = _np.kron(pre, vecA.to_dense(on_space='minimal'))
deriv = _np.kron(pre[:, None], deriv) # add a dummy 1-dim to 'pre' and do kron properly...
if i + 1 < len(self.factors): # factors after ith
post = self.factors[i + 1].to_dense(on_space='minimal')
for vecA in self.factors[i + 2:]:
post = _np.kron(post, vecA.to_dense(on_space='minimal'))
deriv = _np.kron(deriv, post[:, None]) # add a dummy 1-dim to 'post' and do kron properly...
assert(fct_local_inds is not None), \
"Error: gpindices has not been initialized for factor %d - cannot compute derivative!" % i
derivMx[:, fct_local_inds] += deriv
derivMx.shape = (dim, self.num_params) # necessary?
if wrt_filter is None:
return derivMx
else:
return | _np.take(derivMx, wrt_filter, axis=1) | numpy.take |
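# Illustrative aside, not part of the pyGSTi source above: the product-rule
# Jacobian in deriv_wrt_params relies on d/dt (a(t) kron b) = (da/dt) kron b,
# applied with the static factors to the left and right of the differentiated
# one. A minimal numpy sketch checking that identity against finite differences:
import numpy as np
def param_factor(t):
    return np.array([np.cos(t), np.sin(t)])      # a factor that depends on t
def param_factor_deriv(t):
    return np.array([-np.sin(t), np.cos(t)])     # its analytic derivative
static_factor = np.array([1.0, 2.0])
t0, eps = 0.3, 1e-6
analytic = np.kron(param_factor_deriv(t0), static_factor)
numeric = (np.kron(param_factor(t0 + eps), static_factor)
           - np.kron(param_factor(t0 - eps), static_factor)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-6)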
import numpy as np
from numpy import shape
class fft_basic:
'''
    Basic discrete Fourier transform class
'''
def __init__(self, matrix):
self.matrix = matrix
def do_fft(self):
self.fft = np.fft.fft2(self.matrix)
return np.log( | np.abs(self.fft) | numpy.abs |
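# Illustrative aside: a hypothetical use of the fft_basic class above. The
# log-magnitude spectrum is usually recentred with fftshift before display;
# the test image and the +1e-12 guard against log(0) are additions here, not
# part of the class:
import numpy as np
img = np.zeros((64, 64))
img[24:40, 24:40] = 1.0                        # a bright square on a dark field
spectrum = np.fft.fft2(img)
log_mag = np.log(np.abs(spectrum) + 1e-12)     # log-magnitude, as in do_fft
log_mag_centred = np.fft.fftshift(log_mag)     # zero frequency moved to centre
print(log_mag_centred.shape)                   # (64, 64)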
import numpy as np
from scipy.optimize import minimize
import math
def isRotationMatrix(R: np.array):
tag = False
I = np.identity(R.shape[0])
    if np.all(np.abs(np.matmul(R, R.T) - I) < 1e-6) and abs(np.linalg.det(R) - 1) < 1e-6:
tag = True
return tag
class W300():
def W300_GetInitGuess(self, l_vec, b_vec, f_vec):
f_vec = np.cross(b_vec, l_vec) * -1
l_vec = np.cross(f_vec, b_vec) * -1
l_norm = np.linalg.norm(l_vec)
l_vec /= l_norm
b_norm = np.linalg.norm(b_vec)
b_vec /= b_norm
f_norm = np.linalg.norm(f_vec)
f_vec /= f_norm
l_vec = l_vec.reshape(3, 1)
b_vec = b_vec.reshape(3, 1)
f_vec = f_vec.reshape(3, 1)
l = np.array([1, 0, 0]).reshape(1, 3)
b = np.array([0, 1, 0]).reshape(1, 3)
f = np.array([0, 0, 1]).reshape(1, 3)
R = l_vec @ l + b_vec @ b + f_vec @ f
yaw = math.asin(R[0, 2])
roll = math.atan2(-R[0, 1], R[0, 0])
pitch = math.atan2(-R[1, 2], R[2, 2])
yaw *= -1
return np.array([pitch, yaw, roll])
def W300_ObjectiveV3(self, x, l_vec, b_vec, f_vec):
rx = x[0]
ry = x[1]
rz = x[2]
l_hat, b_hat, f_hat = self.W300_EulerAngles2Vectors(rx, ry, rz)
l_vec_dot = np.clip(l_hat[0] * l_vec[0] + l_hat[1] * l_vec[1] + l_hat[2] * l_vec[2], -1, 1)
b_vec_dot = np.clip(b_hat[0] * b_vec[0] + b_hat[1] * b_vec[1] + b_hat[2] * b_vec[2], -1, 1)
f_vec_dot = np.clip(f_hat[0] * f_vec[0] + f_hat[1] * f_vec[1] + f_hat[2] * f_vec[2], -1, 1)
return math.acos(l_vec_dot) ** 2 + math.acos(b_vec_dot) ** 2 + math.acos(f_vec_dot) ** 2
def W300_EulerAngles2Vectors(self, rx, ry, rz):
'''
rx: pitch
ry: yaw
rz: roll
'''
ry *= -1
R_x = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(rx), -np.sin(rx)],
[0.0, np.sin(rx), np.cos(rx)]])
R_y = np.array([[np.cos(ry), 0.0, np.sin(ry)],
[0.0, 1.0, 0.0],
[-np.sin(ry), 0.0, np.cos(ry)]])
R_z = np.array([[np.cos(rz), -np.sin(rz), 0.0],
[np.sin(rz), | np.cos(rz) | numpy.cos |
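# Illustrative aside: a quick self-check of the rotation matrices built above.
# The composition order used by W300_EulerAngles2Vectors is cut off in this
# excerpt, so the conventional R = R_z @ R_y @ R_x is assumed here purely for
# illustration:
import numpy as np
def euler_to_matrix(rx, ry, rz):
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    R_x = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    R_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    R_z = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return R_z @ R_y @ R_x
R = euler_to_matrix(0.1, -0.2, 0.3)
assert np.allclose(R @ R.T, np.eye(3), atol=1e-9)    # orthonormal
assert np.isclose(np.linalg.det(R), 1.0)             # proper rotation, det = +1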
###Package Importing
import numpy as np
import pandas as pd
from sklearn import metrics
import logging
from sklearn.externals import joblib
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
import os
from datetime import datetime
from preprocessing import hash_col
from preprocessing import onehot
from preprocessing import normalize
from model import machine_learning as ml
from preprocessing import discretize
#from sklearn.ensemble import RandomForestClassifier
wkdir = ""
#set parameters
dump_path = wkdir + "/" + "model"
data_path = wkdir + "/" + "data"
out_path = wkdir + "/" + "output"
colList_float = []
colList_cnt = []
colList_days = []
colList_unicode = []
colList_dup = []
keys = ''
labels = ''
def train(*args, **kwargs) :
###Setup logging
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)
handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info("START training")
# Mandatory Args
wkdir, dump_path, data_path, out_path = kwargs["path_list"]
filename0, filename1 = kwargs["filenames"]
colList_float, colList_cnt, colList_days, colList_unicode = kwargs["column_lists"]
keys = kwargs["keys"]
# Optional Args
oversampling_ratio = kwargs["oversampling_ratio"] if "oversampling_ratio" in kwargs.keys() else 0.5
comprehensive_search = kwargs["comprehensive_search"] if "comprehensive_search" in kwargs.keys() else False
###Data Loading
os.chdir(wkdir)
try :
data_ori0 = pd.read_csv(data_path + "/" + filename0 #/rex_up_features_sample0.csv"
, low_memory=False, encoding=u'utf-8') \
.drop_duplicates(subset=keys,keep='first')
data_ori1 = pd.read_csv(data_path + "/" + filename1
, low_memory=False, encoding=u'utf-8').drop_duplicates(subset=keys,keep='first')
        #axis=0 stacks the two frames row-wise (columns aligned by name)
        #axis=1 would join column-wise (rows aligned by index)
data_tmp = pd.concat([data_ori0, data_ori1], axis=0)
data_tmp.index = data_tmp[keys]
#print(data_ori0.shape, data_ori1.shape, data_tmp.shape)
#print(data_tmp)
assert data_ori0.shape[0]+data_ori1.shape[0] == data_tmp.shape[0] , "0/1 Merging failed"
assert data_ori0.shape[1] == data_ori1.shape[1] == data_tmp.shape[1] , "Column number not match"
logger.info("shapes of data_ori0, data_ori1, data_tmp:" + str(data_ori0.shape) + str(data_ori1.shape) + str(data_tmp.shape))
#For numeric features including float, cnt and days, we fill NaN and normalize
#No need to discretize in this model.
#n_disc = 5
clients_discretized = data_tmp.loc[:, :].copy()
#nsamples = clients_discretized.shape[0]
features_num = clients_discretized[[keys]+colList_float + colList_days + colList_cnt].drop_duplicates( keep='first')
features_num = features_num[colList_float + colList_days + colList_cnt] \
.applymap(discretize.clean_cell) \
.applymap(lambda x : np.float64(x))
        # save (mean, std) into tables so that they can be retrieved at predict phase
features_num.apply(lambda x : pd.Series([np.mean(x), | np.std(x) | numpy.std |
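# Illustrative aside: the per-column (mean, std) table computed above is what
# makes the normalisation reproducible at predict time. A standalone sketch of
# that fit/transform pattern; the column names and values below are invented:
import numpy as np
import pandas as pd
train_df = pd.DataFrame({"amt": [1.0, 2.0, 3.0, 4.0], "days": [10.0, 20.0, 30.0, 40.0]})
stats = train_df.apply(lambda col: pd.Series([np.mean(col), np.std(col)],
                                             index=["mean", "std"]))
def standardize(df, stats):
    return (df - stats.loc["mean"]) / stats.loc["std"]
new_df = pd.DataFrame({"amt": [2.5], "days": [25.0]})
print(standardize(new_df, stats))    # scaled with the stored training statistics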
import numpy as np
from astropy.io import ascii
from sys import argv
import kinematics
import lacewing
import ellipse
#########################################################
#########################################################
### MAIN ROUTINE
### ARR 2016-07-29
### 1.3: Now compatible with LACEwING v1.3, and uses the
### lacewing.lacewing() and lacewing_moving_group_loader()
### functions
### 1.4: Now includes a UVWXYZ test mode
#########################################################
#########################################################
iterations = int(argv[1])
number = argv[2]
uvw = False
if len(argv) > 3:
if argv[3] == "UVW":
uvw = True
moving_groups = lacewing.moving_group_loader()
outfile = []
if uvw:
outfile2 = open('test_points_{0:}'.format(number),'wb')
else:
for i in xrange(len(moving_groups)):
outfile.append(open('{0:}{1:}'.format(moving_groups[i].name.replace(' ','_'),number),'wb'))
weightednumber = []
for j in xrange(len(moving_groups)):
weightednumber.append(moving_groups[j].weightednumber)
for i in xrange(iterations):
# Now begin the random number generators
# 1. Decide which type of star this is going to be.
selector = np.random.rand()
# Choose the first type greater than 'selector'
startype = np.where(np.asarray(weightednumber) > selector)[0][0]
mgp = moving_groups[startype].name
# 2. Now generate a star within the dispersion of the group.
tu = np.random.randn()*float(moving_groups[startype].A)
tv = np.random.randn()*float(moving_groups[startype].B)
tw = np.random.randn()*float(moving_groups[startype].C)
if moving_groups[startype].uniform == 0: # uniform random distribution of positions
tx = ((np.random.rand()*2)-1)*float(moving_groups[startype].D)
ty = ((np.random.rand()*2)-1)*float(moving_groups[startype].E)
tz = ((np.random.rand()*2)-1)*float(moving_groups[startype].F)
        while ((tx/float(moving_groups[startype].D))**2 + (ty/float(moving_groups[startype].E))**2 + (tz/float(moving_groups[startype].F))**2) > 1:
tx = ((np.random.rand()*2)-1)*float(moving_groups[startype].D)
ty = ((np.random.rand()*2)-1)*float(moving_groups[startype].E)
tz = ((np.random.rand()*2)-1)*float(moving_groups[startype].F)
# A quick test shows that an ellipse fit to a uniform distribution
# has axes smaller than the initial ellipse by a factor of sqrt(3)
tx = tx * np.sqrt(3)
ty = ty * np.sqrt(3)
tz = tz * np.sqrt(3)
if moving_groups[startype].uniform == 1: # for clusters: use gaussians
tx = np.random.randn()*float(moving_groups[startype].D)
ty = np.random.randn()*float(moving_groups[startype].E)
tz = np.random.randn()*float(moving_groups[startype].F)
if moving_groups[startype].uniform == 2: # exponential disk dropoff (for the field stars)
tx = ((np.random.rand()*2)-1)*float(moving_groups[startype].D)
ty = ((np.random.rand()*2)-1)*float(moving_groups[startype].E)
tz = | np.random.exponential(scale=300) | numpy.random.exponential |
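# Illustrative aside: the position draw above is rejection sampling of a point
# uniform inside an ellipsoid with semi-axes (D, E, F); the sqrt(3) factor then
# compensates for fitting an ellipse to a uniform cloud. A compact standalone
# version of the sampling step (axis lengths below are arbitrary):
import numpy as np
def sample_in_ellipsoid(D, E, F, rng=np.random):
    while True:
        x = (rng.rand() * 2 - 1) * D
        y = (rng.rand() * 2 - 1) * E
        z = (rng.rand() * 2 - 1) * F
        if (x / D) ** 2 + (y / E) ** 2 + (z / F) ** 2 <= 1:
            return x, y, z
print(sample_in_ellipsoid(30.0, 20.0, 10.0))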
import numpy as np
import sys
import cv2
import time
import copy
import os
import traceback
import ffmpeg
import subprocess as sp
import os.path as path
from pprint import pprint
from concurrent.futures import ThreadPoolExecutor
from PIL import ImageColor
| np.set_printoptions(threshold=np.inf) | numpy.set_printoptions |
import h5py
from typing import List, Dict
import networkx as nx
import numpy as np
import os
from collections import Counter
import pandas as pd
import json
from tqdm import tqdm
__all__ = ['Graph']
class Graph(nx.Graph):
"""
Class for storing Nabo's SNN graph. Inherits from networkx's `Graph` class
"""
def __init__(self):
super().__init__()
self.refName = None
self.refNodes: List[str] = []
self.refG = None
self.targetNames: List[str] = []
self.targetNodes: Dict[str, List[str]] = {}
self.deTestCells: List[str] = None
self.deCtrlCells: List[str] = None
self._agglomDendrogram = None
def load_from_h5(self, fn: str, name: str, kind: str) -> None:
"""
Loads a graph saved by `Mapping` class in HDF5 format
:param fn: Path to HDF5 file
:param name: Label/name of sample used in Mapping object. This
function assumes that the group in HDF5 containing
graph data is named: `name` + '_graph'
:param kind: Can have a value of either 'reference' or 'target'.
                     Only one sample can have kind='reference' for an
instance of this class
:return: None
"""
if os.path.exists(fn) is False:
raise IOError('ERROR: File %s does not exist' % fn)
if kind == 'reference':
if self.refName is not None:
raise ValueError('ERROR: A reference kind is already loaded')
elif kind == 'target':
if name in self.targetNames:
raise ValueError('ERROR: %s target group already present in '
'graph' % name)
if self.refName is None:
raise ValueError('ERROR: Please load reference kind first')
else:
raise ValueError('ERROR: Kind can be either "reference" or '
'"target"')
try:
h5 = h5py.File(fn, mode='r')
except (IOError, OSError):
raise IOError('ERROR: Unable to open file %s' % fn)
if kind == 'reference':
try:
saved_name = h5['name_stash/ref_name'][0].decode('UTF-8')
uid = h5['name_stash/ref_name'][1].decode('UTF-8')
except KeyError:
raise KeyError("ERROR: Could not find stashed names in the "
"mapping file. Make sure reference graph has "
"been created in the mapping file")
if name != saved_name:
raise KeyError("ERROR: The reference is named %s in the "
"mapping file and not %s. Please verify that "
"you are trying to load right reference." % (
saved_name, name))
else:
try:
target_names = h5['name_stash/target_names'][:]
except KeyError:
raise KeyError("ERROR: Could not find stashed names in the "
"mapping file. Make sure reference graph has "
"been created in the mapping file")
uid = None
for i in target_names:
if i[0].decode('UTF-8') == name:
uid = i[1].decode('UTF-8')
if uid is None:
raise KeyError("ERROR: The target name not could not be found "
"in the mapping file")
grp = uid + '_graph'
if grp not in h5:
h5.close()
raise KeyError('ERROR: Group %s not found in HDF5 file %s'
% (grp, fn))
attrs = {'kind': kind, 'name': name}
existing_nodes = {x: None for x in self.nodes()}
new_nodes = []
for node in h5[grp]:
if node in existing_nodes:
print('WARNING: node %s already present in the graph. Will '
'not add.' % node)
else:
new_nodes.append(node)
self.add_node(node, **attrs)
for j in h5[grp][node]:
node2 = j[0].decode('UTF-8')
weight = float(j[1].decode('UTF-8'))
self.add_edge(node, node2, weight=weight)
h5.close()
if kind == 'reference':
self.refName = name
self.refNodes = new_nodes
self.refG = self.subgraph(self.refNodes)
else:
self.targetNames.append(name)
self.targetNodes[name] = new_nodes
return None
def load_from_gml(self, fn: str) -> None:
"""
Load data from GML format file. It is critical that this graph was
generated using Nabo's `Mapping` class.
:param fn: Full path of GML file
:return: None
"""
if len(self.nodes) > 0:
raise ValueError('ERROR: The graph already contains nodes. '
'Cannot load GML file on this object.')
if os.path.exists(fn) is False:
raise IOError('ERROR: File %s does not exist' % fn)
try:
g = nx.read_gml(fn)
except (IOError, OSError):
            raise IOError('ERROR: Could not open the file %s. Make sure the file '
'is in GML format' % fn)
for i in g.nodes(data=True):
attrs = i[1]
if 'kind' not in attrs or 'name' not in attrs:
self.clear()
raise ValueError('ERROR: Attributes "kind" and/or "name" '
'not found for one or more cells. Make '
'sure that the GML was saved using Nabo')
if attrs['kind'] == 'reference':
if self.refName is None:
self.refName = attrs['name']
elif self.refName != attrs['name']:
self.clear()
raise ValueError('ERROR: Multiple reference samples '
'found. Please make sure you saved '
'the GML with Nabo.')
self.refNodes.append(i[0])
elif attrs['kind'] == 'target':
if attrs['name'] not in self.targetNames:
self.targetNames.append(attrs['name'])
self.targetNodes[attrs['name']] = []
self.targetNodes[attrs['name']].append(i[0])
else:
self.clear()
raise ValueError('ERROR: Kind can only be either "reference" '
'or "target"')
if 'pos' in i[1] and i[1]['pos'] == 'None':
i[1]['pos'] = None
self.add_node(i[0], **i[1])
for i in g.edges(data=True):
self.add_edge(i[0], i[1], weight=i[2]['weight'])
self.refG = self.subgraph(self.refNodes)
return None
def save_graph(self, save_name: str) -> None:
"""
Save graph in GML format
:param save_name: Output filename with path
:return: None
"""
nx.write_gml(self, save_name, stringizer=lambda x: str(x))
return None
def set_ref_layout(self, niter: int = 500, verbose: bool = True,
init_pos: dict = None, disable_rescaling: bool = False,
outbound_attraction_distribution: bool = True,
edge_weight_influence: float = 1.0,
jitter_tolerance: float = 1.0,
barnes_hut_optimize: bool = True,
barnes_hut_theta: float = 1.2,
scaling_ratio: float = 1.0,
strong_gravity_mode: bool = False,
gravity: float = 1.0) -> None:
"""
Calculates a 2D graph layout using ForceAtlas2 algorithm.
The ForceAtlas2 implementation being used here will not prevent
nodes in the graph from overlapping with each other. We aim to
improve this in the future.
:param niter: Number of iterations (default: 500)
:param verbose: Print the progress (default: True)
:param init_pos: Initial positions of nodes
:param disable_rescaling: If True then layout coordinates are not
rescaled to only have non negative
positions (Default: False)
:param outbound_attraction_distribution:
:param edge_weight_influence:
:param jitter_tolerance:
:param barnes_hut_optimize:
:param barnes_hut_theta:
:param scaling_ratio:
:param strong_gravity_mode:
:param gravity:
:return: None
"""
from fa2 import ForceAtlas2
force_atlas = ForceAtlas2(
outboundAttractionDistribution=outbound_attraction_distribution,
edgeWeightInfluence=edge_weight_influence,
jitterTolerance=jitter_tolerance,
barnesHutOptimize=barnes_hut_optimize,
barnesHutTheta=barnes_hut_theta, scalingRatio=scaling_ratio,
strongGravityMode=strong_gravity_mode, gravity=gravity,
verbose=verbose)
pos = force_atlas.forceatlas2_networkx_layout(
self.refG, pos=init_pos, iterations=niter)
if disable_rescaling is False:
pos_array = np.array(list(pos.values())).T
min_x, min_y = pos_array[0].min(), pos_array[1].min()
# max_x, max_y = pos_array[0].max(), pos_array[1].max()
pos = {k: ((v[0] - min_x), (v[1] - min_y)) for k, v in pos.items()}
# pos = {k: ((v[0] - min_x) / (max_x - min_x),
# (v[1] - min_y) / (max_y - min_y))
# for k, v in pos.items()}
for node in pos:
self.nodes[node]['pos'] = (float(pos[node][0]),
float(pos[node][1]))
for node in self:
if node not in pos:
self.nodes[node]['pos'] = None
return None
@property
def clusters(self) -> Dict[str, str]:
ret_val = {}
for i in self.refG.nodes(data=True):
if 'cluster' in i[1]:
ret_val[i[0]] = i[1]['cluster']
return ret_val
def make_clusters(self, n_clusters: int) -> None:
"""
Performs graph agglomerative clustering using algorithm in Newman 2004
:param n_clusters: Number of clusters
:return: None
"""
import hac
if self._agglomDendrogram is None:
clusterer = hac.GreedyAgglomerativeClusterer()
self._agglomDendrogram = clusterer.cluster(self.refG)
cluster_list = self._agglomDendrogram.clusters(n_clusters)
for n, node_group in enumerate(cluster_list):
for node in node_group:
clust_num = n + 1
self.nodes[node]['cluster'] = str(clust_num)
return None
def make_leiden_clusters(self, resolution: float = 1.0, random_seed = 4466) -> None:
"""
Leiden clustering
:param n_clusters: Number of clusters
:return: None
"""
try:
import leidenalg
except ImportError:
raise ImportError("ERROR: 'leidenalg' package is not installed. Please find the installation instructions "
"here: https://github.com/vtraag/leidenalg#installation.")
import igraph # python-igraph
adj = nx.to_scipy_sparse_matrix(self.refG)
sources, targets = adj.nonzero()
g = igraph.Graph()
g.add_vertices(adj.shape[0])
g.add_edges(list(zip(sources, targets)))
g.es['weight'] = adj[sources, targets].A1
part = leidenalg.find_partition(g, leidenalg.RBConfigurationVertexPartition, resolution_parameter=resolution,
seed=random_seed)
clusts = np.array(part.membership) + 1
for n, c in zip(self.refG.nodes, clusts):
self.nodes[n]['cluster'] = str(c)
return None
def get_cluster_identity_weights(self) -> pd.Series:
"""
:return: Cluster identity weights for each cell
"""
ciw = {}
clusters = self.clusters
if clusters == {}:
raise ValueError("ERROR: Please make sure that clusters have "
"been assigned to cells. Run 'make_clusters' or "
"import clusters")
skipped_cells = 0
max_nodes = []
for i in tqdm(self.refG, total=len(self.refG)):
cw = []
for j in self.refG.edges(i, data=True):
try:
cw.append((clusters[j[1]], j[2]['weight']))
except KeyError:
skipped_cells += 1
continue
if len(cw) > 0:
cw = pd.DataFrame(cw)
cw = cw.groupby(0).size() * \
cw.groupby(0).sum()[1].sort_values().values
if len(cw) > 1:
ciw[i] = cw[-1] / cw[:-1].sum()
else:
ciw[i] = cw[-1]
max_nodes.append(i)
else:
skipped_cells += 1
if skipped_cells > 0:
print("WARNING: %d cells were skipped" % skipped_cells)
ciw = pd.Series(ciw)
if len(max_nodes) > 0:
for i in max_nodes:
ciw[i] = ciw.max()
return ciw
def import_clusters(self, cluster_dict: Dict[str, str] = None,
missing_val: str = 'NA') -> None:
"""
Import cluster information for reference cells.
:param cluster_dict: Dictionary with cell names as keys and cluster
number as values. Cluster numbers should start
from 1
        :param missing_val: Cluster label assigned to reference cells that
                            are absent from cluster_dict (Default: NA)
:return: None
"""
skipped_nodes = len(cluster_dict)
for node in self.refNodes:
if node in cluster_dict:
self.nodes[node]['cluster'] = str(cluster_dict[node])
skipped_nodes -= 1
else:
self.nodes[node]['cluster'] = missing_val
if skipped_nodes > 0:
print('WARNING: %d cells do not exist in the reference graph and '
'their cluster info was not imported.' % skipped_nodes)
def import_clusters_from_json(self, fn):
"""
        Import clusters from a JSON file
:param fn: Input file in JSON format.
:return: None
"""
return self.import_clusters(json.load(open(fn)))
def import_clusters_from_csv(self, csv: str, csv_sep: str = ',',
cluster_col: int = 0, header = None,
append_ref_name: bool = False):
"""
:param csv: Filename containing cluster information. Make
sure that the first column contains cell names and
second contains the cluster labels.
:param csv_sep: Separator for CSV file (default: ',')
:param cluster_col: Column number (0 based count) where cluster
info is present (Default: 0)
        :param append_ref_name: Append the reference name to the cell name
                                (Default: False)
:return: None
"""
df = pd.read_csv(csv, index_col=0, sep=csv_sep, header=header)
cluster_dict = df[df.columns[cluster_col]].to_dict()
if append_ref_name:
cluster_dict = {k + '_' + self.refName: v for
k, v in cluster_dict.items()}
return self.import_clusters(cluster_dict)
def save_clusters_as_json(self, outfn):
"""
:param outfn: Output JSON file
:return:
"""
with open(outfn, 'w') as OUT:
json.dump(self.clusters, OUT, indent=2)
def save_clusters_as_csv(self, outfn):
"""
:param outfn: Output CSV file
:return:
"""
pd.Series(self.clusters).to_csv(outfn)
def _validate_clusters(self):
nclusts = len(set(self.clusters.values()))
if nclusts == 0:
raise ValueError('ERROR: Calculate clusters first using '
'"make_clusters" or import clusters using '
'"import_clusters"')
elif nclusts == 1:
raise ValueError('ERROR: Cannot classify targets when only '
'one cluster is present in the graph')
return True
def calc_modularity(self) -> float:
"""
Calculates modularity of the reference graph. The clusters should have
already been defined.
:return: Value between 0 and 1
"""
partition = {}
for k, v in self.clusters.items():
if v not in partition:
partition[v] = {}
partition[v][k] = None
partition = list(partition.values())
if sum([len(x) for x in partition]) != len(self.refG):
raise AssertionError('ERROR: Not all reference nodes have been '
'assigned to a cluster. Cannot calculate '
'modularity!')
# noinspection PyCallingNonCallable
w_degree = dict(self.refG.degree(weight='weight'))
norm = 1 / (2 * self.refG.size(weight='weight'))
q = 0
for p in partition:
for i in p:
t = -w_degree[i] * norm
q += sum([t * w_degree[x] for x in p])
q += sum([self.refG[i][x]['weight']
for x in self.refG[i] if x in p])
return q * norm
def import_layout(self, pos_dict) -> None:
"""
        Import a graph layout by providing a dictionary with node names as
        keys and (x, y) coordinate tuples as values.
:param pos_dict: Dictionary with keys as node names and values as
2D coordinates of nodes on the graph.
:return: None
"""
skipped_nodes = len(pos_dict)
error_nodes = 0
for node in self.nodes:
if node in pos_dict:
try:
self.nodes[node]['pos'] = (
float(pos_dict[node][0]),
float(pos_dict[node][1])
)
skipped_nodes -= 1
except (IndexError, TypeError):
error_nodes += 1
else:
self.nodes[node]['pos'] = None
if skipped_nodes > 0:
print('WARNING: %d cells do not exist in the reference graph and '
'their position info was not imported.' % skipped_nodes)
if error_nodes > 0:
print('WARNING: %d cells had position info in incorrect '
'format' % error_nodes)
return None
def import_layout_from_json(self, fn):
"""
:param fn: Input json file
:return:
"""
return self.import_layout(json.load(open(fn)))
def import_layout_from_csv(self, csv: str, csv_sep: str = ',',
dim_cols: tuple = (0, 1), header = None,
append_ref_name: bool = False):
"""
Import graph layout coordinates from a CSV file
:param csv: Filename containing layout coordinates. Make
sure that the first column contains cell names and
                         second and third contain the x and y coordinates
:param csv_sep: Separator for CSV file (default: ',')
        :param append_ref_name: Append the reference name to the cell name
                                (Default: False)
:return: None
"""
layout = pd.read_csv(csv, index_col=0, sep=csv_sep, header=header)
d1 = layout.columns[dim_cols[0]]
d2 = layout.columns[dim_cols[1]]
if append_ref_name:
layout = {x + '_' + self.refName: (layout[d1][x], layout[d2][x])
for x in layout.index}
else:
layout = {x: (layout[d1][x], layout[d2][x]) for x in layout.index}
return self.import_layout(layout)
@property
def layout(self):
"""
Copies 'pos' attribute values (x/y coordinate tuple) from graph nodes
and returns a dictionary
:return:
"""
pos_dict = {}
for i in self.nodes(data=True):
try:
pos_dict[i[0]] = i[1]['pos']
except (KeyError, IndexError):
pos_dict[i[0]] = None
return pos_dict
def save_layout_as_json(self, out_fn):
"""
:param out_fn: Output json file
:return:
"""
with open(out_fn, 'w') as OUT:
json.dump(self.layout, OUT, indent=2)
return None
def save_layout_as_csv(self, out_fn):
"""
Saves the layout in CSV format
:param out_fn: Output CSV file
:return:
"""
pd.DataFrame(self.layout).T.to_csv(out_fn, header=None)
return None
@staticmethod
def get_score_percentile(score: Dict[str, int], p: int) -> float:
"""
Get value for at a given percentile
:param score: Mapping score or any other dictionary
where values are numbers
:param p: Percentile
:return: Percentile value
"""
return np.percentile(list(score.values()), p)
def get_mapping_score(self, target: str, min_weight: float = 0,
min_score: float = 0, weighted: bool = True,
by_cluster: bool = False,
sorted_names_only: bool = False,
top_n_only: int = None,
all_nodes: bool = True, score_multiplier: int = 1000,
ignore_nodes: List[str] = None,
include_nodes: List[str] = None,
remove_suffix: bool = False, verbose: bool = False):
"""
Calculate a weighted/unweighted degree of incident target nodes on
reference nodes.
:param target: Target sample name
        :param min_weight: Ignore an edge if its weight is smaller than this
                           value in the SNN graph. Only applicable if
                           calculating a weighted mapping score
        :param min_score: If a cell's score is smaller than this, reset it to zero
:param weighted: Use edge weights if True
:param by_cluster: If True, then combine scores from nodes of same
cluster into a list. The keys are cluster number
in the output dictionary
:param sorted_names_only: If True, then return only sorted list of
base cells from highest to lowest mapping
score. Cells with mapping score less than
`min_score` are not reported (default: False)
:param top_n_only: If sorted_names_only is True and an integer value is
provided then this method will return top n
number of nodes sorted based on score. min_score
value will be ignored.
:param all_nodes: if False, then returns only nodes with non-zero
score (after resetting using min_score)
:param score_multiplier: Score is multiplied by this number after
normalizing for total number of target cells.
:param ignore_nodes: List of nodes from 'target' sample to be
ignored while calculating the score (default:
None).
:param include_nodes: List of target nodes from 'target' sample.
Mapping score will be calculated ONLY for those
reference cells that are connected to this
subset of target cells in the graph. By
default mapping score will be calculated
against each target node.
:param remove_suffix: Remove suffix from cell names (default: False)
:param verbose: Prints graph stats
:return: Mapping score
"""
if by_cluster:
            if set(self.clusters.values()) == {'NA'}:
raise ValueError('ERROR: Calculate clusters first using '
'"make_clusters" or import clusters using '
'"import_clusters"')
if target not in self.targetNames:
raise ValueError('ERROR: %s not present in graph' % target)
if ignore_nodes is not None and include_nodes is not None:
raise ValueError("ERROR: PLease provide only one of "
"either 'ignore_nodes' or 'include_nodes' at a "
"time")
target_nodes = {x: None for x in self.targetNodes[target]}
if ignore_nodes is None:
ignore_nodes = []
else:
temp = []
for node in ignore_nodes:
if node in target_nodes:
temp.append(node)
ignore_nodes = list(temp)
if include_nodes is None:
include_nodes = list(self.targetNodes[target])
else:
temp = []
for node in include_nodes:
if node in target_nodes:
temp.append(node)
include_nodes = list(temp)
include_nodes = list(set(include_nodes).difference(ignore_nodes))
g = nx.Graph(self.subgraph(self.refNodes + include_nodes))
g.remove_edges_from(self.refG.edges)
if verbose:
isolates = set(list(nx.isolates(g)))
print("INFO: The bipartite graph has %d edges" % g.size())
print("INFO: Mapping calculated against %d %s nodes" % (
len(include_nodes), target))
print("INFO: %d reference nodes do not connect with any target"
" node" % len(isolates.intersection(self.refNodes)))
print("INFO: %d target nodes do not connect with any reference"
" node" % len(isolates.intersection(include_nodes)))
score = {}
for i in self.refNodes:
score[i] = 0
for j in g.edges(i, data=True):
if weighted:
if j[2]['weight'] > min_weight:
score[i] += j[2]['weight']
else:
score[i] += 1
score = {k: score_multiplier * v / len(self.targetNodes[target])
for k, v in score.items()}
if by_cluster:
cluster_dict = self.clusters
cluster_values = {x: [] for x in set(cluster_dict.values())}
na_cluster_score = []
for node in score:
try:
cluster_values[cluster_dict[node]].append(score[node])
except KeyError:
na_cluster_score.append(score[node])
if len(na_cluster_score) > 0:
if 'NA' not in cluster_values:
cluster_values['NA'] = []
else:
print("WARNING: 'NA' cluster already exists. Appending "
"value to it")
cluster_values['NA'].extend(na_cluster_score)
return cluster_values
if sorted_names_only:
if top_n_only is not None:
if top_n_only > len(score):
raise ValueError('ERROR: Value of top_n_only should be '
'less than total number of nodes in '
'reference graph')
retval = [x[0] for x in sorted(score.items(),
key=lambda x: x[1])][::-1][
:top_n_only]
else:
ms = {k: v for k, v in score.items() if v >= min_score}
retval = [x[0] for x in sorted(ms.items(),
key=lambda x: x[1])][::-1]
if remove_suffix:
return [x.rsplit('_', 1)[0] for x in retval]
else:
return retval
if not all_nodes:
retval = {k: v for k, v in score.items() if v >= min_score}
else:
retval = {k: v if v >= min_score else 0 for k, v in score.items()}
if remove_suffix:
return [x.rsplit('_', 1)[0] for x in retval]
else:
return retval
def get_cells_from_clusters(self, clusters: List[str],
remove_suffix: bool = True) -> List[str]:
"""
Get cell names for input cluster numbers
:param clusters: list of cluster identifiers
:param remove_suffix: Remove suffix from cell names
:return: List of cell names
"""
        if set(self.clusters.values()) == {'NA'}:
raise ValueError('ERROR: Calculate clusters first using '
'"make_clusters" or import clusters using '
'"import_clusters"')
cells = []
clusters = {str(x) for x in clusters}
for k, v in self.clusters.items():
if v in clusters:
if remove_suffix:
cells.append(k.rsplit('_', 1)[0])
else:
cells.append(k)
return cells
def classify_target(self, target: str, weight_frac: float = 0.5,
min_degree: int = 2, min_weight: float = 0.1,
cluster_dict: Dict[str, int] = None, na_label: str
= 'NA', ret_counts: bool = False) -> dict:
"""
This classifier identifies the total weight of all the connections made
by each target cell to each cluster (of reference cells). If a target
        cell has more than 50% (default value) of its total connection weight
        in one of the clusters then the target cell is labeled to be from that
        cluster. One useful aspect of this classifier is that it will not
        classify the target cell to be from any cluster if it fails to reach
        the threshold (default, 50%) for any cluster; such target cells are
        labeled with `na_label` ('NA' by default).
:param target: Name of target sample
:param weight_frac: Required minimum fraction of weight in a cluster
to be classified into that cluster
:param min_degree: Minimum degree of the target node
:param min_weight: Minimum edge weight. Edges with less weight
than min_weight will be ignored but will still
contribute to total weight.
:param cluster_dict: Cluster labels for each reference cell. If not
provided then the stored cluster information is
used.
:param na_label: Label for cells that failed to get classified
into any cluster
:param ret_counts: It True, then returns number of target cells
classified to each cluster, else returns predicted
cluster for each target cell
        :return: Dictionary. Keys are target cell names and values their
                 predicted cluster if ret_counts is False. Otherwise,
keys are cluster labels and values are the number
of target cells classified to that cluster
"""
import operator
if cluster_dict is None:
self._validate_clusters()
cluster_dict = self.clusters
clusts = set(cluster_dict.values())
classified_clusters = []
degrees = dict(self.degree)
for i in self.targetNodes[target]:
if i not in degrees:
continue
if degrees[i] < min_degree:
classified_clusters.append(na_label)
continue
clust_weights = {x: 0 for x in clusts}
tot_weight = 0
for j in self.edges(i, data=True):
if j[2]['weight'] > min_weight and j[1] in cluster_dict:
clust_weights[cluster_dict[j[1]]] += j[2]['weight']
tot_weight += j[2]['weight'] # even low weight is added to
# total weight to allow poor mappings to be penalized.
max_clust = max(clust_weights.items(),
key=operator.itemgetter(1))[0]
if clust_weights[max_clust] > (weight_frac * tot_weight):
classified_clusters.append(max_clust)
else:
classified_clusters.append(na_label)
if ret_counts:
counts = Counter(classified_clusters)
if na_label not in counts:
counts[na_label] = 0
for i in set(cluster_dict.values()):
if i not in counts:
counts[i] = 0
return counts
else:
return dict(zip(self.targetNodes[target], classified_clusters))
def get_mapping_specificity(self, target_name: str,
fill_na: bool = True) -> Dict[str, float]:
"""
Calculates the mapping specificity of target nodes. Mapping
specificity of a target node is calculated as the mean of shortest
path lengths between all pairs of mapped reference nodes.
:param target_name: Name of target sample
:param fill_na: if True, then nan values will be replaced with
largest value (default: True)
:return: Dictionary with target node names as keys and mapping
specificity as values
"""
path_lengths = {}
# TODO: FIX FOR MISSING TARGET NODES IF NODES ARE REMOVED MANUALLY
for node in tqdm(self.targetNodes[target_name]):
spls = []
targets = [x[1] for x in self.edges(node)]
nt = len(targets)
for i in range(nt):
for j in range(nt):
if i < j:
spls.append(nx.algorithms.shortest_path_length(
self.refG, source=targets[i], target=targets[j]))
path_lengths[node] = float(np.mean(spls))
if fill_na:
max_val = max(path_lengths.values())
return pd.Series(path_lengths).fillna(max_val).to_dict()
else:
return path_lengths
def get_ref_specificity(self, target: str, target_values: Dict[str, float],
incl_unmapped: bool = False) -> Dict[str, float]:
"""
Calculates the average mapping specificity of all target nodes that
mapped to a given a reference node. Requires that the mapping
specificity of target nodes is already calculated.
:param target: Name of target sample
:param target_values: Mapping specificity values of target nodes
:param incl_unmapped: If True, then includes unmapped reference
nodes in the dictionary with value set at 0
                              (Default: False)
:return: Dictionary with reference node names as keys and values as
mean mapping specificity of their mapped target nodes
"""
back_prom = {}
for i in self.refNodes:
back_prom[i] = []
for j in self.edges(i, data=True):
if j[1][-len(target):] == target:
back_prom[i].append(target_values[j[1]])
new_back_prom = {}
for i in back_prom:
if len(back_prom[i]) > 1:
new_back_prom[i] = np.mean(back_prom[i])
elif len(back_prom[i]) == 1:
new_back_prom[i] = back_prom[i][0]
else:
if incl_unmapped:
new_back_prom[i] = 0
return new_back_prom
    def get_mapped_cells(self, target: str, ref_cells: List[str],
remove_suffix: bool = True) -> List[str]:
"""
Get target cells that map to a given list of reference cells.
:param target: Name of target sample
:param ref_cells: List of reference cell names
:param remove_suffix: If True then removes target name suffix from
end of node name
:return: List of target cell names
"""
if target not in self.targetNames:
raise ValueError('ERROR: %s not present in graph!' % target)
target_cells = {x: None for x in self.targetNodes[target]}
mapped_cells = []
for i in ref_cells:
if remove_suffix:
i = i + '_' + self.refName
for j in self.edges(i):
if j[1] in target_cells:
mapped_cells.append(j[1])
mapped_cells = list(set(mapped_cells))
if remove_suffix:
return [x.rsplit('_', 1)[0] for x in mapped_cells]
else:
return mapped_cells
def get_random_nodes(self, n: int) -> List[str]:
"""
Get random list of nodes from reference graph.
:param n: Number of nodes to return
:return: A list of reference nodes
"""
all_nodes = list(self.refNodes)
if n >= len(all_nodes):
raise ValueError('ERROR: n should be lower than total nodes in '
'reference graph')
random_nodes = []
for i in range(n):
x = np.random.choice(all_nodes)
random_nodes.append(x)
all_nodes.remove(x)
return sorted(random_nodes)
def calc_contiguous_spl(self, nodes: List[str]) -> float:
"""
Calculates mean of shortest path lengths between subsequent nodes
provided in the input list in reference graph.
:param nodes: List of nodes from reference sample
:return: Mean shortest path length
"""
spl = []
for i in range(len(nodes) - 1):
spl.append(nx.shortest_path_length(self.refG, nodes[i],
nodes[i + 1]))
return float( | np.mean(spl) | numpy.mean |
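# Illustrative aside: the mapping score described above is essentially a
# (weighted) degree of each reference node restricted to edges that cross into
# the target sample. A toy networkx sketch of that idea, independent of the
# Graph class; all node names and weights are invented:
import networkx as nx
g = nx.Graph()
g.add_edge("ref1", "tgt1", weight=0.75)
g.add_edge("ref1", "tgt2", weight=0.5)
g.add_edge("ref2", "tgt1", weight=0.25)
target_nodes = {"tgt1", "tgt2"}
score_multiplier = 1000
score = {}
for ref in ("ref1", "ref2"):
    total = sum(d["weight"] for _, nbr, d in g.edges(ref, data=True)
                if nbr in target_nodes)
    score[ref] = score_multiplier * total / len(target_nodes)
print(score)    # {'ref1': 625.0, 'ref2': 125.0}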
import os
import h5py
import numpy as np
from hdmf import Container, Data
from hdmf.backends.hdf5 import H5DataIO
from hdmf.build import (GroupBuilder, DatasetBuilder, ObjectMapper, BuildManager, TypeMap, ReferenceBuilder,
ReferenceTargetNotBuiltError)
from hdmf.data_utils import DataChunkIterator
from hdmf.spec import (AttributeSpec, DatasetSpec, DtypeSpec, GroupSpec, SpecCatalog, SpecNamespace, NamespaceCatalog,
RefSpec)
from hdmf.spec.spec import ZERO_OR_MANY
from hdmf.testing import TestCase
from hdmf.utils import docval, getargs, call_docval_func
from tests.unit.utils import Foo, CORE_NAMESPACE
class Baz(Data):
@docval({'name': 'name', 'type': str, 'doc': 'the name of this Baz'},
{'name': 'data', 'type': (list, h5py.Dataset, 'data', 'array_data'), 'doc': 'some data'},
{'name': 'baz_attr', 'type': str, 'doc': 'an attribute'})
def __init__(self, **kwargs):
name, data, baz_attr = getargs('name', 'data', 'baz_attr', kwargs)
super().__init__(name=name, data=data)
self.__baz_attr = baz_attr
@property
def baz_attr(self):
return self.__baz_attr
class BazHolder(Container):
@docval({'name': 'name', 'type': str, 'doc': 'the name of this Baz'},
{'name': 'bazs', 'type': list, 'doc': 'some Baz data', 'default': list()})
def __init__(self, **kwargs):
name, bazs = getargs('name', 'bazs', kwargs)
super().__init__(name=name)
self.__bazs = {b.name: b for b in bazs} # note: collections of groups are unordered in HDF5
for b in bazs:
b.parent = self
@property
def bazs(self):
return self.__bazs
class BazSpecMixin:
def setUp(self):
self.setUpBazSpec()
self.spec_catalog = SpecCatalog()
self.spec_catalog.register_spec(self.baz_spec, 'test.yaml')
self.namespace = SpecNamespace('a test namespace', CORE_NAMESPACE, [{'source': 'test.yaml'}],
version='0.1.0',
catalog=self.spec_catalog)
self.namespace_catalog = NamespaceCatalog()
self.namespace_catalog.add_namespace(CORE_NAMESPACE, self.namespace)
self.type_map = TypeMap(self.namespace_catalog)
self.type_map.register_container_type(CORE_NAMESPACE, 'Baz', Baz)
self.type_map.register_map(Baz, ObjectMapper)
self.manager = BuildManager(self.type_map)
self.mapper = ObjectMapper(self.baz_spec)
def setUpBazSpec(self):
raise NotImplementedError('Test must implement this method.')
class TestDataMap(BazSpecMixin, TestCase):
def setUp(self):
self.setUpBazSpec()
self.spec_catalog = SpecCatalog()
self.spec_catalog.register_spec(self.baz_spec, 'test.yaml')
self.namespace = SpecNamespace('a test namespace', CORE_NAMESPACE, [{'source': 'test.yaml'}],
version='0.1.0',
catalog=self.spec_catalog)
self.namespace_catalog = NamespaceCatalog()
self.namespace_catalog.add_namespace(CORE_NAMESPACE, self.namespace)
self.type_map = TypeMap(self.namespace_catalog)
self.type_map.register_container_type(CORE_NAMESPACE, 'Baz', Baz)
self.type_map.register_map(Baz, ObjectMapper)
self.manager = BuildManager(self.type_map)
self.mapper = ObjectMapper(self.baz_spec)
def setUpBazSpec(self):
self.baz_spec = DatasetSpec(
doc='an Baz type',
dtype='int',
name='MyBaz',
data_type_def='Baz',
shape=[None],
attributes=[AttributeSpec('baz_attr', 'an example string attribute', 'text')]
)
def test_build(self):
''' Test default mapping functionality when no attributes are nested '''
container = Baz('MyBaz', list(range(10)), 'abcdefghijklmnopqrstuvwxyz')
builder = self.mapper.build(container, self.manager)
expected = DatasetBuilder('MyBaz', list(range(10)), attributes={'baz_attr': 'abcdefghijklmnopqrstuvwxyz'})
self.assertBuilderEqual(builder, expected)
def test_build_empty_data(self):
"""Test building of a Data object with empty data."""
baz_inc_spec = DatasetSpec(doc='doc', data_type_inc='Baz', quantity=ZERO_OR_MANY)
baz_holder_spec = GroupSpec(doc='doc', data_type_def='BazHolder', datasets=[baz_inc_spec])
self.spec_catalog.register_spec(baz_holder_spec, 'test.yaml')
self.type_map.register_container_type(CORE_NAMESPACE, 'BazHolder', BazHolder)
self.holder_mapper = ObjectMapper(baz_holder_spec)
baz = Baz('MyBaz', [], 'abcdefghijklmnopqrstuvwxyz')
holder = BazHolder('holder', [baz])
builder = self.holder_mapper.build(holder, self.manager)
expected = GroupBuilder(
name='holder',
datasets=[DatasetBuilder(
name='MyBaz',
data=[],
attributes={'baz_attr': 'abcdefghijklmnopqrstuvwxyz',
'data_type': 'Baz',
'namespace': 'test_core',
'object_id': baz.object_id}
)]
)
self.assertBuilderEqual(builder, expected)
def test_append(self):
with h5py.File('test.h5', 'w') as file:
test_ds = file.create_dataset('test_ds', data=[1, 2, 3], chunks=True, maxshape=(None,))
container = Baz('MyBaz', test_ds, 'abcdefghijklmnopqrstuvwxyz')
container.append(4)
| np.testing.assert_array_equal(container[:], [1, 2, 3, 4]) | numpy.testing.assert_array_equal |
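# Illustrative aside: appending through the hdmf Data container above amounts,
# roughly, to resizing the chunked h5py dataset and writing the new element.
# The raw h5py version of that pattern (file name is arbitrary):
import h5py
import numpy as np
with h5py.File("append_demo.h5", "w") as f:
    ds = f.create_dataset("d", data=[1, 2, 3], chunks=True, maxshape=(None,))
    ds.resize((ds.shape[0] + 1,))     # grow the first axis by one
    ds[-1] = 4
    np.testing.assert_array_equal(ds[:], [1, 2, 3, 4])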
import numpy
from hmmlearn.base import _BaseHMM
from hmmlearn.hmm import _check_and_set_gaussian_n_features
from hmmlearn import _utils
class FullPTHMM(_BaseHMM):
r"""Hidden Markov Model for Particle Tracking.
Args:
n_components (int): Number of states.
min_var (float, optional): Floor on the variance to prevent overfitting.
Defaults to 1e-5.
startprob_prior (array, optional):
shape (n_components, ). Parameters of the Dirichlet prior distribution for
:attr:`startprob_`.
transmat_prior (array, optional):
shape (n_components, n_components). Parameters of the Dirichlet prior distribution for each row
of the transition probabilities :attr:`transmat_`.
algorithm (string, optional):
            Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state (RandomState or an int seed, optional):
A random number generator instance.
n_iter (int, optional): Maximum number of iterations to perform.
tol (float, optional):
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose (bool, optional):
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params (string, optional):
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
init_params (string, optional):
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
Attributes:
monitor\_ (ConvergenceMonitor):
Monitor object used to check the convergence of EM.
startprob\_ (array): shape (n_components, ).
Initial state occupation distribution.
transmat\_ (array): shape (n_components, n_components).
Matrix of transition probabilities between states.
diffusivities\_ (array): shape (n_components, 1).
Diffusion constants for each state.
intensity_means\_ (array): shape (n_components, 1).
Mean parameters of intensity distribution for each state.
intensity_vars\_ (array): shape (n_components, 1).
Variance parameters of intensity distribution for each state.
"""
def __init__(self, n_components=1,
min_var=1e-5,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params="stdmv", init_params="stdmv"):
_BaseHMM.__init__(self, n_components,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
tol=tol, params=params, verbose=verbose,
init_params=init_params)
self.min_var = min_var
def _check(self):
super()._check()
self.diffusivities_ = numpy.asarray(self.diffusivities_)
assert self.diffusivities_.shape == (self.n_components, 1)
self.intensity_means_ = numpy.asarray(self.intensity_means_)
assert self.intensity_means_.shape == (self.n_components, 1)
self.intensity_vars_ = | numpy.asarray(self.intensity_vars_) | numpy.asarray |
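# Illustrative aside: the diffusivity parameters above are tied to the usual
# Brownian-motion relation; for 2D diffusion the mean squared displacement per
# step is 4*D*dt. Whether FullPTHMM uses exactly this convention is not shown
# in this excerpt, so the sketch below is only the generic relation:
import numpy
rng = numpy.random.default_rng(0)
D_true, dt, n_steps = 0.5, 0.1, 100000
steps = rng.normal(scale=numpy.sqrt(2 * D_true * dt), size=(n_steps, 2))
msd = numpy.mean(numpy.sum(steps ** 2, axis=1))
print(D_true, round(float(msd / (4 * dt)), 3))    # estimate should be close to 0.5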
# -*- coding: utf-8 -*-
import numpy as np
from neural import activations, costs
__all__ = ["NeuralNetwork", "activations", "costs"]
class NeuralNetwork(object):
"""A Feed-forward Neural Network"""
def __init__(self, sizes, activations, cost=costs.quad_cost, stddev=1.0):
if not hasattr(activations, '__iter__'):
activations = [activations] * (len(sizes) - 1)
self.activations = activations
self.cost = cost
self.weights = []
for i in range(1, len(sizes) - 1):
r = 2 * np.random.normal(
size=(sizes[i - 1] + 1, sizes[i] + 1), scale=stddev
) - 1
self.weights.append(r)
r = 2 * np.random.normal(
size=(sizes[-2] + 1, sizes[-1]), scale=stddev
) - 1
self.weights.append(r)
self.layer_count = len(self.weights)
def train(
self, x, y, alpha=0.2, epochs=100,
batch_size=1, momentum=0, use_softcross=False,
after_epoch=None
):
"""Train the network on a set of input/output values."""
# TODO: make biases work properly
# Reshape target data
prev_adj = [0] * self.layer_count
ys = np.array(y)
while ys.ndim < 2:
ys = np.expand_dims(ys, -1)
# Add bias to input data
xs = np.array(x, ndmin=2)
xs = np.concatenate((
xs,
np.ones((xs.shape[0], 1))
), axis=1)
num_cases = xs.shape[0]
# Chunk cases into batches
if batch_size < 1:
# Full-batch learning
batch_size = ys.shape[0]
i_batches = | np.array_split(xs, num_cases // batch_size) | numpy.array_split |
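# Illustrative aside: two small steps from train() above, shown standalone --
# appending the bias column of ones to the inputs and splitting the cases into
# roughly equal mini-batches with numpy.array_split (toy data only):
import numpy as np
x = np.arange(10, dtype=float).reshape(5, 2)
x_bias = np.concatenate((x, np.ones((x.shape[0], 1))), axis=1)   # bias input
batch_size = 2
batches = np.array_split(x_bias, x_bias.shape[0] // batch_size)
print([b.shape for b in batches])    # [(3, 3), (2, 3)]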
import os
import time
from typing import Union
from typing import Optional
from typing import Tuple
from gym import Env
import numpy as np
import cv2 as cv
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from dqn.agents import DQNAgent
from dqn.environments import GameWrapper, game_wrapper
class Agent():
def __init__(
self,
env: Union[str, Env],
input_shape: Tuple[int]=(84, 84),
batch_size: int=4,
history_length: int=4,
learning_rate: float=0.00001,
grad_descent_freq: int=4,
target_update_freq: int=1000,
max_frames: int=30000000,
max_episode_length: int=18000,
frames_between_eval: int=100000,
eval_length: int=10000,
gamma: float=0.99, # discount factor
replay_buffer_start_size: int=50000,
replay_buffer_size: int=100000,
no_op_steps: int=20,
save_path: Optional[str]='agent-checkpoints',
load_replay_buffer: bool=True,
use_tensorboard: bool=True,
tensorboard_path: Optional[str]='trainer-tensorboard',
priority_scale: float=0.7,
clip_reward: bool=True,
use_per: bool=False) -> None:
self.max_frames = max_frames
self.grad_descent_freq = grad_descent_freq
self.target_update_freq = target_update_freq
self.max_episode_length = max_episode_length
self.frames_between_eval = frames_between_eval
self.eval_length = eval_length
self.gamma = gamma
self.priority_scale = priority_scale
self.clip_reward = clip_reward
self.replay_buffer_start_size = replay_buffer_start_size
self.batch_size = batch_size
self.load_replay_buffer = load_replay_buffer
self.use_tensorboard = use_tensorboard
# self.tensorboard_path = tensorboard_path
self.save_path = save_path
self.game_wrapper = GameWrapper(env, no_op_steps=no_op_steps)
self.agent = DQNAgent(
n_actions=self.game_wrapper.env.action_space.n,
input_shape=input_shape,
batch_size=batch_size,
history_length=history_length,
learning_rate=learning_rate,
max_frames=max_frames,
replay_buffer_size=replay_buffer_size,
replay_buffer_start_size=replay_buffer_start_size,
use_per=use_per)
self.logger = tf.summary.create_file_writer(tensorboard_path)
self.frame_number: int = 0
self.rewards: list = []
self.loss_list: list = []
self._is_exist_load_saved_agent_from(self.save_path)
def load(self, path: str) -> None:
print(f'Loading from {path}')
meta = self.agent.load(path, self.load_replay_buffer)
self.frame_number = meta['frame_number']
self.rewards = meta['rewards']
self.loss_list = meta['loss_list']
print(f'Loaded from {path}')
def run(self) -> None:
try:
with self.logger.as_default():
                while self.frame_number < self.max_frames: # Why does this need to be a while loop?
eval_interval = 0
while eval_interval < self.frames_between_eval:
start_time = time.time()
self.game_wrapper.reset()
life_lost: bool = True
episode_reward_sum: int = 0
for _ in range(self.max_episode_length):
action = self.agent.get_action(self.game_wrapper.state, frame_number=self.frame_number)
processed_frame, reward, terminal, life_lost, frame = self.game_wrapper.step(action)
# Show game
cv.imshow('Game', frame)
if cv.waitKey(1) & 0xFF == ord('q'):
raise KeyboardInterrupt
self.frame_number += 1
eval_interval += 1
episode_reward_sum += reward
self.agent.add_experience(
action=action,
frame=processed_frame[:, :, 0],
reward=reward,
clip_reward=self.clip_reward,
terminal=terminal)
if self.frame_number % self.grad_descent_freq == 0 and self.frame_number > self.replay_buffer_start_size:
loss, _ = self.agent.learn(
gamma=self.gamma,
frame_number=self.frame_number,
priority_scale=self.priority_scale)
self.loss_list.append(loss)
if self.frame_number % self.target_update_freq == 0 and self.frame_number > self.replay_buffer_start_size:
self.agent.update_target_network()
if terminal:
terminal = False
break
self.rewards.append(episode_reward_sum)
if len(self.rewards) % 10 == 0:
if self.use_tensorboard:
tf.summary.scalar('Reward', np.mean(self.rewards[-10:]), self.frame_number)
tf.summary.scalar('Loss', np.mean(self.loss_list[-100:]), self.frame_number)
self.logger.flush()
print(f'Game number: {str(len(self.rewards)).zfill(6)} Frame number: {str(self.frame_number).zfill(8)} Average reward: { | np.mean(self.rewards[-10:]) | numpy.mean |
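# Illustrative aside: the TensorBoard logging above reports trailing means
# (last 10 episode rewards, last 100 losses). A tiny standalone helper doing
# the same windowed averaging:
import numpy as np
def trailing_mean(values, window):
    return float(np.mean(values[-window:])) if values else float("nan")
rewards = [1, 2, 3, 4, 5]
print(trailing_mean(rewards, 10))    # 3.0, uses whatever history exists
print(trailing_mean(rewards, 3))     # 4.0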
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import numpy.testing as npt
import pymc3
from pymc3.aesaraf import floatX
from pymc3.step_methods.hmc.base_hmc import BaseHMC
from pymc3.tests import models
logger = logging.getLogger("pymc3")
def test_leapfrog_reversible():
n = 3
| np.random.seed(42) | numpy.random.seed |
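# Illustrative aside: test_leapfrog_reversible checks a core HMC property,
# namely that integrating forward and then backward (with the momentum flipped)
# returns the starting point up to floating-point error. A generic leapfrog
# sketch of that property, written independently of pymc3's BaseHMC internals:
import numpy as np
def leapfrog(q, p, grad_logp, eps, n_steps):
    q, p = q.copy(), p.copy()
    p += 0.5 * eps * grad_logp(q)          # initial half step for momentum
    for _ in range(n_steps - 1):
        q += eps * p
        p += eps * grad_logp(q)
    q += eps * p                           # final full step for position
    p += 0.5 * eps * grad_logp(q)          # final half step for momentum
    return q, p
grad_logp = lambda q: -q                   # standard normal log-density gradient
q0, p0 = np.array([1.0, -0.5]), np.array([0.3, 0.7])
q1, p1 = leapfrog(q0, p0, grad_logp, eps=0.1, n_steps=20)
q2, p2 = leapfrog(q1, -p1, grad_logp, eps=0.1, n_steps=20)
assert np.allclose(q2, q0) and np.allclose(-p2, p0)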
import os
import numpy as np
import pandas as pd
import codecs, json
import math
def cal_varn_4 (data_num) :
mat = np.cov(data_num.T)
mat = np.diag(np.diag(mat))
return np.divide(mat,100)
def generate_num_neighbors_4 (inst_num, n, varn):
return np.random.multivariate_normal(inst_num,varn,n)
def generate_categ_neighbors_4 (inst_categ,n ,mat_nb_categ,special) :
rs = np.random.RandomState()
p_categ = np.size(inst_categ)
categ_neigh = np.zeros(n*p_categ).reshape(n,p_categ)
for j in range(0,p_categ) :
med = int(n/2)
if j in special :
categ_neigh[:med,j] = inst_categ[j] - 1
else :
categ_neigh[:med,j] = inst_categ[j]
categ_neigh[med:,j] = rs.choice(mat_nb_categ[j], size=(1, med))[0]
return categ_neigh
def generate_all_neighbors_4(data, num_indices, categ_indices, mat_nb_categ, n_neigh,special) :
list_neigh = []
n = | np.size(data,0) | numpy.size |
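# Illustrative aside: putting the helpers above together on toy data, a
# diagonal covariance shrunk by 100 is used to draw perturbed numeric
# neighbours around one instance (the data values below are invented):
import numpy as np
data_num = np.array([[1.0, 10.0], [2.0, 12.0], [3.0, 9.0], [4.0, 11.0]])
varn = np.diag(np.diag(np.cov(data_num.T))) / 100     # same recipe as cal_varn_4
neighbours = np.random.multivariate_normal(data_num[0], varn, 5)
print(neighbours.shape)    # (5, 2)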
"""This module handles Planck's law of blackbody radiation.
**Global Variables**
* ``pysynphot.planck.H`` - Planck's constant in CGS units.
* ``pysynphot.planck.HS`` - Planck's constant in SI units.
* ``pysynphot.planck.C`` - Speed of light in SI units.
* ``pysynphot.planck.K`` - Boltzmann constant in SI units.
These are used in calculations to prevent floating point overflow, as defined
in IRAF STSDAS SYNPHOT ``bbfunc`` task:
* ``pysynphot.planck.LOWER``
* ``pysynphot.planck.UPPER``
These are constants used in :func:`llam_SI`:
* ``pysynphot.planck.C1`` - Power :math:`\\times` unit area per steradian.
* ``pysynphot.planck.C2``
This is used in :func:`bb_photlam_arcsec`:
* ``pysynphot.planck.F`` - Factor for conversion from
:math:`\\textnormal{m}^{2} \\; \\textnormal{sr}^{-1} \\; \\textnormal{m}^{-1}` to
:math:`\\textnormal{cm}^{2} \\; \\textnormal{arcsec}^{-2} \\; \\AA^{-1}`.
"""
from __future__ import division
import math
import numpy as N
H = 6.6262E-27 # Planck's constant in cgs units
HS = 6.6262E-34 # Planck's constant in standard units
C = 2.997925E+8 # speed of light in standard units
K = 1.38062E-23 # Boltzmann constant in standard units
C1 = 2.0 * HS * C * C # Power * unit area / steradian
C2 = HS * C / K
# Anand's original comments for the F factor:
#
# >>> af = 0.01 * 0.01 # per m^2 --> per cm^2
# >>> af
# 0.0001
# >>> sf = 206265.0 * 206265.0
# >>> sf = 1/sf
# >>> sf # per sr --> per sqarcsec
# 2.3504386381829067e-11
# >>> af * sf
# 2.3504386381829069e-15
# >>> af * sf * 1.0e-10 # per m --> per Angstrom
# 2.3504386381829069e-25
#
F = 2.3504386381829069E-25 # convert from m^2/steradian/m to
# cm^2/sq.arcsec/A (see below)
LOWER = 1.0E-4 # taken from synphot's bbfunc.
UPPER = 85.
def bbfunc(wave, temperature):
"""Evaluate Planck's law in ``photlam`` (per steradian).
.. note::
Adapted from IRAF STSDAS SYNPHOT ``bbfunc`` task.
Parameters
----------
wave : array_like
Wavelength values in Angstrom.
temperature : float
Blackbody temperature in Kelvin.
Returns
-------
result : array_like
Blackbody radiation in ``photlam`` per steradian.
"""
x = wave * temperature
mask = N.where(x > 0.0, 1, 0)
x = | N.where(mask==1, 1.43883E8 / x, 0.0) | numpy.where |
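# Illustrative aside: a sanity check on the Planck-law machinery above is that
# the spectral peak obeys Wien's displacement law, lambda_max * T ~ 2.898e-3 m K.
# This sketch evaluates the SI spectral radiance directly rather than calling
# bbfunc (whose output is in photlam per steradian):
import numpy as N
HS_SI, C_SI, K_SI = 6.6262E-34, 2.997925E+8, 1.38062E-23
T = 5800.0                               # roughly the solar temperature
lam = N.linspace(1e-7, 3e-6, 20000)      # wavelengths in metres
B = (2 * HS_SI * C_SI ** 2 / lam ** 5) / (N.exp(HS_SI * C_SI / (lam * K_SI * T)) - 1.0)
print(lam[N.argmax(B)] * T)              # ~2.9e-3 m K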
import argparse
from collections import deque
import numpy as np
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.python import keras as K
from PIL import Image
import gym
import gym_ple
from fn_framework import FNAgent, Trainer, Observer
tf.compat.v1.disable_eager_execution()
class ActorCriticAgent(FNAgent):
def __init__(self, actions):
# ActorCriticAgent uses self policy (doesn't use epsilon).
super().__init__(epsilon=0.0, actions=actions)
self._updater = None
@classmethod
def load(cls, env, model_path):
actions = list(range(env.action_space.n))
agent = cls(actions)
agent.model = K.models.load_model(model_path, custom_objects={
"SampleLayer": SampleLayer})
agent.initialized = True
return agent
def initialize(self, experiences, optimizer):
feature_shape = experiences[0].s.shape
self.make_model(feature_shape)
self.set_updater(optimizer)
self.initialized = True
print("Done initialization. From now, begin training!")
def make_model(self, feature_shape):
normal = K.initializers.glorot_normal()
model = K.Sequential()
model.add(K.layers.Conv2D(
32, kernel_size=8, strides=4, padding="same",
input_shape=feature_shape,
kernel_initializer=normal, activation="relu"))
model.add(K.layers.Conv2D(
64, kernel_size=4, strides=2, padding="same",
kernel_initializer=normal, activation="relu"))
model.add(K.layers.Conv2D(
64, kernel_size=3, strides=1, padding="same",
kernel_initializer=normal, activation="relu"))
model.add(K.layers.Flatten())
model.add(K.layers.Dense(256, kernel_initializer=normal,
activation="relu"))
actor_layer = K.layers.Dense(len(self.actions),
kernel_initializer=normal)
action_evals = actor_layer(model.output)
actions = SampleLayer()(action_evals)
critic_layer = K.layers.Dense(1, kernel_initializer=normal)
values = critic_layer(model.output)
self.model = K.Model(inputs=model.input,
outputs=[actions, action_evals, values])
def set_updater(self, optimizer,
value_loss_weight=1.0, entropy_weight=0.1):
actions = tf.compat.v1.placeholder(shape=(None), dtype="int32")
values = tf.compat.v1.placeholder(shape=(None), dtype="float32")
_, action_evals, estimateds = self.model.output
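# Policy-gradient term: negative log-probability of the taken action, weighted below
# by the advantage (the observed return minus the critic's estimate, gradient stopped).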
neg_logs = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=action_evals, labels=actions)
# tf.stop_gradient: prevent policy_loss from influencing critic_layer.
advantages = values - tf.stop_gradient(estimateds)
policy_loss = tf.reduce_mean(neg_logs * advantages)
value_loss = tf.keras.losses.MeanSquaredError()(values, estimateds)
action_entropy = tf.reduce_mean(self.categorical_entropy(action_evals))
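# Combined loss: policy loss plus weighted critic (value) loss, minus an entropy
# bonus that encourages exploration.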
loss = policy_loss + value_loss_weight * value_loss
loss -= entropy_weight * action_entropy
updates = optimizer.get_updates(loss=loss,
params=self.model.trainable_weights)
self._updater = K.backend.function(
inputs=[self.model.input,
actions, values],
outputs=[loss,
policy_loss,
value_loss,
tf.reduce_mean(neg_logs),
tf.reduce_mean(advantages),
action_entropy],
updates=updates)
def categorical_entropy(self, logits):
"""
From OpenAI baseline implementation.
https://github.com/openai/baselines/blob/master/baselines/common/distributions.py#L192
"""
a0 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.math.log(z0) - a0), axis=-1)
def policy(self, s):
if not self.initialized:
return np.random.randint(len(self.actions))
else:
action, action_evals, values = self.model.predict(np.array([s]))
return action[0]
def estimate(self, s):
action, action_evals, values = self.model.predict(np.array([s]))
return values[0][0]
def update(self, states, actions, rewards):
return self._updater([states, actions, rewards])
class SampleLayer(K.layers.Layer):
def __init__(self, **kwargs):
self.output_dim = 1 # sample one action from evaluations
super(SampleLayer, self).__init__(**kwargs)
def build(self, input_shape):
super(SampleLayer, self).build(input_shape)
def call(self, x):
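# Gumbel-max trick: adding -log(-log(U)) noise to the logits and taking the argmax
# draws a sample from the categorical distribution softmax(x).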
noise = tf.random.uniform(tf.shape(x))
return tf.argmax(x - tf.math.log(-tf.math.log(noise)), axis=1)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
class ActorCriticAgentTest(ActorCriticAgent):
def make_model(self, feature_shape):
normal = K.initializers.glorot_normal()
model = K.Sequential()
model.add(K.layers.Dense(10, input_shape=feature_shape,
kernel_initializer=normal, activation="relu"))
model.add(K.layers.Dense(10, kernel_initializer=normal,
activation="relu"))
actor_layer = K.layers.Dense(len(self.actions),
kernel_initializer=normal)
action_evals = actor_layer(model.output)
actions = SampleLayer()(action_evals)
critic_layer = K.layers.Dense(1, kernel_initializer=normal)
values = critic_layer(model.output)
self.model = K.Model(inputs=model.input,
outputs=[actions, action_evals, values])
class CatcherObserver(Observer):
def __init__(self, env, width, height, frame_count):
super().__init__(env)
self.width = width
self.height = height
self.frame_count = frame_count
self._frames = deque(maxlen=frame_count)
def transform(self, state):
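# Convert the frame to grayscale, resize, scale to [0, 1], and stack the most recent
# frame_count frames to form the state feature.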
grayed = Image.fromarray(state).convert("L")
resized = grayed.resize((self.width, self.height))
resized = np.array(resized).astype("float")
normalized = resized / 255.0 # scale to 0~1
if len(self._frames) == 0:
for i in range(self.frame_count):
self._frames.append(normalized)
else:
self._frames.append(normalized)
feature = np.array(self._frames)
#!/usr/bin/env python
#
# test_fsl_ents.py -
#
# Author: <NAME> <<EMAIL>>
#
import sys
import numpy as np
import pytest
import fsl.utils.tempdir as tempdir
import fsl.scripts.fsl_ents as extn
def test_genComponentIndexList():
with tempdir.tempdir():
# sequence of 1-indexed integers/file paths
icomps = [1, 5, 28, 12, 42, 54]
fcomps1 = [1, 4, 6, 3, 7]
fcomps2 = [12, 42, 31, 1, 4, 8]
with open('comps1.txt', 'wt') as f:
f.write(','.join([str(l) for l in fcomps1]))
with open('comps2.txt', 'wt') as f:
f.write(','.join([str(l) for l in fcomps2]))
ncomps = 60
comps = icomps + ['comps1.txt', 'comps2.txt']
expcomps = list(sorted(set(icomps + fcomps1 + fcomps2)))
expcomps = [c - 1 for c in expcomps]
assert extn.genComponentIndexList(comps, ncomps) == expcomps
with pytest.raises(ValueError):
extn.genComponentIndexList(comps + [-1], 60)
with pytest.raises(ValueError):
extn.genComponentIndexList(comps, 40)
def test_loadConfoundFiles():
with tempdir.tempdir():
npts = 50
confs = [
np.random.randint(1, 100, (50, 10)),
np.random.randint(1, 100, (50, 1)),
np.random.randint(1, 100, (50, 5))]
badconfs = [
np.random.randint(1, 100, (40, 10)),
np.random.randint(1, 100, (60, 10))]
expected = np.empty((50, 16), dtype=np.float64)
expected[:, :] = np.nan
expected[:, :10] = confs[0]
expected[:, 10:11] = confs[1]
expected[:, 11:16] = confs[2]
conffiles = []
for i, c in enumerate(confs):
fname = 'conf{}.txt'.format(i)
conffiles.append(fname)
np.savetxt(fname, c)
result = extn.loadConfoundFiles(conffiles, npts)
amask = ~np.isnan(expected)
assert np.all(~np.isnan(result) == amask)
assert np.all(result[amask] == expected[amask])
assert np.all(result[amask] == expected[amask])
badconfs = [
np.random.randint(1, 100, (40, 10)),
np.random.randint(1, 100, (60, 10))]
conffiles = []
for i, c in enumerate(badconfs):
fname = 'conf{}.txt'.format(i)
conffiles.append(fname)
np.savetxt(fname, c)
with pytest.raises(ValueError):
extn.loadConfoundFiles(conffiles, npts)
def test_fsl_ents():
with tempdir.tempdir() as td:
# (npts, ncomps)
melmix = np.random.randint(1, 100, (100, 20))
np.savetxt('melodic_mix', melmix)
sys.argv = ['fsl_ents', td] + '-o out.txt 1 2 3'.split()
extn.main()
assert np.all(np.loadtxt('out.txt') == melmix[:, :3])  # expected values assumed from the '1 2 3' component selection
import itasca
import math
import numpy as np
itasca.command("""
set echo off
call AE_postprocess2D.dat
set echo on
""")
def AE_draw_ball(r_min,r_max):
# Draw the visualization of the events
itasca.fish.set('r_min',r_min)
itasca.fish.set('r_max',r_max)
itasca.fish.call_function('AE_draw_ball')
print('Finish draw_ball')
def cal_ratio_R():
# Compute the moment-tensor R value for each event
hit_num = itasca.fish.get('hit_num')
if hit_num > 0:
for num_h in range(1,hit_num+1):
itasca.fish.set('num_h',num_h)
itasca.fish.call_function('copy_data')
m1 = itasca.fish.get('m_xx')
m2 = itasca.fish.get('m_xy')
m3 = itasca.fish.get('m_yx')
m4 = itasca.fish.get('m_yy')
m = [[m1, m2],[m3,m4]]
lis = np.array(m)
instead=np.linalg.eig(lis)
m_1 = instead[0][0]
m_2 = instead[0][1]
m_3 = 0  # 2D simulation, so m_3 is 0
if m_1 == 0 and m_2 == 0 and m_3 == 0:
print('The eigenvalue is equal to zero. Pass!')
elif np.isnan(m_1) or np.isnan(m_2) or np.isnan(m_3):
print('The eigenvalue is equal to nan. Pass!')
else:
M_0_new = math.sqrt((math.pow(m_1,2) + math.pow(m_2,2) + math.pow(m_3,2))/2)
M_0 = itasca.fish.get('M_0')
M_w = itasca.fish.get('M_w')
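# Keep the largest scalar moment seen so far and convert it to a moment magnitude,
# M_w = (2/3) * log10(M_0) - 6.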
if M_0 <= M_0_new:
M_0 = M_0_new
M_w = 2*math.log10(M_0)/3 - 6
tr_M = m_1 + m_2 #+ m_3
sum_m = abs(m_1-tr_M/2) + abs(m_2-tr_M/2) #+ abs(m_3-tr_M/3)
value_R = tr_M*100/(abs(tr_M)+sum_m)
if isinstance(m_1,complex) or isinstance(m_2,complex):
print('The m_1 or m_2 is complex. Pass!')
itasca.fish.set('m_1',0)
itasca.fish.set('m_2',0)
else:
itasca.fish.set('m_1',m_1)
itasca.fish.set('m_2',m_2)
itasca.fish.set('M_0',M_0)
itasca.fish.set('M_w',M_w)
if isinstance(value_R,complex):
print('The value_R is complex. Pass!')
itasca.fish.set('value_R',200)
else:
itasca.fish.set('value_R',value_R)
itasca.fish.call_function('assign_data')
def draw_ratio_R(end_time,r_R):
# Plot the shape of the event moment-tensor R values
itasca.fish.call_function('draw_R_main')
hit_num = itasca.fish.get('hit_num')
itasca.fish.set('r_R',r_R)
for num_h in range(1,hit_num+1):
itasca.fish.set('num_h',num_h)
itasca.fish.call_function('copy_data')
hit_begin = itasca.fish.get('hit_begin')
if end_time == 0 or hit_begin <= end_time:  # cutoff time in seconds; 0 plots all results
itasca.fish.call_function('draw_ratio_R')
print('Finish draw_ratio_R')
def draw_tensor():
# Plot the event moment tensors
hit_num = itasca.fish.get('hit_num')
itasca.fish.call_function('delete_vector')
for num_t in range(1,hit_num+1):
itasca.fish.set('num_t',num_t)
itasca.fish.call_function('copy_data')
#t_now = itasca.fish.get('t_now')
#AE_long = itasca.fish.get('AE_long')
#if t_now > AE_long:
m1 = itasca.fish.get('m_xx')
m2 = itasca.fish.get('m_xy')
m3 = itasca.fish.get('m_yx')
m4 = itasca.fish.get('m_yy')
m = [[m1, m2],[m3,m4]]
lis = np.array(m)
from itertools import islice
import numpy as np
from scrtbp.system import coeffs
from scrtbp.taylor import integrators
def test_fixed_stepper():
mu = 0.01215
taylor_params = coeffs.generate_taylor_coeffs(mu)
period = 21.1810525829419
n_points = 70
step = period / (n_points - 1)
order = 20
solve = integrators.generate_fixed_step_integrator(taylor_params, step, order)
init_cond = np.array(
[
0.440148176542848,
0.783403421942971,
0.0,
-0.905419824338076,
0.540413382924902,
0.0,
]
)
points = solve(init_cond, n_points)
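# The initial condition lies on a periodic orbit, so after one full period the
# trajectory should close on itself to near machine precision.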
assert np.allclose(points[0], points[-1], 0.0, 1e-14)
def test_adaptive_dense_integration():
mu = 0.01215
taylor_params = coeffs.generate_taylor_coeffs(mu)
order = 20
solve = integrators.generate_adaptive_dense_integrator(
taylor_params, order, tol_abs=1e-16, tol_rel=1e-16
)
init_cond = np.array(
[
0.440148176542848,
0.783403421942971,
0.0,
-0.905419824338076,
0.540413382924902,
0.0,
]
)
period = 21.1810525829419
t = np.linspace(0.0, period, 1000, endpoint=True)
points = solve(init_cond, t)
assert np.allclose(points[0], points[-1], 0.0, 1e-14)
def test_adaptive_fixed_integration():
mu = 0.01215
taylor_params = coeffs.generate_taylor_coeffs(mu)
order = 20
def exit_condition(state):
y = state[1]
return y < 0.2
solve = integrators.generate_fixed_step_adaptive_integrator(
taylor_params,
order,
tol_abs=1e-16,
tol_rel=1e-16,
py_exit_condition=exit_condition,
)
init_cond = np.array(
[0.39785, 0.7101408311032396, 0.0, -0.9860206223973105, 0.5715886728443681, 0.0]
)
points = solve(init_cond, 20, 5.0)
assert points.shape[0] == 20
last_state = np.array(
[
-0.13265194931562035,
1.0531334795398335,
0.0,
-0.9420753032804786,
-0.08777226445063985,
0.0,
]
)
assert np.allclose(last_state, points[-1], 0.0, 1e-14)
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
from scipy.spatial import cKDTree
from models import BoundingBox, Frame
from os.path import join, isfile
from os import listdir
from oxt import load_oxts_lite_data, oxts2pose
from frame_handler import FrameHandler
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import time
class BoundingBoxPredictor():
def __init__(self, frame_handler):
self.n_segs = (1,1)
self.n_iter=5
self.n_lpr=500
self.th_seeds=.4
self.th_dist=.2
self.frame_handler = frame_handler
self.oxt_path = "oxts/"
self.oxts = {drive: load_oxts_lite_data(join(FrameHandler.DATASET_DIR, drive), self.frame_handler.drives[drive])
for drive in self.frame_handler.drives.keys()}
self.poses = {drive: oxts2pose(self.oxts[drive]) for drive in self.oxts.keys()}
def transform_coords(self, fname, x, inv=False):
if x.size == 2:
x = np.append(x, [0, 1])
if x.size == 3:
x = np.append(x, [1])
idx = self.frame_handler.frame_names.index(fname)
transform = self.poses[idx]
if inv:
transform = np.linalg.inv(transform)
return transform @ x
def get_velocities(self, prev_frame, cur_frame, ref_fname):
bounding_boxes = sorted(cur_frame.bounding_boxes,
key=lambda box: box.box_id)
velocities = {}
prev_frame_bounding_boxes = {box.box_id:box for box in prev_frame.bounding_boxes}
for i, box in enumerate(bounding_boxes):
box_id = box.box_id
print(box_id)
cur_center = box.center
if box_id in prev_frame_bounding_boxes:
prev_center = prev_frame_bounding_boxes[box_id].center
cur_center_corr = self.transform_coords(cur_frame.fname, cur_center)
prev_center_corr = self.transform_coords(prev_frame.fname, prev_center)
velocities[box_id] = self.transform_coords(ref_fname,
cur_center - prev_center,
inv=True)[:2]
return velocities
def predict_next_frame_bounding_boxes(self, frame):
drivename, fname = frame.fname.split('.')[0].split("/")
print(self.frame_handler.drives[drivename])
idx = self.frame_handler.drives[drivename].index(fname)
next_fname = self.frame_handler.drives[drivename][idx+1]
pc = self.frame_handler.get_pointcloud(drivename, fname, dtype=float, ground_removed=True)
next_pc = self.frame_handler.get_pointcloud(drivename, next_fname, dtype=float, ground_removed=True)
print(fname)
print([box.box_id for box in frame.bounding_boxes])
bounding_boxes = sorted(frame.bounding_boxes,
key=lambda box:box.box_id)
centers = {box.box_id:box.center for box in bounding_boxes}
velocities = {box_id:np.zeros(2) for box_id in centers.keys()}
next_pc[:,2] = 0
next_pc = next_pc[:,:3]
np.random.shuffle(next_pc)
next_pc_small = next_pc[::4]
next_bounding_boxes = {}
for bounding_box in bounding_boxes:
try:
next_bounding_boxes[str(bounding_box.box_id)] = self._predict_next_frame_bounding_box(bounding_box, next_pc_small)
except:
pass
# next_bounding_boxes = {str(bounding_box.box_id):self._predict_next_frame_bounding_box(bounding_box, next_pc_small)
# for bounding_box in bounding_boxes}
return next_bounding_boxes
def _predict_next_frame_bounding_box(self, bounding_box, pc):
start = time.time()
without_cluster, cluster = bounding_box.filter_pointcloud(pc)
np.random.shuffle(cluster)
sample_indices = []
kd_tree = cKDTree(pc)
# for point in cluster:
# dists, nn_indices = kd_tree.query(point, 1)
# sample_indices.append(nn_indices)
point = np.mean(cluster, axis=0)
#trim png
dists, ii = kd_tree.query(point, len(pc))
cutoff_idx = np.where(dists < 6)[0][-1]
pc_trimmed = pc[ii[:cutoff_idx]]
np.random.shuffle(pc_trimmed)
if pc_trimmed.shape[0] > 5000:
pc_trimmed = pc_trimmed[::4]
elif pc_trimmed.shape[0] > 2500:
pc_trimmed = pc_trimmed[::2]
pc_trimmed = pc_trimmed[::2]
kd_tree = cKDTree(pc_trimmed)
# Create random starting points for clustering algorithm
# std = .3
# seeds = np.random.randn(100, 3) * std + point
# seeds = np.vstack((point, seeds))
# seeds = kd_tree.query(point, 50)
dists, sample_indices = kd_tree.query(point, 50)
# cluster_res = self.find_cluster(sample_indices, pc_trimmed, th_dist=.4, num_nn=20, num_samples=20)
# edges, corners = self.search_rectangle_fit(cluster_res['cluster'], variance_criterion)
res = self.predict_bounding_box(point, pc, num_seeds=5, plot=False)
print("time to predict bounding box: ", time.time() - start)
# return self.corners_to_bounding_box(corners, context=bounding_box)
return res
def corners_to_bounding_box(self, corners, context=None):
sorted_corners = sorted(corners, key=lambda x:x[1])
if sorted_corners[2][0] > sorted_corners[3][0]:
sorted_corners[2], sorted_corners[3] = sorted_corners[3], sorted_corners[2]
if sorted_corners[0][0] > sorted_corners[1][0]:
sorted_corners[0], sorted_corners[1] = sorted_corners[1], sorted_corners[0]
top_right_corner = sorted_corners[3]
top_left_corner = sorted_corners[2]
bottom_left_corner = sorted_corners[0]
bottom_right_corner = sorted_corners[1]
center = np.mean(np.vstack((top_right_corner, bottom_left_corner)), axis=0)
w = np.linalg.norm(top_right_corner - top_left_corner)
l = np.linalg.norm(top_left_corner[1] - bottom_left_corner[1])
if w < l:
w, l = l, w
top_left_corner, top_right_corner, bottom_right_corner, bottom_left_corner = top_right_corner, bottom_right_corner, bottom_left_corner, top_left_corner
top_right_corner = top_right_corner - top_left_corner
angle = np.arctan2(top_right_corner[1], top_right_corner[0])
top_right_corner += top_left_corner
if context:
candidate_angles = np.array([angle-np.pi, angle, angle+np.pi])
prev_angle = context.angle
angle = candidate_angles[np.argmin(np.abs(candidate_angles - prev_angle))]
bounding_box = {"center":center.tolist(), "angle":angle, "width":w, "length":l,
"corner1":top_right_corner.tolist(), "corner2":bottom_left_corner.tolist()}
return bounding_box
def predict_bounding_box(self, point, pc, num_seeds=5, plot=False):
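# Flatten the cloud onto the ground plane (z = 0), trim it to points near the seed
# point with a KD-tree, grow a cluster from a few jittered seed points, then fit a
# rectangle to the cluster and convert its corners to a bounding box.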
# png = self.ground_plane_fitting(pc)["png"]
print("point: {}".format(point))
assert len(pc.shape) == 2, "pointcloud must have 2-dimensional shape"
png = pc
if png.shape[1] == 4:
png = png[:,:3]
if point.size == 2:
point = np.append(point, [0])
if point.size == 4:
point = point[:3]
png[:,2] = 0
kd_tree = cKDTree(png)
print(len(png))
#trim png
dists, ii = kd_tree.query(point, len(png))
cutoff_idx = np.where(dists < 6)[0][-1]
png_trimmed = png[ii[:cutoff_idx]]
print(png_trimmed.shape)
np.random.shuffle(png_trimmed)
if png_trimmed.shape[0] > 5000:
png_trimmed = png_trimmed[::4]
elif png_trimmed.shape[0] > 2500:
png_trimmed = png_trimmed[::2]
kd_tree = cKDTree(png_trimmed)
# Create random starting points for clustering algorithm
std = .1
seeds = np.random.randn(num_seeds, 3) * std + point
seeds = np.vstack((point, seeds))
dists, sample_indices = kd_tree.query(seeds)
cluster_res = self.find_cluster(sample_indices, png_trimmed, th_dist=.5, num_nn=20, num_samples=20)
edges, corners = self.search_rectangle_fit(cluster_res["cluster"], variance_criterion)
if plot:
fig = plt.figure(figsize=(8,8))
plt.scatter(cluster_res["cluster"][:,1], cluster_res["cluster"][:,0], c='g')
plt.scatter(corners[:,1], corners[:,0], c='r')
self.plot_edges(corners)
plt.show()
return self.corners_to_bounding_box(corners)
def plot_edges(self, corners, num_samples=100, c='r', label=''):
for i in range(4):
v1, v2 = corners[i], corners[(i+1)%4]
x = np.linspace(v1[0], v2[0], num_samples)
y = np.linspace(v1[1], v2[1], num_samples)
plt.plot(y, x, c=c, label=label)
def search_farthest_nearest_neighbor(self, point, kd_tree, th_dist):
num_nn = 2
dists, nn_indices = kd_tree.query(point, num_nn)
# print("th dist: ", th_dist)
while (dists[-1] < th_dist):
num_nn = num_nn * 2
dists, nn_indices = kd_tree.query(point, num_nn)
return dists, nn_indices
def find_cluster(self, sample_indices, pc, th_dist=.2, density_thresh=10, num_nn=16, num_samples=20, overlap_thresh=.2):
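# Breadth-first region growing: starting from each sampled point, repeatedly add
# KD-tree neighbours closer than th_dist to the cluster, then return the largest
# cluster found.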
clusters = []
seen_indices = []
kd_tree = cKDTree(pc)
for idx in sample_indices[:num_samples]:
cluster = []
queue = []
seen = set()
seen.add(idx)
queue.append(idx)
while len(queue):
idx = queue.pop(0)
point = pc[idx]
cluster.append(point)
dists, nn_indices = self.search_farthest_nearest_neighbor(point, kd_tree, th_dist)
# dists, nn_indices = kd_tree.query(point, num_nn)
if (len(nn_indices) > density_thresh):
for i in range(len(nn_indices)):
if nn_indices[i] not in seen and dists[i] < th_dist:
seen.add(nn_indices[i])
queue.append(nn_indices[i])
clusters.append(np.vstack(cluster))
seen_indices.append(np.array(list(seen)))
overlapping_clusters = []
# for i in range(len(seen_indices)):
# num_overlapping = sum([len(np.intersect1d(seen_indices[i], seen_indices[j]))/len(seen_indices[i]) > overlap_thresh for j in range(len(seen_indices)) if j!=i])
# overlapping_clusters.append(num_overlapping)
# largest_cluster = np.argmax(overlapping_clusters)
# res = {"cluster": clusters[largest_cluster], "indices": seen_indices[largest_cluster]}
# largest_cluster = np.unique(np.concatenate(seen_indices))
largest_cluster = max(clusters, key=lambda cl:len(cl))
res = {"cluster": largest_cluster, "indices": largest_cluster}
return res
def ground_plane_fitting(self, pc):
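# Iterative ground-plane segmentation: split the cloud into grid segments, seed the
# ground with the lowest points, then alternately fit a plane and re-classify points
# as ground (pg) or non-ground (png) by their distance to the plane.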
x_max, x_min = np.max(pc[:,0]), np.min(pc[:,0])
y_max, y_min = np.max(pc[:,1]), np.min(pc[:,1])
seg_size_x = (x_max - x_min) / self.n_segs[0]
seg_size_y = (y_max - y_min) / self.n_segs[1]
res_pg = []
res_png = []
for i in range(self.n_segs[0]):
for j in range(self.n_segs[1]):
indices = np.intersect1d(np.intersect1d(np.where(pc[:,0] >= x_min + i*seg_size_x)[0],
np.where(pc[:,0] < x_min + (i+1)*seg_size_x)[0]),
np.intersect1d(np.where(pc[:,1] >= y_min + j*seg_size_y)[0],
np.where(pc[:,1] < y_min + (j+1)*seg_size_y)[0]))
if not len(indices):
continue
# print(len(indices))
seg = pc[indices]
pg = self.extract_initial_seeds(seg, self.n_lpr, self.th_seeds)
png = []
for _ in range(self.n_iter):
model = self.estimate_plane(pg)
pg, png = [], [np.zeros((1, 3))]
for p in seg:
if model(p) < self.th_dist:
pg.append(p)
else:
png.append(p)
# print(len(pg), len(png))
pg, png = np.vstack(pg), np.vstack(png)
png = np.delete(png, 0, axis=0)
res_pg.append(pg)
res_png.append(png)
res_pg = np.vstack(list(filter(len, res_pg)))
res_png = np.vstack(list(filter(len, res_png)))
res = {"pg": pg, "png": png}
return res
def extract_initial_seeds(self, pc, n_lpr, th_seeds):
seeds = []
psorted = np.sort(pc[:,2])
LPR = np.mean(psorted[:self.n_lpr])
for i in range(len(pc)):
if pc[i,2] < LPR + self.th_seeds:
seeds.append(pc[i])
return np.vstack(seeds)
def estimate_plane(self, pg):
s_hat = np.mean(pg, axis=0)
cov = sum([np.outer(s - s_hat, s - s_hat) for s in pg])
u, s, vh = np.linalg.svd(cov, full_matrices=True)
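# The plane normal is the right singular vector with the smallest singular value,
# i.e. the direction of least variance of the ground points.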
n = vh[2]
d = -n @ s_hat
def model(p):
return abs((p - s_hat) @ n)
return model
def search_rectangle_fit(self, pc, criterion):
pc = pc[:,:2]
Q = dict()
delta = np.pi / 180
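# Sweep candidate headings in 1-degree steps over a quarter turn; for each heading,
# project the points onto the rotated axes and score the projections with the
# supplied criterion. The best heading defines the fitted rectangle.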
for theta in np.linspace(0, np.pi/2 - delta, 90):
e1 = np.array([np.cos(theta), np.sin(theta)])
e2 = np.array([-np.sin(theta), np.cos(theta)])
C1 = pc @ e1
C2 = pc @ e2
q = criterion(C1, C2)
Q[theta] = q
theta_star = max(Q.items(), key=lambda kv: kv[1])[0]
# print(theta_star)
C1_star = pc @ np.array([np.cos(theta_star), np.sin(theta_star)])
C2_star = pc @ np.array([-np.sin(theta_star), np.cos(theta_star)])
a1, b1, c1 = np.cos(theta_star), np.sin(theta_star), np.min(C1_star)
a2, b2, c2 = -np.sin(theta_star), np.cos(theta_star), np.min(C2_star)
a3, b3, c3 = np.cos(theta_star), np.sin(theta_star), np.max(C1_star)
a4, b4, c4 = -np.sin(theta_star), np.cos(theta_star), np.max(C2_star)
v1 = line_intersection(a1, b1, c1, a2, b2, c2)
v2 = line_intersection(a2, b2, c2, a3, b3, c3)
v3 = line_intersection(a3, b3, c3, a4, b4, c4)
v4 = line_intersection(a1, b1, c1, a4, b4, c4)
return [(a1, b1, c1), (a2, b2, c2),
(a3, b3, c3), (a4, b4, c4)], np.vstack([v1, v2, v3, v4])
def line_intersection(a1, b1, c1, a2, b2, c2):
x = (c1*b2 - c2*b1) / (a1*b2 - a2*b1)
y = (c1*a2 - c2*a1) / (b1*a2 - b2*a1)
return np.array([x, y])
def variance_criterion(C1, C2):
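# Variance criterion for rectangle fitting: assign each point to the nearer candidate
# edge along each axis and return the negated sum of variances of the edge distances,
# so tighter fits score higher.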
c1_max, c1_min = np.max(C1), np.min(C1)
c2_max, c2_min = np.max(C2), np.min(C2)
D1 = np.argmin([np.linalg.norm(c1_max - C1), np.linalg.norm(C1 - c1_min)])
D2 = np.argmin([np.linalg.norm(c2_max - C2), np.linalg.norm(C2 - c2_min)])
D1 = [c1_max - C1, C1 - c1_min][D1]
D2 = [c2_max - C2, C2 - c2_min][D2]
E1 = D1[np.where(D1 < D2)[0]]
E2 = D2[np.where(D2 < D1)[0]]
gamma = -np.var(E1) - np.var(E2)
return gamma
def closeness_criterion(C1, C2, d=1e-4):
c1_max, c1_min = np.max(C1), np.min(C1)
"""
Genetic algorithm to identify good location for services.
Score_matrix:
0: Number of hospitals
1: Average distance
2: Maximum distance
3: Maximum admissions to any one hospital
4: Minimum admissions to any one hospital
5: Proportion patients within target time/distance
6: Proportion patients attending unit with target admission numbers
7: Proportion of patients meeting distance and admissions target
8: 90th percentile travel
9: 95th percentile travel
10: 99th percentile travel
Programme written based on objects. Four object classes are used, each with one instance.
"""
# Import general libraries
import pandas as pd
import numpy as np
import random as rn
import os
import sys
import datetime
from scipy.spatial.distance import pdist
class GlobVariables():
"""Global variables"""
def __init__(self):
# Set general model parameters and create output folder if necessary
self.pareto_scores_used = [0, 1, 2, 3, 4, 5, 6, 7, 9]
self.output_location = 'hospital_regions'
self.target_travel = 30 # target distance/time
self.target_min_admissions = 100 # target sustainable admissions
self.maximum_generations = 300 # generations of genetic algorithm
self.fix_hospital_status = False # Allows hospitals to be forced open or closed
self.initial_random_population_size = 10000
self.minimum_population_size = 1000
self.maximum_population_size = 20000
self.mutation_rate = 0.005
self.max_crossover_points = 3
self.use_regions = True # Limits choice of hospital to regions
self.proportion_new_random_each_generation = 0.05
# Create output folder if needed
self.check_output_folder_exists()
return
def check_output_folder_exists(self):
"""Create new folder if folder does not already exist"""
if not os.path.exists(self.output_location):
os.makedirs(self.output_location)
return
class Data():
"""
Data class loads and stores core data for location algorithm.
"""
def __init__(self, use_regions, fix_hospital_status):
"""Initialise data class"""
# Define attributes
self.admissions = []
self.admissions_index = []
self.hospitals = []
self.travel_matrix = []
self.travel_matrix_LSOA_index = []
self.travel_matrix_hopspital_index = []
self.hospital_count = 0
self.loaded_population = []
self.regions_dictionary = {}
# Load data
self.load_data()
# Identify regions if required
if use_regions:
self.identify_region()
self.create_regions_dictionary(fix_hospital_status)
return
# Check loaded data indices for hospitals and LSOAs match
def check_loaded_data_indices_match(self):
"""Check hospitals and LSOAs macth in number and text"""
if len(self.admissions_index) != len(self.travel_matrix.index):
sys.exit("LSOA admissions different length from travel matrix")
check_lsoa_match = (self.travel_matrix.index == self.admissions_index).mean()
if not check_lsoa_match == 1:
sys.exit("LSOA admission names do not match travel matrix")
if len(self.hospitals) != len(list(self.travel_matrix)):
sys.exit("Hospital list different length from travel matrix")
check_hospital_match = (list(self.travel_matrix) ==
self.hospitals.index).mean()
if not check_hospital_match == 1:
sys.exit("Hospital list names do not match travel matrix")
return
def create_regions_dictionary(self, use_fixed_status):
hospitals_region = self.hospitals[['index_#', 'region', 'Fixed']]
for index, values in hospitals_region.iterrows():
index_number, region, fix = values
index_number = int(index_number)
# If using fixed hospitals ignore those with fixed status of -1
if not all((use_fixed_status, fix == -1)):
if region in self.regions_dictionary:
self.regions_dictionary[region].append(index_number)
else:
self.regions_dictionary[region] = [index_number]
return
def identify_region(self):
use_admissions_region = True
if use_admissions_region:
self.admissions_region = self.admissions_with_index['region']
else:
# Allocate admissions region to closest possible (used) hospital region
mask = self.hospitals['Fixed']
mask = mask.values != -1
mask = mask.flatten()
open_hospitals = self.hospitals.loc[mask].index
# Get available hospital postcodes
masked_matrix = self.travel_matrix.loc[:, open_hospitals]
closest_hospital_ID = np.argmin(masked_matrix.values, axis=1)
closest_hospital_postcode = open_hospitals[list(closest_hospital_ID)]
self.admissions_region = self.hospitals.loc[closest_hospital_postcode]['region']
# Adjust travel matrix so out of region hospitals have infinite travel distances
x = list(self.travel_matrix) # list of hospitals in travel matrix
matrix_region = list(self.hospitals.loc[x]['region']) # list of regions of hospitals in travel matrix
matrix_hospital_region = np.array([matrix_region, ] * len(self.admissions_region)) # repeat list of regions
matrix_LSOA_region = np.repeat(self.admissions_region, self.hospitals.shape[0]).values.reshape(len(
self.admissions_region), self.hospitals.shape[0])
hospital_not_in_LSOA_region = matrix_LSOA_region != matrix_hospital_region
matrix_correction = np.ones(self.travel_matrix.shape)
matrix_correction[hospital_not_in_LSOA_region] = np.inf
self.travel_matrix += matrix_correction
return
def load_data(self):
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Loading data')
# Load hospital list
self.hospitals = pd.read_csv('data/hospitals.csv', index_col=0)
self.hospital_count = len(self.hospitals)
self.hospitals['index_#'] = list(range(0, self.hospital_count))
# Load admissions and split index from data
self.admissions_with_index = pd.read_csv('data/admissions.csv')
self.admissions_index = self.admissions_with_index['LSOA']
self.admissions = self.admissions_with_index['Admissions']
# Load time/distance matrix
self.travel_matrix = pd.read_csv('data/travel_matrix.csv', index_col=0)
# Check data indices match
self.check_loaded_data_indices_match()
# Load initial population if data/load.csv exists
try:
self.loaded_population = np.loadtxt('data/load.csv', delimiter=',')
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Loaded starting population from file')
except:
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'No initial population loaded from file')
return
class Master():
"""
Main algorithm code.
1) Initialise algorithm object and load data
"""
def __init__(self):
"""
Set up algorithm environment.
Load:
Global variables
Underlying data for algorithm:
List of hospitals
Patients by LSOA
Travel matrix from all LSOA to all hospitals
"""
# Set up class environment
self.global_vars = GlobVariables()
self.data = Data(self.global_vars.use_regions, self.global_vars.fix_hospital_status)
self.pop = Pop()
self.score = ScorePareto(self.global_vars.maximum_generations)
self.generation = 0
return
def initialise_population(self):
"""
This method creates a starting population.
This may consist of a) a random population, b) a loaded population, or c) both.
"""
self.pop.population = []
if self.global_vars.initial_random_population_size > 0:
self.pop.population = self.pop.create_random_population(
self.global_vars.initial_random_population_size,
self.data.hospitals,
self.global_vars.fix_hospital_status,
self.global_vars.use_regions,
self.data.regions_dictionary)
if len(self.data.loaded_population) > 0:
self.pop.population = np.vstack((self.data.loaded_population, self.pop.population))
self.pop.population = np.unique(self.pop.population, axis=0)
return
def run_algorithm(self):
# Create initial population
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Loading coffee and biscuits')
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Starting generations')
self.initialise_population()
(self.pop.population, pareto_size) = self.select_population(0) # 0 indicates base generation
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Generation: %3.0f Patients in target '
'distance/admissions: %1.4f Hamming: %1.4f Pareto size: %6.0f' \
% (0, self.score.progress_track[0], self.score.hamming[0], pareto_size))
for gen in range(1, self.global_vars.maximum_generations):
# Add new random population
new_population_members_required = (int(self.pop.population.shape[
0] * self.global_vars.proportion_new_random_each_generation) + 1)
new_population = self.pop.create_random_population(
new_population_members_required,
self.data.hospitals,
self.global_vars.fix_hospital_status,
self.global_vars.use_regions,
self.data.regions_dictionary)
# Combine populations before breeding
self.pop.population = np.vstack((self.pop.population, new_population))
# Get new children
child_population = (self.pop.generate_child_population(
self.global_vars.max_crossover_points,
self.global_vars.mutation_rate,
self.data.hospitals,
self.global_vars.fix_hospital_status))
# Combine populations
self.pop.population = np.vstack((self.pop.population, child_population))
# Remove scenarios with no hospitals
check_hospitals = np.sum(self.pop.population, axis=1) > 0
self.pop.population = self.pop.population[check_hospitals, :]
# Remove non-unique rows
self.pop.population = np.unique(self.pop.population, axis=0)
# Select new Pareto front population
(self.pop.population, pareto_size) = self.select_population(gen)
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Generation: %3.0f Patients in target '
'distance/admissions: %1.4f Hamming: %1.4f Pareto size: %6.0f' \
% (gen, self.score.progress_track[gen], self.score.hamming[gen], pareto_size))
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'End\n')
return
def save_pareto(self, scores, population, admissions):
"""Save latest Pareto front each generation"""
scores_headers = (['#Hosp', 'Av_travel', 'Max_travel', 'Max_admissions', 'Min_admissions', 'Target_travel',
'Target_admissions', 'Target_travel+distance', '90th percentile travel',
'95th percentile travel', '99th percentile travel'])
hospital_headers = self.data.hospitals.index
scores_df = pd.DataFrame(scores, columns=scores_headers)
scores_df.index.name = 'Index'
population_df = pd.DataFrame(population, columns=hospital_headers)
population_df.index.name = 'Index'
admissions_df = pd.DataFrame(admissions, columns=hospital_headers)
admissions_df.index.name = 'Index'
scores_df.to_csv(self.global_vars.output_location + '/scores.csv')
population_df.to_csv(self.global_vars.output_location + '/population.csv')
admissions_df.to_csv(self.global_vars.output_location + '/admissions.csv')
return
def select_population(self, generation):
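# Pareto-front selection: score every scenario, keep the first Pareto front, and add
# further fronts (thinning by crowding when a front is too large) until the
# population-size limits are met.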
# Score population
(self.score.scores, self.score.hospital_admissions) = (
self.score.score_pop(self.pop.population, self.data.travel_matrix.values,
self.data.admissions,
self.global_vars.target_travel,
self.global_vars.target_min_admissions,
self.data.hospital_count))
# When regionalisation is selected, remove solutions where some patients are unallocated
if self.global_vars.use_regions:
mask = self.score.scores[:, 1] != np.inf
if sum(mask) == 0:
sys.exit("No feasible solutions in population. Exiting.")
self.score.scores = self.score.scores[mask, :]
self.score.hospital_admissions = self.score.hospital_admissions[mask, :]
self.pop.population = self.pop.population[mask, :]
# Check solution population is at least the minimum target population
if self.score.scores.shape[0] < self.global_vars.minimum_population_size:
# Population too small do not select from within population
new_population = self.pop.population
new_population_ids = np.arange(0, new_population.shape[0])
new_population_scores = self.score.scores
if len(new_population_ids) < 20000:
hamming = np.average(pdist(new_population, 'hamming'))
else:
hamming = 999
pareto_front = self.score.identify_pareto(new_population_scores, self.global_vars.pareto_scores_used)
pareto_size = sum(pareto_front)
self.save_pareto(new_population_scores, new_population, self.score.hospital_admissions)
self.score.hamming[generation] = hamming
print('No selection this generation; feasible solution population too small')
else:
unselected_scores = self.score.scores
unselected_population_ids = np.arange(0, self.pop.population.shape[0])
# First Pareto front
pareto_front = self.score.identify_pareto(unselected_scores, self.global_vars.pareto_scores_used)
selected_population_ids = unselected_population_ids[pareto_front]
selected_scores = unselected_scores[pareto_front]
# store first complete Pareto front
pareto_population_ids = selected_population_ids
pareto_size = len(selected_population_ids)
pareto_population_scores = self.score.scores[selected_population_ids, :]
pareto_population = self.pop.population[pareto_population_ids, :]
pareto_hospital_admissions = self.score.hospital_admissions[pareto_population_ids, :]
if len(selected_population_ids) < 20000:
hamming = np.average(pdist(pareto_population, 'hamming'))
else:
hamming = 999
self.score.hamming[generation] = hamming
np.savetxt(self.global_vars.output_location + '/hamming.csv', self.score.hamming, delimiter=',')
self.save_pareto(pareto_population_scores, pareto_population, pareto_hospital_admissions)
# New population may need to be expanded or reduced depending on required min/max size
new_population_ids = pareto_population_ids # This may be enlarged/reduced
new_population_scores = selected_scores # This may be enlarged/reduced
unselected_population_ids = unselected_population_ids[np.invert(pareto_front)]
unselected_scores = unselected_scores[np.invert(pareto_front)]
# Check whether first Pareto front is within the population size required
select_more = False
if new_population_ids.shape[0] > self.global_vars.maximum_population_size:
selection_size = self.global_vars.maximum_population_size
(new_population_ids, new_population_scores) = self.score.reduce_by_crowding(
new_population_ids, new_population_scores, selection_size)
if new_population_ids.shape[0] < self.global_vars.minimum_population_size:
select_more = True
while select_more:
# Get next pareto front
pareto_front = self.score.identify_pareto(unselected_scores, self.global_vars.pareto_scores_used)
selected_population_ids = unselected_population_ids[pareto_front]
selected_scores = unselected_scores[pareto_front]
if new_population_ids.shape[0] + selected_population_ids.shape[0] > \
self.global_vars.minimum_population_size:
select_more = False
if new_population_ids.shape[0] + selected_population_ids.shape[0] < \
self.global_vars.maximum_population_size:
# New population smaller than maximum permitted
new_population_ids = np.hstack((new_population_ids, selected_population_ids))
new_population_scores = np.vstack((new_population_scores, selected_scores))
else:
# New population larger than permitted; reduce size
selection_size = self.global_vars.minimum_population_size - new_population_ids.shape[0]
(selected_population_ids, selected_scores) = self.score.reduce_by_crowding(
new_population_ids, new_population_scores, selection_size)
new_population_ids = np.hstack((new_population_ids, selected_population_ids))
new_population_scores = np.vstack((new_population_scores, selected_scores))
import cv2
import numpy as np
import matplotlib.pyplot as plt
from MyDIPUtils.config import *
# check_ol1: circle[i] and circle[j] overlap too much
# check_ol2: circle[i] lies too far outside the original image
# check_ol3: compare the overlapping area between different step sizes
def direction_map(edge, clockwise):
# edge_pad = np.pad(edge, 1, mode='constant', constant_values=255)
# pdb.set_trace()
edge_copy = edge.copy()
flag = np.zeros_like(edge_copy)
edge_y, edge_x = np.nonzero(edge_copy == 0)
leftmost_x = np.min(edge_x)
leftmost_y = np.max(edge_y[edge_x == leftmost_x])
original_point = (leftmost_x, leftmost_y)
points = []
neigh = edge_copy[leftmost_y-1:leftmost_y+2, leftmost_x-1:leftmost_x+2]
if not clockwise:
direction = 0 if neigh[1, 2] == 0 else 7/4
if direction == 0:
next_point = (leftmost_x+1, leftmost_y)
else:
next_point = (leftmost_x+1, leftmost_y+1)
else:
direction = 0 if neigh[1, 2] == 0 else 1/4
if direction == 0:
next_point = (leftmost_x+1, leftmost_y)
else:
next_point = (leftmost_x+1, leftmost_y-1)
points.append((direction, original_point))
# flag[leftmost_y, leftmost_x] = 1
while next_point != original_point:
x, y = next_point
neigh = edge_copy[y-1:y+2, x-1:x+2]
flag_neigh = flag[y-1:y+2, x-1:x+2]
this_point = next_point
direction, next_point = find_next_direction(neigh, this_point, flag_neigh)
points.append((direction, this_point))
flag[this_point[1], this_point[0]] = 1
# dir_map[y, x] = direction
return points
def find_next_direction(neigh, this_point, flag_neigh):
x, y = this_point
neigh[flag_neigh==1] = 255
# 4-neighbour is prior to 8-neighbour
if neigh[0, 1] == 0:
return 1/2, (x, y-1)
if neigh[1, 2] == 0:
return 0, (x+1, y)
if neigh[2, 1] == 0:
return 3/2, (x, y+1)
if neigh[1, 0] == 0:
return 1, (x-1, y)
if neigh[0, 2] == 0:
return 1/4, (x+1, y-1)
if neigh[0, 0] == 0:
return 3/4, (x-1, y-1)
if neigh[2, 0] == 0:
return 5/4, (x-1, y+1)
if neigh[2, 2] == 0:
return 7/4, (x+1, y+1)
def tangent_line(points, seq_num, img, draw=True):
k = 0
angle = 0
for i in range(1, max_neigh):
s0 = 0
for j in range(i):
s0 += points[j+seq_num][0]
angle += s0/i
k += np.tan(s0*np.pi/(i))
angle /= (max_neigh-1)
k /= (max_neigh-1)
x0, y0 = points[seq_num][1]
y0 = img.shape[0] - y0
b = y0-k*x0
if draw:
line_point(k, b, img)
return k, angle, b
def points_sequence(points):
# points should be passed directly from cv2.goodFeaturesToTrack
# shape is (N, 1, 2)
sequence = []
points = np.squeeze(points)
leftmost = np.argmin(points[:, 0])
sequence.append(points[leftmost])
for direction in ['lr', 'ur', 'ul', 'll']:
next_point = find_next_anticlock(sequence[-1], points, direction)
while np.any(next_point) is not None:
sequence.append(next_point)
next_point = find_next_anticlock(sequence[-1], points, direction)
return sequence
def find_next_anticlock(point, points, direction):
if direction not in ['lr', 'ur', 'ul', 'll']:
raise ValueError('Unknown direction')
x, y = point
if direction == 'lr':
target = points[points[:, 1] > y]
if len(target) == 0:
return None
return target[np.argmin(target[:, 0])]
if direction == 'ur':
target = points[points[:, 0] > x]
if len(target) == 0:
return None
return target[np.argmax(target[:, 1])]
if direction == 'll':
target = points[points[:, 0] < x]
if len(target) == 0:
return None
return target[np.argmin(target[:, 1])]
if direction == 'ul':
target = points[points[:, 1] < y]
if len(target) == 0:
return None
return target[np.argmax(target[:, 0])]
def find_line(point1, point2, img_size, pb):
x1, y1 = point1
x2, y2 = point2
y1, y2 = img_size - y1, img_size - y2
if pb == True:
if np.abs(y1-y2) > l1_norm_threshold:
k = -(x1-x2)/(y1-y2)
b = (y1+y2)/2 - k*(x1+x2)/2
else:
k = None
b = (x1+x2)/2
else:
if np.abs(x1-x2) > l1_norm_threshold:
k = (y1-y2)/(x1-x2)
b = y2 - k*x2
else:
k = None
b = x1
return k, b
def find_para_line(k, point, img_size):
if k != None:
return -k*point[0]+(img_size-point[1])
else:
return point[0]
def line_point(k, b, img):
if k != None:
if b > 0:
point1 = (0, img.shape[0] - int(b))
else:
point1 = (int(-b/k), img.shape[0])
if k*img.shape[0] + b > img.shape[0]:
point2 = (int((img.shape[0] - b)/k), 0)
else:
point2 = (img.shape[0], int(img.shape[0] - (k*img.shape[0] + b)))
else:
point1 = (b, 0)
point2 = (b, img.shape[0])
cv2.line(img, point1, point2, 0)
# return img
def line_gen_1(k, b, img_size):
# img[i, j]: i->y, j->x
if k != None:
return lambda x, y: k*x-(img_size-y)+b
else:
return lambda x, y: x-b
def line_gen_2(k, b, img_size):
# Warning: if k == None, cannot use this function
assert k != None
return lambda x: img_size-(k*x+b)
def distance(x1, y1, x2, y2, norm='l2'):
if norm == 'l1':
return min(np.abs(x1-x2), np.abs(y1-y2))
else:
return np.sqrt((x1-x2)**2+(y1-y2)**2)
def find_center_and_radius(point1, point2, points, img):
# 1. find the side of the arc
k0, b0 = find_line(point1, point2, img.shape[0], pb=False)
line = line_gen_1(k0, b0, img.shape[0])
for point in points:
if not np.any(np.logical_or(point == point1, point == point2)):
flag = np.sign(line(*point))
break
# 2. mask only the interested arc
arc_ma = np.full_like(img, 255, dtype=np.uint8)
arc_y, arc_x = np.nonzero(img != 255)
for i in range(len(arc_x)):
if flag != np.sign(line(arc_x[i], arc_y[i])):
arc_ma[arc_y[i], arc_x[i]] = 0
# 3. further mask only the area between 2 corner point
k, b = find_line(point1, point2, img.shape[0], pb=True)
b1, b2 = find_para_line(k, point1, img.shape[0]), find_para_line(k, point2, img.shape[0])
line1, line2 = line_gen_1(k, b1, img.shape[0]), line_gen_1(k, b2, img.shape[0])
sgn1, sgn2 = np.sign(line1(*point2)), np.sign(line2(*point1))
arc_y, arc_x = np.nonzero(arc_ma != 255)
for i in range(len(arc_x)):
i_sgn1, i_sgn2 = np.sign(line1(arc_x[i], arc_y[i])), np.sign(line2(arc_x[i], arc_y[i]))
if sgn1 != i_sgn1 or sgn2 != i_sgn2:
arc_ma[arc_y[i], arc_x[i]] = 255
# test = draw_points([tuple(point1), tuple(point2)], arc_ma)
# line_point(k, b, test)
# line_point(k0, b0, test)
# imgshow(test)
# plt.figure()
# plt.imshow(arc_ma, cmap='gray')
# 3.find center and radius
arc_y, arc_x = np.nonzero(arc_ma == 0)
len_arc = len(arc_y)
if len_arc < 5:
return None
if k != None:
lower_x = max((point1[0]+point2[0])//2-max_radius, 0)
upper_x = min((point1[0]+point2[0])//2+max_radius, img.shape[0])
line = line_gen_2(k, b, img.shape[0])
dis_var = []
dis = []
for x in range(lower_x, upper_x):
tmp_dis = []
y = line(x)
for i in range(len_arc):
ay, ax = arc_y[i], arc_x[i]
tmp_dis.append(distance(x, y, ax, ay))
dis_var.append(np.var(tmp_dis))
dis.append(np.mean(tmp_dis))
cur = np.argmin(dis_var)
center_x = lower_x + cur
center_y = int(line(center_x))
radius = dis[cur]
else:
lower_y = max((point1[1]+point2[1])//2-max_radius, 0)
upper_y = min((point1[1]+point2[1])//2+max_radius, img.shape[0])
x = b
dis_var = []
dis = []
for y in range(lower_y, upper_y):
tmp_dis = []
for i in range(len_arc):
ay, ax = arc_y[i], arc_x[i]
tmp_dis.append(distance(x, y, ax, ay))
dis_var.append(np.var(tmp_dis))
dis.append(np.mean(tmp_dis))
cur = np.argmin(dis_var)
center_x = b
center_y = lower_y + cur
radius = dis[cur]
return (int(center_x), int(center_y)), int(radius)
def check_close(circles):
flags = [-1 for _ in range(len(circles))]
count = 0
for i in range(len(circles)):
if flags[i] == -1:
color = count
count += 1
else:
color = flags[i]
flags[i] = color
for j in range(len(circles)):
if j != i and distance(*circles[i][0], *circles[j][0]) < distance_threshold:
flags[j] = color
final = []
for i in range(len(flags)):
if flags[i] != -1:
color = flags[i]
flags[i] = -1
tmp_center = [circles[i][0]]
tmp_radius = [circles[i][1]]
for j in range(i+1, len(flags)):
if flags[j] == color:
tmp_center.append(circles[j][0])
tmp_radius.append(circles[j][1])
flags[j] = -1
mean_center = np.mean(tmp_center, axis=0)
mean_radius = np.mean(tmp_radius)
final.append(((int(mean_center[0]), int(mean_center[1])), int(mean_radius)))
return final
def overlapping(circle1, circle2, img_shape):
tmp1 = np.full(img_shape, 255, dtype=np.uint8)
tmp2 = np.full(img_shape, 255, dtype=np.uint8)
cv2.circle(tmp1, circle1[0], circle1[1], 0, cv2.FILLED)
cv2.circle(tmp2, circle2[0], circle2[1], 0, cv2.FILLED)
ol = np.full(img_shape, 255, dtype=np.uint8)
ol[np.logical_and(tmp1==0, tmp2==0)] = 0
area1 = np.sum(tmp1==0)
area2 = np.sum(tmp2==0)
area_ol = np.sum(ol==0)
return area_ol/area1, area_ol/area2
def check_ol1(circles, shape):
final = []
flags = [-1 for _ in range(len(circles))]
for i in range(len(circles)):
if flags[i] == -1:
for j in range(i+1, len(circles)):
if flags[j] == -1:
ol_i, ol_j = overlapping(circles[i], circles[j], shape)
if max(ol_i, ol_j) > overlapping1_threshold:
if max(ol_i, ol_j) == ol_i:
flags[i] = 0
else:
flags[j] = 0
if flags[i] == -1:
final.append(circles[i])
return final
def check_ol2(circles, ori_img):
final = []
for circle in circles:
tmp = np.full(ori_img.shape, 255, dtype=np.uint8)
cv2.circle(tmp, circle[0], circle[1], 0, cv2.FILLED)
ol = np.full(ori_img.shape, 255, dtype=np.uint8)
ol[np.logical_and(tmp==0, ori_img==0)]=0
if np.sum(ol==0)/np.sum(tmp==0) > overlapping2_threshold:
final.append(circle)
return final
def check_ol3(circles, ori_img):
tmp = np.full(ori_img.shape, 255, dtype=np.uint8)
for circle in circles:
cv2.circle(tmp, circle[0], circle[1], 0, cv2.FILLED)
intersec = np.full(ori_img.shape, 255, dtype=np.uint8)
intersec[np.logical_and(tmp==0, ori_img==0)]=0
ol = np.sum(intersec==0)
tmp[intersec==0] = 255
sub = np.sum(tmp==0)
import pandas as pd
import numpy as np
import statsmodels.tsa.filters.filtertools as sm
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
def ewma(table, group_by=None, **params):
check_required_parameters(_ewma, params, ['table'])
if group_by is not None:
return _function_by_group(_ewma, table, group_by=group_by, **params)
else:
return _ewma(table, **params)
def _ewma(table, input_cols, ratio_type, custom_ratio=0.5, period_number=1):
out_table = table.copy()
def ewma_col(column):
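# The first (period_number - 1) entries are undefined (None); the EWMA is seeded with
# the simple mean of the first window.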
result_col = []
for i in range(0, period_number - 1):
result_col.append(None)
result_col.append(np.mean(out_table[column][0:period_number]))
import numpy as np
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.animation import FuncAnimation
from matplotlib.animation import writers
import pf_dynamic_cart as pfc
import pf_dynamic_sph as pfs
import Grid
from scipy import interpolate
from timeit import default_timer as timer
if __name__ == "__main__":
mpegWriter = writers['ffmpeg'](fps=2, bitrate=1800)
matplotlib.rcParams.update({'font.size': 12})
labelsize = 13
legendsize = 12
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (60, 60, 60)
(dx, dy, dz) = (0.25, 0.25, 0.25)
higherCutoff = False; cutoffRat = 1.5
betterResolution = True; resRat = 0.5
# (Lx, Ly, Lz) = (21, 21, 21)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
# higherCutoff = False; cutoffRat = 1.5
# betterResolution = False; resRat = 0.5
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
massRat = 1.0
IRrat = 1
print(NGridPoints_cart)
# Toggle parameters
toggleDict = {'Dynamics': 'real'}
# import solarized
# Sol = solarized.Solarized()
# cmap = Sol.cmap(type='linear')
# cmap = 'gist_gray'
cmap = 'afmhot'
# cmap = 'inferno'
my_cmap = matplotlib.cm.get_cmap(cmap)
avmin = 1e-5; avmax = 1e-1
# ---- SET OUTPUT DATA FOLDER ----
datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
animpath = '/Users/kis/Dropbox/VariationalResearch/DataAnalysis/figs/rdyn_twophonon/hostGasDensity/NGridPoints_1.11E+08_resRat_0.50'
if higherCutoff is True:
datapath = datapath + '_cutoffRat_{:.2f}'.format(cutoffRat)
if betterResolution is True:
datapath = datapath + '_resRat_{:.2f}'.format(resRat)
datapath = datapath + '/massRatio={:.1f}'.format(massRat)
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
cartdatapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}/redyn_cart'.format(1.44e6, 1)
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
cartdatapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}/imdyn_cart'.format(1.44e6, 1)
innerdatapath = innerdatapath + '_spherical'
# # Analysis of Total Dataset
interpdatapath = innerdatapath + '/interp'
aIBi = -5
Pnorm_des = 4.0
# Pnorm_des = 3.0
# Pnorm_des = 2.067
# Pnorm_des = 1.8
# Pnorm_des = 1.4
# Pnorm_des = 1.34
# Pnorm_des = 1.28
# Pnorm_des = 1.22
# Pnorm_des = 1.1
# Pnorm_des = 1.04
# Pnorm_des = 0.8
# Pnorm_des = 0.52
linDimList = [(2, 2), (10, 10)]
linDimMajor, linDimMinor = linDimList[1]
qds_orig = xr.open_dataset(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(Pnorm_des * 0.7926654595212022, aIBi))
n0 = qds_orig.attrs['n0']; gBB = qds_orig.attrs['gBB']; mI = qds_orig.attrs['mI']; mB = qds_orig.attrs['mB']
nu = np.sqrt(n0 * gBB / mB)
mc = mI * nu
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
tscale = xi / nu
P = qds_orig.attrs['P']
Pnorm = P / mc
tVals = qds_orig['tc'].values
t = tVals[-1]
# print(tVals)
print(xi, -10 * xi, -5 * xi, -2 * xi)
print('P/mc: {:.2f}'.format(P / mc))
print(P)
print(massRat, aIBi)
print(t / tscale)
# All Plotting:
# # ORIGINAL SPHERICAL DATA PLOTS
# Individual Phonon Momentum Distribution(Original Spherical data)
Bk_2D_orig = (qds_orig['Real_CSAmp'] + 1j * qds_orig['Imag_CSAmp']).sel(tc=t).values
Nph_orig = qds_orig['Nph'].sel(t=t).values
PhDen_orig_Vals = ((1 / Nph_orig) * np.abs(Bk_2D_orig)**2).real.astype(float)
kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds_orig.coords['k'].values); kgrid.initArray_premade('th', qds_orig.coords['th'].values)
kVec = kgrid.getArray('k'); dk = kVec[1] - kVec[0]
thVec = kgrid.getArray('th'); dth = thVec[1] - thVec[0]
print(P, np.max(kVec))
kg, thg = np.meshgrid(kVec, thVec, indexing='ij')
kxg = kg * np.sin(thg)
kzg = kg * np.cos(thg)
# interpmul = 2
# PhDen_orig_da = xr.DataArray(PhDen_orig_Vals, coords=[kVec, thVec], dims=['k', 'th'])
# PhDen_orig_smooth, kg_orig_smooth, thg_orig_smooth = pfc.xinterp2D(PhDen_orig_da, 'k', 'th', interpmul)
# dk_smooth = kg_orig_smooth[1, 0] - kg_orig_smooth[0, 0]
# dth_smooth = thg_orig_smooth[0, 1] - thg_orig_smooth[0, 0]
# kxg_smooth = kg_orig_smooth * np.sin(thg_orig_smooth)
# kzg_smooth = kg_orig_smooth * np.cos(thg_orig_smooth)
# PhDen_orig_sum = np.sum(PhDen_orig_Vals * kg**2 * np.sin(thg) * dk * dth * (2 * np.pi)**(-2))
# PhDen_smooth_sum = np.sum(PhDen_orig_smooth * kg_orig_smooth**2 * np.sin(thg_orig_smooth) * dk_smooth * dth_smooth * (2 * np.pi)**(-2))
# print(PhDen_orig_sum, PhDen_smooth_sum)
fig1, ax1 = plt.subplots()
vmax = np.max(PhDen_orig_Vals)
# vmax = 8414555 # P=2.4
# vmax = 2075494 # P=1.20
# vmax = 1055106 # P=0.38
quad1 = ax1.pcolormesh(kzg, kxg, PhDen_orig_Vals, norm=colors.LogNorm(vmin=1e-3, vmax=vmax), cmap='inferno')
quad1m = ax1.pcolormesh(kzg, -1 * kxg, PhDen_orig_Vals, norm=colors.LogNorm(vmin=1e-3, vmax=vmax), cmap='inferno')
ax1.set_xlim([-1 * linDimMajor, linDimMajor])
ax1.set_ylim([-1 * linDimMinor, linDimMinor])
# print(vmax)
ax1.set_xlabel('kz (Impurity Propagation Direction)')
ax1.set_ylabel('kx')
ax1.set_title('Individual Phonon Momentum Distribution (Sph Orig)', size='smaller')
fig1.colorbar(quad1, ax=ax1, extend='both')
# CARTESIAN INTERPOLATION PLOTS
interp_ds = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(P, aIBi, t, linDimMajor, linDimMinor))
# interp_ds = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_lDM_{:.2f}_lDm_{:.2f}_unique.nc'.format(P, aIBi, linDimMajor, linDimMinor)); print('unique')
kxL = interp_ds['kx'].values; dkxL = kxL[1] - kxL[0]
kyL = interp_ds['ky'].values; dkyL = kyL[1] - kyL[0]
kzL = interp_ds['kz'].values; dkzL = kzL[1] - kzL[0]
xL = interp_ds['x'].values
yL = interp_ds['y'].values
zL = interp_ds['z'].values
PI_mag = interp_ds['PI_mag'].values
kxLg_xz_slice, kzLg_xz_slice = np.meshgrid(kxL, kzL, indexing='ij')
xLg_xz, zLg_xz = np.meshgrid(xL, zL, indexing='ij')
xLg_xy, yLg_xy = np.meshgrid(xL, yL, indexing='ij')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Solve the knapsack problem with the branch-and-bound method
# main(list of weights, maximum weight, list of values)
#
import numpy as np
import pandas as pd
class Brand_and_Bound:
def __init__(self, weights, max_, target):
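# Value-per-weight ratio; the item table is sorted by this ratio below.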
r = np.array(target)/np.array(weights)
df = pd.DataFrame({
"r":r,
"weights":weights,
"target":target})
# target/weight で並び替え
self.df = df.sort_values(by='r')
self.problems = [(list(self.df["weights"]), max_, list(self.df["target"]), 0, [])]
self.constant = 0
# 欲張り法で求めたxを初期xに
self.df["x_initial"] = self.greedy_method(max_)
self.x_tmp = list(self.df["x_initial"])
x_sorted = list(self.df.sort_index()["x_initial"])
self.value_tmp = np.dot(np.array(target), np.array(x_sorted))  # value of the greedy initial solution (second factor reconstructed)
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.tf_utils."""
import os
import numpy as np
from packaging import version
import tensorflow as tf
from tensorflow_transform import analyzers
from tensorflow_transform import annotators
from tensorflow_transform import tf_utils
from tensorflow_transform import test_case
import unittest
from tensorflow.python.framework import composite_tensor # pylint: disable=g-direct-tensorflow-import
_CONSTRUCT_TABLE_PARAMETERS = [
dict(testcase_name='_string', asset_path_input_fn=lambda x: x),
dict(testcase_name='_string_tensor', asset_path_input_fn=tf.constant),
]
def _construct_table(asset_file_path,
key_dtype=tf.string,
key_index=0,
value_dtype=tf.int64,
value_index=1,
default_value=-1):
initializer = tf.lookup.TextFileInitializer(
asset_file_path,
key_dtype=key_dtype,
key_index=key_index,
value_dtype=value_dtype,
value_index=value_index)
return tf.lookup.StaticHashTable(initializer, default_value=default_value)
def _value_to_tensor(value):
if isinstance(value, tf.compat.v1.SparseTensorValue):
return tf.compat.v1.convert_to_tensor_or_sparse_tensor(value)
elif isinstance(value, tf.compat.v1.ragged.RaggedTensorValue):
return tf.ragged.constant(value.to_list())
else:
return tf.constant(value)
class _SparseTensorSpec:
def __init__(self, shape, dtype):
self._shape = shape
self._dtype = dtype
if not hasattr(tf, 'SparseTensorSpec'):
tf.SparseTensorSpec = _SparseTensorSpec
class TFUtilsTest(test_case.TransformTestCase):
def _assertCompositeRefEqual(self, left, right):
"""Asserts that a two `tf_util._CompositeTensorRef`s are equal."""
self.assertEqual(left.type_spec, right.type_spec)
self.assertAllEqual(left.list_of_refs, right.list_of_refs)
def test_copy_tensors_produces_different_tensors(self):
with tf.compat.v1.Graph().as_default():
tensors = {
'dense':
tf.compat.v1.placeholder(
tf.int64, (None,), name='my_dense_input'),
'sparse':
tf.compat.v1.sparse_placeholder(tf.int64, name='my_sparse_input'),
'ragged':
tf.compat.v1.ragged.placeholder(
tf.int64, ragged_rank=2, name='my_ragged_input')
}
copied_tensors = tf_utils.copy_tensors(tensors)
self.assertNotEqual(tensors['dense'], copied_tensors['dense'])
self.assertNotEqual(tensors['sparse'].indices,
copied_tensors['sparse'].indices)
self.assertNotEqual(tensors['sparse'].values,
copied_tensors['sparse'].values)
self.assertNotEqual(tensors['sparse'].dense_shape,
copied_tensors['sparse'].dense_shape)
self.assertNotEqual(tensors['ragged'].values,
copied_tensors['ragged'].values)
self.assertNotEqual(tensors['ragged'].row_splits,
copied_tensors['ragged'].row_splits)
def test_copy_tensors_produces_equivalent_tensors(self):
with tf.compat.v1.Graph().as_default():
tensors = {
'dense':
tf.compat.v1.placeholder(
tf.int64, (None,), name='my_dense_input'),
'sparse':
tf.compat.v1.sparse_placeholder(tf.int64, name='my_sparse_input'),
'ragged':
tf.compat.v1.ragged.placeholder(
tf.int64, ragged_rank=1, name='my_ragged_input')
}
copied_tensors = tf_utils.copy_tensors(tensors)
with tf.compat.v1.Session() as session:
dense_value = [1, 2]
sparse_value = tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1]],
values=[3, 4, 5],
dense_shape=[2, 3])
ragged_value = tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([3, 4, 5], dtype=np.int64),
row_splits=np.array([0, 2, 3], dtype=np.int64))
sample_tensors = session.run(
copied_tensors,
feed_dict={
tensors['dense']: dense_value,
tensors['sparse']: sparse_value,
tensors['ragged']: ragged_value
})
self.assertAllEqual(sample_tensors['dense'], dense_value)
self.assertAllEqual(sample_tensors['sparse'].indices,
sparse_value.indices)
self.assertAllEqual(sample_tensors['sparse'].values,
sparse_value.values)
self.assertAllEqual(sample_tensors['sparse'].dense_shape,
sparse_value.dense_shape)
self.assertAllEqual(sample_tensors['ragged'].values,
ragged_value.values)
self.assertAllEqual(sample_tensors['ragged'].row_splits,
ragged_value.row_splits)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='2d',
tensor=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1.2, 1., 1.2, 1.]),
row_splits=np.array([0, 2, 4])),
rowids=[0, 0, 1, 1],
tensor_spec=tf.RaggedTensorSpec([None, None], tf.float32)),
dict(
testcase_name='3d',
tensor=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1.2, 1., 1.2, 1.]),
row_splits=np.array([0, 3, 4])),
row_splits=np.array([0, 1, 1, 2])),
rowids=[0, 0, 0, 2],
tensor_spec=tf.RaggedTensorSpec([None, None, None], tf.float32)),
]))
def test_get_ragged_batch_value_rowids(self, tensor, rowids, tensor_spec,
function_handler):
@function_handler(input_signature=[tensor_spec])
def get_ragged_batch_value_rowids(tensor):
return tf_utils._get_ragged_batch_value_rowids(tensor)
self.assertAllEqual(get_ragged_batch_value_rowids(tensor), rowids)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='rank1',
x=['a', 'b', 'a'],
x_spec=tf.TensorSpec(None, tf.string),
weights=[1, 1, 2],
filter_regex=None,
expected_unique_x=[b'a', b'b'],
expected_summed_weights_per_x=[3, 1]),
dict(
testcase_name='rank2',
x=[['a', 'b\n', 'a'], ['b\n', 'a', 'b\n']],
x_spec=tf.TensorSpec(None, tf.string),
weights=[[1, 2, 1], [1, 2, 2]],
filter_regex=None,
expected_unique_x=[b'a', b'b\n'],
expected_summed_weights_per_x=[4, 5]),
dict(
testcase_name='rank3',
x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
[['a', 'b', 'a'], ['b', 'a', 'b']]],
x_spec=tf.TensorSpec(None, tf.string),
weights=[[[1, 1, 2], [1, 2, 1]], [[1, 2, 1], [1, 2, 1]]],
filter_regex=None,
expected_unique_x=[b'a', b'b'],
expected_summed_weights_per_x=[9, 7]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 1], [2, 1]],
values=['a', 'a', 'b'],
dense_shape=[4, 2]),
x_spec=tf.SparseTensorSpec([4, 2], tf.string),
weights=[2, 3, 4],
filter_regex=None,
expected_unique_x=[b'a', b'b'],
expected_summed_weights_per_x=[5, 4]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue( # pylint: disable=g-long-lambda
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array(['a', 'b', 'b', 'a']),
row_splits=np.array([0, 2, 4])),
row_splits=np.array([0, 2])),
x_spec=tf.RaggedTensorSpec([None, None, None], tf.string),
weights=[2, 3, 4, 6],
filter_regex=None,
expected_unique_x=[b'a', b'b'],
expected_summed_weights_per_x=[8, 7]),
dict(
testcase_name='regex_filtering',
x=[['a\n', '', '\n\r'], ['\r', 'a', 'b']],
x_spec=tf.TensorSpec(None, tf.string),
weights=[[1, 2, 1], [1, 2, 2]],
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
expected_unique_x=[b'a', b'b'],
expected_summed_weights_per_x=[2, 2]),
dict(
testcase_name='regex_filtering_invalid_utf8',
x=[[b'\xe1\n', b'\xa9', b'\n\xb8\r'],
[b'\xe8\r', b'\xc6', b'\n\xb3']],
x_spec=tf.TensorSpec(None, tf.string),
weights=[[1, 3, 1], [1, 4, 2]],
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
expected_unique_x=[b'\xa9', b'\xc6'],
expected_summed_weights_per_x=[3, 4]),
]))
def test_reduce_batch_weighted_counts(self, x, x_spec, weights, filter_regex,
expected_unique_x,
expected_summed_weights_per_x,
function_handler):
input_signature = [x_spec, tf.TensorSpec(None, tf.float32)]
@function_handler(input_signature=input_signature)
def _reduce_batch_weighted_counts(x, weights):
(unique_x, summed_weights_per_x, summed_positive_per_x_and_y,
counts_per_x) = tf_utils.reduce_batch_weighted_counts(
x, weights, filter_regex=filter_regex)
self.assertIsNone(summed_positive_per_x_and_y)
self.assertIsNone(counts_per_x)
return unique_x, summed_weights_per_x
unique_x, summed_weights_per_x = _reduce_batch_weighted_counts(x, weights)
self.assertAllEqual(unique_x,
expected_unique_x)
self.assertAllEqual(summed_weights_per_x,
expected_summed_weights_per_x)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='rank1',
x=['a', 'b', 'a'],
filter_regex=None,
expected_result=[b'a', b'b', b'a'],
),
dict(
testcase_name='rank2',
x=[['a', 'b\r', 'a'], ['b\r', 'a', 'b\r']],
filter_regex=None,
expected_result=[b'a', b'b\r', b'a', b'b\r', b'a', b'b\r'],
),
dict(
testcase_name='rank3',
x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
[['a', 'b', 'a'], ['b', 'a', 'b']]],
filter_regex=None,
expected_result=[
b'a', b'b', b'a', b'b', b'a', b'b', b'a', b'b', b'a', b'b',
b'a', b'b'
],
),
dict(
testcase_name='regex_filtering_empty_result',
x=['a\n\r', 'b\n', 'a\r', '', 'a\rsd', ' \r', '\nas'],
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
expected_result=[],
),
]))
def test_reduce_batch_weighted_counts_weights_none(self, x, filter_regex,
expected_result,
function_handler):
input_signature = [tf.TensorSpec(None, tf.string)]
@function_handler(input_signature=input_signature)
def _reduce_batch_weighted_counts(x):
(unique_x, summed_weights_per_x, summed_positive_per_x_and_y,
counts_per_x) = tf_utils.reduce_batch_weighted_counts(
x, force=False, filter_regex=filter_regex)
self.assertIsNone(summed_weights_per_x)
self.assertIsNone(summed_positive_per_x_and_y)
self.assertIsNone(counts_per_x)
return unique_x
unique_x = _reduce_batch_weighted_counts(x)
self.assertAllEqual(unique_x, expected_result)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='rank1',
x=['a', 'b', 'a'],
filter_regex=None,
expected_result=([b'a', b'b'], [2, 1]),
),
dict(
testcase_name='rank3',
x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
[['a', 'b', 'a'], ['b', 'a', 'b']]],
filter_regex=None,
expected_result=([b'a', b'b'], [6, 6]),
),
dict(
testcase_name='regex_filtering',
x=['a\n\r', 'b\n', 'a\r', '', 'asd', ' ', '\nas'],
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
expected_result=([b'asd', b' '], [1, 1]),
),
dict(
testcase_name='regex_filtering_empty_result',
x=['a\n\r', 'b\n', 'a\r', '', 'a\rsd', ' \r', '\nas'],
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
expected_result=([], []),
),
]))
def test_reduce_batch_weighted_counts_weights_none_force(
self, x, filter_regex, expected_result, function_handler):
input_signature = [tf.TensorSpec(None, tf.string)]
@function_handler(input_signature=input_signature)
def _reduce_batch_weighted_counts(x):
(unique_x, summed_weights_per_x, summed_positive_per_x_and_y,
counts_per_x) = tf_utils.reduce_batch_weighted_counts(
x, force=True, filter_regex=filter_regex)
self.assertIsNone(summed_weights_per_x)
self.assertIsNone(summed_positive_per_x_and_y)
return unique_x, counts_per_x
expected_unique_x, expected_counts_per_x = expected_result
unique_x, counts_per_x = _reduce_batch_weighted_counts(x)
self.assertAllEqual(unique_x, expected_unique_x)
self.assertAllEqual(counts_per_x, expected_counts_per_x)
@test_case.named_parameters([
dict(testcase_name='constant', get_value_fn=lambda: tf.constant([1.618])),
dict(testcase_name='op', get_value_fn=lambda: tf.identity),
dict(testcase_name='int', get_value_fn=lambda: 4),
dict(testcase_name='object', get_value_fn=object),
dict(
testcase_name='sparse',
get_value_fn=lambda: tf.SparseTensor( # pylint: disable=g-long-lambda
indices=[[0, 0], [2, 1]],
values=['a', 'b'],
dense_shape=[4, 2])),
dict(
testcase_name='ragged',
get_value_fn=lambda: tf.RaggedTensor.from_row_splits( # pylint: disable=g-long-lambda
values=['a', 'b'],
row_splits=[0, 1, 2])),
dict(
testcase_name='ragged_multi_dimension',
get_value_fn=lambda: tf.RaggedTensor.from_row_splits( # pylint: disable=g-long-lambda
values=tf.RaggedTensor.from_row_splits(
values=[[0, 1], [2, 3]], row_splits=[0, 1, 2]),
row_splits=[0, 2])),
])
def test_hashable_tensor_or_op(self, get_value_fn):
with tf.compat.v1.Graph().as_default():
input_value = get_value_fn()
input_ref = tf_utils.hashable_tensor_or_op(input_value)
input_dict = {input_ref: input_value}
input_deref = tf_utils.deref_tensor_or_op(input_ref)
if isinstance(input_value, composite_tensor.CompositeTensor):
self._assertCompositeRefEqual(
input_ref, tf_utils.hashable_tensor_or_op(input_deref))
else:
self.assertAllEqual(input_ref,
tf_utils.hashable_tensor_or_op(input_deref))
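      # For SparseTensors, the remaining assertions compare only the .values
      # component of the original, the dereferenced copy, and the dict entry.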
if isinstance(input_value, tf.SparseTensor):
input_deref = input_deref.values
input_dict[input_ref] = input_dict[input_ref].values
input_value = input_value.values
self.assertAllEqual(input_value, input_deref)
self.assertAllEqual(input_value, input_dict[input_ref])
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='rank1_with_weights_and_binary_y',
x=['a', 'b', 'a'],
weights=[1, 1, 2],
y=[0, 1, 1],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [3, 1, 4],
[[1, 2], [0, 1], [1, 3]], [2, 1, 3]),
filter_regex=None,
),
dict(
testcase_name='rank1_with_weights_and_multi_class_y',
x=['a', 'b\n', 'a', 'a'],
weights=[1, 1, 2, 2],
y=[0, 2, 1, 1],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b\n', b'global_y_count_sentinel'], [5, 1, 6],
[[1, 4, 0], [0, 0, 1], [1, 4, 1]], [3, 1, 4]),
filter_regex=None,
),
dict(
testcase_name='rank1_with_weights_and_missing_y_values',
x=['a', 'b', 'a', 'a'],
weights=[1, 1, 2, 2],
y=[3, 5, 6, 6],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [5, 1, 6],
[[0, 0, 0, 1, 0, 0, 4], [0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 4]], [3, 1, 4]),
filter_regex=None,
),
dict(
testcase_name='rank2_with_weights_and_binary_y',
x=[['a', 'b', 'a'], ['b', 'a', 'b']],
weights=[[1, 2, 1], [1, 2, 2]],
y=[[1, 0, 1], [1, 0, 0]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [4, 5, 9],
[[2, 2], [4, 1], [6, 3]], [3, 3, 6]),
filter_regex=None,
),
dict(
testcase_name='rank3_with_weights_and_binary_y',
x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
[['a', 'b', 'a'], ['b', 'a', 'b']]],
weights=[[[1, 1, 2], [1, 2, 1]], [[1, 2, 1], [1, 2, 1]]],
y=[[[1, 1, 0], [1, 0, 1]], [[1, 0, 1], [1, 0, 1]]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [9, 7, 16],
[[6, 3], [2, 5], [8, 8]], [6, 6, 12]),
filter_regex=None,
),
dict(
testcase_name='rank1_with_weights_multi_class_y_and_filtering',
x=['\na\r', '', '\na\r', 'a', ''],
weights=[1, 1, 2, 2, 3],
y=[0, 2, 1, 1, 2],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'global_y_count_sentinel'], [2, 9],
[[0, 2, 0], [1, 4, 4]], [1, 5]),
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
),
dict(
testcase_name='rank1_with_weights_filtering_empty_result',
x=['\na\r', '', '\na\r', '\ra', ''],
weights=[1, 1, 2, 2, 3],
y=[0, 2, 1, 1, 2],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'global_y_count_sentinel'], [9], [[1, 4, 4]], [5]),
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
),
]))
def test_reduce_batch_coocurrences(self, x, weights, y, expected_result,
filter_regex, function_handler):
input_signature = [tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64),
tf.TensorSpec(None, tf.int64)]
@function_handler(input_signature=input_signature)
def _reduce_batch_weighted_cooccurrences(x, y, weights):
return tf_utils.reduce_batch_weighted_cooccurrences(
x, y, weights, filter_regex=filter_regex)
result = _reduce_batch_weighted_cooccurrences(x, y, weights)
self.assertAllEqual(result.unique_x,
expected_result.unique_x)
self.assertAllEqual(result.summed_weights_per_x,
expected_result.summed_weights_per_x)
self.assertAllEqual(result.summed_positive_per_x_and_y,
expected_result.summed_positive_per_x_and_y)
self.assertAllEqual(result.counts_per_x,
expected_result.counts_per_x)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='rank1_with_binary_y',
x=['a', 'b', 'a'],
y=[0, 1, 1],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [2, 1, 3],
[[1, 1], [0, 1], [1, 2]], [2, 1, 3]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank1_with_multi_class_y',
x=['yes', 'no', 'yes', 'may\rbe', 'yes'],
y=[1, 1, 0, 2, 3],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'yes', b'no', b'may\rbe', b'global_y_count_sentinel'],
[3, 1, 1, 5],
[[1, 1, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0], [1, 2, 1, 1]],
[3, 1, 1, 5]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank2_with_binary_y',
x=[['a', 'b', 'a'], ['b', 'a', 'b']],
y=[[1, 0, 1], [1, 0, 0]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [3, 3, 6],
[[1, 2], [2, 1], [3, 3]], [3, 3, 6]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank2_with_missing_y_values',
x=[['a', 'b', 'a'], ['b', 'a', 'b']],
y=[[2, 0, 2], [2, 0, 0]],
# The label 1 isn't in the batch but it will have a position (with
# weights of 0) in the resulting array.
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [3, 3, 6],
[[1, 0, 2], [2, 0, 1], [3, 0, 3]], [3, 3, 6]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank2_with_multi_class_y',
x=[['a', 'b', 'a'], ['b', 'a', 'b']],
y=[[1, 0, 1], [1, 0, 2]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [3, 3, 6],
[[1, 2, 0], [1, 1, 1], [2, 3, 1]], [3, 3, 6]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank3_with_binary_y',
x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
[['a', 'b', 'a'], ['b', 'a', 'b']]],
y=[[[1, 1, 0], [1, 0, 1]], [[1, 0, 1], [1, 0, 1]]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [6, 6, 12],
[[3, 3], [1, 5], [4, 8]], [6, 6, 12]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [2, 1]],
values=['a', 'b'],
dense_shape=[4, 2]),
y=[0, 1, 0, 0],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [1, 1, 4],
[[1, 0], [1, 0], [3, 1]], [1, 1, 4]),
input_signature=[
tf.SparseTensorSpec([None, 2], tf.string),
tf.TensorSpec([None], tf.int64)
],
filter_regex=None),
dict(
testcase_name='empty_sparse',
x=tf.compat.v1.SparseTensorValue(
indices=np.empty([0, 2]), values=[], dense_shape=[4, 2]),
y=[1, 0, 1, 1],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'global_y_count_sentinel'], [4], [[1, 3]], [4]),
input_signature=[
tf.SparseTensorSpec([None, 2], tf.string),
tf.TensorSpec([None], tf.int64)
],
filter_regex=None),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array(['a', 'b', 'a', 'b', 'b']),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
y=[1, 0],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [2, 3, 2],
[[0, 2], [1, 2], [1, 1]], [2, 3, 2]),
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.string),
tf.TensorSpec([None], tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank1_with_filtering',
x=['yes\n', 'no', 'yes\n', '', 'yes\n'],
y=[1, 1, 0, 2, 3],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'no', b'global_y_count_sentinel'], [1, 5],
[[0, 1, 0, 0], [1, 2, 1, 1]], [1, 5]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX),
]))
def test_reduce_batch_coocurrences_no_weights(self, x, y, expected_result,
input_signature, filter_regex,
function_handler):
@function_handler(input_signature=input_signature)
def _reduce_batch_weighted_cooccurrences_no_weights(x, y):
return tf_utils.reduce_batch_weighted_cooccurrences(
x, y, filter_regex=filter_regex)
result = _reduce_batch_weighted_cooccurrences_no_weights(x, y)
self.assertAllEqual(result.unique_x,
expected_result.unique_x)
self.assertAllEqual(result.summed_weights_per_x,
expected_result.summed_weights_per_x)
self.assertAllEqual(result.summed_positive_per_x_and_y,
expected_result.summed_positive_per_x_and_y)
self.assertAllEqual(result.counts_per_x,
expected_result.counts_per_x)
@test_case.parameters(
([[1], [2]], [[1], [2], [3]], None, None, tf.errors.InvalidArgumentError,
'Condition x == y did not hold element-wise:'),
([[1], [2], [3]], [[1], [2], [3]], [None, None], [None], ValueError,
r'Shapes \(None, None\) and \(None,\) are incompatible'),
)
def test_same_shape_exceptions(self, x_input, y_input, x_shape, y_shape,
exception_cls, error_string):
with tf.compat.v1.Graph().as_default():
x = tf.compat.v1.placeholder(tf.int32, x_shape)
y = tf.compat.v1.placeholder(tf.int32, y_shape)
with tf.compat.v1.Session() as sess:
with self.assertRaisesRegexp(exception_cls, error_string):
sess.run(tf_utils.assert_same_shape(x, y), {x: x_input, y: y_input})
@test_case.named_parameters(test_case.FUNCTION_HANDLERS)
def test_same_shape(self, function_handler):
input_signature = [tf.TensorSpec(None, tf.int64),
tf.TensorSpec(None, tf.int64)]
@function_handler(input_signature=input_signature)
def _assert_shape(x, y):
x_return, _ = tf_utils.assert_same_shape(x, y)
return x_return
input_list = [[1], [2], [3]]
x_return = _assert_shape(input_list, input_list)
self.assertAllEqual(x_return, input_list)
@test_case.named_parameters([
dict(
testcase_name='_all_keys_in_vocab',
query_list=['a', 'a', 'b', 'a', 'b'],
key_vocab_list=['a', 'b'],
query_shape=[None],
expected_output=[0, 0, 1, 0, 1]),
dict(
testcase_name='_missing_keys_in_vocab',
query_list=['a', 'c', 'b', 'a', 'b'],
key_vocab_list=['a', 'b'],
query_shape=[None],
expected_output=[0, -1, 1, 0, 1]),
dict(
testcase_name='_nd_keys',
query_list=[['a', 'c', 'b'], ['a', 'b', 'a']],
key_vocab_list=['a', 'b'],
query_shape=[None, None],
expected_output=[[0, -1, 1], [0, 1, 0]]),
dict(
testcase_name='_empty_vocab',
query_list=['a', 'c', 'b', 'a', 'b'],
key_vocab_list=[],
query_shape=[None],
expected_output=[-1, -1, -1, -1, -1]),
dict(
testcase_name='_empty_query',
query_list=[],
key_vocab_list=['a'],
query_shape=[None],
expected_output=[]),
])
def test_lookup_key(self, query_list, key_vocab_list, query_shape,
expected_output):
with tf.compat.v1.Graph().as_default():
query_ph = tf.compat.v1.placeholder(
dtype=tf.string, shape=query_shape, name='query')
key_vocab_ph = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='key_vocab')
key_indices = tf_utils.lookup_key(query_ph, key_vocab_ph)
with tf.compat.v1.Session().as_default() as sess:
output = sess.run(
key_indices,
feed_dict={
query_ph.name: query_list,
key_vocab_ph.name: key_vocab_list
})
self.assertAllEqual(expected_output, output)
@test_case.named_parameters([
dict(
testcase_name='_with_default',
with_default_value=True,
input_keys=['<KEY>']),
dict(
          testcase_name='_without_default',
with_default_value=False,
input_keys=['<KEY>']),
dict(
testcase_name='_single_oov_key',
with_default_value=False,
input_keys=['e'])
])
def test_apply_per_key_vocab(self, with_default_value, input_keys):
default_value = '-7,-5' if with_default_value else None
vocab_data = [('0,0', 'a'), ('1,-1', 'b'), ('-1,1', 'c'), ('-2,2', 'd')]
expected_missing_key_result = [-7, -5] if default_value else [0, 0]
expected_lookup_results = {
'a': [0, 0],
'b': [1, -1],
'c': [-1, 1],
'd': [-2, 2],
}
with tf.compat.v1.Graph().as_default():
input_tensor = _value_to_tensor(input_keys)
vocab_filename = os.path.join(self.get_temp_dir(), 'test.txt')
encoded_vocab = '\n'.join([' '.join(pair) for pair in vocab_data])
with tf.io.gfile.GFile(vocab_filename, 'w') as f:
f.write(encoded_vocab)
output_tensor = tf_utils.apply_per_key_vocabulary(
tf.constant(vocab_filename),
input_tensor,
default_value=default_value)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.tables_initializer())
output = output_tensor.eval()
expected_data = [
expected_lookup_results.get(key, expected_missing_key_result)
for key in input_keys
]
self.assertAllEqual(output, expected_data)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[[1], [2]], [[1], [2]]],
expected_result=4,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.int64)]),
dict(
testcase_name='dense_with_nans',
x=[[[1], [np.nan]], [[1], [2]]],
expected_result=3,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise',
x=[[[1], [2]], [[1], [2]]],
expected_result=[[2], [2]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.int64)]),
dict(
testcase_name='dense_elementwise_with_nans',
x=[[[1], [2]], [[1], [np.nan]]],
expected_result=[[2], [1]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0]],
values=[1., 2., 3., 4.],
dense_shape=[2, 4, 1]),
expected_result=4,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='sparse_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0],
[1, 3, 0]],
values=[1., 2., 3., 4., np.nan],
dense_shape=[2, 4, 1]),
expected_result=4,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='sparse_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0]],
values=[1., 2., 3., 4.],
dense_shape=[2, 4, 1]),
expected_result=[[1], [1], [2], [0]],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='sparse_elementwise_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0],
[1, 3, 0]],
values=[1., 2., 3., 4., np.nan],
dense_shape=[2, 4, 1]),
expected_result=[[1], [1], [2], [0]],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=5,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 3, 4, 6])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=5,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 2, 4, 5])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=[[[2, 1], [0., 0], [1, 1]],
[[0, 0], [0, 0], [0, 0]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 2, 4, 6])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=[[[2, 1], [0., 0], [1, 1]],
[[0, 0], [0, 0], [0, 0]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
]))
def test_reduce_batch_count(self, x, input_signature, expected_result,
reduce_instance_dims, function_handler):
@function_handler(input_signature=input_signature)
def _reduce_batch_count(x):
result = tf_utils.reduce_batch_count(
x, reduce_instance_dims=reduce_instance_dims)
# Verify that the output shape is maintained.
# TODO(b/178189903): This will fail if _dense_shape_default isn't set in
# reduce_batch_count.
if (not isinstance(x, tf.RaggedTensor) and not reduce_instance_dims and
x.get_shape().ndims):
self.assertEqual(x.get_shape()[1:].as_list(),
result.get_shape().as_list())
return result
result = _reduce_batch_count(x)
self.assertAllEqual(result, expected_result)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[[1], [2]], [[3], [4]]],
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_with_nans',
x=[[[1], [2]], [[3], [np.nan]], [[np.nan], [4]]],
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise',
x=[[[1], [2]], [[3], [4]]],
expected_count=[[2.], [2.]],
expected_mean=[[2.], [3.]],
expected_var=[[1.], [1.]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise_with_nans',
x=[[[1], [2]], [[3], [np.nan]], [[np.nan], [4]]],
expected_count=[[2.], [2.]],
expected_mean=[[2.], [3.]],
expected_var=[[1.], [1.]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2]],
values=[1., 2., 3., 4.],
dense_shape=[2, 4]),
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4], tf.float32)]),
dict(
testcase_name='sparse_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [1, 3]],
values=[1., 2., 3., 4., np.nan],
dense_shape=[2, 4]),
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4], tf.float32)]),
dict(
testcase_name='sparse_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 3], [1, 1], [1, 3]],
values=[1., 2., 3., 4.],
dense_shape=[2, 5]),
expected_count=[1.0, 1.0, 0.0, 2.0, 0.0],
expected_mean=[1.0, 3.0, 0.0, 3.0, 0.0],
expected_var=[0.0, 0.0, 0.0, 1.0, 0.0],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 5], tf.float32)]),
dict(
testcase_name='sparse_elementwise_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 3], [1, 1], [1, 2], [1, 3]],
values=[1., 2., 3., np.nan, 4.],
dense_shape=[2, 5]),
expected_count=[1.0, 1.0, 0.0, 2.0, 0.0],
expected_mean=[1.0, 3.0, 0.0, 3.0, 0.0],
expected_var=[0.0, 0.0, 0.0, 1.0, 0.0],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 5], tf.float32)]),
dict(
testcase_name='sparse_3d_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 3], [0, 1, 0], [0, 1, 3], [1, 1, 1],
[1, 1, 3]],
values=[-10., 1., 2., 3., 4.],
dense_shape=[2, 3, 5]),
expected_count=[[0, 0, 0, 1, 0], [1, 1, 0, 2, 0], [0] * 5],
expected_mean=[[0, 0, 0, -10, 0], [1, 3, 0, 3, 0], [0] * 5],
expected_var=[[0] * 5, [0, 0, 0, 1, 0], [0] * 5],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 3, 5], tf.float32)]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=5,
expected_mean=3,
expected_var=2,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 3, 4, 6])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=5,
expected_mean=3,
expected_var=2,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 2, 4, 5])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=[[[2., 1.], [0., 0.], [1., 1.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_mean=[[[3., 2.], [0., 0.], [3., 4.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_var=[[[4., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 2, 4, 6])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=[[[2., 1.], [0., 0.], [1., 1.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_mean=[[[3., 2.], [0., 0.], [3., 4.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_var=[[[4., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
]))
def test_reduce_batch_count_mean_and_var(
self, x, input_signature, expected_count, expected_mean, expected_var,
reduce_instance_dims, function_handler):
@function_handler(input_signature=input_signature)
def _reduce_batch_count_mean_and_var(x):
result = tf_utils.reduce_batch_count_mean_and_var(
x, reduce_instance_dims=reduce_instance_dims)
# Verify that the output shapes are maintained.
# TODO(b/178189903): This will fail if _dense_shape_default isn't set in
# reduce_batch_count.
if (not isinstance(x, tf.RaggedTensor) and not reduce_instance_dims and
x.get_shape().ndims):
for tensor in result:
self.assertEqual(x.get_shape()[1:].as_list(),
tensor.get_shape().as_list())
return result
count, mean, var = _reduce_batch_count_mean_and_var(x)
self.assertAllEqual(expected_count, count)
self.assertAllEqual(expected_mean, mean)
self.assertAllEqual(expected_var, var)
@test_case.named_parameters([
dict(
testcase_name='num_samples_1',
num_samples=1,
dtype=tf.float32,
expected_counts=np.array([1, 0, 0, 0], np.float32),
expected_factors=np.array([[1.0], [0.0], [0.0], [0.0]], np.float32)),
dict(
testcase_name='num_samples_2',
num_samples=2,
dtype=tf.float32,
expected_counts=np.array([2, 1, 0, 0], np.float32),
expected_factors=np.array(
[[1. / 2., 1. / 2.], [-1. / 2., 1. / 2.], [0., 0.], [0., 0.]],
np.float32)),
dict(
testcase_name='num_samples_3',
num_samples=3,
dtype=tf.float32,
expected_counts=np.array([3, 3, 1, 0], np.float32),
expected_factors=np.array(
[[1. / 3., 1. / 3., 1. / 3.], [-1. / 3., 0., 1. / 3.],
[1. / 3., -2. / 3., 1. / 3.], [0., 0., 0.]], np.float32)),
dict(
testcase_name='num_samples_4',
num_samples=4,
dtype=tf.float32,
expected_counts=np.array([4, 6, 4, 1], np.float32),
expected_factors=np.array(
[[1. / 4., 1. / 4., 1. / 4., 1. / 4.],
[-3. / 12., -1. / 12., 1. / 12., 3. / 12.],
[1. / 4., -1. / 4., -1. / 4., 1. / 4.],
[-1. / 4., 3. / 4., -3. / 4., 1. / 4.]], np.float32))
])
def test_num_terms_and_factors(
self, num_samples, dtype, expected_counts, expected_factors):
results = tf_utils._num_terms_and_factors(num_samples, dtype)
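    # _num_terms_and_factors returns four count tensors followed by the factor
    # matrices, so slice the flat result into those two groups.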
counts = results[0:4]
assert len(expected_counts) == len(counts), (expected_counts, counts)
for result, expected_count in zip(counts, expected_counts):
self.assertEqual(result.dtype, dtype)
self.assertAllClose(result, expected_count)
factors = results[4:]
assert len(expected_factors) == len(factors), (expected_factors, factors)
for result, expected_factor in zip(factors, expected_factors):
self.assertEqual(result.dtype, dtype)
self.assertAllClose(result, expected_factor)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[[1], [2]], [[3], [4]]],
expected_counts=np.array([4., 6., 4., 1.], np.float32),
expected_moments=np.array([2.5, 10.0 / 12.0, 0.0, 0.0],
np.float32),
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_large',
x=[2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5],
expected_counts=np.array(
[11, 11 * 10 // 2, 11 * 10 * 9 // 6, 11 * 10 * 9 * 8 // 24],
np.float32),
expected_moments=np.array([
3.2363636363636363, 1.141818181818182, 0.31272727272727263,
0.026666666666666616
], np.float32),
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_very_large',
x=-np.log(1.0 - np.arange(0, 1, 1e-6, dtype=np.float32)),
expected_counts=np.array([
1000000, 499999500000.0, 1.66666166667e+17,
4.1666416667125e+22
], np.float32),
expected_moments=np.array([
0.99999217330, 0.4999936732947, 0.166660839941,
0.0833278399134
], np.float32),
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise',
x=[[[1], [2]], [[3], [4]]],
expected_counts=np.array(
[[[2], [2]], [[1], [1]], [[0], [0]], [[0], [0]]], np.float32),
expected_moments=np.array([[[2.0], [3.0]], [[1.0], [1.0]],
[[0.0], [0.0]], [[0.0], [0.0]]],
np.float32),
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [2, 0], [2, 2]],
values=[1., 2., 3., 4.],
dense_shape=[3, 4]),
expected_counts=np.array([4, 6, 4, 1], np.float32),
expected_moments=np.array([2.5, 10.0 / 12.0, 0.0, 0.0],
np.float32),
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4], tf.float32)]),
dict(
testcase_name='sparse_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [2, 0, 0], [2, 2, 0],
[3, 3, 0]],
values=[1., 2., 3., 4., 5.],
dense_shape=[3, 5, 1]),
expected_counts=np.array(
[[[2], [0], [2], [1], [0]], [[1], [0], [1], [0], [0]],
[[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]]],
np.float32),
expected_moments=np.array([[[2.0], [0.0], [3.0], [5.0], [0.0]],
[[1.0], [0.0], [1.0], [0.0], [0.0]],
[[0.0], [0.0], [0.0], [0.0], [0.0]],
[[0.0], [0.0], [0.0], [0.0], [0.0]]],
np.float32),
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 5, 1], tf.float32)]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_counts=np.array([5., 10., 10., 5.], np.float32),
expected_moments=np.array([3., 1., 0., 0.], np.float32),
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
]))
def test_reduce_batch_count_l_moments(
self, x, input_signature, expected_counts, expected_moments,
reduce_instance_dims, function_handler):
@function_handler(input_signature=input_signature)
def _reduce_batch_count_l_moments(x):
result = tf_utils.reduce_batch_count_l_moments(
x, reduce_instance_dims=reduce_instance_dims)
for tensor in result:
if not reduce_instance_dims and x.get_shape().ndims:
self.assertEqual(x.get_shape()[1:].as_list(),
tensor.get_shape().as_list())
return result
count_and_moments = _reduce_batch_count_l_moments(x)
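    # The returned tuple interleaves counts and L-moments as
    # (count_1, moment_1, count_2, moment_2, ...), so split even and odd slots.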
counts = count_and_moments[0::2]
moments = count_and_moments[1::2]
for i in range(0, 4):
self.assertEqual(counts[i].dtype, expected_counts[i].dtype)
self.assertAllClose(counts[i], expected_counts[i], rtol=1e-8)
self.assertEqual(moments[i].dtype, expected_moments[i].dtype)
self.assertAllClose(moments[i], expected_moments[i], rtol=1e-8)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[1], [2], [3], [4], [4]],
key=['a', 'a', 'a', 'b', 'a'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4., 1.],
expected_mean=[2.5, 4.],
expected_var=[1.25, 0.],
reduce_instance_dims=True,
input_signature=[
tf.TensorSpec([None, 1], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='dense_with_nans',
x=[[1], [2], [3], [4], [4], [np.nan], [np.nan]],
key=['a', 'a', 'a', 'b', 'a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4., 1.],
expected_mean=[2.5, 4.],
expected_var=[1.25, 0.],
reduce_instance_dims=True,
input_signature=[
tf.TensorSpec([None, 1], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='dense_elementwise',
x=[[1, 2], [3, 4], [1, 2]],
key=['a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[[2., 2.], [1., 1.]],
expected_mean=[[2., 3.], [1., 2.]],
expected_var=[[1., 1.], [0., 0.]],
reduce_instance_dims=False,
input_signature=[
tf.TensorSpec([None, 2], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='dense_elementwise_with_nans',
x=[[1, 2], [3, 4], [1, 2], [np.nan, np.nan]],
key=['a', 'a', 'b', 'a'],
expected_key_vocab=[b'a', b'b'],
expected_count=[[2., 2.], [1., 1.]],
expected_mean=[[2., 3.], [1., 2.]],
expected_var=[[1., 1.], [0., 0.]],
reduce_instance_dims=False,
input_signature=[
tf.TensorSpec([None, 2], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 3]],
values=[1., 2., 3., 4., 4.],
dense_shape=[3, 4]),
key=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 3]],
values=['a', 'a', 'a', 'a', 'b'],
dense_shape=[3, 4]),
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[2.5, 4],
expected_var=[1.25, 0],
reduce_instance_dims=True,
input_signature=[
tf.SparseTensorSpec([None, 4], tf.float32),
tf.SparseTensorSpec([None, 4], tf.string)
]),
dict(
testcase_name='sparse_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 2], [2, 3]],
values=[1., 2., 3., 4., np.nan, 4.],
dense_shape=[3, 4]),
key=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 2], [2, 3]],
values=['a', 'a', 'a', 'a', 'a', 'b'],
dense_shape=[3, 4]),
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[2.5, 4],
expected_var=[1.25, 0],
reduce_instance_dims=True,
input_signature=[
tf.SparseTensorSpec([None, 4], tf.float32),
tf.SparseTensorSpec([None, 4], tf.string)
]),
dict(
testcase_name='sparse_x_dense_key',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 3]],
values=[1., 2., 3., 4., 4.],
dense_shape=[3, 4]),
key=['a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[2.5, 4],
expected_var=[1.25, 0],
reduce_instance_dims=True,
input_signature=[
tf.SparseTensorSpec([None, 4], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([3., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
key=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array(['a', 'a', 'b', 'a', 'b']),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_key_vocab=[b'a', b'b'],
expected_count=[3, 2],
expected_mean=[3, 4],
expected_var=[np.float32(0.666667), 1.],
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32),
tf.RaggedTensorSpec([None, None, None, None], tf.string)
]),
dict(
testcase_name='ragged_x_dense_key',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([3., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
key=['a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[3, 5],
expected_var=[.5, 0.],
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([2, None, None, None], tf.float32),
tf.TensorSpec([2], tf.string)
]),
dict(
testcase_name='ragged_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([3., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 3, 4, 6])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
key=['a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[3, 5],
expected_var=[.5, 0.],
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([2, None, None, None], tf.float32),
tf.TensorSpec([2], tf.string)
]),
]))
def test_reduce_batch_count_mean_and_var_per_key(
self, x, key, input_signature, expected_key_vocab, expected_count,
expected_mean, expected_var, reduce_instance_dims, function_handler):
@function_handler(input_signature=input_signature)
def _reduce_batch_count_mean_and_var_per_key(x, key):
return tf_utils.reduce_batch_count_mean_and_var_per_key(
x, key, reduce_instance_dims=reduce_instance_dims)
key_vocab, count, mean, var = _reduce_batch_count_mean_and_var_per_key(
x, key)
self.assertAllEqual(key_vocab, expected_key_vocab)
self.assertAllEqual(count, expected_count)
self.assertAllEqual(mean, expected_mean)
self.assertAllEqual(var, expected_var)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2]],
values=[3, 2, -1],
dense_shape=[1, 5]),
expected_x_minus_min=1,
expected_x_max=3,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, None], tf.int64)]),
dict(
testcase_name='float',
x=[[1, 5, 2]],
expected_x_minus_min=-1,
expected_x_max=5,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec([None, None], tf.float32)]),
dict(
testcase_name='sparse_float_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 1], [1, 0]],
values=[3, 2, -1],
dense_shape=[2, 3]),
expected_x_minus_min=[1, -2, np.nan],
expected_x_max=[3, 2, np.nan],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, None], tf.float32)]),
dict(
testcase_name='float_elementwise',
x=[[1, 5, 2], [2, 3, 4]],
reduce_instance_dims=False,
expected_x_minus_min=[-1, -3, -2],
expected_x_max=[2, 5, 4],
input_signature=[tf.TensorSpec([None, None], tf.float32)]),
dict(
testcase_name='sparse_int64_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 1], [1, 0]],
values=[3, 2, -1],
dense_shape=[2, 3]),
reduce_instance_dims=False,
expected_x_minus_min=[1, -2, tf.int64.min + 1],
expected_x_max=[3, 2, tf.int64.min + 1],
input_signature=[tf.SparseTensorSpec([None, None], tf.int64)]),
dict(
testcase_name='sparse_int32_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 1], [1, 0]],
values=[3, 2, -1],
dense_shape=[2, 3]),
reduce_instance_dims=False,
expected_x_minus_min=[1, -2, tf.int32.min + 1],
expected_x_max=[3, 2, tf.int32.min + 1],
input_signature=[tf.SparseTensorSpec([None, None], tf.int32)]),
dict(
testcase_name='sparse_float64_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 1], [1, 0]],
values=[3, 2, -1],
dense_shape=[2, 3]),
reduce_instance_dims=False,
expected_x_minus_min=[1, -2, np.nan],
expected_x_max=[3, 2, np.nan],
input_signature=[tf.SparseTensorSpec([None, None], tf.float64)]),
dict(
testcase_name='sparse_float32_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 1], [1, 0]],
values=[3, 2, -1],
dense_shape=[2, 3]),
reduce_instance_dims=False,
expected_x_minus_min=[1, -2, np.nan],
expected_x_max=[3, 2, np.nan],
input_signature=[tf.SparseTensorSpec([None, None], tf.float32)]),
dict(
testcase_name='sparse_3d_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 0, 1], [1, 0, 1]],
values=[3, 2, -1],
dense_shape=[2, 3, 3]),
reduce_instance_dims=False,
expected_x_minus_min=[[-3, 1, np.nan], [np.nan] * 3,
[np.nan] * 3],
expected_x_max=[[3, 2, np.nan], [np.nan] * 3, [np.nan] * 3],
input_signature=[
tf.SparseTensorSpec([None, None, None], tf.float32)
]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 5])),
row_splits=np.array([0, 2, 3])),
reduce_instance_dims=True,
expected_x_minus_min=-1.,
expected_x_max=5.,
input_signature=[
tf.RaggedTensorSpec([2, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 2, 4, 5])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
reduce_instance_dims=False,
expected_x_minus_min=[[[-1.0, -2.0], [np.nan, np.nan],
[-3.0, -4.0]],
[[np.nan, np.nan], [np.nan, np.nan],
[np.nan, np.nan]]],
expected_x_max=[[[5.0, 2.0], [np.nan, np.nan], [3.0, 4.0]],
[[np.nan, np.nan], [np.nan, np.nan],
[np.nan, np.nan]]],
input_signature=[
tf.RaggedTensorSpec([2, None, None, None], tf.float32)
]),
dict(
testcase_name='all_nans',
x=[[np.nan, np.nan, np.nan]],
# Output of `tf.reduce_max` if all inputs are NaNs for older
# versions of TF is -inf.
expected_x_minus_min=(-np.inf if version.parse(tf.__version__) <
version.parse('2.4') else np.nan),
expected_x_max=(-np.inf if version.parse(tf.__version__) <
version.parse('2.4') else np.nan),
reduce_instance_dims=True,
input_signature=[tf.TensorSpec([None, None], tf.float32)]),
dict(
testcase_name='empty_batch',
x=[[]],
expected_x_minus_min=-np.inf,
expected_x_max=-np.inf,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec([None, None], tf.float32)]),
]))
def test_reduce_batch_minus_min_and_max(
self, x, expected_x_minus_min, expected_x_max, reduce_instance_dims,
input_signature, function_handler):
@function_handler(input_signature=input_signature)
def _reduce_batch_minus_min_and_max(x):
result = tf_utils.reduce_batch_minus_min_and_max(
x, reduce_instance_dims=reduce_instance_dims)
# Verify that the output shapes are maintained.
if (not reduce_instance_dims and not isinstance(x, tf.RaggedTensor)):
for tensor in result:
self.assertEqual(x.get_shape()[1:].as_list(),
tensor.get_shape().as_list())
return result
x_minus_min, x_max = _reduce_batch_minus_min_and_max(x)
self.assertAllEqual(x_minus_min, expected_x_minus_min)
self.assertAllEqual(x_max, expected_x_max)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 1], [2, 2], [3, 1]],
values=[3, 2, -1, 3],
dense_shape=[4, 5]),
              key=['a', 'a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_x_minus_min=[1, -3],
expected_x_max=[3, 3],
input_signature=[
tf.SparseTensorSpec([None, None], tf.int64),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='float',
x=[[1], [5], [2], [3]],
              key=['a', 'a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_x_minus_min=[-1, -3],
expected_x_max=[5, 3],
input_signature=[
tf.TensorSpec([None, None], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='float3dims',
x=[[[1, 5], [1, 1]], [[5, 1], [5, 5]], [[2, 2], [2, 5]],
[[3, -3], [3, 3]]],
              key=['a', 'a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_x_minus_min=[-1, 3],
expected_x_max=[5, 3],
input_signature=[
tf.TensorSpec([None, None, None], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([3., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
key=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array(['a', 'a', 'b', 'a', 'b']),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_key_vocab=[b'a', b'b'],
expected_x_minus_min=[-2., -3.],
expected_x_max=[4., 5.],
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32),
tf.RaggedTensorSpec([None, None, None, None], tf.string)
]),
]))
def test_reduce_batch_minus_min_and_max_per_key(
self, x, key, expected_key_vocab, expected_x_minus_min, expected_x_max,
input_signature, function_handler):
@function_handler(input_signature=input_signature)
def _reduce_batch_minus_min_and_max_per_key(x, key):
return tf_utils.reduce_batch_minus_min_and_max_per_key(x, key)
key_vocab, x_minus_min, x_max = _reduce_batch_minus_min_and_max_per_key(
x, key)
self.assertAllEqual(key_vocab, expected_key_vocab)
self.assertAllEqual(x_minus_min, expected_x_minus_min)
self.assertAllEqual(x_max, expected_x_max)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
key=['a', 'a', 'a', 'b'],
spec=tf.TensorSpec([None], tf.string),
expected_key_vocab=[b'a', b'b'],
expected_count=[3, 1]),
dict(
testcase_name='sparse',
key=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 1], [2, 2], [3, 1]],
values=[3, 2, -1, 3],
dense_shape=[4, 5]),
spec=tf.SparseTensorSpec([4, 5], tf.int64),
expected_key_vocab=[b'3', b'2', b'-1'],
expected_count=[2, 1, 1]),
dict(
testcase_name='ragged',
key=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1.2, 1., 1.2, 1.]),
row_splits=np.array([0, 2, 4])),
row_splits=np.array([0, 2])),
spec=tf.RaggedTensorSpec([1, None, None], tf.float32),
expected_key_vocab=[b'1.200000', b'1.000000'],
expected_count=[2, 2]),
]))
def test_reduce_batch_count_per_key(self, key, spec, expected_key_vocab,
expected_count, function_handler):
@function_handler(input_signature=[spec])
def _reduce_batch_count_per_key(key):
return tf_utils.reduce_batch_count_per_key(key)
key_vocab, key_counts = _reduce_batch_count_per_key(key)
self.assertAllEqual(key_vocab, expected_key_vocab)
self.assertAllEqual(key_counts, expected_count)
@test_case.named_parameters(test_case.cross_with_function_handlers([
dict(
testcase_name='full',
bucket_vocab=['1', '2', '0'],
counts=[3, 1, 4],
boundary_size=3,
expected_counts=[4, 3, 1]),
dict(
testcase_name='missing',
bucket_vocab=['1', '3', '0'],
counts=[3, 1, 4],
boundary_size=5,
expected_counts=[4, 3, 0, 1, 0]),
]))
def test_reorder_histogram(
self, bucket_vocab, counts, boundary_size,
expected_counts, function_handler):
input_signature = [tf.TensorSpec([None], tf.string),
tf.TensorSpec([None], tf.int64),
tf.TensorSpec([], tf.int32)]
@function_handler(input_signature=input_signature)
def _reorder_histogram(bucket_vocab, counts, boundary_size):
return tf_utils.reorder_histogram(bucket_vocab, counts, boundary_size)
counts = _reorder_histogram(bucket_vocab, counts, boundary_size)
self.assertAllEqual(counts, expected_counts)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='simple',
x=[0.0, 2.0, 3.5, 4.0],
x_spec=tf.TensorSpec([None], tf.float32),
boundaries=[[1.0, 2.0, 3.0, 3.9]],
boundaries_spec=tf.TensorSpec([1, None], tf.float32),
side=tf_utils.Side.LEFT,
expected_buckets=[0, 1, 3, 3]),
dict(
testcase_name='simple_right',
x=[0.0, 2.0, 3.5, 4.0],
x_spec=tf.TensorSpec([None], tf.float32),
boundaries=[1.0, 2.0, 3.0, 3.9],
boundaries_spec=tf.TensorSpec([None], tf.float32),
side=tf_utils.Side.RIGHT,
expected_buckets=[0, 2, 3, 4]),
dict(
testcase_name='2dim',
x=[[0.0, 4.0, 3.5, 2.0, 1.7]],
x_spec=tf.TensorSpec([1, None], tf.float32),
boundaries=[[1.0, 2.0, 3.0, 5.0]],
boundaries_spec=tf.TensorSpec([1, None], tf.float32),
side=tf_utils.Side.LEFT,
expected_buckets=[[0, 3, 3, 1, 1]]),
dict(
testcase_name='large_buckets',
x=[[50_000_000]],
x_spec=tf.TensorSpec([1, None], tf.int64),
boundaries=[0, 50_000_001, 100_000_001],
boundaries_spec=tf.TensorSpec([None], tf.int64),
side=tf_utils.Side.RIGHT,
expected_buckets=[[1]]),
]))
def test_assign_buckets(self, x, x_spec, boundaries, boundaries_spec, side,
expected_buckets, function_handler):
@function_handler(input_signature=[x_spec, boundaries_spec])
def _assign_buckets(x, boundaries):
return tf_utils.assign_buckets(x, boundaries, side)
buckets = _assign_buckets(x, boundaries)
self.assertAllEqual(buckets, expected_buckets)
def test_sparse_indices(self):
exception_cls = tf.errors.InvalidArgumentError
error_string = 'Condition x == y did not hold element-wise:'
value = tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 1], [2, 2], [3, 1]],
values=[3, 2, -1, 3],
dense_shape=[4, 5])
key_value = tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 2], [2, 2], [3, 1]],
values=['a', 'a', 'a', 'b'],
dense_shape=[4, 5])
with tf.compat.v1.Graph().as_default():
x = tf.compat.v1.sparse_placeholder(tf.int64, shape=[None, None])
key = tf.compat.v1.sparse_placeholder(tf.string, shape=[None, None])
with tf.compat.v1.Session() as sess:
with self.assertRaisesRegexp(exception_cls, error_string):
sess.run(tf_utils.reduce_batch_minus_min_and_max_per_key(x, key),
feed_dict={x: value, key: key_value})
def test_convert_sparse_indices(self):
exception_cls = tf.errors.InvalidArgumentError
error_string = 'Condition x == y did not hold element-wise:'
sparse = tf.SparseTensor(
indices=[[0, 0, 0], [1, 0, 1], [2, 0, 2], [3, 0, 1]],
values=[3, 2, -1, 3],
dense_shape=[4, 2, 5])
dense = tf.constant(['a', 'b', 'c', 'd'])
x, key = tf_utils._validate_and_get_dense_value_key_inputs(sparse, sparse)
self.assertAllEqual(self.evaluate(x), sparse.values)
self.assertAllEqual(self.evaluate(key), sparse.values)
x, key = tf_utils._validate_and_get_dense_value_key_inputs(sparse, dense)
self.assertAllEqual(self.evaluate(x), sparse.values)
self.assertAllEqual(self.evaluate(key), dense)
with tf.compat.v1.Graph().as_default():
sparse1 = tf.compat.v1.sparse_placeholder(
tf.int64, shape=[None, None, None])
sparse2 = tf.compat.v1.sparse_placeholder(
tf.int64, shape=[None, None, None])
sparse_value1 = tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [1, 0, 1], [2, 0, 2], [3, 0, 1]],
values=[3, 2, -1, 3],
dense_shape=[4, 2, 5])
sparse_value2 = tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [1, 0, 2], [2, 0, 2], [3, 0, 1]],
values=[3, 2, -1, 3],
dense_shape=[4, 2, 5])
with tf.compat.v1.Session() as sess:
with self.assertRaisesRegexp(exception_cls, error_string):
sess.run(tf_utils._validate_and_get_dense_value_key_inputs(sparse1,
sparse2),
feed_dict={sparse1: sparse_value1, sparse2: sparse_value2})
def test_convert_ragged_indices(self):
exception_cls = tf.errors.InvalidArgumentError
error_string = 'Condition x == y did not hold element-wise:'
ragged = tf.RaggedTensor.from_row_splits(
values=tf.RaggedTensor.from_row_splits(
values=np.array([1.2, 1., 1.2, 1.]), row_splits=np.array([0, 2,
4])),
row_splits=np.array([0, 1, 2]))
dense = tf.constant(['a', 'b'])
dense_result = tf.constant(['a', 'a', 'b', 'b'])
x, key = tf_utils._validate_and_get_dense_value_key_inputs(ragged, ragged)
self.assertAllEqual(self.evaluate(x), ragged.flat_values)
self.assertAllEqual(self.evaluate(key), ragged.flat_values)
x, key = tf_utils._validate_and_get_dense_value_key_inputs(ragged, dense)
self.assertAllEqual(self.evaluate(x), ragged.flat_values)
self.assertAllEqual(self.evaluate(key), dense_result)
with tf.compat.v1.Graph().as_default():
ragged1 = tf.compat.v1.ragged.placeholder(tf.float32, 2)
ragged2 = tf.compat.v1.ragged.placeholder(tf.float32, 2)
ragged_value1 = tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1.2, 1., 1.2, 1.]),
row_splits=np.array([0, 2, 4])),
row_splits=np.array([0, 2]))
ragged_value2 = tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1.2, 1., 1.2, 1.]),
row_splits=np.array([0, 3, 4])),
row_splits=np.array([0, 2]))
with tf.compat.v1.Session() as sess:
with self.assertRaisesRegex(exception_cls, error_string):
sess.run(
tf_utils._validate_and_get_dense_value_key_inputs(
ragged1, ragged2),
feed_dict={
ragged1: ragged_value1,
ragged2: ragged_value2
})
@test_case.named_parameters(
dict(
testcase_name='dense_tensor',
key=['b', 'a', 'b'],
key_vocab=['a', 'b'],
reductions=([1, 2], [3, 4]),
x=[5, 6, 7],
expected_results=([2, 1, 2], [4, 3, 4])),
dict(
testcase_name='sparse_tensor_dense_key',
key=['b', 'a', 'b'],
key_vocab=['a', 'b'],
reductions=([1, 2], [3, 4]),
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 2], [2, 2], [2, 3]],
values=[3, 2, -1, 3],
dense_shape=[3, 5]),
expected_results=([2, 1, 2, 2], [4, 3, 4, 4])),
dict(
testcase_name='sparse_tensor_sparse_key',
key=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 2], [2, 2], [2, 3]],
values=['b', 'a', 'b', 'b'],
dense_shape=[3, 5]),
key_vocab=['a', 'b'],
reductions=([1, 2], [3, 4]),
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 2], [2, 2], [2, 3]],
values=[3, 2, -1, 3],
dense_shape=[3, 5]),
expected_results=([2, 1, 2, 2], [4, 3, 4, 4])),
dict(
testcase_name='ragged_tensor_dense_key',
key=['a', 'b', 'a'],
key_vocab=['a', 'b'],
reductions=([1, 2], [3, 4]),
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1.2, 1., 1.2, 1.]),
"""
Gaussian copula mutual information estimation.
| **Authors** : <NAME>
| **Original code** : https://github.com/robince/gcmi
| **Reference** :
| RAA Ince, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>
"A statistical framework for neuroimaging data analysis based on mutual
information estimated via a Gaussian copula" Human Brain Mapping (2017)
38 p. 1541-1573 doi:10.1002/hbm.23471
"""
import numpy as np
import scipy as sp
from frites.core import copnorm_nd, copnorm_cat_nd
def ent_1d_g(x, biascorrect=True):
"""Entropy of a Gaussian variable in bits.
H = ent_g(x) returns the entropy of a (possibly multidimensional) Gaussian
variable x with bias correction.
Parameters
----------
x : array_like
Array of data of shape (n_epochs,)
biascorrect : bool | True
Specifies whether bias correction should be applied to the estimated entropy
Returns
-------
hx : float
Entropy of the gaussian variable (in bits)
"""
x = np.atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be at most 2d") # noqa
nvarx, ntrl = x.shape
# demean data
x = x - x.mean(axis=1)[:, np.newaxis]
# covariance
c = np.dot(x, x.T) / float(ntrl - 1)
chc = np.linalg.cholesky(c)
# entropy in nats
hx = np.sum(np.log(np.diagonal(chc))) + .5 * nvarx * (
np.log(2 * np.pi) + 1.)
ln2 = np.log(2)
if biascorrect:
psiterms = sp.special.psi((ntrl - np.arange(1, nvarx + 1).astype(float)) / 2.) / 2.
dterm = (ln2 - np.log(ntrl - 1.)) / 2.
hx = hx - nvarx * dterm - psiterms.sum()
# convert to bits
return hx / ln2
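# A minimal usage sketch (illustrative only; the sample size below is an
# arbitrary assumption). For a standard Gaussian the analytical entropy is
# .5 * log2(2 * pi * e) ~= 2.047 bits, which ent_1d_g should approach for
# large samples:
#
#     rng = np.random.default_rng(0)
#     x = rng.standard_normal(100000)
#     ent_1d_g(x)                      # roughly 2.05
#     .5 * np.log2(2 * np.pi * np.e)   # analytical reference, ~2.047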
def mi_1d_gg(x, y, biascorrect=True, demeaned=False):
"""Mutual information (MI) between two Gaussian variables in bits.
I = mi_gg(x,y) returns the MI between two (possibly multidimensional)
Gaussian variables, x and y, with bias correction.
Parameters
----------
x, y : array_like
Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs)
biascorrect : bool | True
Specifies whether bias correction should be applied to the estimated MI
demeaned : bool | False
Specifies whether the input data already has zero mean (true if it has
been copula-normalized)
Returns
-------
i : float
Information shared by x and y (in bits)
"""
x, y = np.atleast_2d(x), np.atleast_2d(y)
if (x.ndim > 2) or (y.ndim > 2):
raise ValueError("x and y must be at most 2d")
nvarx, ntrl = x.shape
nvary = y.shape[0]
nvarxy = nvarx + nvary
if y.shape[1] != ntrl:
raise ValueError("number of trials do not match")
# joint variable
xy = np.vstack((x, y))
if not demeaned:
xy = xy - xy.mean(axis=1)[:, np.newaxis]
cxy = np.dot(xy, xy.T) / float(ntrl - 1)
# submatrices of joint covariance
cx = cxy[:nvarx, :nvarx]
cy = cxy[nvarx:, nvarx:]
chcxy = np.linalg.cholesky(cxy)
chcx = np.linalg.cholesky(cx)
chcy = np.linalg.cholesky(cy)
# entropies in nats
# normalizations cancel for mutual information
hx = np.sum(np.log(np.diagonal(chcx)))
hy = np.sum(np.log(np.diagonal(chcy)))
hxy = np.sum(np.log(np.diagonal(chcxy)))
ln2 = np.log(2)
if biascorrect:
psiterms = sp.special.psi((ntrl - np.arange(1, nvarxy + 1)).astype(float) / 2.) / 2.
dterm = (ln2 - np.log(ntrl - 1.)) / 2.
hx = hx - nvarx * dterm - psiterms[:nvarx].sum()
hy = hy - nvary * dterm - psiterms[:nvary].sum()
hxy = hxy - nvarxy * dterm - psiterms[:nvarxy].sum()
# MI in bits
i = (hx + hy - hxy) / ln2
return i
def gcmi_1d_cc(x, y):
"""Gaussian-Copula MI between two continuous variables.
I = gcmi_cc(x,y) returns the MI between two (possibly multidimensional)
continuous variables, x and y, estimated via a Gaussian copula.
Parameters
----------
x, y : array_like
Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs)
Returns
-------
i : float
Information shared by x and y (in bits)
"""
x, y = np.atleast_2d(x), np.atleast_2d(y)
if x.ndim > 2 or y.ndim > 2:
raise ValueError("x and y must be at most 2d")
nvarx, ntrl = x.shape
if y.shape[1] != ntrl:
raise ValueError("number of trials do not match")
# copula normalization
cx, cy = copnorm_nd(x, axis=1), copnorm_nd(y, axis=1)
# parametric Gaussian MI
return mi_1d_gg(cx, cy, True, True)
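# A minimal usage sketch (illustrative only). For jointly Gaussian x and y
# with correlation rho, the true MI is -.5 * log2(1 - rho ** 2) bits, so the
# copula estimate should be close to that value for large samples:
#
#     rng = np.random.default_rng(0)
#     n, rho = 100000, 0.6
#     x = rng.standard_normal(n)
#     y = rho * x + np.sqrt(1 - rho ** 2) * rng.standard_normal(n)
#     gcmi_1d_cc(x, y)              # roughly 0.32
#     -.5 * np.log2(1 - rho ** 2)   # analytical reference, ~0.322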
def mi_model_1d_gd(x, y, biascorrect=True, demeaned=False):
"""Mutual information between a Gaussian and a discrete variable in bits.
This method is based on ANOVA style model comparison.
I = mi_model_gd(x,y) returns the MI between the (possibly multidimensional)
Gaussian variable x and the discrete variable y.
Parameters
----------
x, y : array_like
Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y
must be an array of integers
biascorrect : bool | True
Specifies whether bias correction should be applied to the estimated MI
demeaned : bool | False
Specifies whether the input data already has zero mean (true if it has
been copula-normalized)
Returns
-------
i : float
Information shared by x and y (in bits)
"""
x, y = np.atleast_2d(x), np.squeeze(y)
if x.ndim > 2:
raise ValueError("x must be at most 2d")
if y.ndim > 1:
raise ValueError("only univariate discrete variables supported")
if not np.issubdtype(y.dtype, np.integer):
raise ValueError("y should be an integer array")
nvarx, ntrl = x.shape
ym = np.unique(y)
if y.size != ntrl:
raise ValueError("number of trials do not match")
if not demeaned:
x = x - x.mean(axis=1)[:, np.newaxis]
# class-conditional entropies
ntrl_y = np.zeros(len(ym))
hcond = np.zeros(len(ym))
for n_yi, yi in enumerate(ym):
idx = y == yi
xm = x[:, idx]
ntrl_y[n_yi] = xm.shape[1]
xm = xm - xm.mean(axis=1)[:, np.newaxis]
cm = np.dot(xm, xm.T) / float(ntrl_y[n_yi] - 1)
chcm = np.linalg.cholesky(cm)
hcond[n_yi] = np.sum(np.log(np.diagonal(chcm)))
# class weights
w = ntrl_y / float(ntrl)
# unconditional entropy from unconditional Gaussian fit
cx = np.dot(x, x.T) / float(ntrl - 1)
chc = np.linalg.cholesky(cx)
hunc = np.sum(np.log(np.diagonal(chc))) # + c*nvarx
ln2 = np.log(2)
if biascorrect:
vars = np.arange(1, nvarx + 1)
psiterms = sp.special.psi((ntrl - vars).astype(float) / 2.) / 2.
dterm = (ln2 - np.log(float(ntrl - 1))) / 2.
hunc = hunc - nvarx * dterm - psiterms.sum()
dterm = (ln2 - np.log((ntrl_y - 1).astype(float))) / 2.0
psiterms = np.zeros(len(ym))
for vi in vars:
idx = ntrl_y - vi
psiterms = psiterms + sp.special.psi(idx.astype(float) / 2.)
hcond = hcond - nvarx * dterm - (psiterms / 2.)
# MI in bits
i = (hunc - np.sum(w * hcond)) / ln2
return i
def gcmi_model_1d_cd(x, y):
"""Gaussian-Copula MI between a continuous and a discrete variable.
This method is based on ANOVA style model comparison.
I = gcmi_model_cd(x,y,Ym) returns the MI between the (possibly
multidimensional) continuous variable x and the discrete variable y.
Parameters
----------
x, y : array_like
Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y
must be an array of integers
Returns
-------
i : float
Information shared by x and y (in bits)
"""
x, y = np.atleast_2d(x), np.squeeze(y)
if x.ndim > 2:
raise ValueError("x must be at most 2d")
if y.ndim > 1:
raise ValueError("only univariate discrete variables supported")
if not np.issubdtype(y.dtype, np.integer):
raise ValueError("y should be an integer array")
nvarx, ntrl = x.shape
if y.size != ntrl:
raise ValueError("number of trials do not match")
# copula normalization
cx = copnorm_nd(x, axis=1)
# parametric Gaussian MI
return mi_model_1d_gd(cx, y, True, True)
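# A minimal usage sketch (illustrative only; the class separation is an
# arbitrary assumption). y must be an integer array, and drawing x with a
# class-dependent mean should yield an MI estimate clearly above zero:
#
#     rng = np.random.default_rng(0)
#     y = rng.integers(0, 2, 10000)
#     x = y + rng.standard_normal(10000)
#     gcmi_model_1d_cd(x, y)   # positive, a fraction of a bit for this separation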
def mi_mixture_1d_gd(x, y):
"""Mutual information between a Gaussian and a discrete variable in bits.
This method evaluates MI from a Gaussian mixture.
I = mi_mixture_gd(x,y) returns the MI between the (possibly
multidimensional) Gaussian variable x and the discrete variable y.
Parameters
----------
x, y : array_like
Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y
must be an array of integers
Returns
-------
i : float
Information shared by x and y (in bits)
"""
x, y = np.atleast_2d(x), np.squeeze(y)
if x.ndim > 2:
raise ValueError("x must be at most 2d")
if y.ndim > 1:
raise ValueError("only univariate discrete variables supported")
if not np.issubdtype(y.dtype, np.integer):
raise ValueError("y should be an integer array")
nvarx, ntrl = x.shape
ym = np.unique(y)
if y.size != ntrl:
raise ValueError("number of trials do not match")
# class-conditional entropies
ntrl_y = np.zeros((len(ym),))
hcond = np.zeros((len(ym),))
m = np.zeros((len(ym), nvarx))
w = np.zeros((len(ym),))
cc = .5 * (np.log(2. * np.pi) + 1.)
"""
The pymaf submodule is designed for working with MAF files. It implements
``pymaf.MafFrame`` which stores MAF data as ``pandas.DataFrame`` to allow
fast computation and easy manipulation. The ``pymaf.MafFrame`` class also
contains many useful plotting methods such as ``MafFrame.plot_oncoplot`` and
``MafFrame.plot_summary``. The submodule strictly adheres to the
standard `MAF specification
<https://docs.gdc.cancer.gov/Data/File_Formats/MAF_Format/>`_.
A typical MAF file contains many columns ranging from gene symbol to
protein change. However, most of the analysis in pymaf uses the
following columns:
+-----+------------------------+----------------------+-------------------------------+
| No. | Name | Description | Examples |
+=====+========================+======================+===============================+
| 1 | Hugo_Symbol | HUGO gene symbol | 'TP53', 'Unknown' |
+-----+------------------------+----------------------+-------------------------------+
| 2 | Chromosome | Chromosome name | 'chr1', '1', 'X' |
+-----+------------------------+----------------------+-------------------------------+
| 3 | Start_Position | Start coordinate | 119031351 |
+-----+------------------------+----------------------+-------------------------------+
| 4 | End_Position | End coordinate | 44079555 |
+-----+------------------------+----------------------+-------------------------------+
| 5 | Variant_Classification | Translational effect | 'Missense_Mutation', 'Silent' |
+-----+------------------------+----------------------+-------------------------------+
| 6 | Variant_Type | Mutation type | 'SNP', 'INS', 'DEL' |
+-----+------------------------+----------------------+-------------------------------+
| 7 | Reference_Allele | Reference allele | 'T', '-', 'ACAA' |
+-----+------------------------+----------------------+-------------------------------+
| 8 | Tumor_Seq_Allele1 | First tumor allele | 'A', '-', 'TCA' |
+-----+------------------------+----------------------+-------------------------------+
| 9 | Tumor_Seq_Allele2 | Second tumor allele | 'A', '-', 'TCA' |
+-----+------------------------+----------------------+-------------------------------+
| 10 | Tumor_Sample_Barcode | Sample ID | 'TCGA-AB-3002' |
+-----+------------------------+----------------------+-------------------------------+
| 11 | Protein_Change | Protein change | 'p.L558Q' |
+-----+------------------------+----------------------+-------------------------------+
It is also recommended to include additional custom columns such as variant
allele frequency (VAF) and transcript name.
If sample annotation data are available for a given MAF file, use
the :class:`common.AnnFrame` class to import the data.
There are nine nonsynonymous variant classifications that pymaf primarily
uses: Missense_Mutation, Frame_Shift_Del, Frame_Shift_Ins, In_Frame_Del,
In_Frame_Ins, Nonsense_Mutation, Nonstop_Mutation, Splice_Site, and
Translation_Start_Site.
"""
import re
import copy
import warnings
import itertools
from . import pyvcf, common
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from matplotlib_venn import venn2, venn3
from scipy.stats import fisher_exact
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.gridspec as gridspec
CHROM_LENGTHS = {
'hg18': [
247249719, 242951149, 199501827, 191273063, 180857866, 170899992,
158821424, 146274826, 140273252, 135374737, 134452384, 132349534,
114142980, 106368585, 100338915, 88827254, 78774742, 76117153,
63811651, 62435964, 46944323, 49691432, 154913754, 57772954
],
'hg19': [
249250621, 243199373, 198022430, 191154276, 180915260, 171115067,
159138663, 146364022, 141213431, 135534747, 135006516, 133851895,
115169878, 107349540, 102531392, 90354753, 81195210, 78077248,
59128983, 63025520, 48129895, 51304566, 155270560, 59373566
],
'hg38': [
248956422, 242193529, 198295559, 190214555, 181538259, 170805979,
159345973, 145138636, 138394717, 133797422, 135086622, 133275309,
114364328, 107043718, 101991189, 90338345, 83257441, 80373285,
58617616, 64444167, 46709983, 50818468, 156040895, 57227415
],
}
COMMON_COLUMNS = [
'Hugo_Symbol', 'Entrez_Gene_Id', 'Center', 'NCBI_Build', 'Chromosome',
'Start_Position', 'End_Position', 'Strand', 'Variant_Classification',
'Variant_Type', 'Reference_Allele', 'Tumor_Seq_Allele1',
'Tumor_Seq_Allele2', 'Tumor_Sample_Barcode', 'Protein_Change'
]
# Below is the list of calculated variant consequences from Ensembl VEP:
# https://m.ensembl.org/info/genome/variation/prediction/predicted_data.html
# (accessed on 2021-05-31)
#
# Note that both frameshift_variant and protein_altering_variant require
# additional information to find their correct Variant_Classification.
VEP_CONSEQUENCES = {
'transcript_ablation': 'Splice_Site',
'splice_acceptor_variant': 'Splice_Site',
'splice_donor_variant': 'Splice_Site',
'stop_gained': 'Nonsense_Mutation',
'frameshift_variant': 'AMBIGUOUS',
'stop_lost': 'Nonstop_Mutation',
'start_lost': 'Translation_Start_Site',
'transcript_amplification': 'Intron',
'inframe_insertion': 'In_Frame_Ins',
'inframe_deletion': 'In_Frame_Del',
'missense_variant': 'Missense_Mutation',
'protein_altering_variant': 'AMBIGUOUS',
'splice_region_variant': 'Splice_Region',
'incomplete_terminal_codon_variant': 'Silent',
'start_retained_variant': 'Silent',
'stop_retained_variant': 'Silent',
'synonymous_variant': 'Silent',
'coding_sequence_variant': 'Missense_Mutation',
'mature_miRNA_variant': 'RNA',
'5_prime_UTR_variant': "5'UTR",
'3_prime_UTR_variant': "3'UTR",
'non_coding_transcript_exon_variant': 'RNA',
'intron_variant': 'Intron',
'NMD_transcript_variant': 'Silent',
'non_coding_transcript_variant': 'RNA',
'upstream_gene_variant': "5'Flank",
'downstream_gene_variant': "3'Flank",
'TFBS_ablation': 'Targeted_Region',
'TFBS_amplification': 'Targeted_Region',
'TF_binding_site_variant': 'IGR',
'regulatory_region_ablation': 'Targeted_Region',
'regulatory_region_amplification': 'Targeted_Region',
'feature_elongation': 'Targeted_Region',
'regulatory_region_variant': 'IGR',
'feature_truncation': 'Targeted_Region',
'intergenic_variant': 'IGR',
}
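# For example, the two 'AMBIGUOUS' consequences above are resolved later (in
# MafFrame.from_vcf) using the Variant_Type and the in-frame status, roughly:
#
#     # frameshift_variant       -> Frame_Shift_Del / Frame_Shift_Ins
#     # protein_altering_variant -> In_Frame_* if the indel length difference
#     #                             is a multiple of 3, else Frame_Shift_*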
VARCLS_LIST = [
"3'Flank",
"3'UTR",
"5'Flank",
"5'UTR",
'De_novo_Start_InFrame',
'De_novo_Start_OutOfFrame',
'Frame_Shift_Del',
'Frame_Shift_Ins',
'IGR',
'In_Frame_Del',
'In_Frame_Ins',
'Intron',
'Missense_Mutation',
'Nonsense_Mutation',
'Nonstop_Mutation',
'RNA',
'Silent',
'Splice_Region',
'Splice_Site',
'Start_Codon_Ins',
'Start_Codon_SNP',
'Stop_Codon_Del',
'Targeted_Region',
'Translation_Start_Site',
'lincRNA',
]
NONSYN_NAMES = [
'Missense_Mutation', 'Frame_Shift_Del', 'Frame_Shift_Ins',
'In_Frame_Del', 'In_Frame_Ins', 'Nonsense_Mutation',
'Nonstop_Mutation', 'Splice_Site', 'Translation_Start_Site'
]
NONSYN_COLORS = [
'tab:green', 'tab:blue', 'tab:purple', 'tab:olive', 'tab:red',
'tab:cyan', 'tab:pink', 'tab:orange', 'tab:brown'
]
SNV_CLASSES = {
'A>C': {'class': 'T>G', 'type': 'Tv'},
'A>G': {'class': 'T>C', 'type': 'Ti'},
'A>T': {'class': 'T>A', 'type': 'Tv'},
'C>A': {'class': 'C>A', 'type': 'Tv'},
'C>G': {'class': 'C>G', 'type': 'Tv'},
'C>T': {'class': 'C>T', 'type': 'Ti'},
'G>A': {'class': 'C>T', 'type': 'Ti'},
'G>C': {'class': 'C>G', 'type': 'Tv'},
'G>T': {'class': 'C>A', 'type': 'Tv'},
'T>A': {'class': 'T>A', 'type': 'Tv'},
'T>C': {'class': 'T>C', 'type': 'Ti'},
'T>G': {'class': 'T>G', 'type': 'Tv'},
}
SNV_CLASS_ORDER = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']
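# For example, a G>A substitution is reported on the pyrimidine strand as the
# C>T class and counted as a transition (Ti), while G>T maps to C>A and is
# counted as a transversion (Tv):
#
#     SNV_CLASSES['G>A']   # {'class': 'C>T', 'type': 'Ti'}
#     SNV_CLASSES['G>T']   # {'class': 'C>A', 'type': 'Tv'}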
class MafFrame:
"""Class for storing MAF data.
Parameters
----------
df : pandas.DataFrame
DataFrame containing MAF data.
See Also
--------
MafFrame.from_file
Construct MafFrame from a MAF file.
"""
def __init__(self, df):
self.df = df.reset_index(drop=True)
@property
def shape(self):
"""tuple : Dimensionality of MafFrame (variants, samples)."""
return (self.df.shape[0], len(self.samples))
@property
def samples(self):
"""list : List of the sample names."""
return list(self.df.Tumor_Sample_Barcode.unique())
@property
def genes(self):
"""list : List of the genes."""
return list(self.df.Hugo_Symbol.unique())
def copy(self):
"""Return a copy of the MafFrame."""
return self.__class__(self.df.copy())
def compute_clonality(self, vaf_col, threshold=0.25):
"""
Compute the clonality of variants based on
:ref:`VAF <glossary:Variant allele frequency (VAF)>`.
A mutation will be defined as "Subclonal" if the VAF is less than the
threshold percentage (e.g. 25%) of the highest VAF in the sample and
is defined as "Clonal" if it is equal to or above this threshold.
Parameters
----------
vaf_col : str
MafFrame column containing VAF data.
threshold : float
Minimum VAF to be considered as "Clonal".
Returns
-------
panda.Series
Clonality for each variant.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.df['Clonality'] = mf.compute_clonality('i_TumorVAF_WU')
>>> mf.df['Clonality'][:10]
0 Clonal
1 Clonal
2 Clonal
3 Clonal
4 Clonal
5 Clonal
6 Clonal
7 Clonal
8 Clonal
9 Clonal
Name: Clonality, dtype: object
"""
d = self.df.groupby('Tumor_Sample_Barcode')[vaf_col].max().to_dict()
def one_row(r):
m = d[r.Tumor_Sample_Barcode]
if r[vaf_col] < m * threshold:
result = 'Subclonal'
else:
result = 'Clonal'
return result
s = self.df.copy().apply(one_row, axis=1)
return s
@classmethod
def from_file(cls, fn):
"""
Construct MafFrame from a MAF file.
Parameters
----------
fn : str
MAF file (compressed or uncompressed).
Returns
-------
MafFrame
MafFrame object.
See Also
--------
MafFrame
MafFrame object creation using constructor.
Examples
--------
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
"""
# Read the input MAF file.
df = pd.read_table(fn)
# Check the letter case of column names. This will help distinguish
# missing columns from columns with incorrect letter case (e.g.
# 'End_Position' vs. 'End_position').
lower_names = [x.lower() for x in COMMON_COLUMNS]
for col in df.columns:
if col.lower() in lower_names:
i = lower_names.index(col.lower())
if col != COMMON_COLUMNS[i]:
message = (
f"Input column '{col}' will be renamed "
f"as '{COMMON_COLUMNS[i]}'."
)
warnings.warn(message)
df = df.rename(columns={col: COMMON_COLUMNS[i]})
# Set the data type of chromosomes as string (e.g. 'chr1' vs. '1').
if 'Chromosome' in df.columns:
df.Chromosome = df.Chromosome.astype(str)
return cls(df)
@classmethod
def from_vcf(cls, vcf, keys=None, names=None):
"""
Construct MafFrame from a VCF file or VcfFrame.
It is recommended that the input VCF data be functionally annotated
by an annotation tool such as Ensembl VEP, SnpEff, and ANNOVAR;
however, the method can handle unannotated VCF data as well.
The preferred tool for functional annotation is Ensembl VEP with
"RefSeq transcripts" as the transcript database and the filtering
option "Show one selected consequence per variant".
Parameters
----------
vcf : str or VcfFrame
VCF file or VcfFrame.
keys : str or list
Genotype key (e.g. 'AD', 'AF') or list of genotype keys to be
added to the MafFrame.
names : str or list
Column name or list of column names for ``keys`` (must be the
same length). By default, the genotype keys will be used as
column names.
Examples
--------
Below is a simple example:
>>> from fuc import pyvcf, pymaf
>>> data = {
... 'CHROM': ['chr1', 'chr2'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['CSQ=T|missense_variant|MODERATE|MTOR|2475|Transcript|NM_001386500.1|protein_coding|47/58||||6792|6644|2215|S/Y|tCt/tAt|rs587777894&COSV63868278&COSV63868313||-1||EntrezGene||||||||G|G||deleterious(0)|possibly_damaging(0.876)||||||||||||||||||likely_pathogenic&pathogenic|0&1&1|1&1&1|26619011&27159400&24631838&26018084&27830187|||||', 'CSQ=C|splice_donor_variant|HIGH|MTOR|2475|Transcript|NM_001386500.1|protein_coding||46/57||||||||||-1||EntrezGene||||||||A|A|||||||||||||||||||||||||||||'],
... 'FORMAT': ['GT:AD:DP:AF', 'GT:AD:DP:AF'],
... 'A': ['0/1:176,37:213:0.174', '0/1:966,98:1064:0.092']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . CSQ=T|missense_variant|MODERATE|MTOR|2475|Tran... GT:AD:DP:AF 0/1:176,37:213:0.174
1 chr2 101 . T C . . CSQ=C|splice_donor_variant|HIGH|MTOR|2475|Tran... GT:AD:DP:AF 0/1:966,98:1064:0.092
>>> mf = pymaf.MafFrame.from_vcf(vf)
>>> mf.df
Hugo_Symbol Entrez_Gene_Id Center NCBI_Build Chromosome Start_Position End_Position Strand Variant_Classification Variant_Type Reference_Allele Tumor_Seq_Allele1 Tumor_Seq_Allele2 Protein_Change Tumor_Sample_Barcode
0 MTOR 2475 . . chr1 100 100 - Missense_Mutation SNP G A A p.S2215Y A
1 MTOR 2475 . . chr2 101 101 - Splice_Site SNP T C C . A
We can add genotype keys such as AD and AF:
>>> mf = pymaf.MafFrame.from_vcf(vf, keys=['AD', 'AF'])
>>> mf.df
Hugo_Symbol Entrez_Gene_Id Center NCBI_Build Chromosome Start_Position End_Position Strand Variant_Classification Variant_Type Reference_Allele Tumor_Seq_Allele1 Tumor_Seq_Allele2 Protein_Change Tumor_Sample_Barcode AD AF
0 MTOR 2475 . . chr1 100 100 - Missense_Mutation SNP G A A p.S2215Y A 176,37 0.174
1 MTOR 2475 . . chr2 101 101 - Splice_Site SNP T C C . A 966,98 0.092
The method can accept a VCF file as input instead of VcfFrame:
>>> mf = pymaf.MafFrame.from_vcf('annotated.vcf')
The method can also handle unannotated VCF data:
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 200, 300],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'C', 'TTC'],
... 'ALT': ['A', 'CAG', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'A': ['0/1', '0/1', '0/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . . GT 0/1
1 chr1 200 . C CAG . . . GT 0/1
2 chr1 300 . TTC T . . . GT 0/1
>>> mf = pymaf.MafFrame.from_vcf(vf)
>>> mf.df
Hugo_Symbol Entrez_Gene_Id Center NCBI_Build Chromosome Start_Position End_Position Strand Variant_Classification Variant_Type Reference_Allele Tumor_Seq_Allele1 Tumor_Seq_Allele2 Protein_Change Tumor_Sample_Barcode
0 . . . . chr1 100 100 . . SNP G A A . A
1 . . . . chr1 200 201 . . INS - AG AG . A
2 . . . . chr1 301 302 . . DEL TC - - . A
"""
# Parse the input VCF.
if isinstance(vcf, str):
vf = pyvcf.VcfFrame.from_file(vcf)
else:
vf = vcf
# Set some default values in case the VCF is not annotated.
ncbi_build = '.'
# Get the NCBI_Build data.
for line in vf.meta:
if line.startswith('##VEP'):
ncbi_build = re.search(r'assembly="(.*?)"', line).group(1)
break
# Define the conversion algorithm.
def one_row(r):
has_annot = 'CSQ=' in r.INFO
# Set some default values in case the VCF is not annotated.
strand = '.'
variant_classification = '.'
protein_change = '.'
hugo_symbol = '.'
entrez_gene_id = '.'
# Get the sequence data.
# An indel is in-frame when the REF/ALT length difference is a multiple of 3.
inframe = abs(len(r.REF) - len(r.ALT)) % 3 == 0
if len(r.REF) == len(r.ALT) == 1:
variant_type = 'SNP'
start_position = r.POS
end_position = r.POS
reference_allele = r.REF
tumor_seq_allele1 = r.ALT
tumor_seq_allele2 = r.ALT
elif len(r.REF) > len(r.ALT):
variant_type = 'DEL'
start_position = r.POS + 1
end_position = r.POS + len(r.REF) - len(r.ALT)
reference_allele = r.REF[1:]
tumor_seq_allele1 = '-'
tumor_seq_allele2 = '-'
else:
variant_type = 'INS'
start_position = r.POS
end_position = r.POS + 1
reference_allele = '-'
tumor_seq_allele1 = r.ALT[1:]
tumor_seq_allele2 = r.ALT[1:]
fields = r.INFO.replace('CSQ=', '').split(',')[0].split('|')
# Get the Strand data.
if has_annot:
strand = '+' if fields[19] == '1' else '-'
# Get the Variant_Classification data.
if has_annot:
consequence = fields[1].split('&')[0]
if consequence == 'frameshift_variant':
if variant_type == 'DEL':
variant_classification = 'Frame_Shift_Del'
else:
variant_classification = 'Frame_Shift_Ins'
elif consequence == 'protein_altering_variant':
if inframe:
if variant_type == 'DEL':
variant_classification = 'In_Frame_Del'
else:
variant_classification = 'In_Frame_Ins'
else:
if variant_type == 'DEL':
variant_classification = 'Frame_Shift_Del'
else:
variant_classification = 'Frame_Shift_Ins'
elif consequence in VEP_CONSEQUENCES:
variant_classification = VEP_CONSEQUENCES[consequence]
else:
m = f'Found unknown Ensembl VEP consequence: {consequence}'
raise ValueError(m)
# Get the Tumor_Sample_Barcode data.
s = r[9:].apply(pyvcf.gt_hasvar)
tumor_sample_barcode = ','.join(s[s].index.to_list())
# Get the Protein_Change data.
if has_annot:
pos = fields[14]
aa = fields[15].split('/')
if len(aa) > 1:
protein_change = f'p.{aa[0]}{pos}{aa[1]}'
# Get other data.
if has_annot:
hugo_symbol = fields[3]
entrez_gene_id = fields[4]
d = dict(
Hugo_Symbol = hugo_symbol,
Entrez_Gene_Id = entrez_gene_id,
Center = '.',
NCBI_Build = ncbi_build,
Chromosome = r.CHROM,
Start_Position = start_position,
End_Position = end_position,
Strand = strand,
Variant_Classification = variant_classification,
Variant_Type = variant_type,
Reference_Allele = reference_allele,
Tumor_Seq_Allele1 = tumor_seq_allele1,
Tumor_Seq_Allele2 = tumor_seq_allele2,
Tumor_Sample_Barcode = tumor_sample_barcode,
Protein_Change = protein_change,
CHROM = r.CHROM, # will be dropped
POS = r.POS, # will be dropped
REF = r.REF, # will be dropped
ALT = r.ALT, # will be dropped
)
return pd.Series(d)
# Apply the conversion algorithm.
df = vf.df.apply(one_row, axis=1)
# Expand the Tumor_Sample_Barcode column to multiple rows.
s = df['Tumor_Sample_Barcode'].str.split(',').apply(
pd.Series, 1).stack()
s.index = s.index.droplevel(-1)
s.name = 'Tumor_Sample_Barcode'
del df['Tumor_Sample_Barcode']
df = df.join(s)
# Append extra genotype keys, if necessary.
if keys is not None:
if names is None:
names = keys
if isinstance(keys, str):
keys = [keys]
if isinstance(names, str):
names = [names]
for i, key in enumerate(keys):
temp_df = vf.extract_format(key)
temp_df = pd.concat([vf.df.iloc[:, :9], temp_df], axis=1)
temp_df = temp_df.drop(
columns=['ID', 'QUAL', 'FILTER', 'INFO', 'FORMAT'])
temp_df = pd.melt(
temp_df,
id_vars=['CHROM', 'POS', 'REF', 'ALT'],
var_name='Tumor_Sample_Barcode',
)
temp_df = temp_df[temp_df.value != '.']
df = df.merge(temp_df,
on=['CHROM', 'POS', 'REF', 'ALT', 'Tumor_Sample_Barcode'])
df = df.rename(columns={'value': names[i]})
# Drop the extra columns.
df = df.drop(columns=['CHROM', 'POS', 'REF', 'ALT'])
return cls(df)
def matrix_prevalence(self):
"""
Compute a matrix of variant counts with a shape of (genes, samples).
Returns
-------
pandas.DataFrame
The said matrix.
"""
s = self.df.groupby(
'Hugo_Symbol')['Tumor_Sample_Barcode'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index='Hugo_Symbol',
columns='Tumor_Sample_Barcode', values='Count')
df.columns.name = ''
df = df.fillna(0)
return df
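# A toy illustration (hypothetical data; only the Hugo_Symbol and
# Tumor_Sample_Barcode columns matter here):
#
#     toy = pd.DataFrame({
#         'Hugo_Symbol': ['TP53', 'TP53', 'KRAS'],
#         'Tumor_Sample_Barcode': ['S1', 'S2', 'S1'],
#     })
#     MafFrame(toy).matrix_prevalence()
#     # -> a (genes x samples) table of variant counts with zeros filled in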
def matrix_genes(self, mode='variants', count=10):
"""
Compute a matrix of counts with a shape of (genes, variant
classifications).
This method only considers the nine nonsynonymous variant
classifications.
Parameters
----------
mode : {'variants', 'samples'}, default: 'variants'
Determines how to identify top mutated genes:
* 'variants': Count the number of observed variants.
* 'samples': Count the number of affected samples. Using this
option will create an additional variant classification called
'Multi_Hit'.
count : int, default: 10
Number of top mutated genes to include.
Returns
-------
pandas.DataFrame
The said matrix.
"""
if mode == 'variants':
df = self.df[self.df.Variant_Classification.isin(NONSYN_NAMES)]
df = df.groupby('Hugo_Symbol')[
'Variant_Classification'].value_counts().to_frame()
df.columns = ['Count']
df = df.reset_index()
df = df.pivot(index='Hugo_Symbol', columns='Variant_Classification',
values='Count')
df = df.fillna(0)
for varcls in NONSYN_NAMES:
if varcls not in df.columns:
df[varcls] = 0
i = df.sum(axis=1).sort_values(ascending=False).index
df = df.reindex(index=i)
df = df[NONSYN_NAMES]
df = df[:count]
df = df.rename_axis(None, axis=1)
elif mode == 'samples':
df = self.matrix_waterfall(count)
df = df.apply(lambda r: r.value_counts(), axis=1)
for varcls in NONSYN_NAMES + ['Multi_Hit']:
if varcls not in df.columns:
df[varcls] = np.nan
df = df[NONSYN_NAMES + ['Multi_Hit']]
df = df.fillna(0)
else:
raise ValueError(f'Found incorrect mode: {mode}')
return df
def matrix_tmb(self):
"""
Compute a matrix of variant counts with a shape of (samples, variant
classifications).
Returns
-------
pandas.DataFrame
The said matrix.
"""
df = self.df[self.df.Variant_Classification.isin(NONSYN_NAMES)]
df = df.groupby('Tumor_Sample_Barcode')[
'Variant_Classification'].value_counts().to_frame()
df.columns = ['Count']
df = df.reset_index()
df = df.pivot(index='Tumor_Sample_Barcode',
columns='Variant_Classification', values='Count')
df = df.fillna(0)
for varcls in NONSYN_NAMES:
if varcls not in df.columns:
df[varcls] = 0
i = df.sum(axis=1).sort_values(ascending=False).index
df = df.reindex(index=i)
df = df[NONSYN_NAMES]
df = df.rename_axis(None, axis=1)
return df
def matrix_waterfall(self, count=10, keep_empty=False):
"""
Compute a matrix of variant classifications with a shape of
(genes, samples).
If there are multiple variant classifications available for a given
cell, they will be collapsed into 'Multi_Hit'.
Parameters
----------
count : int, default: 10
Number of top mutated genes to include.
keep_empty : bool, default: False
If True, keep samples with all ``NaN``'s.
Returns
-------
pandas.DataFrame
The said matrix.
"""
df = self.df[self.df.Variant_Classification.isin(NONSYN_NAMES)]
f = lambda x: ''.join(x) if len(x) == 1 else 'Multi_Hit'
df = df.groupby(['Hugo_Symbol', 'Tumor_Sample_Barcode'])[
'Variant_Classification'].apply(f).to_frame()
df = df.reset_index()
df = df.pivot(index='Hugo_Symbol', columns='Tumor_Sample_Barcode',
values='Variant_Classification')
# Sort the rows (genes).
i = df.isnull().sum(axis=1).sort_values(ascending=True).index
df = df.reindex(index=i)
# Select the top mutated genes.
df = df[:count]
# Drop samples with all NaN's.
if not keep_empty:
df = df.dropna(axis=1, how='all')
# Sort the columns (samples).
c = df.applymap(lambda x: 0 if pd.isnull(x) else 1).sort_values(
df.index.to_list(), axis=1, ascending=False).columns
df = df[c]
df = df.fillna('None')
df = df.rename_axis(None, axis=1)
return df
def plot_genes(
self, mode='variants', count=10, flip=False, ax=None, figsize=None,
**kwargs
):
"""
Create a bar plot showing variant distribution for top mutated genes.
Parameters
----------
mode : {'variants', 'samples'}, default: 'variants'
Determines how to identify top mutated genes:
* 'variants': Count the number of observed variants.
* 'samples': Count the number of affected samples. Using this
option will create an additional variant classification called
'Multi_Hit'.
count : int, default: 10
Number of top mutated genes to display.
flip : bool, default: False
If True, flip the x and y axes.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`pandas.DataFrame.plot.bar` or
:meth:`pandas.DataFrame.plot.barh`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
By default (``mode='variants'``), the method identifies top mutated
genes by counting the number of observed variants:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_genes()
>>> plt.tight_layout()
We can also identify top mutated genes by counting the number of
affected samples:
.. plot::
:context: close-figs
>>> mf.plot_genes(mode='samples')
>>> plt.tight_layout()
"""
if mode == 'variants':
colors = NONSYN_COLORS
elif mode == 'samples':
colors = NONSYN_COLORS + ['k']
else:
raise ValueError(f'Found incorrect mode: {mode}')
df = self.matrix_genes(count=count, mode=mode)
df = df.iloc[::-1]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if flip:
df = df.iloc[::-1]
kind = 'bar'
xlabel, ylabel = '', 'Count'
else:
kind = 'barh'
xlabel, ylabel = 'Count', ''
df.plot(
kind=kind, ax=ax, stacked=True, legend=False,
color=colors, **kwargs
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_oncoplot(
self, count=10, keep_empty=False, figsize=(15, 10), label_fontsize=15,
ticklabels_fontsize=15, legend_fontsize=15
):
"""
Create an oncoplot.
See this :ref:`tutorial <tutorials:Create customized oncoplots>` to
learn how to create customized oncoplots.
Parameters
----------
count : int, default: 10
Number of top mutated genes to display.
keep_empty : bool, default: False
If True, display samples that do not have any mutations.
figsize : tuple, default: (15, 10)
Width, height in inches. Format: (float, float).
label_fontsize : float, default: 15
Font size of labels.
ticklabels_fontsize : float, default: 15
Font size of tick labels.
legend_fontsize : float, default: 15
Font size of legend texts.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_oncoplot()
"""
g = {'height_ratios': [1, 10, 1], 'width_ratios': [10, 1]}
fig, axes = plt.subplots(3, 2, figsize=figsize, gridspec_kw=g)
[[ax1, ax2], [ax3, ax4], [ax5, ax6]] = axes
# Create the TMB plot.
samples = list(self.matrix_waterfall(count=count,
keep_empty=keep_empty).columns)
self.plot_tmb(ax=ax1, samples=samples, width=0.95)
ax1.set_xlabel('')
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.set_xlim(-0.5, len(samples)-0.5)
ax1.set_ylabel('TMB', fontsize=label_fontsize)
ax1.set_yticks([0, self.matrix_tmb().sum(axis=1).max()])
ax1.tick_params(axis='y', which='major',
labelsize=ticklabels_fontsize)
# Remove the top right plot.
ax2.remove()
# Create the waterfall plot.
self.plot_waterfall(count=count, ax=ax3, linewidths=1, keep_empty=keep_empty)
ax3.set_xlabel('')
ax3.tick_params(axis='y', which='major', labelrotation=0,
labelsize=ticklabels_fontsize)
# Create the genes plot.
self.plot_genes(count=count, ax=ax4, mode='samples', width=0.95)
ax4.spines['right'].set_visible(False)
ax4.spines['left'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.set_yticks([])
ax4.set_xlabel('Samples', fontsize=label_fontsize)
ax4.set_xticks([0, self.matrix_genes(
count=count, mode='samples').sum(axis=1).max()])
ax4.set_ylim(-0.5, count-0.5)
ax4.tick_params(axis='x', which='major',
labelsize=ticklabels_fontsize)
# Create the legend.
handles = common.legend_handles(NONSYN_NAMES+['Multi_Hit'],
colors=NONSYN_COLORS+['k'])
ax5.legend(
handles=handles,
title='Variant_Classification',
loc='upper center',
ncol=4,
fontsize=legend_fontsize,
title_fontsize=legend_fontsize
)
ax5.axis('off')
# Remove the bottom right plot.
ax6.remove()
plt.tight_layout()
plt.subplots_adjust(wspace=0.01, hspace=0.01)
def plot_oncoplot_matched(
self, af, patient_col, group_col, group_order, colors='Set2',
figsize=(15, 10), label_fontsize=12, ticklabels_fontsize=12,
legend_fontsize=12
):
"""
Create an oncoplot for matched samples.
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
patient_col : str
AnnFrame column containing patient information.
group_col : str
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
colors : str
Colormap name for the sample groups.
figsize : tuple, default: (15, 10)
Width, height in inches. Format: (float, float).
label_fontsize : float, default: 12
Font size of labels.
ticklabels_fontsize : float, default: 12
Font size of tick labels.
legend_fontsize : float, default: 12
Font size of legend texts.
"""
fig, axes = plt.subplots(3, 2, figsize=figsize,
gridspec_kw={'height_ratios': [1, 10, 1.5], 'width_ratios': [10, 1]}
)
[[ax1, ax2], [ax3, ax4], [ax5, ax6]] = axes
patients = self.matrix_waterfall_matched(af, patient_col, group_col, group_order).columns
self.plot_tmb_matched(
af, patient_col, group_col, group_order=group_order, ax=ax1,
legend=False, patients=patients, width=0.90,
color=sns.color_palette(colors)[:3]
)
ax1.set_xticks([])
ax1.set_xlim(-0.5, len(patients)-0.5)
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.set_ylabel('TMB', fontsize=label_fontsize)
ax1.tick_params(axis='y', which='major',
labelsize=ticklabels_fontsize)
ax2.remove()
self.plot_waterfall_matched(af, patient_col, group_col, group_order=group_order, ax=ax3)
ax3.set_xticks([])
ax3.tick_params(axis='y', which='major', labelrotation=0,
labelsize=ticklabels_fontsize)
self.plot_mutated_matched(
af, patient_col, group_col, group_order=group_order, ax=ax4, palette=colors
)
ax4.set_yticks([])
ax4.legend().remove()
ax4.spines['right'].set_visible(False)
ax4.spines['left'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.tick_params(axis='x', which='major',
labelsize=ticklabels_fontsize)
ax4.set_xlabel('Patients', fontsize=label_fontsize)
# Create the legends.
handles1 = common.legend_handles(NONSYN_NAMES+['Multi_Hit'],
colors=NONSYN_COLORS+['k'])
handles2 = common.legend_handles(group_order, colors=colors)
leg1 = ax5.legend(handles=handles1, loc=(0, 0), title='Variant_Classification', ncol=4, fontsize=legend_fontsize, title_fontsize=legend_fontsize)
leg2 = ax5.legend(handles=handles2, loc=(0.8, 0), title=group_col, fontsize=legend_fontsize, title_fontsize=legend_fontsize)
ax5.add_artist(leg1)
ax5.add_artist(leg2)
ax5.axis('off')
# Remove the bottom right plot.
ax6.remove()
plt.tight_layout()
plt.subplots_adjust(wspace=0.01, hspace=0.01)
def plot_clonality(
self, vaf_col, af=None, group_col=None, group_order=None, count=10,
threshold=0.25, subclonal=False, ax=None, figsize=None
):
"""
Create a bar plot summarizing the clonality of variants in top
mutated genes.
Clonality will be calculated based on VAF using
:meth:`MafFrame.compute_clonality`.
Parameters
----------
vaf_col : str
MafFrame column containing VAF data.
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
count : int, default: 10
Number of top mutated genes to display.
threshold : float, default: 0.25
VAF threshold percentage.
subclonal : bool, default: False
If True, display subclonality (1 - clonality).
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
See Also
--------
MafFrame.compute_clonality
Compute the clonality of variants based on VAF.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_clonality('i_TumorVAF_WU')
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_clonality('i_TumorVAF_WU',
... af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
df = self.df.copy()
df['Clonality'] = self.compute_clonality(vaf_col, threshold=threshold)
if group_col is None:
s = df.groupby('Hugo_Symbol')['Clonality'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index='Hugo_Symbol', columns='Clonality', values='Count')
else:
df = df.merge(af.df[group_col], left_on='Tumor_Sample_Barcode', right_index=True)
s = df.groupby(['Hugo_Symbol', group_col])['Clonality'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index=['Hugo_Symbol', group_col], columns='Clonality', values='Count')
df = df.reset_index()
df = df.fillna(0)
l = ['Clonal', 'Subclonal']
df[l] = df[l].div(df[l].sum(axis=1), axis=0)
genes = self.matrix_genes(count=count).index
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if subclonal:
y = 'Subclonal'
else:
y = 'Clonal'
sns.barplot(
x='Hugo_Symbol', y=y, data=df, order=genes, hue=group_col,
hue_order=group_order, ax=ax
)
ax.set_xlabel('')
return ax
def plot_evolution(
self, samples, vaf_col, anchor=None, normalize=True, count=5,
ax=None, figsize=None, **kwargs
):
"""
Create a line plot visualizing changes in VAF between specified
samples.
Parameters
----------
samples : list
List of samples to display.
vaf_col : str
MafFrame column containing VAF data.
anchor : str, optional
Sample to use as the anchor. If absent, use the first sample in
the list.
normalize : bool, default: True
If False, do not normalize VAF by the maximum value.
count : int, default: 5
Number of top variants to display.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.lineplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
"""
df = self.df[self.df.Tumor_Sample_Barcode.isin(samples)]
if df.empty:
message = f'No variants to display for the samples: {samples}.'
raise ValueError(message)
df = df[df.Variant_Classification.isin(NONSYN_NAMES)]
def one_row(r):
if r.Protein_Change == '.':
variant_name = f'{r.Hugo_Symbol} ({r.Variant_Classification})'
else:
variant_name = f'{r.Hugo_Symbol} ({r.Protein_Change})'
return variant_name
df['Variant_Name'] = df.apply(one_row, axis=1)
df = df.pivot(index=['Variant_Name'],
columns=['Tumor_Sample_Barcode'], values=[vaf_col])
df.columns = df.columns.get_level_values(1)
df.columns.name = ''
df = df.fillna(0)
for sample in samples:
if sample not in df.columns:
df[sample] = 0
df = df[samples]
if anchor is None:
anchor = samples[0]
df = df.sort_values(by=anchor, ascending=False)
if normalize:
df = df / df.max()
df = df.fillna(0)
df = df.iloc[:count, :].T
df = df.loc[samples]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.lineplot(data=df, ax=ax, **kwargs)
ax.set_ylabel('Fraction')
return ax
def plot_genepair(
self, x, y, vaf_col, af=None, group_col=None, group_order=None,
ax=None, figsize=None, **kwargs
):
"""
Create a scatter plot of VAF between Gene X and Gene Y.
Parameters
----------
x, y : str
Gene names.
vaf_col : str
MafFrame column containing VAF data.
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.scatterplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_genepair('DNMT3A', 'FLT3', 'i_TumorVAF_WU')
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_genepair('DNMT3A', 'FLT3', 'i_TumorVAF_WU',
... af=af,
... group_col='FAB_classification')
>>> plt.tight_layout()
"""
df = self.df[self.df.Hugo_Symbol.isin([x, y])]
df = df[['Tumor_Sample_Barcode', 'Hugo_Symbol', vaf_col]]
df = df.sort_values(vaf_col, ascending=False)
df = df.drop_duplicates(subset=['Tumor_Sample_Barcode', 'Hugo_Symbol'])
df = df.pivot(index='Tumor_Sample_Barcode',
columns='Hugo_Symbol', values=vaf_col)
df = df.fillna(0)
if group_col is not None:
df = df.merge(af.df[group_col], left_index=True, right_index=True)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.scatterplot(
x=x, y=y, data=df, ax=ax, hue=group_col, hue_order=group_order,
**kwargs
)
# Print summary statistics including R-squared and p-value.
results = smf.ols(f'{y} ~ {x}', data=df).fit()
print(f'Results for {y} ~ {x}:')
print(f'R^2 = {results.rsquared:.2f}')
print(f' P = {results.f_pvalue:.2e}')
return ax
def plot_regplot(
self, af, group_col, a, b, a_size=None, b_size=None, genes=None,
count=10, to_csv=None, ax=None, figsize=None, **kwargs
):
"""
Create a scatter plot with a linear regression model fit visualizing
correlation between gene mutation frequencies in two sample groups
A and B.
Each point in the plot represents a gene.
The method will automatically calculate and print summary statistics
including R-squared and p-value.
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
group_col : str
AnnFrame column containing sample group information.
a, b : str
Sample group names.
a_size, b_size : int, optional
Sample group sizes to use as denominator. By default, these are
inferred from the MafFrame and AnnFrame objects.
genes : list, optional
Genes to display. When absent, top mutated genes (``count``) will
be used.
count : int, default: 10
Number of top mutated genes to display. Ignored if ``genes`` is
specified.
to_csv : str, optional
Write the plot's data to a CSV file.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.regplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_regplot(af, 'FAB_classification', 'M1', 'M2')
Results for M2 ~ M1:
R^2 = 0.43
P = 3.96e-02
>>> plt.tight_layout()
"""
df1 = self.matrix_prevalence()
df2 = af.df[af.df.index.isin(df1.columns)]
i_a = df2[df2[group_col] == a].index
i_b = df2[df2[group_col] == b].index
# Determine which genes to display.
if genes is None:
genes = self.matrix_genes(count=count).index.to_list()
# Determine each group's sample size.
if a_size is None:
a_size = len(i_a)
if b_size is None:
b_size = len(i_b)
f = lambda x: 0 if x == 0 else 1
s_a = df1.T.loc[i_a].applymap(f).sum().loc[genes] / a_size
s_b = df1.T.loc[i_b].applymap(f).sum().loc[genes] / b_size
df3 = pd.concat([s_a, s_b], axis=1)
df3.columns = [a, b]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
# Draw the main plot.
sns.regplot(x=a, y=b, data=df3, ax=ax, **kwargs)
# Write the DataFrame to a CSV file.
if to_csv is not None:
df3.to_csv(to_csv)
# Print summary statistics including R-squared and p-value.
results = smf.ols(f'{b} ~ {a}', data=df3).fit()
print(f'Results for {b} ~ {a}:')
print(f'R^2 = {results.rsquared:.2f}')
print(f' P = {results.f_pvalue:.2e}')
return ax
def plot_interactions(
self, count=10, cmap=None, ax=None, figsize=None, **kwargs
):
"""
Create a heatmap representing mutually exclusive or co-occurring sets
of genes.
This method performs a pair-wise Fisher’s exact test to detect such
significant pairs of genes.
Parameters
----------
count : int, default: 10
Number of top mutated genes to display.
cmap : str, optional
Color map.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.heatmap`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_interactions(count=25, cmap='BrBG')
>>> plt.tight_layout()
"""
df = self.matrix_prevalence()
genes = self.matrix_genes(count=count, mode='samples').index.to_list()
df = df.loc[genes]
df = df.applymap(lambda x: True if x else False)
df = df.T
pairs = list(itertools.combinations(genes, 2))
data = []
def one_pair(a, b):
s_a = df[a].to_list()
s_b = df[b].to_list()
ab = 0
AB = 0
aB = 0
Ab = 0
for i in range(len(s_a)):
if s_a[i] and s_b[i]:
AB += 1
elif s_a[i] and not s_b[i]:
Ab += 1
elif not s_a[i] and s_b[i]:
aB += 1
else:
ab += 1
return (ab, AB, aB, Ab)
for pair in pairs:
a = pair[0]
b = pair[1]
ab, AB, aB, Ab = one_pair(a, b)
event = 'Co_Occurence' if AB else 'Mutually_Exclusive'
data.append([a, b, ab, AB, aB, Ab, event])
df = pd.DataFrame(data,
columns=['A', 'B', 'ab', 'AB', 'aB', 'Ab', 'Event'])
def one_row(r):
oddsr, p = fisher_exact([[r.AB, r.aB], [r.Ab, r.ab]],
alternative='two-sided')
return pd.Series([oddsr, p], index=['Odds_Ratio', 'P_Value'])
df = pd.concat([df.apply(one_row, axis=1), df], axis=1)
df = df.sort_values('P_Value')
def one_row(r):
r['Log_P_Value'] = -np.log10(r.P_Value)
if r.P_Value < 0.05:
r['Label'] = '*'
elif r.P_Value < 0.1:
r['Label'] = '.'
else:
r['Label'] = ''
if r.Event == 'Mutually_Exclusive':
r.Log_P_Value *= -1
return r
df = df.apply(one_row, axis=1)
annot = df.pivot(index='A', columns='B', values='Label')
annot = annot.fillna('')
df = df.pivot(index='A', columns='B', values='Log_P_Value')
df = df.fillna(0)
for gene in genes:
if gene not in df.columns:
df[gene] = 0
if gene not in annot.columns:
annot[gene] = ''
df = df.T
annot = annot.T
for gene in genes:
if gene not in df.columns:
df[gene] = 0
if gene not in annot.columns:
annot[gene] = ''
annot = annot[genes]
annot = annot.loc[genes]
df = df[genes]
df = df.loc[genes]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
# Create a mask for the heatmap.
mask = np.triu(np.ones((count, count), dtype=bool))
sns.heatmap(
df, annot=annot, fmt='', cmap=cmap, mask=mask, vmax=3, vmin=-3,
center=0, ax=ax, **kwargs
)
ax.set_xlabel('')
ax.set_ylabel('')
return ax
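# The pair-wise test above builds, for every gene pair (A, B), the 2x2
# contingency table [[AB, aB], [Ab, ab]] of samples mutated in both genes,
# only B, only A, or neither, and feeds it to Fisher's exact test. A
# stand-alone sketch with made-up counts:
#
#     from scipy.stats import fisher_exact
#     oddsr, p = fisher_exact([[10, 2], [3, 85]], alternative='two-sided')
#     # a small p value with oddsr > 1 suggests co-occurrence of A and B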
def plot_lollipop(
self, gene, alpha=0.7, ax=None, figsize=None, legend=True
):
"""
Create a lollipop or stem plot showing amino acid changes of a gene.
Parameters
----------
gene : str
Name of the gene.
alpha : float, default: 0.7
Set the color transparency. Must be within the 0-1 range,
inclusive.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_lollipop('DNMT3A')
>>> plt.tight_layout()
"""
# Only select variants from the gene.
df1 = self.df[self.df.Hugo_Symbol == gene]
# Raise an error if there are no SNVs to plot.
if df1.empty:
raise ValueError(f"No variants to plot for the gene: '{gene}'.")
# Count each amino acid change.
df2 = df1.Protein_Change.value_counts().to_frame().reset_index()
df2.columns = ['Protein_Change', 'Count']
# Identify variant classification for each amino acid change.
df3 = df1[['Protein_Change', 'Variant_Classification']
].drop_duplicates(subset=['Protein_Change'])
df4 = pd.merge(df2, df3, on='Protein_Change')
# Extract amino acid positions. Sort the counts by position.
def one_row(r):
digits = [x for x in r.Protein_Change if x.isdigit()]
if not digits:
return np.nan
return int(''.join(digits))
df4['Protein_Position'] = df4.apply(one_row, axis=1)
df4 = df4.dropna(subset=['Protein_Position'])
df4 = df4.sort_values(['Protein_Position'])
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
for i, nonsyn_name in enumerate(NONSYN_NAMES):
temp = df4[df4.Variant_Classification == nonsyn_name]
color = NONSYN_COLORS[i]
ax.vlines(temp.Protein_Position, ymin=0, ymax=temp.Count,
alpha=alpha, color=color)
ax.plot(temp.Protein_Position, temp.Count, 'o', alpha=alpha,
color=color, label=nonsyn_name)
ax.set_xlabel('Position')
ax.set_ylabel('Count')
if legend:
ax.legend()
return ax
def plot_mutated(
self, af=None, group_col=None, group_order=None, genes=None,
count=10, ax=None, figsize=None
):
"""
Create a bar plot visualizing the mutation prevalence of top
mutated genes.
Parameters
----------
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
genes : list, optional
Genes to display. When absent, top mutated genes (``count``) will
be used.
count : int, default: 10
Number of top mutated genes to display. Ignored if ``genes`` is
specified.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, crete a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_mutated()
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_mutated(af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
df = self.matrix_prevalence()
# Determine which genes to display.
if genes is None:
genes = self.matrix_genes(count=count).index.to_list()
df = df.loc[genes]
df = df.applymap(lambda x: True if x else False)
if group_col is None:
df = (df.sum(axis=1) / df.shape[1]).to_frame().reset_index()
df.columns.values[1] = 'Prevalence'
else:
df = df.T
df = pd.merge(df, af.df[group_col], left_index=True, right_index=True)
df = df.groupby([group_col]).mean().reset_index()
df = df.melt(id_vars=[group_col])
df.columns = [group_col, 'Hugo_Symbol', 'Prevalence']
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.barplot(
x='Hugo_Symbol', y='Prevalence', data=df, hue=group_col,
hue_order=group_order, ax=ax, **kwargs
)
ax.set_xlabel('')
return ax
def plot_mutated_matched(
self, af, patient_col, group_col, group_order, ax=None, figsize=None,
**kwargs
):
"""
Create a bar plot visualizing the mutation prevalence of top
mutated genes.
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
patient_col : str
AnnFrame column containing patient information.
group_col : str
AnnFrame column containing sample group information.
group_order : list
List of sample group names.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
"""
df = self.matrix_waterfall_matched(af, patient_col, group_col, group_order)
df = df.applymap(lambda x: 0 if x == 'None' else 1)
s = df.sum(axis=1) / len(df.columns) * 100
s.name = 'Count'
df = s.to_frame().reset_index()
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.barplot(
x='Count', y='Gene', hue='Group', data=df, hue_order=group_order,
orient='h', ax=ax, **kwargs
)
ax.set_xlabel('Patients (%)')
ax.set_ylabel('')
return ax
def plot_rainfall(
self, sample, palette=None, legend='auto', ax=None, figsize=None,
**kwargs
):
"""
Create a rainfall plot visualizing inter-variant distance on a linear
genomic scale for a single sample.
Parameters
----------
sample : str
Name of the sample.
palette : str, optional
Name of the seaborn palette. See the :ref:`tutorials:Control plot
colors` tutorial for details.
legend : {'auto', 'brief', 'full', False}, default: 'auto'
Display setting of the legend according to
:meth:`seaborn.scatterplot`.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.scatterplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('brca')
>>> maf_file = '~/fuc-data/brca/brca.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_rainfall('TCGA-A8-A08B',
... figsize=(14, 7),
... palette=sns.color_palette('Set2')[:6])
>>> plt.tight_layout()
"""
# Select variants from the sample.
df = self.df[self.df.Tumor_Sample_Barcode == sample]
# Remove indels.
df = df[df.Variant_Type == 'SNP']
# Raise an error if there are no SNVs to plot.
if df.empty:
message = (
'There are no SNVs to be drawn '
f"for the sample: '{sample}'."
)
raise ValueError(message)
# Get SNV class for each variant.
def one_row(r):
change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
return SNV_CLASSES[change]['class']
df['SNV_Class'] = df.apply(one_row, axis=1)
# Convert string chromosomes to integers for ordering.
def one_row(r):
r.Chromosome = int(r.Chromosome.replace(
'chr', '').replace('X', '23').replace('Y', '24'))
return r
df = df.apply(one_row, axis=1)
df = df[['Chromosome', 'Start_Position', 'SNV_Class']]
df = df.sort_values(['Chromosome', 'Start_Position'])
# Update positions as if all chromosomes are one long molecule.
def one_row(r):
if r.Chromosome == 1:
return r
r.Start_Position += sum(CHROM_LENGTHS['hg19'][:r.Chromosome-1])
return r
df = df.apply(one_row, axis=1)
s = np.diff(df.Start_Position)
s = np.insert(s, 0, 0)
s = np.log10(s + 1)
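# A minimal standalone sketch of the inter-variant distance transform used in
# plot_rainfall above: consecutive differences of the sorted genomic positions,
# padded for the first variant and log10-scaled. The positions below are toy
# values, not data from the MAF file referenced above.
import numpy as np
positions = np.array([100, 250, 260, 5000, 5010, 90000])  # sorted start positions
dists = np.diff(positions)       # distance to the previous variant
dists = np.insert(dists, 0, 0)   # the first variant has no predecessor
log_dists = np.log10(dists + 1)  # +1 keeps log10 finite for zero distances
print(log_dists)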
"""
General tools for dealing with ATAC-Seq data using Python.
@author: <NAME>, Greenleaf Lab, Stanford University
"""
import numpy as np
import matplotlib.pyplot as plt
from pyatac.bedgraph import BedGraphFile
from pyatac.chunk import Chunk
from pyatac.utils import smooth
from pyatac.fragments import getInsertions, getStrandedInsertions
from pyatac.seq import get_sequence, seq_to_mat, complement
class Track(Chunk):
"""Generic class for various types of signal tracks"""
def __init__(self, chrom, start, end, name = "track", vals=None , log = False):
Chunk.__init__(self, chrom, start, end, name = name)
self.log = log
if vals is None:
self.vals = None
elif len(vals) == self.length():
self.vals = vals
else:
raise Exception("Input vals must be of length as set by start and end!")
def assign_track(self, vals, start = None, end = None):
"""Assign values to track"""
if start:
self.start = start
if end:
self.end = end
if len(vals)!= self.end - self.start:
raise Exception("The values being assigned to track do not \
span the start to end of the track")
self.vals = vals
def write_track(self, handle, start = None, end = None, vals = None, write_zero = True):
"""Write track to output file handle
If vals are specified use those values
Otherwise use self.vals
"""
if start is None:
start = self.start
if end is None:
end = self.end
if vals is None:
vals=self.vals
if len(vals)!=self.end-self.start:
print(len(vals),self.end-self.start)
raise Exception("Error! Inconsistency between length of \
values and start/end values")
prev_value = None
start_range = 0
output = ""
for i in range(len(vals)):
if vals[i] == prev_value:
pass
elif np.isnan(vals[i]):
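# A minimal sketch of the run-length grouping that write_track is building up to:
# consecutive equal values collapse into (start, end, value) intervals, the usual
# bedGraph convention. This is a standalone illustration with toy values, not the
# pyatac implementation itself (NaN handling is omitted here).
import numpy as np
def run_length_intervals(vals, offset=0):
    """Yield (start, end, value) for runs of equal values; end is exclusive."""
    start = 0
    for i in range(1, len(vals) + 1):
        if i == len(vals) or vals[i] != vals[start]:
            yield offset + start, offset + i, vals[start]
            start = i
vals = np.array([0.0, 0.0, 1.5, 1.5, 1.5, 0.0])
for interval in run_length_intervals(vals, offset=1000):
    print(interval)  # (1000, 1002, 0.0), (1002, 1005, 1.5), (1005, 1006, 0.0)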
import numpy as np
import cv2
import warnings
warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import os
import scipy
import imageio
from scipy.ndimage import gaussian_filter1d, gaussian_filter
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from matplotlib.colors import ListedColormap
import statsmodels.api as sm
import pandas as pd
from statsmodels.stats.anova import AnovaRM
from sklearn import linear_model
from helper_code.registration_funcs import model_arena, get_arena_details
from helper_code.processing_funcs import speed_colors
from helper_code.analysis_funcs import *
from important_code.shuffle_test import permutation_test, permutation_correlation
plt.rcParams.update({'font.size': 30})
def plot_traversals(self):
''' plot all traversals across the arena '''
# initialize parameters
sides = ['back', 'front']
# sides = ['back']
types = ['spontaneous'] #, 'evoked']
fast_color = np.array([.5, 1, .5])
slow_color = np.array([1, .9, .9])
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
edge_vector_color = np.array([.98, .9, .6])
#!/usr/bin/python
# -*- coding: utf-8 -*-
#################
## Import modules
#################
import sys
# walk directories
import glob
# access to OS functionality
import os
# copy things
import copy
# numpy
import numpy as np
# open3d
import open3d
# matplotlib for colormaps
import matplotlib.cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# struct for reading binary ply files
import struct
# the main class that loads raw 3D scans
class Kitti360Viewer3DRaw(object):
# Constructor
def __init__(self, seq=0, mode='velodyne'):
if 'KITTI360_DATASET' in os.environ:
kitti360Path = os.environ['KITTI360_DATASET']
else:
kitti360Path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..', '..')
if mode=='velodyne':
self.sensor_dir='velodyne_points'
elif mode=='sick':
self.sensor_dir='sick_points'
else:
raise RuntimeError('Unknown sensor type!')
sequence = '2013_05_28_drive_%04d_sync' % seq
self.raw3DPcdPath = os.path.join(kitti360Path, 'data_3d_raw', sequence, self.sensor_dir, 'data')
def loadVelodyneData(self, frame=0):
pcdFile = os.path.join(self.raw3DPcdPath, '%010d.bin' % frame)
if not os.path.isfile(pcdFile):
raise RuntimeError('%s does not exist!' % pcdFile)
pcd = np.fromfile(pcdFile, dtype=np.float32)
pcd = np.reshape(pcd,[-1,4])
return pcd
def loadSickData(self, frame=0):
pcdFile = os.path.join(self.raw3DPcdPath, '%010d.bin' % frame)
if not os.path.isfile(pcdFile):
raise RuntimeError('%s does not exist!' % pcdFile)
pcd = np.fromfile(pcdFile, dtype=np.float32)
pcd = np.reshape(pcd,[-1,2])
pcd = np.concatenate([np.zeros_like(pcd[:,0:1]), -pcd[:,0:1], pcd[:,1:2]], axis=1)
return pcd
def projectVeloToImage(cam_id=0, seq=0):
from kitti360scripts.devkits.commons.loadCalibration import loadCalibrationCameraToPose, loadCalibrationRigid
from kitti360scripts.helpers.project import CameraPerspective, CameraFisheye
from PIL import Image
import matplotlib.pyplot as plt
if 'KITTI360_DATASET' in os.environ:
kitti360Path = os.environ['KITTI360_DATASET']
else:
kitti360Path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..', '..')
sequence = '2013_05_28_drive_%04d_sync'%seq
# perspective camera
if cam_id in [0,1]:
camera = CameraPerspective(kitti360Path, sequence, cam_id)
# fisheye camera
elif cam_id in [2,3]:
camera = CameraFisheye(kitti360Path, sequence, cam_id)
else:
raise RuntimeError('Unknown camera ID!')
# object for parsing 3d raw data
velo = Kitti360Viewer3DRaw(mode='velodyne', seq=seq)
# cam_0 to velo
fileCameraToVelo = os.path.join(kitti360Path, 'calibration', 'calib_cam_to_velo.txt')
TrCam0ToVelo = loadCalibrationRigid(fileCameraToVelo)
# all cameras to system center
fileCameraToPose = os.path.join(kitti360Path, 'calibration', 'calib_cam_to_pose.txt')
TrCamToPose = loadCalibrationCameraToPose(fileCameraToPose)
# velodyne to all cameras
TrVeloToCam = {}
for k, v in TrCamToPose.items():
# Tr(cam_k -> velo) = Tr(cam_k -> cam_0) @ Tr(cam_0 -> velo)
TrCamkToCam0 = np.linalg.inv(TrCamToPose['image_00'])
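# A toy sketch of the rigid-transform bookkeeping in projectVeloToImage above:
# 4x4 homogeneous matrices compose by matrix multiplication, so
# Tr(a -> c) = Tr(b -> c) @ Tr(a -> b). The extrinsics below are made-up values,
# not the actual KITTI-360 calibration files.
import numpy as np
def make_rigid(R, t):
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T
# toy extrinsics: cam_0 -> pose and cam_k -> pose
Tr_cam0_to_pose = make_rigid(np.eye(3), np.array([0.5, 0.0, 1.2]))
Tr_camk_to_pose = make_rigid(np.eye(3), np.array([0.5, 0.6, 1.2]))
# cam_k -> cam_0, obtained by going through the shared pose frame
Tr_camk_to_cam0 = np.linalg.inv(Tr_cam0_to_pose) @ Tr_camk_to_pose
# transform a homogeneous point expressed in cam_k into cam_0 coordinates
p_camk = np.array([1.0, 2.0, 10.0, 1.0])
print(Tr_camk_to_cam0 @ p_camk)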
#! /usr/bin/env python
"""
This script produces the stacks for emission line luminosity limited samples.
"""
import sys
import os
from os.path import join
import glob
import numpy as n
import astropy.io.fits as fits
import SpectraStackingEBOSS as sse
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import lineListVac as ll
em_line_list = [
[1240.14, 'N V' , 'darkgreen'],
[1305.53, 'O I' , 'darkgreen'],
[1335.31, 'C II', 'darkgreen' ],
[1397.61, 'Si IV', 'darkgreen' ],
[1399.8, 'Si IV + O IV', 'darkgreen' ],
[ll.C4_1548, r'C IV', 'darkgreen'],
[1640.42, 'He II', 'darkgreen'],
[1750.26, 'N III]', 'darkgreen'],
[ll.C3_1908 , r'C III', 'darkgreen' ],
[2327.00, 'CII]', 'darkgreen'],
[2396.36, 'FeII*', 'darkgreen'],
[2626.45, 'FeII*', 'darkgreen'],
[3346.82, '[Ne V]', 'darkgreen'],
[3426.84, '[Ne V]', 'darkgreen'],
[ll.O2_mean , r'[O II]', 'darkgreen'],
[3759.99, '[Fe VII]', 'darkgreen'],
[ll.Ne3_3869 , r'[Ne III]', 'darkgreen'],
# [ll.Ne3_3968 , r'[Ne III]', 'darkgreen'],
[ll.O3_4363 , r'[O III]' , 'darkgreen'],
[ll.O3_4960 , r'[O III]' , 'darkgreen'],
[ll.O3_5007 , r'[O III]' , 'darkgreen'],
[5160.33, '[Fe VII]', 'darkgreen'],
[ll.O1_5578 , r'O I', 'darkgreen' ],
[5722.30, '[Fe VII]', 'darkgreen'],
[5877.29, 'He I', 'darkgreen'],
[6087.98, '[Fe VII]', 'darkgreen'],
[ll.O1_6302 , r'O I' , 'darkgreen'],
[ll.O1_6365 , r'O I' , 'darkgreen'],
[ll.N2_5756 , r'[N II]' , 'darkgreen'],
[ll.N2_6549 , r'[N II]' , 'darkgreen'],
[ll.N2_6585 , r'[N II]' , 'darkgreen'],
[ll.S2_6718 , r'[S II]', 'darkgreen'],
[ll.S2_6732 , r'[S II]', 'darkgreen'],
[ll.Ar3_7137 , r'[Ar III]' , 'darkgreen'],
]
abs_line_list = [
[911.753, r'Ly$_{limit}$', 'black'],
[1025.7220, r'Ly$_\beta$', 'black'],
[ll.H1_1216, r'Ly$_\alpha$', 'black'],
[1857.40, 'Al III', 'darkgreen'],
#
[2344.21, 'FeII', 'darkgreen'],
[2382.76, 'Fe II', 'darkgreen'],
[2600.17, 'FeII', 'darkgreen'],
[2798.75, 'MgII', 'darkgreen'],
#
[3835.397, r'H$\eta$', 'black'],
[3889.064, r'H$\zeta$', 'black'],
[3934.777, 'Ca(K)', 'magenta'],
[3969.588, 'Ca(H)', 'magenta'],
[ll.H1_3970 , r'H$_\epsilon$', 'black'],
#
[ll.H1_4102 , r'H$_\delta$', 'black'],
[4305.61, 'G', 'magenta'],
[ll.H1_4341 , r'H$_\gamma$', 'black'],
[ll.He2_4686 , r'He II', 'darkgreen'],
[ll.H1_4862 , r'H$_\beta$', 'black'],
#
[5176.7, 'MgI b', 'magenta'],
[ll.He2_5411, r'He II', 'darkgreen'],
[5895.6, r'NaI D$_{1,2}$', 'magenta'],
[ll.H1_6564 , r'H$_\alpha$', 'black'],
#
[8500.36, 'Ca II', 'magenta'],
[8544.44, 'Ca II', 'magenta'],
[8664.52, 'Ca II', 'magenta'],
]
# line_list_abs = n.array([ 2249.88, 2260.78, 2344.21, 2374.46, 2382.76, 2576.88, 2586.65, 2594.50, 2600.17, 2606.46, 2796.35, 2803.53, 2852.96])
# line_list_abs_names = n.array(['FeII' , 'FeII', 'FeII', 'FeII', 'FeII', 'MnII', 'FeII', 'MnII', 'FeII', 'MnII', 'MgII', 'MgII', 'MgI'])
# line_list_em = n.array([2327, 2365.55, 2396.36, 2612.65,2626.45])
# line_list_em_names = n.array(['CII]', 'FeII*', 'FeII*', 'FeII*', 'FeII*'])
#stack_dir = join( os.environ['HOME'], "SDSS/stacks/v2" )
stack_dir = join( os.environ['HOME'], "SDSS/stacks" )
file_out = join(stack_dir,"X_AGN", "DR16_ELG-stitched-stack.fits")
def plot_spec( p_2_stack = file_out ):
print('plots', p_2_stack)
# fig=p.figure(7, (14.0, 14.0), frameon=False)
# fig.add_subplot(411, ylabel=r'F$_\lambda$')
fig=p.figure(5, (14.0, 8.0))#, frameon=False)
fig.add_subplot(111, ylabel=r'F$_\lambda$', xlabel='Wavelength rest-frame [Angstrom]')
stack = fits.open(p_2_stack)[1].data
s1 = (stack['wavelength']>0)
stack = stack[s1]
y_min = n.min(stack['medianStack'])
y_max = n.max(stack['medianStack'])
delta_y = y_max - y_min
p.xlim((n.min(stack['wavelength']), 9500 )) # n.max(stack['wavelength'])))
p.ylim((y_min - delta_y * 0.2 , y_max + delta_y * 0.2 ))
#p.xscale('log')
# lines above
for elem in em_line_list:
print(elem)
if elem[0] > n.min(stack['wavelength'][5])
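# A minimal sketch of how a (wavelength, label, color) line list like em_line_list
# above can be drawn over a stacked spectrum: one axvline plus a rotated text label
# per entry that falls inside the plotted wavelength range. The spectrum here is a
# made-up array, not an eBOSS stack.
import numpy as n
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
wavelength = n.linspace(3000, 9500, 2000)
flux = 1.0 + 0.3 * n.exp(-0.5 * ((wavelength - 5008.2) / 5.0) ** 2)  # fake [O III] bump
line_list = [(4862.7, r'H$_\beta$', 'black'), (5008.2, '[O III]', 'darkgreen')]
p.figure(6, (10.0, 4.0))
p.plot(wavelength, flux, lw=0.8)
for lam, name, color in line_list:
    if n.min(wavelength) < lam < n.max(wavelength):
        p.axvline(lam, ls='--', color=color, alpha=0.6)
        p.text(lam, n.max(flux) * 1.02, name, rotation=90, color=color, fontsize=8)
p.xlabel('Wavelength rest-frame [Angstrom]')
p.ylabel(r'F$_\lambda$')
p.savefig('line_annotation_sketch.png')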
#The DF of a tidal stream
import copy
import multiprocessing
import warnings
from pkg_resources import parse_version
import numpy
import scipy
from scipy import special, interpolate, integrate, optimize
_SCIPY_VERSION= parse_version(scipy.__version__)
if _SCIPY_VERSION < parse_version('0.10'): #pragma: no cover
from scipy.maxentropy import logsumexp
elif _SCIPY_VERSION < parse_version('0.19'): #pragma: no cover
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
from ..orbit import Orbit
from .df import df
from ..util import coords, fast_cholesky_invert, \
conversion, multi, plot, stable_cho_factor, ars
from ..util.conversion import physical_conversion, _APY_UNITS, _APY_LOADED
from ..actionAngle.actionAngleIsochroneApprox import dePeriod
from ..potential import flatten as flatten_potential
from ..util import galpyWarning
if _APY_LOADED:
from astropy import units
_INTERPDURINGSETUP= True
_USEINTERP= True
_USESIMPLE= True
# cast a wide net
_TWOPIWRAPS= numpy.arange(-4,5)*2.*numpy.pi
_labelDict= {'x': r'$X$',
'y': r'$Y$',
'z': r'$Z$',
'r': r'$R$',
'phi': r'$\phi$',
'vx':r'$V_X$',
'vy':r'$V_Y$',
'vz':r'$V_Z$',
'vr':r'$V_R$',
'vt':r'$V_T$',
'll':r'$\mathrm{Galactic\ longitude\, (deg)}$',
'bb':r'$\mathrm{Galactic\ latitude\, (deg)}$',
'dist':r'$\mathrm{distance\, (kpc)}$',
'pmll':r'$\mu_l\,(\mathrm{mas\,yr}^{-1})$',
'pmbb':r'$\mu_b\,(\mathrm{mas\,yr}^{-1})$',
'vlos':r'$V_{\mathrm{los}}\,(\mathrm{km\,s}^{-1})$'}
class streamdf(df):
"""The DF of a tidal stream"""
def __init__(self,sigv,progenitor=None,pot=None,aA=None,useTM=False,
tdisrupt=None,sigMeanOffset=6.,leading=True,
sigangle=None,
deltaAngleTrack=None,nTrackChunks=None,nTrackIterations=None,
progIsTrack=False,
ro=None,vo=None,
Vnorm=None,Rnorm=None,
R0=8.,Zsun=0.0208,vsun=[-11.1,8.*30.24,7.25],
multi=None,interpTrack=_INTERPDURINGSETUP,
useInterp=_USEINTERP,nosetup=False,nospreadsetup=False,
approxConstTrackFreq=False,useTMHessian=False,
custom_transform=None):
"""
NAME:
__init__
PURPOSE:
Initialize the DF of a tidal stream
INPUT:
sigv - radial velocity dispersion of the progenitor (can be Quantity)
tdisrupt= (5 Gyr) time since start of disruption (can be Quantity)
leading= (True) if True, model the leading part of the stream
if False, model the trailing part
progenitor= progenitor orbit as Orbit instance (will be re-integrated, so don't bother integrating the orbit before)
progIsTrack= (False) if True, then the progenitor (x,v) is actually the (x,v) of the stream track at zero angle separation; useful when initializing with an orbit fit; the progenitor's position will be calculated
pot= Potential instance or list thereof
aA= actionAngle instance used to convert (x,v) to actions
useTM= (False) if set to an actionAngleTorus instance, use this to speed up calculations
sigMeanOffset= (6.) offset between the mean of the frequencies
and the progenitor, in units of the largest
eigenvalue of the frequency covariance matrix
(along the largest eigenvector), should be positive;
to model the trailing part, set leading=False
sigangle= (sigv/122/[1km/s]=1.8sigv in natural coordinates)
estimate of the angle spread of the debris initially (can be Quantity)
deltaAngleTrack= (None) angle to estimate the stream track over (rad; or can be Quantity)
nTrackChunks= (floor(deltaAngleTrack/0.15)+1) number of chunks to divide the progenitor track in
nTrackIterations= Number of iterations to perform when establishing the track; each iteration starts from a previous approximation to the track in (x,v) and calculates a new track based on the deviation between the previous track and the desired track in action-angle coordinates; if not set, an appropriate value is determined based on the magnitude of the misalignment between stream and orbit, with larger numbers of iterations for larger misalignments
interpTrack= (might change), interpolate the stream track while
setting up the instance (can be done by hand by
calling self._interpolate_stream_track() and
self._interpolate_stream_track_aA())
useInterp= (might change), use interpolation by default when
calculating approximated frequencies and angles
nosetup= (False) if True, don't setup the stream track and anything
else that is expensive
nospreadsetup= (False) if True, don't setup the spread around the stream track (only for nosetup is False)
multi= (None) if set, use multi-processing
Coordinate transformation inputs:
vo= (220) circular velocity to normalize velocities with [used to be Vnorm; can be Quantity]
ro= (8) Galactocentric radius to normalize positions with [used to be Rnorm; can be Quantity]
R0= (8) Galactocentric radius of the Sun (kpc) [can be different from ro; can be Quantity]
Zsun= (0.0208) Sun's height above the plane (kpc; can be Quantity)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center) (can be Quantity)
custom_transform= (None) matrix implementing the rotation from (ra,dec) to a custom set of sky coordinates
approxConstTrackFreq= (False) if True, approximate the stream assuming that the frequency is constant along the stream (only works with useTM, for which this leads to a significant speed-up)
useTMHessian= (False) if True, compute the basic Hessian dO/dJ_prog using TM; otherwise use aA
OUTPUT:
object
HISTORY:
2013-09-16 - Started - Bovy (IAS)
2013-11-25 - Started over - Bovy (IAS)
"""
if ro is None and not Rnorm is None:
warnings.warn("WARNING: Rnorm keyword input to streamdf is deprecated in favor of the standard ro keyword", galpyWarning)
ro= Rnorm
if vo is None and not Vnorm is None:
warnings.warn("WARNING: Vnorm keyword input to streamdf is deprecated in favor of the standard vo keyword", galpyWarning)
vo= Vnorm
df.__init__(self,ro=ro,vo=vo)
sigv= conversion.parse_velocity(sigv,vo=self._vo)
self._sigv= sigv
if tdisrupt is None:
self._tdisrupt= 5./conversion.time_in_Gyr(self._vo,self._ro)
else:
self._tdisrupt= conversion.parse_time(tdisrupt,ro=self._ro,vo=self._vo)
self._sigMeanOffset= sigMeanOffset
if pot is None: #pragma: no cover
raise IOError("pot= must be set")
self._pot= flatten_potential(pot)
self._aA= aA
if not self._aA._pot == self._pot:
raise IOError("Potential in aA does not appear to be the same as given potential pot")
self._check_consistent_units()
if useTM:
self._useTM= True
self._aAT= useTM # confusing, no?
self._approxConstTrackFreq= approxConstTrackFreq
if not self._aAT._pot == self._pot:
raise IOError("Potential in useTM=actionAngleTorus instance does not appear to be the same as given potential pot")
else:
self._useTM= False
if (multi is True): #if set to boolean, enable cpu_count processes
self._multi= multiprocessing.cpu_count()
else:
self._multi= multi
self._progenitor_setup(progenitor,leading,useTMHessian)
sigangle= conversion.parse_angle(sigangle)
deltaAngleTrack= conversion.parse_angle(deltaAngleTrack)
self._offset_setup(sigangle,leading,deltaAngleTrack)
# if progIsTrack, calculate the progenitor that gives a track that is approximately the given orbit
if progIsTrack:
self._setup_progIsTrack()
R0= conversion.parse_length_kpc(R0)
Zsun= conversion.parse_length_kpc(Zsun)
vsun= conversion.parse_velocity_kms(vsun)
vsun[0]= conversion.parse_velocity_kms(vsun[0])
vsun[1]= conversion.parse_velocity_kms(vsun[1])
vsun[2]= conversion.parse_velocity_kms(vsun[2])
self._setup_coord_transform(R0,Zsun,vsun,progenitor,custom_transform)
#Determine the stream track
if not nosetup:
self._determine_nTrackIterations(nTrackIterations)
self._determine_stream_track(nTrackChunks)
self._useInterp= useInterp
if interpTrack or self._useInterp:
self._interpolate_stream_track()
self._interpolate_stream_track_aA()
self.calc_stream_lb()
if not nospreadsetup: self._determine_stream_spread()
return None
def _progenitor_setup(self,progenitor,leading,useTMHessian):
"""The part of the setup relating to the progenitor's orbit"""
#Progenitor orbit: Calculate actions, frequencies, and angles for the progenitor
self._progenitor= progenitor() #call to get new Orbit
# Make sure we do not use physical coordinates
self._progenitor.turn_physical_off()
acfs= self._aA.actionsFreqsAngles(self._progenitor,
_firstFlip=(not leading),
use_physical=False)
self._progenitor_jr= acfs[0][0]
self._progenitor_lz= acfs[1][0]
self._progenitor_jz= acfs[2][0]
self._progenitor_Omegar= acfs[3]
self._progenitor_Omegaphi= acfs[4]
self._progenitor_Omegaz= acfs[5]
self._progenitor_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
self._progenitor_angler= acfs[6]
self._progenitor_anglephi= acfs[7]
self._progenitor_anglez= acfs[8]
self._progenitor_angle= numpy.array([acfs[6],acfs[7],acfs[8]]).reshape(3)
#Calculate dO/dJ Jacobian at the progenitor
if useTMHessian:
h, fr,fp,fz,e= self._aAT.hessianFreqs(self._progenitor_jr,
self._progenitor_lz,
self._progenitor_jz)
self._dOdJp= h
# Replace frequencies with TM frequencies
self._progenitor_Omegar= fr
self._progenitor_Omegaphi= fp
self._progenitor_Omegaz= fz
self._progenitor_Omega= numpy.array([self._progenitor_Omegar,
self._progenitor_Omegaphi,
self._progenitor_Omegaz]).reshape(3)
else:
self._dOdJp= calcaAJac(self._progenitor.vxvv[0],
self._aA,dxv=None,dOdJ=True,
_initacfs=acfs)
self._dOdJpInv= numpy.linalg.inv(self._dOdJp)
self._dOdJpEig= numpy.linalg.eig(self._dOdJp)
return None
def _offset_setup(self,sigangle,leading,deltaAngleTrack):
"""The part of the setup related to calculating the stream/progenitor offset"""
#From the progenitor orbit, determine the sigmas in J and angle
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
self._sigjz= 2.*self._progenitor.zmax()/numpy.pi*self._sigv
#Estimate the frequency covariance matrix from a diagonal J matrix x dOdJ
self._sigjmatrix= numpy.diag([self._sigjr**2.,
self._siglz**2.,
self._sigjz**2.])
self._sigomatrix= numpy.dot(self._dOdJp,
numpy.dot(self._sigjmatrix,self._dOdJp.T))
#Estimate angle spread as the ratio of the largest to the middle eigenvalue
self._sigomatrixEig= numpy.linalg.eig(self._sigomatrix)
self._sigomatrixEigsortIndx= numpy.argsort(self._sigomatrixEig[0])
self._sortedSigOEig= sorted(self._sigomatrixEig[0])
if sigangle is None:
self._sigangle= self._sigv*1.8
else:
self._sigangle= sigangle
self._sigangle2= self._sigangle**2.
self._lnsigangle= numpy.log(self._sigangle)
#Estimate the frequency mean as lying along the direction of the largest eigenvalue
self._dsigomeanProgDirection= self._sigomatrixEig[1][:,numpy.argmax(self._sigomatrixEig[0])]
self._progenitor_Omega_along_dOmega= \
numpy.dot(self._progenitor_Omega,self._dsigomeanProgDirection)
#Make sure we are modeling the correct part of the stream
self._leading= leading
self._sigMeanSign= 1.
if self._leading and self._progenitor_Omega_along_dOmega < 0.:
self._sigMeanSign= -1.
elif not self._leading and self._progenitor_Omega_along_dOmega > 0.:
self._sigMeanSign= -1.
self._progenitor_Omega_along_dOmega*= self._sigMeanSign
self._sigomean= self._progenitor_Omega\
+self._sigMeanOffset*self._sigMeanSign\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))\
*self._dsigomeanProgDirection
#numpy.dot(self._dOdJp,
# numpy.array([self._sigjr,self._siglz,self._sigjz]))
self._dsigomeanProg= self._sigomean-self._progenitor_Omega
self._meandO= self._sigMeanOffset\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))
#Store cholesky of sigomatrix for fast evaluation
self._sigomatrixNorm=\
numpy.sqrt(numpy.sum(self._sigomatrix**2.))
self._sigomatrixinv, self._sigomatrixLogdet= \
fast_cholesky_invert(self._sigomatrix/self._sigomatrixNorm,
tiny=10.**-15.,logdet=True)
self._sigomatrixinv/= self._sigomatrixNorm
deltaAngleTrackLim = (self._sigMeanOffset+4.) * numpy.sqrt(
self._sortedSigOEig[2]) * self._tdisrupt
if (deltaAngleTrack is None):
deltaAngleTrack = deltaAngleTrackLim
else:
if (deltaAngleTrack > deltaAngleTrackLim):
warnings.warn("WARNING: angle range large compared to plausible value.", galpyWarning)
self._deltaAngleTrack= deltaAngleTrack
return None
def _setup_coord_transform(self,R0,Zsun,vsun,progenitor,custom_transform):
#Set the coordinate-transformation parameters; check that these do not conflict with those in the progenitor orbit object; need to use the original, since this objects _progenitor has physical turned off
if progenitor._roSet \
and (numpy.fabs(self._ro-progenitor._ro) > 10.**-.8 \
or numpy.fabs(R0-progenitor._ro) > 10.**-8.):
warnings.warn("Warning: progenitor's ro does not agree with streamdf's ro and R0; this may have unexpected consequences when projecting into observables", galpyWarning)
if progenitor._voSet \
and numpy.fabs(self._vo-progenitor._vo) > 10.**-8.:
warnings.warn("Warning: progenitor's vo does not agree with streamdf's vo; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.fabs(Zsun-progenitor._zo) > 10.**-8.:
warnings.warn("Warning: progenitor's zo does not agree with streamdf's Zsun; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.any(numpy.fabs(vsun-numpy.array([0.,self._vo,0.])\
-progenitor._solarmotion) > 10.**-8.):
warnings.warn("Warning: progenitor's solarmotion does not agree with streamdf's vsun (after accounting for vo); this may have unexpected consequences when projecting into observables", galpyWarning)
self._R0= R0
self._Zsun= Zsun
self._vsun= vsun
self._custom_transform= custom_transform
return None
def _setup_progIsTrack(self):
"""If progIsTrack, the progenitor orbit that was passed to the
streamdf initialization is the track at zero angle separation;
this routine computes an actual progenitor position that gives
the desired track given the parameters of the streamdf"""
# We need to flip the sign of the offset, to go to the progenitor
self._sigMeanSign*= -1.
# Use _determine_stream_track_single to calculate the track-progenitor
# offset at zero angle separation
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
0.) #angle = 0
# Setup the new progenitor orbit
progenitor= Orbit(prog_stream_offset[3])
# Flip the offset sign again
self._sigMeanSign*= -1.
# Now re-do the previous setup
self._progenitor_setup(progenitor,self._leading,False)
self._offset_setup(self._sigangle,self._leading,
self._deltaAngleTrack)
return None
@physical_conversion('angle',pop=True)
def misalignment(self,isotropic=False,**kwargs):
"""
NAME:
misalignment
PURPOSE:
calculate the misalignment between the progenitor's frequency
and the direction along which the stream disrupts
INPUT:
isotropic= (False), if True, return the misalignment assuming an isotropic action distribution
OUTPUT:
misalignment in rad
HISTORY:
2013-12-05 - Written - Bovy (IAS)
2017-10-28 - Changed output unit to rad - Bovy (UofT)
"""
warnings.warn("In versions >1.3, the output unit of streamdf.misalignment has been changed to radian (from degree before)",galpyWarning)
if isotropic:
dODir= self._dOdJpEig[1][:,numpy.argmax(numpy.fabs(self._dOdJpEig[0]))]
else:
dODir= self._dsigomeanProgDirection
out= numpy.arccos(numpy.sum(self._progenitor_Omega*dODir)/numpy.sqrt(numpy.sum(self._progenitor_Omega**2.)))
if out > numpy.pi/2.: return out-numpy.pi
else: return out
def freqEigvalRatio(self,isotropic=False):
"""
NAME:
freqEigvalRatio
PURPOSE:
calculate the ratio between the largest and 2nd-to-largest (in abs)
eigenvalue of sqrt(dO/dJ^T V_J dO/dJ)
(if this is big, a 1D stream will form)
INPUT:
isotropic= (False), if True, return the ratio assuming an isotropic action distribution (i.e., just of dO/dJ)
OUTPUT:
ratio between eigenvalues of fabs(dO / dJ)
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isotropic:
sortedEig= sorted(numpy.fabs(self._dOdJpEig[0]))
return sortedEig[2]/sortedEig[1]
else:
return numpy.sqrt(self._sortedSigOEig)[2]\
/numpy.sqrt(self._sortedSigOEig)[1]
@physical_conversion('time',pop=True)
def estimateTdisrupt(self,deltaAngle):
"""
NAME:
estimateTdisrupt
PURPOSE:
estimate the time of disruption
INPUT:
deltaAngle- spread in angle since disruption
OUTPUT:
time in natural units
HISTORY:
2013-11-27 - Written - Bovy (IAS)
"""
return deltaAngle\
/numpy.sqrt(numpy.sum(self._dsigomeanProg**2.))
def subhalo_encounters(self,venc=numpy.inf,sigma=150./220.,
nsubhalo=0.3,bmax=0.025,yoon=False):
"""
NAME:
subhalo_encounters
PURPOSE:
estimate the number of encounters with subhalos over the lifetime of this stream, using a formalism similar to that of Yoon et al. (2011)
INPUT:
venc= (numpy.inf) count encounters with (relative) speeds less than this (relative radial velocity in cylindrical stream frame, unless yoon is True) (can be Quantity)
sigma= (150/220) velocity dispersion of the DM subhalo population (can be Quantity)
nsubhalo= (0.3) spatial number density of subhalos (can be Quantity)
bmax= (0.025) maximum impact parameter (if larger than width of stream) (can be Quantity)
yoon= (False) if True, use erroneous Yoon et al. formula
OUTPUT:
number of encounters
HISTORY:
2016-01-19 - Written - Bovy (UofT)
"""
venc= conversion.parse_velocity(venc,vo=self._vo)
sigma= conversion.parse_velocity(sigma,vo=self._vo)
nsubhalo= conversion.parse_numdens(nsubhalo,ro=self._ro)
bmax= conversion.parse_length(bmax,ro=self._ro)
Ravg= numpy.mean(numpy.sqrt(self._progenitor.orbit[0,:,0]**2.
+self._progenitor.orbit[0,:,3]**2.))
if numpy.isinf(venc):
vencFac= 1.
elif yoon:
vencFac= (1.-(1.+venc**2./4./sigma**2.)\
*numpy.exp(-venc**2./4./sigma**2.))
else:
vencFac= (1.-numpy.exp(-venc**2./2./sigma**2.))
if yoon:
yoonFac= 2*numpy.sqrt(2.)
else:
yoonFac= 1.
# Figure out width of stream
w= self.sigangledAngle(self._meandO*self._tdisrupt,simple=True,
use_physical=False)
if bmax < w*Ravg/2.: bmax= w*Ravg/2.
return yoonFac/numpy.sqrt(2.)*numpy.sqrt(numpy.pi)*Ravg*sigma\
*self._tdisrupt**2.*self._meandO\
*bmax*nsubhalo*vencFac
############################STREAM TRACK FUNCTIONS#############################
def plotTrack(self,d1='x',d2='z',interp=True,spread=0,simple=_USESIMPLE,
*args,**kwargs):
"""
NAME:
plotTrack
PURPOSE:
plot the stream track
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
interp= (True) if True, use the interpolated stream track
spread= (0) if int > 0, also plot the spread around the track as spread x sigma
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
simple= (False), if True, use a simple estimate for the spread in perpendicular angle
galpy.util.plot.plotplot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
if not hasattr(self,'_ObsTrackLB') and \
(d1.lower() == 'll' or d1.lower() == 'bb'
or d1.lower() == 'dist' or d1.lower() == 'pmll'
or d1.lower() == 'pmbb' or d1.lower() == 'vlos'
or d2.lower() == 'll' or d2.lower() == 'bb'
or d2.lower() == 'dist' or d2.lower() == 'pmll'
or d2.lower() == 'pmbb' or d2.lower() == 'vlos'):
self.calc_stream_lb()
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_track_dim(d1,interp=interp,phys=phys)
ty= self._parse_track_dim(d2,interp=interp,phys=phys)
plot.plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
if spread:
addx, addy= self._parse_track_spread(d1,d2,interp=interp,phys=phys,
simple=simple)
if ('ls' in kwargs and kwargs['ls'] == 'none') \
or ('linestyle' in kwargs \
and kwargs['linestyle'] == 'none'):
kwargs.pop('ls',None)
kwargs.pop('linestyle',None)
spreadls= 'none'
else:
spreadls= '-.'
spreadmarker= kwargs.pop('marker',None)
spreadcolor= kwargs.pop('color',None)
spreadlw= kwargs.pop('lw',1.)
plot.plot(tx+spread*addx,ty+spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
plot.plot(tx-spread*addx,ty-spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
return None
def plotProgenitor(self,d1='x',d2='z',*args,**kwargs):
"""
NAME:
plotProgenitor
PURPOSE:
plot the progenitor orbit
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
galpy.util.plot.plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
tts= self._progenitor.t[self._progenitor.t \
< self._trackts[self._nTrackChunks-1]]
obs= [self._R0,0.,self._Zsun]
obs.extend(self._vsun)
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_progenitor_dim(d1,tts,ro=self._ro,vo=self._vo,
obs=obs,phys=phys)
ty= self._parse_progenitor_dim(d2,tts,ro=self._ro,vo=self._vo,
obs=obs,phys=phys)
plot.plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
return None
def _parse_track_dim(self,d1,interp=True,phys=False):
"""Parse the dimension to plot the stream track for"""
if interp: interpStr= 'interpolated'
else: interpStr= ''
if d1.lower() == 'x':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,0]
elif d1.lower() == 'y':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,1]
elif d1.lower() == 'z':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,2]
elif d1.lower() == 'r':
tx= self.__dict__['_%sObsTrack' % interpStr][:,0]
elif d1.lower() == 'phi':
tx= self.__dict__['_%sObsTrack' % interpStr][:,5]
elif d1.lower() == 'vx':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,3]
elif d1.lower() == 'vy':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,4]
elif d1.lower() == 'vz':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,5]
elif d1.lower() == 'vr':
tx= self.__dict__['_%sObsTrack' % interpStr][:,1]
elif d1.lower() == 'vt':
tx= self.__dict__['_%sObsTrack' % interpStr][:,2]
elif d1.lower() == 'll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,0]
elif d1.lower() == 'bb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,1]
elif d1.lower() == 'dist':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,2]
elif d1.lower() == 'pmll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,4]
elif d1.lower() == 'pmbb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,5]
elif d1.lower() == 'vlos':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,3]
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._ro
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._vo
return tx
def _parse_progenitor_dim(self,d1,ts,ro=None,vo=None,obs=None,
phys=False):
"""Parse the dimension to plot the progenitor orbit for"""
if d1.lower() == 'x':
tx= self._progenitor.x(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'y':
tx= self._progenitor.y(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'z':
tx= self._progenitor.z(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'r':
tx= self._progenitor.R(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'phi':
tx= self._progenitor.phi(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vx':
tx= self._progenitor.vx(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vy':
tx= self._progenitor.vy(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vz':
tx= self._progenitor.vz(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vr':
tx= self._progenitor.vR(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vt':
tx= self._progenitor.vT(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'll':
tx= self._progenitor.ll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'bb':
tx= self._progenitor.bb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'dist':
tx= self._progenitor.dist(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmll':
tx= self._progenitor.pmll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmbb':
tx= self._progenitor.pmbb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vlos':
tx= self._progenitor.vlos(ts,ro=ro,vo=vo,obs=obs)
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._ro
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._vo
return tx
def _parse_track_spread(self,d1,d2,interp=True,phys=False,
simple=_USESIMPLE):
"""Determine the spread around the track"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
okaySpreadR= ['r','vr','vt','z','vz','phi']
okaySpreadXY= ['x','y','z','vx','vy','vz']
okaySpreadLB= ['ll','bb','dist','vlos','pmll','pmbb']
#Determine which coordinate system we're in
coord= [False,False,False] #R, XY, LB
if d1.lower() in okaySpreadR and d2.lower() in okaySpreadR:
coord[0]= True
elif d1.lower() in okaySpreadXY and d2.lower() in okaySpreadXY:
coord[1]= True
elif d1.lower() in okaySpreadLB and d2.lower() in okaySpreadLB:
coord[2]= True
else:
raise NotImplementedError("plotting the spread for coordinates from different systems not implemented yet ...")
#Get the right 2D Jacobian
indxDict= {}
indxDict['r']= 0
indxDict['vr']= 1
indxDict['vt']= 2
indxDict['z']= 3
indxDict['vz']= 4
indxDict['phi']= 5
indxDictXY= {}
indxDictXY['x']= 0
indxDictXY['y']= 1
indxDictXY['z']= 2
indxDictXY['vx']= 3
indxDictXY['vy']= 4
indxDictXY['vz']= 5
indxDictLB= {}
indxDictLB['ll']= 0
indxDictLB['bb']= 1
indxDictLB['dist']= 2
indxDictLB['vlos']= 3
indxDictLB['pmll']= 4
indxDictLB['pmbb']= 5
if coord[0]:
relevantCov= self._allErrCovs
relevantDict= indxDict
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._ro,self._vo,self._vo,
self._ro,self._vo,1.])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[1]:
relevantCov= self._allErrCovsXY
relevantDict= indxDictXY
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._ro,self._ro,self._ro,
self._vo,self._vo,self._vo])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[2]:
relevantCov= self._allErrCovsLBUnscaled
relevantDict= indxDictLB
indx0= numpy.array([[relevantDict[d1.lower()],relevantDict[d1.lower()]],
[relevantDict[d2.lower()],relevantDict[d2.lower()]]])
indx1= numpy.array([[relevantDict[d1.lower()],relevantDict[d2.lower()]],
[relevantDict[d1.lower()],relevantDict[d2.lower()]]])
cov= relevantCov[:,indx0,indx1] #cov contains all nTrackChunks covs
if not interp:
out= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
out[ii]= minEigvec*numpy.sqrt(covEig[0][minIndx])
eigDir= minEigvec
else:
#We slerp the minor eigenvector and interpolate the eigenvalue
#First store all of the eigenvectors on the track
allEigval= numpy.empty(self._nTrackChunks)
allEigvec= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
allEigval[ii]= numpy.sqrt(covEig[0][minIndx])
allEigvec[ii]= minEigvec
eigDir= minEigvec
#Now interpolate where needed
interpEigval=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allEigval,k=3)
interpolatedEigval= interpEigval(self._interpolatedThetasTrack)
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
2))
for ii in range(self._nTrackChunks-1):
slerpOmega= numpy.arccos(numpy.sum(allEigvec[ii]*allEigvec[ii+1]))
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(2):
interpolatedEigvec[slerpIndx,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmega)*allEigvec[ii,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmega)*allEigvec[ii+1,jj])/numpy.sin(slerpOmega)
out= numpy.tile(interpolatedEigval.T,(2,1)).T*interpolatedEigvec
if coord[2]: #if LB, undo rescalings that were applied before
out[:,0]*= self._ErrCovsLBScale[relevantDict[d1.lower()]]
out[:,1]*= self._ErrCovsLBScale[relevantDict[d2.lower()]]
return (out[:,0],out[:,1])
def plotCompareTrackAAModel(self,**kwargs):
"""
NAME:
plotCompareTrackAAModel
PURPOSE:
plot the comparison between the underlying model's dOmega_perp vs. dangle_r (line) and the track in (x,v)'s dOmega_perp vs. dangle_r (dots; explicitly calculating the track's action-angle coordinates)
INPUT:
galpy.util.plot.plot kwargs
OUTPUT:
plot
HISTORY:
2014-08-27 - Written - Bovy (IAS)
"""
#First calculate the model
model_adiff= (self._ObsTrackAA[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
model_operp= numpy.dot(self._ObsTrackAA[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
#Then calculate the track's frequency-angle coordinates
if self._multi is None:
aatrack= numpy.empty((self._nTrackChunks,6))
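# A standalone numpy sketch of the covariance propagation done in _offset_setup
# above: a diagonal action covariance is pushed through dO/dJ, and the leading
# eigenvector of the resulting frequency covariance gives the direction along
# which the stream spreads. The Jacobian and dispersions are arbitrary toy
# numbers, not a galpy progenitor.
import numpy
dOdJ = numpy.array([[1.0, 0.3, 0.1],
                    [0.2, 0.8, 0.1],
                    [0.1, 0.1, 0.5]])  # toy dO/dJ
sig_j = numpy.array([0.05, 0.02, 0.03])  # toy action dispersions
sigjmatrix = numpy.diag(sig_j**2.)
sigomatrix = numpy.dot(dOdJ, numpy.dot(sigjmatrix, dOdJ.T))  # frequency covariance
eigval, eigvec = numpy.linalg.eig(sigomatrix)
lead = numpy.argmax(eigval)
print("spread direction:", eigvec[:, lead])
print("eigenvalue ratio (largest/second):",
      numpy.sqrt(sorted(eigval)[2]/sorted(eigval)[1]))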
# MIT License
#
# Copyright (c) 2019 Morning Project Samurai (MPS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = '<NAME> <<EMAIL>>'
import os
import json
import numpy as np
from tensorflow import keras
from sklearn.model_selection import train_test_split
class Model:
def __init__(self, *args, **kwargs):
print('INITIALIZE MODEL %s' % self.__class__.__name__)
self._model = None
print('MODEL %s INITIALIZED' % self.__class__.__name__)
def fit(self, X, y):
return self
def predict(self, X):
raise NotImplementedError
def score(self, X, y):
raise NotImplementedError
def save(self, save_dir):
pass
def load(self, save_dir):
pass
class BehaviorClassifier(Model):
def __init__(self, shape=None, fps=None, labels=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._shape = shape
self._fps = fps
self.labels = labels
@property
def shape(self):
return self._shape
@property
def shape_with_batch(self):
return -1, self._shape[0], self._shape[1], self._shape[2]
@property
def image_size(self):
return self._shape[1], self._shape[0]
@property
def time_steps(self):
return self._shape[2]
@property
def fps(self):
return self._fps
@property
def config(self):
return {
'image_size': self.image_size,
'time_steps': self.time_steps,
'fps': self.fps
}
def _create_model(self):
ipt = keras.layers.Input(shape=self._shape)
cnn1_1 = keras.layers.SeparableConv2D(16, (3, 3), activation='relu')(ipt)
pool1_1 = keras.layers.MaxPool2D()(cnn1_1)
cnn2_1 = keras.layers.SeparableConv2D(32, (3, 3), activation='relu')(pool1_1)
pool2_1 = keras.layers.MaxPool2D()(cnn2_1)
fc1 = keras.layers.Dense(128, activation='relu')(keras.layers.Flatten()(pool2_1))
fc2 = keras.layers.Dense(len(self.labels), activation='softmax')(fc1)
self._model = keras.models.Model(inputs=[ipt, ], outputs=[fc2, ])
def fit(self, X, y, optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy', ], train_size=0.75, epochs=20):
self._create_model()
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_size)
self._model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
self._model.fit(X_train, y_train, epochs=epochs)
test_loss, test_acc = self._model.evaluate(X_test, y_test)
print('TEST LOSS:', test_loss)
print('TEST ACCURACY:', test_acc)
return self
def predict(self, X):
return np.argmax(self._model.predict(X), axis=1)
def score(self, X, y):
return self._model.evaluate(X, y)
def predict_probs(self, X):
return self.predict(X)
def predict_labels(self, X):
return [self.labels[i] for i in self.predict(X)]
def save(self, save_dir):
print('SAVE MODEL %s' % self.__class__.__name__)
with open(os.path.join(save_dir, 'model.json'), 'w') as f:
json.dump({'shape': list(self._shape), 'fps': self._fps, 'labels': self.labels}, f)
self._model.save(os.path.join(save_dir, 'model.h5'))
print('MODEL %s SAVED' % self.__class__.__name__)
def load(self, save_dir):
print('LOAD MODEL %s' % self.__class__.__name__)
with open(os.path.join(save_dir, 'model.json'), 'r') as f:
params = json.load(f)
self._shape, self._fps, self.labels = (params[key] for key in ['shape', 'fps', 'labels'])
self._model = keras.models.load_model(os.path.join(save_dir, 'model.h5'))
print('MODEL %s LOADED' % self.__class__.__name__)
class SleepDetector(Model):
def __init__(self, time_steps=None, weights=None, fps=None, threshold=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._time_steps = time_steps
if isinstance(weights, np.ndarray):
self._weights = weights
elif isinstance(weights, list):
self._weights = np.array(weights)
elif weights is None:
self._weights = weights
else:
raise ValueError('weights must be np.ndarray, list or None')
self._fps = fps
self._threshold = threshold
@property
def time_steps(self):
return self._time_steps
@property
def threshold(self):
return self._threshold
def fit(self, X, y):
pass
def predict(self, X):
return [1 if y > self._threshold else 0 for y in np.average(X, axis=1, weights=self._weights)]
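# A minimal sketch of the decision rule in SleepDetector.predict above: each sample
# is a sequence of per-frame scores, a weighted average collapses the time axis,
# and a fixed threshold turns the result into a binary label. Toy scores, weights
# and threshold only.
import numpy as np
scores = np.array([[0.1, 0.2, 0.9, 0.8],  # two samples, four time steps each
                   [0.0, 0.1, 0.2, 0.1]])
weights = np.array([0.1, 0.2, 0.3, 0.4])  # later frames count more
threshold = 0.4
avg = np.average(scores, axis=1, weights=weights)
labels = [1 if y > threshold else 0 for y in avg]
print(avg, labels)  # [0.64 0.12] [1, 0]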
# -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Standard library imports
import math
# Third party import
import numpy as np
def _nl_loudness(core_loudness):
"""Simulate the nonlinear temporal decay of the hearing system
Parameters
----------
core_loudness : numpy.ndarray
Core loudness
Outputs
-------
nl_loudness : numpy.ndarray
Loudness with non linear temporal decay
"""
# Initialization
sample_rate = 2000
nl_loudness = np.copy(core_loudness)
# Factor for virtual upsampling/inner iterations
nl_iter = 24
# Time constants for non_linear temporal decay
t_short = 0.005
t_long = 0.015
t_var = 0.075
# Initializes constants B and states of capacitors C1 and C2
delta_t = 1 / (sample_rate * nl_iter)
P = (t_var + t_long) / (t_var * t_short)
Q = 1 / (t_short * t_var)
lambda_1 = -P / 2 + math.sqrt(P * P / 4 - Q)
lambda_2 = -P / 2 - math.sqrt(P * P / 4 - Q)
den = t_var * (lambda_1 - lambda_2)
e1 = math.exp(lambda_1 * delta_t)
e2 = math.exp(lambda_2 * delta_t)
B = [
(e1 - e2) / den,
((t_var * lambda_2 + 1) * e1 - (t_var * lambda_1 + 1) * e2) / den,
((t_var * lambda_1 + 1) * e1 - (t_var * lambda_2 + 1) * e2) / den,
(t_var * lambda_1 + 1) * (t_var * lambda_2 + 1) * (e1 - e2) / den,
math.exp(-delta_t / t_long),
math.exp(-delta_t / t_var),
]
nl_lp = {"B": B}
#nl_lp["uo_last"] = 0
#nl_lp["u2_last"] = 0
delta = np.copy(core_loudness,)
delta = np.roll(delta, -1, axis=1)
delta[:, -1] = 0
delta = (delta - nl_loudness)/nl_iter
ui_delta = np.zeros(core_loudness.size*nl_iter)
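# A toy sketch of the "virtual upsampling" step right above: the increment toward
# the next time sample is split into nl_iter equal sub-steps, i.e. linear
# interpolation between samples at nl_iter times the original rate. One band and a
# handful of samples only, not real core-loudness data.
import numpy as np
nl_iter = 4
core = np.array([[0.0, 1.0, 0.5]])  # (n_bands, n_samples)
nxt = np.roll(core, -1, axis=1)
nxt[:, -1] = 0                 # nothing follows the last sample
delta = (nxt - core) / nl_iter # per-sub-step increment
upsampled = np.concatenate(
    [core[:, [i]] + delta[:, [i]] * np.arange(nl_iter) for i in range(core.shape[1])],
    axis=1)
print(upsampled)  # [[0. 0.25 0.5 0.75 1. 0.875 0.75 0.625 0.5 0.375 0.25 0.125]]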
"""
Modified from https://github.com/google-research/ssl_detection/blob/master/detection/utils/augmentation.py.
"""
import copy
import cv2
import mmcv
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
from mmcv.image.colorspace import bgr2rgb, rgb2bgr
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines import Compose as BaseCompose
from mmdet.datasets.pipelines import transforms
from .geo_utils import GeometricTransformationBase as GTrans
PARAMETER_MAX = 10
def int_parameter(level, maxval, max_level=None):
if max_level is None:
max_level = PARAMETER_MAX
return int(level * maxval / max_level)
def float_parameter(level, maxval, max_level=None):
if max_level is None:
max_level = PARAMETER_MAX
return float(level) * maxval / max_level
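# Illustrative helper only (hypothetical, not part of the original pipeline): it
# tabulates how a RandAug "level" maps onto concrete magnitudes through
# int_parameter / float_parameter above, e.g. the 0..255 solarize threshold, the
# 0..4 posterize scale, and the 0.1-1.9 factor range used by the PIL enhancers
# further below. Defining it has no side effects; call it by hand when tuning
# augmentation strength.
def _magnitude_mapping_examples(level=5):
    return {
        "solarize_threshold": int_parameter(level, 256),       # 128 at level 5
        "posterize_scale": int_parameter(level, 4),            # 2 at level 5
        "enhance_factor": float_parameter(level, 1.8) + 0.1,   # 1.0 at level 5
    }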
class RandAug(object):
"""refer to https://github.com/google-research/ssl_detection/blob/00d52272f
61b56eade8d5ace18213cba6c74f6d8/detection/utils/augmentation.py#L240."""
def __init__(
self,
prob: float = 1.0,
magnitude: int = 10,
random_magnitude: bool = True,
record: bool = False,
magnitude_limit: int = 10,
):
assert 0 <= prob <= 1, f"probability should be in [0, 1] but got {prob}"
assert (
magnitude <= PARAMETER_MAX
), f"magnitude should be small than max value {PARAMETER_MAX} but get {magnitude}"
self.prob = prob
self.magnitude = magnitude
self.magnitude_limit = magnitude_limit
self.random_magnitude = random_magnitude
self.record = record
self.buffer = None
def __call__(self, results):
if np.random.random() < self.prob:
magnitude = self.magnitude
if self.random_magnitude:
magnitude = np.random.randint(1, magnitude)
if self.record:
if "aug_info" not in results:
results["aug_info"] = []
results["aug_info"].append(self.get_aug_info(magnitude=magnitude))
results = self.apply(results, magnitude)
# clear buffer
return results
def apply(self, results, magnitude: int = None):
raise NotImplementedError()
def __repr__(self):
return f"{self.__class__.__name__}(prob={self.prob},magnitude={self.magnitude},max_magnitude={self.magnitude_limit},random_magnitude={self.random_magnitude})"
def get_aug_info(self, **kwargs):
aug_info = dict(type=self.__class__.__name__)
aug_info.update(
dict(
prob=1.0,
random_magnitude=False,
record=False,
magnitude=self.magnitude,
)
)
aug_info.update(kwargs)
return aug_info
def enable_record(self, mode: bool = True):
self.record = mode
@PIPELINES.register_module()
class Identity(RandAug):
def apply(self, results, magnitude: int = None):
return results
@PIPELINES.register_module()
class AutoContrast(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
results[key] = rgb2bgr(
np.asarray(ImageOps.autocontrast(Image.fromarray(img)), dtype=img.dtype)
)
return results
@PIPELINES.register_module()
class RandEqualize(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
results[key] = rgb2bgr(
np.asarray(ImageOps.equalize(Image.fromarray(img)), dtype=img.dtype)
)
return results
@PIPELINES.register_module()
class RandSolarize(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = results[key]
results[key] = mmcv.solarize(
img, min(int_parameter(magnitude, 256, self.magnitude_limit), 255)
)
return results
def _enhancer_impl(enhancer):
"""Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of
PIL."""
def impl(pil_img, level, max_level=None):
v = float_parameter(level, 1.8, max_level) + 0.1 # going to 0 just destroys it
return enhancer(pil_img).enhance(v)
return impl
class RandEnhance(RandAug):
op = None
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
results[key] = rgb2bgr(
np.asarray(
_enhancer_impl(self.op)(
Image.fromarray(img), magnitude, self.magnitude_limit
),
dtype=img.dtype,
)
)
return results
@PIPELINES.register_module()
class RandColor(RandEnhance):
op = ImageEnhance.Color
@PIPELINES.register_module()
class RandContrast(RandEnhance):
op = ImageEnhance.Contrast
@PIPELINES.register_module()
class RandBrightness(RandEnhance):
op = ImageEnhance.Brightness
@PIPELINES.register_module()
class RandSharpness(RandEnhance):
op = ImageEnhance.Sharpness
@PIPELINES.register_module()
class RandPosterize(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
magnitude = int_parameter(magnitude, 4, self.magnitude_limit)
results[key] = rgb2bgr(
np.asarray(
ImageOps.posterize(Image.fromarray(img), 4 - magnitude),
dtype=img.dtype,
)
)
return results
@PIPELINES.register_module()
class Sequential(BaseCompose):
def __init__(self, transforms, record: bool = False):
super().__init__(transforms)
self.record = record
self.enable_record(record)
def enable_record(self, mode: bool = True):
# enable children to record
self.record = mode
for transform in self.transforms:
transform.enable_record(mode)
@PIPELINES.register_module()
class OneOf(Sequential):
def __init__(self, transforms, record: bool = False):
self.transforms = []
for trans in transforms:
if isinstance(trans, list):
self.transforms.append(Sequential(trans))
else:
assert isinstance(trans, dict)
self.transforms.append(Sequential([trans]))
self.enable_record(record)
def __call__(self, results):
transform = np.random.choice(self.transforms)
return transform(results)
@PIPELINES.register_module()
class ShuffledSequential(Sequential):
def __call__(self, data):
order = np.random.permutation(len(self.transforms))
for idx in order:
t = self.transforms[idx]
data = t(data)
if data is None:
return None
return data
"""
Geometric Augmentation. Modified from thirdparty/mmdetection/mmdet/datasets/pipelines/auto_augment.py
"""
def bbox2fields():
"""The key correspondence from bboxes to labels, masks and
segmentations."""
bbox2label = {"gt_bboxes": "gt_labels", "gt_bboxes_ignore": "gt_labels_ignore"}
bbox2mask = {"gt_bboxes": "gt_masks", "gt_bboxes_ignore": "gt_masks_ignore"}
bbox2seg = {
"gt_bboxes": "gt_semantic_seg",
}
return bbox2label, bbox2mask, bbox2seg
class GeometricAugmentation(object):
def __init__(
self,
img_fill_val=125,
seg_ignore_label=255,
min_size=0,
prob: float = 1.0,
random_magnitude: bool = True,
record: bool = False,
):
if isinstance(img_fill_val, (float, int)):
img_fill_val = tuple([float(img_fill_val)] * 3)
elif isinstance(img_fill_val, tuple):
assert len(img_fill_val) == 3, "img_fill_val as tuple must have 3 elements."
img_fill_val = tuple([float(val) for val in img_fill_val])
assert np.all(
[0 <= val <= 255 for val in img_fill_val]
), "all elements of img_fill_val should between range [0,255]."
self.img_fill_val = img_fill_val
self.seg_ignore_label = seg_ignore_label
self.min_size = min_size
self.prob = prob
self.random_magnitude = random_magnitude
self.record = record
def __call__(self, results):
if np.random.random() < self.prob:
magnitude: dict = self.get_magnitude(results)
if self.record:
if "aug_info" not in results:
results["aug_info"] = []
results["aug_info"].append(self.get_aug_info(**magnitude))
results = self.apply(results, **magnitude)
self._filter_invalid(results, min_size=self.min_size)
return results
def get_magnitude(self, results) -> dict:
raise NotImplementedError()
def apply(self, results, **kwargs):
raise NotImplementedError()
def enable_record(self, mode: bool = True):
self.record = mode
def get_aug_info(self, **kwargs):
aug_info = dict(type=self.__class__.__name__)
aug_info.update(
dict(
# make op deterministic
prob=1.0,
random_magnitude=False,
record=False,
img_fill_val=self.img_fill_val,
seg_ignore_label=self.seg_ignore_label,
min_size=self.min_size,
)
)
aug_info.update(kwargs)
return aug_info
def _filter_invalid(self, results, min_size=0):
"""Filter bboxes and masks too small or translated out of image."""
if min_size is None:
return results
bbox2label, bbox2mask, _ = bbox2fields()
for key in results.get("bbox_fields", []):
bbox_w = results[key][:, 2] - results[key][:, 0]
bbox_h = results[key][:, 3] - results[key][:, 1]
valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
valid_inds = np.nonzero(valid_inds)[0]
results[key] = results[key][valid_inds]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][valid_inds]
return results
def __repr__(self):
return f"""{self.__class__.__name__}(
img_fill_val={self.img_fill_val},
seg_ignore_label={self.seg_ignore_label},
min_size={self.min_size},
prob: float = {self.prob},
random_magnitude: bool = {self.random_magnitude},
)"""
@PIPELINES.register_module()
class RandTranslate(GeometricAugmentation):
def __init__(self, x=None, y=None, **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
if self.x is None and self.y is None:
self.prob = 0.0
def get_magnitude(self, results):
magnitude = {}
if self.random_magnitude:
if isinstance(self.x, (list, tuple)):
assert len(self.x) == 2
x = np.random.random() * (self.x[1] - self.x[0]) + self.x[0]
magnitude["x"] = x
if isinstance(self.y, (list, tuple)):
assert len(self.y) == 2
y = np.random.random() * (self.y[1] - self.y[0]) + self.y[0]
magnitude["y"] = y
else:
if self.x is not None:
assert isinstance(self.x, (int, float))
magnitude["x"] = self.x
if self.y is not None:
assert isinstance(self.y, (int, float))
magnitude["y"] = self.y
return magnitude
def apply(self, results, x=None, y=None):
# ratio to pixel
h, w, c = results["img_shape"]
if x is not None:
x = w * x
if y is not None:
y = h * y
if x is not None:
# translate horizontally
self._translate(results, x)
if y is not None:
# translate vertically
self._translate(results, y, direction="vertical")
return results
def _translate(self, results, offset, direction="horizontal"):
if self.record:
GTrans.apply(
results,
"shift",
dx=offset if direction == "horizontal" else 0,
dy=offset if direction == "vertical" else 0,
)
self._translate_img(results, offset, direction=direction)
self._translate_bboxes(results, offset, direction=direction)
# fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
self._translate_masks(results, offset, direction=direction)
self._translate_seg(
results, offset, fill_val=self.seg_ignore_label, direction=direction
)
def _translate_img(self, results, offset, direction="horizontal"):
for key in results.get("img_fields", ["img"]):
img = results[key].copy()
results[key] = mmcv.imtranslate(
img, offset, direction, self.img_fill_val
).astype(img.dtype)
def _translate_bboxes(self, results, offset, direction="horizontal"):
"""Shift bboxes horizontally or vertically, according to offset."""
h, w, c = results["img_shape"]
for key in results.get("bbox_fields", []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1
)
if direction == "horizontal":
min_x = np.maximum(0, min_x + offset)
max_x = np.minimum(w, max_x + offset)
elif direction == "vertical":
min_y = np.maximum(0, min_y + offset)
import os
from pathlib import Path
from tempfile import tempdir
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal
import ross as rs
from ross.defects.misalignment import MisalignmentFlex
from ross.units import Q_
steel2 = rs.Material(name="Steel", rho=7850, E=2.17e11, Poisson=0.2992610837438423)
# Rotor with 6 DoFs, with internal damping, with 10 shaft elements, 2 disks and 2 bearings.
i_d = 0
o_d = 0.019
n = 33
# fmt: off
L = np.array(
[0 , 25, 64, 104, 124, 143, 175, 207, 239, 271,
303, 335, 345, 355, 380, 408, 436, 466, 496, 526,
556, 586, 614, 647, 657, 667, 702, 737, 772, 807,
842, 862, 881, 914]
)/ 1000
# fmt: on
L = [L[i] - L[i - 1] for i in range(1, len(L))]
shaft_elem = [
rs.ShaftElement6DoF(
material=steel2,
L=l,
idl=i_d,
odl=o_d,
idr=i_d,
odr=o_d,
alpha=8.0501,
beta=1.0e-5,
rotary_inertia=True,
shear_effects=True,
)
for l in L
]
Id = 0.003844540885417
Ip = 0.007513248437500
disk0 = rs.DiskElement6DoF(n=12, m=2.6375, Id=Id, Ip=Ip)
disk1 = rs.DiskElement6DoF(n=24, m=2.6375, Id=Id, Ip=Ip)
kxx1 = 4.40e5
kyy1 = 4.6114e5
kzz = 0
cxx1 = 27.4
cyy1 = 2.505
czz = 0
kxx2 = 9.50e5
kyy2 = 1.09e8
cxx2 = 50.4
cyy2 = 100.4553
bearing0 = rs.BearingElement6DoF(
n=4, kxx=kxx1, kyy=kyy1, cxx=cxx1, cyy=cyy1, kzz=kzz, czz=czz
)
bearing1 = rs.BearingElement6DoF(
n=31, kxx=kxx2, kyy=kyy2, cxx=cxx2, cyy=cyy2, kzz=kzz, czz=czz
)
rotor = rs.Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])
@pytest.fixture
def rub():
unbalance_magnitudet = np.array([5e-4, 0])
unbalance_phaset = np.array([-np.pi / 2, 0])
rubbing = rotor.run_rubbing(
dt=0.001,
tI=0,
tF=0.5,
deltaRUB=7.95e-5,
kRUB=1.1e6,
cRUB=40,
miRUB=0.3,
posRUB=12,
speed=125.66370614359172,
unbalance_magnitude=unbalance_magnitudet,
unbalance_phase=unbalance_phaset,
print_progress=True,
)
return rubbing
@pytest.fixture
def rub_units():
unbalance_magnitudet = Q_(np.array([0.043398083107259365, 0]), "lb*in")
unbalance_phaset = Q_(np.array([-90.0, 0.0]), "deg")
import h5py as h5
import numpy as np
import pytest
from nexusformat.nexus import *
string_dtype = h5.special_dtype(vlen=str)
NX_ENCODING = nxgetencoding()
arr1D = np.linspace(0.0, 100.0, 101, dtype=np.float64)
arr2D = np.array(((1,2,3,4),(5,6,7,8)), dtype=np.int32)
# uncompyle6 version 3.7.4
# Python bytecode 3.5 (3350)
# Decompiled from: Python 3.8.5 (default, Jan 27 2021, 15:41:15)
# [GCC 9.3.0]
# Embedded file name: /home/docker/CSN/bin/gpu_ccsn.py
# Compiled at: 2021-03-31 16:15:26
# Size of source mod 2**32: 19699 bytes
"""
python version of csn algorithm
https://github.com/wys8c764/CSN
"""
import os, argparse, logging, pandas as pd, numpy as np
from scipy import sparse
from scipy import stats
import sys
sys.path.append('.')
import useful_functions as uf
def condition_g(adjmc, kk=50, dlimit=5):
"""return the degree >5 and top kk ρ统计量的gene index, 可优化参数 > degree limit"""
a = np.sum(adjmc, axis=1)
id1 = np.argwhere(a >= dlimit)
INDEX = np.argsort(a[id1.flatten()])[::-1]
id2 = INDEX[0:kk]
return id2.tolist()
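# Minimal sketch (not part of the original module) of what condition_g selects: given a
# symmetric adjacency matrix it keeps genes whose degree is at least `dlimit` and returns
# (up to) the kk highest-degree ones.
def _condition_g_demo():
    adj = np.array([[0, 1, 1, 1, 1, 1],
                    [1, 0, 1, 1, 1, 1],
                    [1, 1, 0, 0, 0, 0],
                    [1, 1, 0, 0, 0, 0],
                    [1, 1, 0, 0, 0, 0],
                    [1, 1, 0, 0, 0, 0]])
    # genes 0 and 1 have degree 5, all others degree 2
    return condition_g(adj, kk=2, dlimit=5)  # -> [1, 0], the two hub genes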
def get_data(csv):
if str(csv).endswith('csv'):
df = pd.read_csv(csv, index_col=0, header=0)
else:
df = pd.read_csv(csv, index_col=0, header=0, sep='\t')
return df
class SSN:
"""Construction of cell-specific networks
The model is built from all sample data; for later prediction the merged full table is used for the degree-matrix (dm) transformation, but networks and degree matrices are only written out for the requested samples (cells.list).
Matrix fusion is performed at the level of the dblur features.
The class performs the transformation from a gene expression matrix to cell-specific networks (CSN).
This docstring follows the Google ("groups") style.
Parameters:
`data` Gene expression matrix, rows = genes, columns = cells
Returns: None
Raises: KeyError - raises an exception
"""
def __init__(self, data, outdir='./', log=None):
"""
default values when initialize. set log file
"""
self.outdir = outdir
self.tablename = data
uf.create_dir(self.outdir)
self.log = os.path.join(self.outdir, log) if log else os.path.join(self.outdir, '{}_{}.log'.format(os.path.basename(data), uf.now()))
self.logger = uf.create_logger(self.log)
self.logger.info('start reading data from {}, log file is {}'.format(data, self.log))
df = get_data(data)
self.data = df.loc[(df.sum(axis=1) != 0, df.sum(axis=0) != 0)]
self.csn = None
self.logger.info('finish reading data from {}'.format(data))
@uf.robust
def get_cells(self, cells=None):
"""
Get cells in list format
Parameters:
file cells.list
Returns:
cells in list format
Raises:
KeyError - raises an exception
"""
if not cells:
cells = list(self.data.columns)
else:
if isinstance(cells, list):
cells = cells
else:
if os.access(cells, os.R_OK):
cells = [cell.strip() for cell in open(cells).readlines()]
else:
print('cells must be list or file with one column')
return cells
@uf.robust
def csnet(self, cells=None, alpha=0.01, boxsize=0.1, edgeW=0, kk=0, dlimit=5, to_csv=0, average=1, *args, **kwargs):
"""
fcndm = cndm(data, 0.1, 0.1, 1) for test
Construct the CSN for sepecified cells
Parameters:
`cells` Construct the CSNs for all cells, set cells = None (Default) otherwise input cells.list
`alpha` Significant level (eg. 0.001, 0.01, 0.05 ...)
larger alpha leads to more edges, Default = 0.01
`boxsize` Size of neighborhood, the value between 1 to 2 is recommended, Default = 0.1,
`edgeW` 1 edge is weighted (statistic pxy(x))
0 edge is not weighted (Default)
`nodeW` 1 node is weighted (gene or otu abundance)
0 node is not weighted (Default)
`csn` Cell-specific network, the kth CSN is in csn{k}
rows = genes, columns = genes
`kk` the number of conditional gene. when kk=0, the method is CSN
`dlimit` the min degree limitation of conditional genes.
`average` whether use the average(adjmc + adjmc1) network or intersection(adjmc.*adjmc1) network.
Returns:
csnet dict
Raises:
KeyError - raises an exception
Notes:
Too many cells or genes may lead to running out of memory.
Learn matrix operations on pandas DataFrames and NumPy arrays.
NumPy indices start from 0.
Each new cell must be combined with all existing cells to compute the lower/upper boundary matrices, which requires sorting every gene.
If the reference data set is large enough, the existing boundary matrices can be reused to recompute the upper and lower matrices by simply plugging in the new cell's gene expression values.
"""
self.logger.info('start construction cell-specific network ')
nr, nc = self.data.shape
data = self.data
upper = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
lower = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
for i in range(nr):
sort_gi = data.iloc[i, :].sort_values(axis=0, ascending=True)
s1 = sort_gi.values
s2 = sort_gi.index
n1 = sum(np.sign(s1))
n0 = nc - n1
h = round(boxsize * np.sqrt(n1))
k = 0
while k < nc:
s = 0
while k + s + 1 < nc and s1[(k + s + 1)] == s1[k]:
s = s + 1
if s >= h:
upper.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[k])]
lower.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[k])]
else:
upper.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[int(min(nc - 1, k + s + h))])]
lower.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[int(max(n0 * (n0 > h), k - h))])]
k = k + s + 1
# %If gene expression matrix is sparse, use the sparse matrix will accelerate
# %the calculation and reduce memory footprint
# %data = sparse(data); upper = sparse(upper); lower = sparse(lower);
self.logger.info('finish caculate the neighborhood of each gene for each cell')
cells = self.get_cells(cells=cells)
csn = dict()
B = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
p = -stats.norm.ppf(q=alpha, loc=0, scale=1)
for k in cells:
for j in B.columns:
if average:
B.loc[:, j] = (data.loc[:, j] <= upper.loc[:, k]) & (data.loc[:, j] >= lower.loc[:, k]) & (data.loc[:, k] > 0)
else:
B.loc[:, j] = (data.loc[:, j] <= upper.loc[:, k]) & (data.loc[:, j] >= lower.loc[:, k])
B = B * 1
a = np.matrix(B.sum(axis=1))
csnk = (B.dot(B.T) * nc - a.T * a) / np.sqrt(np.multiply(a.T * a, (nc - a).T * (nc - a)) / (nc - 1) + np.spacing(1))
csnlink = (csnk > p) * 1
if csnlink.sum().sum() == 0:
self.logger.info('no genes in Cell {} has a link'.format(k))
continue
if kk != 0:
id = condition_g(csnlink, kk=kk, dlimit=dlimit)
csnlink = pd.DataFrame(np.zeros([nr, nr])) if average else pd.DataFrame(np.ones([nr, nr]))
for m in range(kk):
B_z = B.iloc[id[m], :] * B
idc = np.argwhere(B.iloc[id[m], :] != 0).flatten()
B_z = B_z.iloc[:, idc]
r = B_z.shape[1]
a_z = np.mat(B_z.sum(axis=1))
c_z = B_z @ B_z.T
csnk1 = (c_z * r - a_z.T * a_z) / np.sqrt(np.multiply(a_z.T * a_z, (r - a_z).T * (r - a_z)) / (r - 1) + np.spacing(1))
csnlink1 = (csnk1 > p) * 1
csnlink = csnlink + csnlink1 if average else csnlink * csnlink1
else:
kk = 1
csnlink = csnlink / kk if average else csnlink
csn[k] = csnlink
if to_csv:
filename = os.path.join(self.outdir, 'cellnws', '{}.nw.csv'.format(k))
uf.create_dir(self.outdir + '/cellnws')
csn[k].to_csv(path_or_buf=filename)
self.logger.info('Cell {} specific network is completed'.format(k))
self.logger.info('Finished constructing all {} cell specific networks'.format(len(cells)))
self.upper = upper
self.lower = lower
self.csn = csn
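# Usage sketch (illustrative only; the file name is a placeholder, not part of this module):
#   ssn = SSN("otu_even.table", outdir="./csn_out")
#   ssn.csnet(alpha=0.01, boxsize=0.1, kk=0)     # plain CSN
#   ssn.csnet(alpha=0.01, boxsize=0.1, kk=50)    # conditional variant using top-50 hub genes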
@uf.robust
def csndm(self, cells=None, normalize=1, to_csv=1, nodeW=0, *args, **kwargs):
"""Construction of network degree matrix
The function performs the transformation from gene expression matrix to network degree matrix (ndm).
Parameters:
`data` Gene expression matrix (TPM/RPKM/FPKM/count), rows = genes, columns = cells. otu_even.table
`alpha` Significant level (eg. 0.001, 0.01, 0.05 ...), Default = 0.01
`boxsize` Size of neighborhood, Default = 0.1 (nx(k) = ny(k) = 0.1*n)
`normalize`1 result is normalized (Default);
0 result is not normalized
Note:
If gene expression matrix is sparse, use the sparse matrix will accelerate the calculation and reduce memory footprint
data = sparse(data); upper = sparse(upper); lower = sparse(lower);
The degree matrix can be used for machine learning, e.g. sample classification and prediction.
Only the degree matrix of the specified cells is output; if no cells are specified, the degree matrix of every gene for all cells is written out.
"""
data = self.data
self.logger.info('Constructing network degree matrix ...')
cells = self.get_cells(cells=cells)
nr, nc = self.data.shape
ndm = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
csn = self.csn
celln = 0
for k in cells:
if k not in csn:
self.logger.info('Cell {} has no network'.format(k))
continue
if nodeW:
ndm.loc[:, k] = csn[k].sum(axis=1) * data.loc[:, k]
else:
ndm.loc[:, k] = csn[k].sum(axis=1)
celln += 1
self.logger.info('Network degree vector of cell {} is complete'.format(k))
if normalize:
self.logger.info('Normalizing network degree matrix ...')
a = ndm.mean(axis=0)
ndm = ndm.div(a + np.spacing(1), axis=1)
ndm = np.log(1 + ndm)
self.ndm = ndm
if to_csv:
filename = os.path.join(self.outdir, '{}.{}cells.nwdm.csv'.format(os.path.basename(self.tablename), celln))
ndm.to_csv(path_or_buf=filename)
self.logger.info('Finished network degree matrix, file: {}'.format(filename))
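# Usage sketch (illustrative only): after csnet(), the normalized network degree matrix can
# serve as a feature matrix for downstream machine learning, e.g.
#   ssn.csndm(normalize=1, to_csv=0)
#   features = ssn.ndm.T.values      # cells x genes
#   # fit any classifier (scikit-learn etc.) on `features`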
@uf.robust
def nfe(self, cells=None, to_csv=1, *args, **kwargs):
data = self.data
csn = self.csn
self.logger.info('caculate network_flow_entropy ...')
cells = self.get_cells(cells=cells)
nr, nc = data.shape
NFE = pd.DataFrame(np.zeros((nc, 1)), columns=['network_flow_entropy'], index=data.columns)
celln = 0
for k in cells:
if k not in csn:
self.logger.info('Cell {} has no network'.format(k))
NFE.loc[k] = None
continue
datak = np.mat(data.loc[:, k])
# Author: <NAME>
# Demo: Compute largest inscribed spheres in (approximately) centroidal Laguerre diagram
import numpy as np
from scipy.optimize import linprog
import vorostereology as vs
from math import pi
# NOTE: plotting requires packages not part of the dependencies.
# Install via:
# pip install vtk
# pip install mayavi
from mayavi import mlab
from tvtk.api import tvtk
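# Illustrative sketch (not part of the original script): sphere_packing() below sets up, for
# every Laguerre cell, a Chebyshev-centre style linear programme (variables x, y, z, r with
# objective c = [0, 0, 0, -1], i.e. maximise r), which finds the largest inscribed sphere of
# a convex polytope. The toy function below solves the same kind of LP for the unit cube,
# where the answer is a sphere of radius 0.5 centred at (0.5, 0.5, 0.5).
def _inscribed_sphere_in_unit_cube():
    # faces of [0,1]^3 written as A x <= b, with unit outward normals
    A = np.array([[ 1, 0, 0], [-1, 0, 0],
                  [ 0, 1, 0], [ 0, -1, 0],
                  [ 0, 0, 1], [ 0, 0, -1]], dtype=float)
    b = np.array([1, 0, 1, 0, 1, 0], dtype=float)
    # variables (x, y, z, r); since the normals are unit length the constraint is A x + r <= b
    A_ub = np.hstack([A, np.ones((6, 1))])
    res = linprog(c=[0, 0, 0, -1], A_ub=A_ub, b_ub=b,
                  bounds=[(None, None)] * 3 + [(0, None)])
    return res.x  # approximately [0.5, 0.5, 0.5, 0.5]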
def sphere_packing(laguerre, domain, points, weights, periodicity):
L1 = domain[0][1] - domain[0][0]
L2 = domain[1][1] - domain[1][0]
L3 = domain[2][1] - domain[2][0]
lengths = np.array([L1, L2, L3])
periodic = periodicity[0] or periodicity[1] or periodicity[2]
n = points.shape[0]
centers = np.zeros((n, 3))
r = np.zeros(n)
c = np.zeros(4)
c[3] = -1
if periodic:
bounds = [(domain[0][0] - L1, domain[0][1] + L1), (domain[1][0] - L2, domain[1][1] + L2),
(domain[2][0] - L3, domain[2][1] + L3), (0, None)]
else:
bounds = [(domain[0][0], domain[0][1]), (domain[1][0], domain[1][1]), (domain[2][0], domain[2][1]), (0, None)]
for idx, cell in enumerate(laguerre):
k = len(cell["faces"])
A = np.zeros((k, 4))
b = np.zeros(k)
for face_idx, face in enumerate(cell["faces"]):
face_vertices = np.array(cell['vertices'])
from cgitb import grey
from matplotlib.pyplot import axis, close, hot
import numpy as np
from numpy.random import default_rng
import random
import copy
import math
class ImageManipulator:
def __init__(self):
self._rng = default_rng(seed=42)
def salt_pepper_noise(self, gray_img, ratio):
noise = self._rng.choice(
[-1, 0, 255], size=gray_img.shape, p=[1 - ratio, ratio / 2, ratio / 2]
)
np.copyto(noise, gray_img, where=noise == -1)
gray_img = noise.astype(np.uint8)
return gray_img
def gaussian_noise(self, gray_img, mean, std):
noise = self._rng.normal(loc=mean, scale=std, size=gray_img.shape)
gray_img = gray_img + noise
gray_img = np.clip(gray_img, 0, 255)
gray_img = np.rint(gray_img)
gray_img = gray_img.astype(np.uint8)
return gray_img
def calc_histogram(self, gray_img):
hist = np.zeros(256)
for i in range(len(hist)):
hist[i] = np.sum(gray_img == i)
hist = hist.astype(np.uint)
return hist
def avg_histograms(self, hist_list):
hist_arr = np.array(hist_list)
hist = np.mean(hist_arr, axis=0)
hist = np.rint(hist)
hist = hist.astype(np.uint)  # keep full integer counts; uint8 would overflow typical bin counts
return hist
def hist_equalization(self, gray_img, hist=None):
if hist is None:
hist = self.calc_histogram(gray_img)
bins = range(len(hist))
hist = hist / np.sum(hist)
cs = np.cumsum(hist)
cs = (len(hist) - 1) * cs / cs[-1]
gray_img = np.interp(gray_img, bins, cs)
gray_img = np.rint(gray_img)
gray_img = gray_img.astype(np.uint8)
return gray_img
def quantize_image(self, gray_img, thresholds):
t = np.array(thresholds)
if t[0] != 0:
t = np.insert(t, 0, 0)
if t[-1] != 256:
t = np.append(t, 256)
P = len(t) - 1
r = np.zeros(P)
for i in range(len(r)):
r[i] = (t[i] + t[i + 1]) / 2
Q = np.zeros(256)
x = np.array(range(256))
for i in range(P):
Q = Q + r[i] * ((x >= t[i]) & (x < t[i + 1]))
B = Q[gray_img]
gray_img = np.rint(B)
gray_img = gray_img.astype(np.uint8)
return gray_img
def linear_filter(self, gray_img, filter, scale, dtype=np.uint8):
filter = np.array(filter)
f_w, f_h = filter.shape
i_w, i_h = gray_img.shape
o_w = i_w - f_w + 1
o_h = i_h - f_h + 1
new_img = np.zeros((o_w, o_h))
for i, j in np.ndindex(new_img.shape):
result = np.sum(filter * gray_img[i : i + f_w, j : j + f_h])
scaled_result = result / scale
new_img[i, j] = scaled_result
new_img = np.rint(new_img)
new_img = new_img.astype(dtype)
return new_img
def median_filter(self, gray_img, weights):
weights = np.array(weights)
f_w, f_h = weights.shape
i_w, i_h = gray_img.shape
o_w = i_w - f_w + 1
o_h = i_h - f_h + 1
new_img = np.zeros((o_w, o_h))
for i, j in np.ndindex(new_img.shape):
pixel_list = np.array([])
for k, l in np.ndindex(f_w, f_h):
pixel_list = np.append(
pixel_list,
[gray_img[i : i + f_w, j : j + f_h][k, l]] * weights[k, l],
)
result = np.median(pixel_list)
new_img[i, j] = result
new_img = np.rint(new_img)
new_img = new_img.astype(np.uint8)
return new_img
def edge_detect(self, gray_img, method="prewitt"):
if method == "prewitt":
filter_x = [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]
filter_y = [[-1, -1, -1], [0, 0, 0], [1, 1, 1]]
scale = 6
dx = self.linear_filter(gray_img, filter_x, scale=scale, dtype=np.int16)
dy = self.linear_filter(gray_img, filter_y, scale=scale, dtype=np.int16)
mag = np.sqrt((dx * dx) + (dy * dy))
dir = np.arctan2(dy, dx)
dir = np.mod(dir, np.pi * 2) # normalize to 0-2pi scale
elif method == "sobel":
filter_x = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
filter_y = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]
scale = 8
dx = self.linear_filter(gray_img, filter_x, scale=scale, dtype=np.int16)
dy = self.linear_filter(gray_img, filter_y, scale=scale, dtype=np.int16)
mag = np.sqrt((dx * dx) + (dy * dy))
dir = np.arctan2(dy, dx)
dir = np.mod(dir, np.pi * 2) # normalize to 0-2pi scale
elif method == "compass":
filter_0 = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
filter_1 = [[-2, -1, 0], [-1, 0, 1], [0, 1, 2]]
filter_2 = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]
filter_3 = [[0, -1, -2], [1, 0, -1], [2, 1, 0]]
scale = 8
d0 = self.linear_filter(gray_img, filter_0, scale=scale, dtype=np.int16)
d1 = self.linear_filter(gray_img, filter_1, scale=scale, dtype=np.int16)
d2 = self.linear_filter(gray_img, filter_2, scale=scale, dtype=np.int16)
d3 = self.linear_filter(gray_img, filter_3, scale=scale, dtype=np.int16)
d4 = -d0
d5 = -d1
d6 = -d2
d7 = -d3
dstack = np.array([d0, d1, d2, d3, d4, d5, d6, d7])
mag = np.amax(dstack, axis=0)
dir = np.argmax(dstack, axis=0)
dir = (dir * np.pi) / 4 # normalize to 0-2pi scale
dir = (dir * 255) / (np.pi * 2) # re-normalize to 0-255 scale for easy viewing
mag = mag * 10 # scaling factor so that the magnitude image is actually visible
mag = np.rint(mag)
mag = mag.astype(np.uint8)
dir = np.rint(dir)
dir = dir.astype(np.uint8)
return mag, dir
def _translate(self, bin_img, trans_x, trans_y):
if trans_x > 0:
bin_img = np.pad(bin_img, ((0, 0), (trans_x, 0)), mode="constant")[
:, :-trans_x
]
elif trans_x < 0:
trans_x = -trans_x
bin_img = np.pad(bin_img, ((0, 0), (0, trans_x)), mode="constant")[
:, trans_x:
]
if trans_y > 0:
bin_img = np.pad(bin_img, ((trans_y, 0), (0, 0)), mode="constant")[
:-trans_y, :
]
elif trans_y < 0:
trans_y = -trans_y
bin_img = np.pad(bin_img, ((0, trans_y), (0, 0)), mode="constant")[
trans_y:, :
]
return bin_img
def dilation(self, bin_img, strel, hot_x, hot_y):
strel = np.array(strel)
dil_img = np.zeros(bin_img.shape)
for i, j in np.ndindex(strel.shape):
if strel[i, j] > 0:
trans_x = j - hot_x
trans_y = i - hot_y
trans_img = self._translate(bin_img, trans_x, trans_y)
dil_img = np.logical_or(trans_img > 0, dil_img > 0)
# -*- coding: utf-8 -*-
"""
Created on 2017-5-5
@author: cheng.li
"""
import numpy as np
from typing import Tuple
from typing import Union
from alphamind.exceptions.exceptions import PortfolioBuilderException
from alphamind.cython.optimizers import LPOptimizer
def linear_builder(er: np.ndarray,
lbound: Union[np.ndarray, float],
ubound: Union[np.ndarray, float],
risk_constraints: np.ndarray,
risk_target: Tuple[np.ndarray, np.ndarray],
turn_over_target: float = None,
current_position: np.ndarray = None,
method: str='ecos') -> Tuple[str, np.ndarray, np.ndarray]:
er = er.flatten()
n, m = risk_constraints.shape
if not risk_target:
risk_lbound = -np.inf * np.ones((m, 1))
risk_ubound = np.inf * np.ones((m, 1))
else:
risk_lbound = risk_target[0].reshape((-1, 1))
risk_ubound = risk_target[1].reshape((-1, 1))
if isinstance(lbound, float):
lbound = np.ones(n) * lbound
if isinstance(ubound, float):
ubound = np.ones(n) * ubound
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import gintermediates as imd
########################################
# EOM-IP-CCSD
########################################
def vector_to_amplitudes_ip(vector, nmo, nocc):
nvir = nmo - nocc
r1 = vector[:nocc].copy()
r2 = np.zeros((nocc,nocc,nvir), dtype=vector.dtype)
idx, idy = np.tril_indices(nocc, -1)
r2[idx,idy] = vector[nocc:].reshape(nocc*(nocc-1)//2,nvir)
r2[idy,idx] =-vector[nocc:].reshape(nocc*(nocc-1)//2,nvir)
return r1, r2
def amplitudes_to_vector_ip(r1, r2):
nocc = r1.size
return np.hstack((r1, r2[np.tril_indices(nocc, -1)].ravel()))
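# Minimal self-check sketch (not part of the original module): the packing above stores r1
# (nocc values) followed by the strictly lower-triangular part of the antisymmetric r2, so a
# round trip through the two helpers reproduces the input.
def _ip_pack_roundtrip_demo(nocc=3, nvir=2):
    nmo = nocc + nvir
    rng = np.random.RandomState(1)
    r1 = rng.rand(nocc)
    r2 = rng.rand(nocc, nocc, nvir)
    r2 = r2 - r2.transpose(1, 0, 2)          # enforce antisymmetry in the occupied indices
    vec = amplitudes_to_vector_ip(r1, r2)
    r1_back, r2_back = vector_to_amplitudes_ip(vec, nmo, nocc)
    return np.allclose(r1, r1_back) and np.allclose(r2, r2_back)   # True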
def ipccsd_matvec(eom, vector, imds=None, diag=None):
# Ref: Tu, Wang, and Li, J. Chem. Phys. 136, 174102 (2012) Eqs.(8)-(9)
if imds is None: imds = eom.make_imds()
nocc = eom.nocc
nmo = eom.nmo
r1, r2 = vector_to_amplitudes_ip(vector, nmo, nocc)
# Eq. (8)
Hr1 = -np.einsum('mi,m->i', imds.Foo, r1)
Hr1 += np.einsum('me,mie->i', imds.Fov, r2)
Hr1 += -0.5*np.einsum('nmie,mne->i', imds.Wooov, r2)
# Eq. (9)
Hr2 = lib.einsum('ae,ije->ija', imds.Fvv, r2)
tmp1 = lib.einsum('mi,mja->ija', imds.Foo, r2)
Hr2 -= tmp1 - tmp1.transpose(1,0,2)
Hr2 -= np.einsum('maji,m->ija', imds.Wovoo, r1)
Hr2 += 0.5*lib.einsum('mnij,mna->ija', imds.Woooo, r2)
tmp2 = lib.einsum('maei,mje->ija', imds.Wovvo, r2)
Hr2 += tmp2 - tmp2.transpose(1,0,2)
Hr2 += 0.5*lib.einsum('mnef,mnf,ijae->ija', imds.Woovv, r2, imds.t2)
vector = amplitudes_to_vector_ip(Hr1, Hr2)
return vector
def ipccsd_diag(eom, imds=None):
if imds is None: imds = eom.make_imds()
t1, t2 = imds.t1, imds.t2
nocc, nvir = t1.shape
Hr1 = -np.diag(imds.Foo)
Hr2 = np.zeros((nocc,nocc,nvir), dtype=t1.dtype)
for i in range(nocc):
for j in range(nocc):
for a in range(nvir):
Hr2[i,j,a] += imds.Fvv[a,a]
Hr2[i,j,a] += -imds.Foo[i,i]
Hr2[i,j,a] += -imds.Foo[j,j]
Hr2[i,j,a] += 0.5*(imds.Woooo[i,j,i,j]-imds.Woooo[j,i,i,j])
Hr2[i,j,a] += imds.Wovvo[i,a,a,i]
Hr2[i,j,a] += imds.Wovvo[j,a,a,j]
Hr2[i,j,a] += 0.5*(np.dot(imds.Woovv[i,j,:,a], t2[i,j,a,:])
-np.dot(imds.Woovv[j,i,:,a], t2[i,j,a,:]))
vector = amplitudes_to_vector_ip(Hr1, Hr2)
return vector
class EOMIP(eom_rccsd.EOMIP):
matvec = ipccsd_matvec
l_matvec = None
get_diag = ipccsd_diag
ipccsd_star = None
def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return vector_to_amplitudes_ip(vector, nmo, nocc)
def amplitudes_to_vector(self, r1, r2):
return amplitudes_to_vector_ip(r1, r2)
def vector_size(self):
nocc = self.nocc
nvir = self.nmo - nocc
return nocc + nocc*(nocc-1)/2*nvir
def make_imds(self, eris=None):
imds = _IMDS(self._cc, eris)
imds.make_ip()
return imds
########################################
# EOM-EA-CCSD
########################################
def vector_to_amplitudes_ea(vector, nmo, nocc):
nvir = nmo - nocc
r1 = vector[:nvir].copy()
r2 = np.zeros((nocc,nvir,nvir), vector.dtype)
idx, idy = np.tril_indices(nvir, -1)
r2[:,idx,idy] = vector[nvir:].reshape(nocc,-1)
r2[:,idy,idx] =-vector[nvir:].reshape(nocc,-1)
return r1, r2
def amplitudes_to_vector_ea(r1, r2):
nvir = r1.size
idx, idy = np.tril_indices(nvir, -1)
return np.hstack((r1, r2[:,idx,idy].ravel()))
def eaccsd_matvec(eom, vector, imds=None, diag=None):
# Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1995) Eqs.(30)-(31)
if imds is None: imds = eom.make_imds()
nocc = eom.nocc
nmo = eom.nmo
r1, r2 = vector_to_amplitudes_ea(vector, nmo, nocc)
# Eq. (30)
Hr1 = np.einsum('ac,c->a', imds.Fvv, r1)
Hr1 += np.einsum('ld,lad->a', imds.Fov, r2)
Hr1 += 0.5*np.einsum('alcd,lcd->a', imds.Wvovv, r2)
# Eq. (31)
Hr2 = np.einsum('abcj,c->jab', imds.Wvvvo, r1)
tmp1 = lib.einsum('ac,jcb->jab', imds.Fvv, r2)
Hr2 += tmp1 - tmp1.transpose(0,2,1)
Hr2 -= lib.einsum('lj,lab->jab', imds.Foo, r2)
tmp2 = lib.einsum('lbdj,lad->jab', imds.Wovvo, r2)
Hr2 += tmp2 - tmp2.transpose(0,2,1)
Hr2 += 0.5*lib.einsum('abcd,jcd->jab', imds.Wvvvv, r2)
Hr2 -= 0.5*lib.einsum('klcd,lcd,kjab->jab', imds.Woovv, r2, imds.t2)
vector = amplitudes_to_vector_ea(Hr1, Hr2)
return vector
def eaccsd_diag(eom, imds=None):
if imds is None: imds = eom.make_imds()
t1, t2 = imds.t1, imds.t2
nocc, nvir = t1.shape
Hr1 = np.diag(imds.Fvv)
Hr2 = np.zeros((nocc,nvir,nvir), dtype=t1.dtype)
"""
Methods for interpolating data from structured data sets on Thetis fields.
Simple example of an atmospheric pressure interpolator:
.. code-block:: python
def to_latlon(x, y, positive_lon=False):
# Converts mesh (x,y) points to coordinates used in the atm data
lon, lat = coordsys_spcs.spcs2lonlat(x, y)
if positive_lon and lon < 0.0:
lon += 360.
return lat, lon
class WRFInterpolator(object):
# Interpolates WRF atmospheric model data on 2D fields
def __init__(self, function_space, atm_pressure_field, ncfile_pattern, init_date):
self.atm_pressure_field = atm_pressure_field
# object that interpolates forcing data from structured grid on the local mesh
self.grid_interpolator = NetCDFLatLonInterpolator2d(function_space, to_latlon)
# reader object that can read fields from netCDF files, applies spatial interpolation
self.reader = NetCDFSpatialInterpolator(self.grid_interpolator, ['prmsl'])
# object that can find previous/next time stamps in a collection of netCDF files
self.timesearch_obj = NetCDFTimeSearch(ncfile_pattern, init_date, NetCDFTimeParser)
# finally a linear intepolator class that performs linar interpolation in time
self.interpolator = LinearTimeInterpolator(self.timesearch_obj, self.reader)
def set_fields(self, time):
# Evaluates forcing fields at the given time
pressure = self.interpolator(time)
self.atm_pressure_field.dat.data_with_halos[:] = pressure
Usage:
.. code-block:: python
atm_pressure_2d = Function(solver_obj.function_spaces.P1_2d, name='atm pressure')
wrf_pattern = 'forcings/atm/wrf/wrf_air.2016_*_*.nc'
wrf_atm = WRFInterpolator(
solver_obj.function_spaces.P1_2d,
wind_stress_2d, atm_pressure_2d, wrf_pattern, init_date)
simulation_time = 3600.
wrf_atm.set_fields(simulation_time)
"""
import glob
import os
from .timezone import *
from .log import *
import numpy as np
import scipy.spatial.qhull as qhull
import netCDF4
from abc import ABCMeta, abstractmethod
from firedrake import *
import re
import string
TIMESEARCH_TOL = 1e-6
class GridInterpolator(object):
"""
A reuseable griddata interpolator object.
Usage:
.. code-block:: python
interpolator = GridInterpolator(source_xyz, target_xyz)
vals = interpolator(source_data)
Example:
.. code-block:: python
x0 = np.linspace(0, 10, 10)
y0 = np.linspace(5, 10, 10)
X, Y = np.meshgrid(x0, y0)
x = X.ravel(); y = Y.ravel()
data = x + 25.*y
x_target = np.linspace(1, 10, 20)
y_target = np.linspace(5, 10, 20)
interpolator = GridInterpolator(np.vstack((x, y)).T, np.vstack((x_target, y_target)).T)
vals = interpolator(data)
Based on
http://stackoverflow.com/questions/20915502/speedup-scipy-griddata-for-multiple-interpolations-between-two-irregular-grids
"""
def __init__(self, grid_xyz, target_xyz, fill_mode=None, fill_value=np.nan,
normalize=False, dont_raise=False):
"""
:arg grid_xyz: Array of source grid coordinates, shape (npoints, 2) or
(npoints, 3)
:arg target_xyz: Array of target grid coordinates, shape (n, 2) or
(n, 3)
:kwarg fill_mode: Determines how points outside the source grid will be
treated. If 'nearest', value of the nearest source point will be
used. Otherwise a constant fill value will be used (default).
:kwarg float fill_value: Set the fill value (default: NaN)
:kwarg bool normalize: If true the data is scaled to unit cube before
interpolation. Default: False.
:kwarg bool dont_raise: Do not raise a Qhull error if triangulation
fails. In this case the data will be set to fill value or nearest
neighbor value.
"""
self.fill_value = fill_value
self.fill_mode = fill_mode
self.normalize = normalize
self.fill_nearest = self.fill_mode == 'nearest'
self.shape = (target_xyz.shape[0], )
ngrid_points = grid_xyz.shape[0]
if self.fill_nearest:
assert ngrid_points > 0, 'at least one source point is needed'
if self.normalize:
def get_norm_params(x, scale=None):
min = x.min()
max = x.max()
if scale is None:
scale = max - min
a = 1./scale
b = -min*a
return a, b
ax, bx = get_norm_params(target_xyz[:, 0])
ay, by = get_norm_params(target_xyz[:, 1])
az, bz = get_norm_params(target_xyz[:, 2])
self.norm_a = np.array([ax, ay, az])
self.norm_b = np.array([bx, by, bz])
ngrid_xyz = self.norm_a*grid_xyz + self.norm_b
ntarget_xyz = self.norm_a*target_xyz + self.norm_b
else:
ngrid_xyz = grid_xyz
ntarget_xyz = target_xyz
self.cannot_interpolate = False
try:
d = ngrid_xyz.shape[1]
tri = qhull.Delaunay(ngrid_xyz)
# NOTE this becomes expensive in 3D for npoints > 10k
simplex = tri.find_simplex(ntarget_xyz)
vertices = np.take(tri.simplices, simplex, axis=0)
temp = np.take(tri.transform, simplex, axis=0)
import math
import cv2
import numpy as np
import pandas as pd
##################################################################
## ##
## create a 3d skeleton video in a blank background ##
## ##
##################################################################
# {0, "Nose"}
# {1, "Neck"},
# {2, "RShoulder"},
# {3, "RElbow"},
# {4, "RWrist"},
# {5, "LShoulder"},
# {6, "LElbow"},
# {7, "LWrist"},
# {8, "REye"},
# {9, "LEye"},
# {10, "MidHip"},
PARTS = [
"Nose",
"Neck",
"RShoulder",
"RElbow",
"RWrist",
"LShoulder",
"LElbow",
"LWrist",
"REye",
"LEye",
"MidHip",
]
SKELETON_EDGES = np.array(
[
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[1, 5],
[5, 6],
[6, 7],
[1, 10],
[8, 0],
[9, 0],
]
)
# theta, phi = 3.1415/4, -3.1415/6
theta, phi = -0.3, 0.24
should_rotate = False
scale_dx = 800
scale_dy = 800
# plot 3d skeleton
class Plotter3d:
def __init__(
self,
canvas_size,
origin=(0.5, 0.5),
scale=1,
parts=PARTS,
skeleton_edges=SKELETON_EDGES,
):
self.origin = np.array(
[origin[1] * canvas_size[1], origin[0] * canvas_size[0]],
dtype=np.float32,
) # x, y
self.scale = np.float32(scale)
self.theta = 0
self.phi = 0
self.parts = parts
self.skeleton_edges = skeleton_edges
axis_length = 200
axes = [
np.array(
[
[-axis_length / 2, -axis_length / 2, 0],
[axis_length / 2, -axis_length / 2, 0],
],
dtype=np.float32,
),
np.array(
[
[-axis_length / 2, -axis_length / 2, 0],
[-axis_length / 2, axis_length / 2, 0],
],
dtype=np.float32,
),
np.array(
[
[-axis_length / 2, -axis_length / 2, 0],
[-axis_length / 2, -axis_length / 2, axis_length],
],
dtype=np.float32,
),
]
step = 20
for step_id in range(axis_length // step + 1): # add grid
axes.append(
np.array(
[
[
-axis_length / 2,
-axis_length / 2 + step_id * step,
0,
],
[
axis_length / 2,
-axis_length / 2 + step_id * step,
0,
],
],
dtype=np.float32,
)
)
axes.append(
np.array(
[
[
-axis_length / 2 + step_id * step,
-axis_length / 2,
0,
],
[
-axis_length / 2 + step_id * step,
axis_length / 2,
0,
],
],
dtype=np.float32,
)
)
self.axes = np.array(axes)
def plot(self, img, vertices, edges):
global theta, phi
img.fill(0)
R = self._get_rotation(theta, phi)
self._draw_axes(img, R)
if len(edges) != 0:
self._plot_edges(img, vertices, edges, R)
def _draw_axes(self, img, R):
axes_2d = np.dot(self.axes, R)
axes_2d = axes_2d * self.scale + self.origin
for axe in axes_2d:
axe = axe.astype(int)
cv2.line(
img,
tuple(axe[0]),
tuple(axe[1]),
(128, 128, 128),
1,
cv2.LINE_AA,
)
def _plot_edges(self, img, vertices, edges, R):
vertices_2d = np.dot(vertices, R)
edges_vertices = (
vertices_2d.reshape((-1, 2))[edges] * self.scale + self.origin
)
for edge_vertices in edges_vertices:
edge_vertices = edge_vertices.astype(int)
cv2.line(
img,
tuple(edge_vertices[0]),
tuple(edge_vertices[1]),
(255, 255, 255),
1,
cv2.LINE_AA,
)
def _get_rotation(self, theta, phi):
sin, cos = math.sin, math.cos
return np.array(
[
[cos(theta), sin(theta) * sin(phi)],
[-sin(theta), cos(theta) * sin(phi)],
[0, -cos(phi)],
],
dtype=np.float32,
) # transposed
@staticmethod
def mouse_callback(event, x, y, flags, params):
global previous_position, theta, phi, should_rotate, scale_dx, scale_dy
if event == cv2.EVENT_LBUTTONDOWN:
previous_position = [x, y]
should_rotate = True
if event == cv2.EVENT_MOUSEMOVE and should_rotate:
theta += (x - previous_position[0]) / scale_dx * 6.2831 # 360 deg
phi -= (
(y - previous_position[1]) / scale_dy * 6.2831 * 2
) # 360 deg
phi = max(min(3.1415 / 2, phi), -3.1415 / 2)
previous_position = [x, y]
if event == cv2.EVENT_LBUTTONUP:
should_rotate = False
# read skeleton data from the csv
def read_csv(filename):
"""
Parameters
----------
filename : str
Path to the CSV file.
Returns
-------
df_new : dataframe
Normalised coordinates of 3D pose.
"""
dataframe = pd.read_csv(filename, index_col="Body Part")
# find the bbox of the player for crop
xmax = -10000
ymax = -10000
zmax = -10000
xmin = 10000
ymin = 10000
zmin = 10000
# find the max/min value for each axis
for key in dataframe.keys():
data = list(dataframe[key][1:])
data = list(map(float, data))
data_num = np.array(data)
data_num = data_num[np.where(~np.isnan(data_num))]
keys = key.split(".")
if len(keys) == 1:
key_new = (keys[0], "x")
xmax = max(xmax, np.max(data_num))
xmin = min(xmin, np.min(data_num))
elif len(keys) == 2 and keys[1] == "1":
key_new = (keys[0], "y")
ymax = max(ymax, np.max(data_num))
ymin = min(ymin, np.min(data_num))
elif len(keys) == 2 and keys[1] == "2":
key_new = (keys[0], "y")
zmax = max(zmax, np.max(data_num))
zmin = min(zmin, np.min(data_num))
if key == "MidHip":
data_midhip = data_num
if key == "Neck":
data_neck = data_num
# determine the center of x, y, z
xc = (np.mean(data_neck) + np.mean(data_midhip)) / 2
yc = (ymax + ymin) / 2
zc = (zmax + zmin) / 2
# determine the width, height and depth
width = 2 * max(xc - xmin, xmax - xc)
height = ymax - ymin
depth = zmax - zmin
# select a cubic bounding box
sq = max(width, height)
sq = max(sq, depth)
sq = np.ceil(sq / 100) * 100
depth = width = height = sq
xmin = xc - sq / 2
ymin = yc - sq / 2
zmin = zc - sq / 2
# normalise the absolute coordinates with length of the cubic
df = dict()
for key in dataframe.keys():
data = list(dataframe[key][1:])
data = list(map(float, data))
nan_idx = np.where(np.isnan(data))[0]
if len(nan_idx) == len(data):
data[:] = 0
elif len(nan_idx) > 0:
for jj in nan_idx:
if jj == 0:
data[jj] = data[np.where(~np.isnan(data))[0][0]]  # fill a leading NaN with the first valid value
else:
data[jj] = data[jj - 1]
keys = key.split(".")
if len(keys) == 1:
key_new = (keys[0], "x")
data = np.round(list((np.array(data) - xmin) / (width)), 5)
elif len(keys) == 2 and keys[1] == "1":
key_new = (keys[0], "y")
data = np.round(list((np.array(data) - ymin) / (height)), 5)
'''
@Authors: <NAME>, <NAME>, <NAME>, <NAME>
@Purpose: Explore 6DOF rocket trajectory, especially quaternion rotation
Learning resources: https://eater.net/quaternions
'''
import numpy as np
import oyaml as yaml
import math
class Rotator:
def __init__(self):
self.re = 0; self.i = 0; self.j = 0; self.k = 1
self.body_vector = np.array([[0],[1],[0],[0]])
'''
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations
This function should take inputs: 'Cartesian, unit rotation-axis (Vector),
Rotation Angle in radians (Theta) and form a quaternion vector
'''
def form_quaternion(self, vector, theta):
assert self.vector_is_unit(vector), 'Class: Rotator, Fxn: form_quaternion, vector is not a unit quaternion'
r = np.cos(theta/2)
i = -1*np.sin(theta/2)*vector[0]
j = -1*np.sin(theta/2)*vector[1]
import copy
import argparse
import os
import datetime
from collections import deque
import random
import numpy as np
import matplotlib.pyplot as plt
import time
from multiprocessing import Pool
from env import Env
class QLearning:
def __init__(self,
action_space,
eps,
decay,
alpha,
min_eps=0,
qtables=None,
verbose=1,):
self.i = 0
self.verbose = verbose
self.n_agents = action_space.shape[0]
self.high = action_space.high
self.low = action_space.low
self.max = self.high.max()
if qtables is None:
self.qtables = []
for a in range(len(self.high)):
n = self.high[a] - self.low[a] + 1
qtable = np.zeros(n)
self.qtables.append(qtable)
else:
self.qtables = copy.deepcopy(qtables)
self.eps = eps
self.min_eps = min_eps
self.decay = decay
self.alpha = alpha
def act(self):
actions = np.zeros(self.n_agents, dtype=int)
random_actions = np.random.rand(self.n_agents) * (self.high + 1)
random_actions = np.floor(random_actions)
probs = np.random.rand(self.n_agents)
rand_mask = probs < self.eps
actions[rand_mask] = random_actions[rand_mask]
play_idx = np.where(~rand_mask)[0]
actions[play_idx] = [self.qtables[i].argmax() for i in play_idx]
return list(actions)
def update(self, actions, rewards):
self.eps = max(self.eps * self.decay, self.min_eps)
for a, qtable in enumerate(self.qtables):
action = actions[a]
reward = rewards[a]
qtable[action] = qtable[action] + self.alpha * (reward - qtable[action])
if self.verbose:
self.i += 1
if self.i == 500:
self.i = 0
print('\nEps:', self.eps)
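# Usage sketch (illustrative only; the Env constructor arguments and the return contract of
# env.step() are assumptions based on the CLI options below, not taken from this file):
#   env = Env(args.netfile, k=args.k, s=args.s)
#   agent = QLearning(env.action_space, eps=args.eps, decay=args.decay, alpha=args.alpha)
#   actions = agent.act()
#   rewards = env.step(actions)
#   agent.update(actions, rewards)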
def init_qtables(env):
qtables = []
for agent in env.all_agents:
qtable = np.zeros(len(env.choices[agent]))
for i, route in enumerate(env.choices[agent]):
qtable[i] = 1 / len(route)
qtables.append(qtable)
return qtables
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Environment Params
parser.add_argument('netfile', help='File containing the network')
parser.add_argument('--s', help='Max number of steps allowed from an origin to a destination node.', default=2, type=int)
parser.add_argument('--k', help='Max number of shortest paths from an origin to a destination node.', default=2, type=int)
# Agent Params
parser.add_argument('--eps', help='Starting epsilon for QLearning.', default=.05, type=float)
parser.add_argument('--decay', help='Epsilon decay rate.', default=1, type=float)
parser.add_argument('--alpha', help='Learning rate alpha value of QLearning.', default=0.5, type=float)
# Simulation Params
parser.add_argument('--episodes', help='Number of episodes for a run of QLearning.', default=1000,type=int)
parser.add_argument('--runs', help='Number of runs for QLearning.', default=1, type=int)
parser.add_argument('--outdir', help='Output dir for the plot.', default='./figs/')
return parser.parse_args()
def run_ql(env, n_runs, episodes, ql_params):
all_avg_rewards = np.zeros((n_runs, episodes))
# Version 3.1; <NAME>; Polar Geospatial Center, University of Minnesota; 2019
# Translated from MATLAB code written by <NAME>, Ohio State University, 2018
from __future__ import division
import os
import sys
import traceback
import warnings
import ogr
import numpy as np
import scipy.stats
from scipy import interpolate
if sys.version_info[0] < 3:
import raster_array_tools as rat
from filter_scene import getDataDensityMap, readSceneMeta, rescaleDN
else:
from lib import raster_array_tools as rat
from lib.filter_scene import getDataDensityMap, readSceneMeta, rescaleDN
# The spatial reference of the strip, set at the beginning of scenes2strips()
# to the spatial reference of the first scene DEM in order and used for
# comparison to the spatial references of all other source raster files.
__STRIP_SPAT_REF__ = None
# The Catalog ID of "Image 1" as parsed from the output scene metadata files for
# an intrack stereo SETSM DEM strip. It is expected that all ortho scenes in the
# intrack strip correspond to the same Catalog ID.
__INTRACK_ORTHO_CATID__ = None
HOLD_GUESS_OFF = 0
HOLD_GUESS_ALL = 1
HOLD_GUESS_UPDATE_RMSE = 2
class InvalidArgumentError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class SpatialRefError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class RasterDimensionError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class MetadataError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
def scenes2strips(demFiles,
maskSuffix=None, filter_options=(), max_coreg_rmse=1,
trans_guess=None, trans_err_guess=None, rmse_guess=None,
hold_guess=HOLD_GUESS_OFF, check_guess=True,
use_second_ortho=False):
"""
From MATLAB version in Github repo 'setsm_postprocessing', 3.0 branch:
function [X,Y,Z,M,O,trans,rmse,f]=scenes2strips(varargin)
%SCENES2STRIPS merge scenes into strips
%
% [x,y,z,m,o,trans,rmse,f]=scenes2strips(demdir,f) merges the
% scene geotiffs listed in cellstr f within directory demdir after
% ordering them by position. If a break in coverage is detected between
% scene n and n+1 only the first 1:n scenes will be merged. The data are
% coregistered at overlaps using iterative least squares, starting with
% scene n=1.
% Outputs are the strip grid coorinates x,y and strip elevation, z,
% matchtag, m and orthoimage, o. The 3D translations are given in 3xn
% vector trans, along with root-mean-squared of residuals, rmse. The
% output f gives the list of filenames in the mosaic. If a break is
% detected, the list of output files will be less than the input.
%
% [...]=scenes2strips(...,'maskFileSuffix',value) will apply the mask
% identified as the dem filename with the _dem.tif replaced by
% _maskSuffix
% [...]=scenes2strips(...,'max_coreg_rmse',value) will set a new maximum
% coregistration error limit in meters (default=1). Errors above this
% limit will result in a segment break.
%
% Version 3.1, <NAME>, Ohio State University, 2015.
If maskFileSuffix='edgemask', edge and data masks identified as the DEM
filename with the _dem.tif replaced by _edgemask.tif and _datamask.tif,
respectively, will be applied.
"""
from batch_scenes2strips import getDemSuffix, selectBestMatchtag, selectBestOrtho, selectBestOrtho2
demSuffix = getDemSuffix(demFiles[0])
cluster_min_px = 1000 # Minimum data cluster area for 2m.
add_min_px = 50000 # Minimum number of unmasked pixels scene must add to existing segment to not be skipped.
# Order scenes in north-south or east-west direction by aspect ratio.
num_scenes = len(demFiles)
if trans_guess is None and trans_err_guess is None and rmse_guess is None:
print("Ordering {} scenes".format(num_scenes))
demFiles_ordered = orderPairs(demFiles)
elif trans_err_guess is not None and trans_guess is None:
raise InvalidArgumentError("`trans_guess_err` argument can only be used in conjunction "
"with `trans_guess` argument")
elif trans_guess is not None and trans_guess.shape[1] != num_scenes:
raise InvalidArgumentError("`trans_guess` array must be of shape (3, N) where N is the number "
"of scenes in `demFiles`, but was {}".format(trans_guess.shape))
elif rmse_guess is not None and rmse_guess.shape[1] != num_scenes:
raise InvalidArgumentError("`rmse_guess` array must be of shape (1, N) where N is the number "
"of scenes in `demFiles`, but was {}".format(rmse_guess.shape))
else:
# Files should already be properly ordered if a guess is provided.
# Running `orderPairs` on them could detrimentally change their order.
demFiles_ordered = list(demFiles)
num_scenes = len(demFiles_ordered)
# Initialize output stats.
trans = np.zeros((3, num_scenes))
trans_err = trans.copy()
rmse = np.zeros((1, num_scenes))
if check_guess:
trans_check = np.copy(trans)
trans_err_check = np.copy(trans_err)
rmse_check = np.copy(rmse)
# Get projection reference of the first scene to be used in equality checks
# with the projection reference of all scenes that follow.
global __STRIP_SPAT_REF__
__STRIP_SPAT_REF__ = rat.extractRasterData(demFiles_ordered[0], 'spat_ref')
if __STRIP_SPAT_REF__.ExportToProj4() == '':
raise SpatialRefError("DEM '{}' spatial reference ({}) has no PROJ4 representation "
"and is likely erroneous".format(demFiles_ordered[0], __STRIP_SPAT_REF__.ExportToWkt()))
# File loop.
skipped_scene = False
segment_break = False
for i in range(num_scenes+1):
if skipped_scene:
skipped_scene = False
trans[:, i-1] = np.nan
trans_err[:, i-1] = np.nan
rmse[0, i-1] = np.nan
if i >= num_scenes:
break
if ( (trans_guess is not None and np.any(np.isnan(trans_guess[:, i])))
or (trans_err_guess is not None and np.any(np.isnan(trans_err_guess[:, i])))
or (rmse_guess is not None and np.isnan(rmse_guess[0, i]))):
# State of scene is somewhere between naturally redundant
# or redundant by masking, as classified by prior s2s run.
skipped_scene = True
continue
# Construct filenames.
demFile = demFiles_ordered[i]
matchFile = selectBestMatchtag(demFile)
orthoFile = selectBestOrtho(demFile)
ortho2File = selectBestOrtho2(demFile) if use_second_ortho else None
metaFile = demFile.replace(demSuffix, 'meta.txt')
if maskSuffix is None:
print("No mask applied")
maskFile = None
else:
maskFile = demFile.replace(demSuffix, maskSuffix)
if use_second_ortho and ortho2File is None:
raise InvalidArgumentError("`use_second_ortho=True`, but second ortho could not be found")
print("Scene {} of {}: {}".format(i+1, len(demFiles_ordered), demFile))
# try:
x, y, z, m, o, o2, md = loadData(demFile, matchFile, orthoFile, ortho2File, maskFile, metaFile)
# except:
# print("Data read error:")
# traceback.print_exc()
# print("...skipping")
# continue
# Apply masks.
x, y, z, m, o, o2, md = applyMasks(x, y, z, m, o, o2, md, filter_options, maskSuffix)
# Check for redundant scene.
if np.count_nonzero(~np.isnan(z)) <= add_min_px:
print("Not enough (unmasked) data, skipping")
skipped_scene = True
continue
dx = x[1] - x[0]
dy = y[1] - y[0]
# Fix grid so that x, y coordinates of
# pixels in overlapping scenes will match up.
if ((x[1] / dx) % 1 != 0) or ((y[1] / dy) % 1 != 0):
x, y, z, m, o, o2, md = regrid(x, y, z, m, o, o2, md)
# If this is the first scene in strip,
# set as strip and continue to next scene.
if 'X' not in vars():
X, Y, Z, M, O, O2, MD = x, y, z, m, o, o2, md
del x, y, z, m, o, o2, md
continue
# Pad new arrays to stabilize interpolation.
buff = int(10*dx + 1)
z = np.pad(z, buff, 'constant', constant_values=np.nan)
m = np.pad(m, buff, 'constant', constant_values=0)
o = np.pad(o, buff, 'constant', constant_values=0)
o2 = np.pad(o2, buff, 'constant', constant_values=0) if o2 is not None else None
md = np.pad(md, buff, 'constant', constant_values=1)
x = np.concatenate((x[0] - dx*np.arange(buff, 0, -1), x,
x[-1] + dx*np.arange(1, buff+1)))
y = np.concatenate((y[0] + dx*np.arange(buff, 0, -1), y,
y[-1] - dx*np.arange(1, buff+1)))
# Expand strip coverage to encompass new scene.
if x[0] < X[0]:
X1 = np.arange(x[0], X[0], dx)
X = np.concatenate((X1, X))
Z, M, O, O2, MD = expandCoverage(Z, M, O, O2, MD, X1, direction='left')
del X1
if x[-1] > X[-1]:
X1 = np.arange(X[-1]+dx, x[-1]+dx, dx)
X = np.concatenate((X, X1))
"""
This module contains routines for modeling cluster and source signals.
"""
import os
import sys
from pixell import enmap
import astropy
import astropy.wcs as enwcs
import astropy.io.fits as pyfits
import astropy.constants as constants
#from astropy.cosmology import FlatLambdaCDM
from astLib import *
from scipy import ndimage
from scipy import interpolate
from scipy import stats
import time
import astropy.table as atpy
import nemo
from . import maps
from . import catalogs
from . import photometry
from . import filters
from . import gnfw
from . import plotSettings
import numpy as np
import numpy.fft as fft
import math
import pylab as plt
import pickle
import operator
import pyximport; pyximport.install()
import nemoCython
import nemo
import glob
import shutil
import yaml
import warnings
#import IPython
np.random.seed()
#------------------------------------------------------------------------------------------------------------
# Global constants (we could move others here but then need to give chunky obvious names, not just e.g. h)
TCMB=2.72548
Mpc_in_cm=constants.pc.value*100*1e6
MSun_in_g=constants.M_sun.value*1000
# Default cosmology (e.g., for fitQ)
#fiducialCosmoModel=FlatLambdaCDM(H0 = 70.0, Om0 = 0.3, Ob0 = 0.05, Tcmb0 = TCMB)
# Default cosmology (e.g., for fitQ) - now based on CCL rather than astropy
Om0=0.3
Ob0=0.05
H0=70
sigma8=0.8
ns=0.95
transferFunction="boltzmann_camb"
on_rtd=os.environ.get('READTHEDOCS', None)
if on_rtd is None:
import pyccl as ccl
fiducialCosmoModel=ccl.Cosmology(Omega_c=Om0-Ob0, Omega_b=Ob0, h=0.01*H0, sigma8=sigma8, n_s=ns,
transfer_function=transferFunction)
# For CCL-based mass conversions
M200mDef=ccl.halos.MassDef(200, "matter", c_m_relation = 'Bhattacharya13')
M200cDef=ccl.halos.MassDef(200, "critical", c_m_relation = 'Bhattacharya13')
M500cDef=ccl.halos.MassDef(500, "critical")
else:
fiducialCosmoModel=None
M200mDef=None
M200cDef=None
M500cDef=None
#------------------------------------------------------------------------------------------------------------
class BeamProfile(object):
"""Describes the beam profile (i.e., the point spread function for some instrument in real space). This
can be either read from a white-space delimited text file (with the angle in degrees in the first column
and the response in the second column), or can be set directly using arrays.
Args:
beamFileName(:obj:`str`, optional): Path to text file containing a beam profile in the ACT format.
profile1d (:obj:`np.ndarray`, optional): One dimensional beam profile, with index 0 at the centre.
rDeg (:obj:`np.ndarray`, optional): Corresponding angular distance in degrees from the centre for
the beam profile.
Attributes:
profile1d (:obj:`np.ndarray`): One dimensional beam profile, with index 0 at the centre.
rDeg (:obj:`np.ndarray`): Corresponding angular distance in degrees from the centre for the
beam profile.
tck (:obj:`tuple`): Spline knots for interpolating the beam onto different angular binning
(in degrees), for use with :meth:`scipy.interpolate.splev`.
FWHMArcmin (float): Estimate of the beam FWHM in arcmin.
"""
def __init__(self, beamFileName = None, profile1d = None, rDeg = None):
if beamFileName is not None:
beamData=np.loadtxt(beamFileName).transpose()
self.profile1d=beamData[1]
self.rDeg=beamData[0]
else:
self.profile1d=profile1d
self.rDeg=rDeg
if self.profile1d is not None and self.rDeg is not None:
self.tck=interpolate.splrep(self.rDeg, self.profile1d)
# This is really just for sorting a list of beams by resolution
self.FWHMArcmin=self.rDeg[np.argmin(abs(self.profile1d-0.5))]*60*2
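# Usage sketch (illustrative only; the beam file name is a placeholder):
#   beam = BeamProfile("beam_f150.txt")              # two columns: angle (deg), response
#   rDeg = np.linspace(0, 0.1, 50)
#   response = interpolate.splev(rDeg, beam.tck)     # beam re-binned onto the new radii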
#------------------------------------------------------------------------------------------------------------
class QFit(object):
"""A class for managing the filter mismatch function, referred to as `Q` in the ACT papers from
`Hasselfield et al. (2013) <http://adsabs.harvard.edu/abs/2013JCAP...07..008H>`_ onwards.
Args:
QFitFileName (:obj:`str`): Path to a FITS-table format file as made by :meth:`fitQ`.
tileNames (:obj:`list`): If given, the Q-function will be defined only for these tiles (their names
must appear in the file specified by `QFitFileName`).
Attributes:
fitDict (:obj:`dict`): Dictionary of interpolation objects, indexed by `tileName`. You should not
need to access this directly - use :meth:`getQ` instead.
"""
def __init__(self, QFitFileName = None, tileNames = None):
self._zGrid=np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0, 1.2, 1.6, 2.0])
self._theta500ArcminGrid=np.logspace(np.log10(0.1), np.log10(55), 10)
self.zMin=(self._zGrid).min()
self.zMax=(self._zGrid).max()
self.zDependent=None
self.zDepThetaMax=None
self.fitDict={}
if QFitFileName is not None:
self.loadQ(QFitFileName, tileNames = tileNames)
def loadQ(self, source, tileNames = None):
"""Load the filter mismatch function Q (see `Hasselfield et al. 2013
<https://ui.adsabs.harvard.edu/abs/2013JCAP...07..008H/abstract>`_) as a dictionary of spline fits.
Args:
source (:obj:`nemo.startUp.NemoConfig` or str): Either the path to a .fits table (containing Q fits
for all tiles - this is normally ``selFn/QFit.fits``), or a :obj:`nemo.startUp.NemoConfig` object
(from which the path and tiles to use will be inferred).
tileNames (optional, list): A list of tiles for which the Q function spline fit coefficients
will be extracted. If source is a :obj:`nemo.startUp.NemoConfig` object, this should be set to
``None``.
Returns:
A dictionary (with tilNames as keys), containing spline knots for the Q function for each tile.
Q values can then be obtained by using these with :func:`scipy.interpolate.splev`.
"""
# Bit messy, but two modes here:
# - combined Q fit file for all tiles
# - single Q fit for a single tile (interim stage, when under nemo MPI run)
if type(source) == nemo.startUp.NemoConfig:
tileNames=source.tileNames
combinedQTabFileName=source.selFnDir+os.path.sep+"QFit.fits"
loadMode=None
if os.path.exists(combinedQTabFileName) == True:
tileNamesInFile=[]
with pyfits.open(combinedQTabFileName) as QTabFile:
for ext in QTabFile:
if type(ext) == astropy.io.fits.hdu.table.BinTableHDU:
tileNamesInFile.append(ext.name)
tileNamesInFile.sort()
if tileNames is None:
tileNames=tileNamesInFile
loadMode="combined"
else:
globStr=source.selFnDir+os.path.sep+"QFit#*.fits"
QTabFileNames=glob.glob(globStr)
loadMode="single"
if len(QTabFileNames) == 0:
raise Exception("could not find either '%s' or '%s' - needed to make QFit object" % (combinedQTabFileName, globStr))
zMin=self._zGrid.max()
zMax=self._zGrid.min()
for tileName in tileNames:
if loadMode == "combined":
QTab=atpy.Table().read(combinedQTabFileName, hdu = tileName)
elif loadMode == "single":
QTab=atpy.Table().read(source.selFnDir+os.path.sep+"QFit#%s.fits" % (tileName))
else:
raise Exception("loadMode is not defined")
                if QTab['z'].min() < zMin:
                    zMin=QTab['z'].min()
                    self.zMin=zMin
                if QTab['z'].max() > zMax:
                    zMax=QTab['z'].max()
                    self.zMax=zMax
self.fitDict[tileName]=self._makeInterpolator(QTab)
elif os.path.exists(source) == True:
# Inspect file and get tile names if MEF
if tileNames is None:
tileNames=[]
with pyfits.open(source) as QTab:
for ext in QTab:
if type(ext) == astropy.io.fits.hdu.table.BinTableHDU:
tileNames.append(ext.name)
zMin=self._zGrid.max()
zMax=self._zGrid.min()
for tileName in tileNames:
if tileName == '': # Individual, interim file name
assert(source.find("QFit#") > 0)
tileName=os.path.split(source)[-1].split("QFit#")[-1].split(".fits")[0]
QTab=atpy.Table().read(source)
else:
QTab=atpy.Table().read(source, hdu = tileName)
                if QTab['z'].min() < zMin:
                    zMin=QTab['z'].min()
                    self.zMin=zMin
                if QTab['z'].max() > zMax:
                    zMax=QTab['z'].max()
                    self.zMax=zMax
self.fitDict[tileName]=self._makeInterpolator(QTab)
def _makeInterpolator(self, QTab):
"""Inspects QTab, and makes an interpolator object - 2d if there is z-dependence, 1d if not.
"""
if QTab.meta['ZDEPQ'] == 0:
QTab.sort('theta500Arcmin')
spline=interpolate.InterpolatedUnivariateSpline(QTab['theta500Arcmin'], QTab['Q'], ext = 1)
if self.zDependent == True:
raise Exception("QFit contains a mixture of z-dependent and z-independent tables")
self.zDepThetaMax=None
self.zDependent=False
elif QTab.meta['ZDEPQ'] == 1:
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
spline=interpolate.LSQBivariateSpline(QTab['z'], QTab['theta500Arcmin'], QTab['Q'],
self._zGrid, self._theta500ArcminGrid)
zs=np.unique(QTab['z'])
thetaMaxs=[]
for z in zs:
thetaMaxs.append(QTab['theta500Arcmin'][QTab['z'] == z].max())
self.zDepThetaMax=interpolate.InterpolatedUnivariateSpline(zs, thetaMaxs)
if self.zDependent == False:
raise Exception("QFit contains a mixture of z-dependent and z-independent tables")
self.zDependent=True
else:
raise Exception("Valid ZDEPQ values are 0 or 1 only")
return spline
def getQ(self, theta500Arcmin, z = None, tileName = None):
"""Return the value of Q (the filter mismatch function) using interpolation.
Args:
theta500Arcmin (:obj:`float` or :obj:`np.ndarray`): The angular scale at which *Q* will
be calculated. This can be an array or a single value.
z (:obj:`float`, optional): Redshift, only used if *Q* is a function of
redshift, otherwise it is ignored. This must be a single value only,
i.e., not an array.
tileName (:obj:`str`, optional): The name of the tile to use for the *Q* function.
Returns:
The value of *Q* (an array or a single float, depending on the input).
Note:
In the case where *Q* is a function of redshift, values outside of the range for which
*Q* has been calculated will be filled with zeros (i.e., there is no extrapolation in
redshift).
"""
if z is not None:
if type(z) == np.ndarray and z.shape == (1,):
z=float(z)
if type(z) is not float and type(z) is not np.float64:
raise Exception("z must be a float, and not, e.g., an array")
if self.zDependent == True:
Qs=self.fitDict[tileName](z, theta500Arcmin)[0]
thetaMask=theta500Arcmin > self.zDepThetaMax(z)
Qs[thetaMask]=0.0
if z < self.zMin or z > self.zMax:
Qs=0
else:
# Univariate case handles own valid bounds checking
Qs=self.fitDict[tileName](theta500Arcmin)
return Qs
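# Illustrative usage sketch (not part of the original module). The tile name below
# is hypothetical; "selFn/QFit.fits" is the usual location mentioned in loadQ().
# getQ() is the supported access point rather than reading fitDict directly.
def _example_getQ():
    Q = QFit(QFitFileName = "selFn/QFit.fits", tileNames = ["1_10_7"])  # hypothetical inputs
    theta500Arcmin = np.linspace(1.0, 10.0, 10)
    # z must be a single value and is only used when the stored Q is z-dependent
    return Q.getQ(theta500Arcmin, z = 0.4, tileName = "1_10_7")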
#------------------------------------------------------------------------------------------------------------
def fSZ(obsFrequencyGHz, TCMBAlpha = 0.0, z = None):
"""Returns the frequency dependence of the (non-relativistic) Sunyaev-Zel'dovich effect.
Args:
obsFrequencyGHz (float): Frequency in GHz at which to calculate fSZ.
TCMBAlpha (float, optional): This should always be zero unless you really do want to make a model
where CMB temperature evolves T0*(1+z)^{1-TCMBAlpha}.
z (float, optional): Redshift - needed only if TCMBAlpha is non-zero.
Returns:
Value of SZ spectral shape at given frequency (neglecting relativistic corrections).
"""
h=constants.h.value
kB=constants.k_B.value
sigmaT=constants.sigma_T.value
me=constants.m_e.value
c=constants.c.value
x=(h*obsFrequencyGHz*1e9)/(kB*TCMB)
if TCMBAlpha != 0 and z is not None:
assert(z >= 0)
x=x* | np.power(1+z, TCMBAlpha) | numpy.power |
#! /home/drcl_yang/anaconda3/envs/py36/bin/python
import rospy
import sys
from gazebo_msgs.srv import *
from geometry_msgs.msg import *
import tf.transformations as tft
import numpy as np
import math
import roslib
from std_msgs.msg import Empty as EmptyMsg
from std_msgs.msg import Float64
from nav_msgs.msg import Odometry
import time
roslib.load_manifest('mecanum_robot_gazebo')
g_get_state = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
left_ball_state= []
right_ball_state= []
pre_z = np.nan
pre_gradient = np.nan
t0 = time.time()
t1 = time.time()
dt = t0 - t1
def check_ball_bounce(cnt_z):
global pre_z
global pre_gradient
if np.isnan(pre_z):
pre_z = cnt_z
return False
    cnt_gradient = cnt_z - pre_z
    pre_z = cnt_z
    if np.isnan(pre_gradient):
        pre_gradient = cnt_gradient
        return False
    # A bounce is the moment the vertical velocity changes sign: the ball was
    # falling (previous gradient < 0) and is now rising (current gradient > 0).
    if cnt_gradient > 0 and pre_gradient < 0:
        pre_gradient = cnt_gradient
        return True
    else:
        pre_gradient = cnt_gradient
        return False
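# Illustrative check (not part of the original script, and assuming the sign-change
# interpretation of a bounce used above): feed a synthetic falling-then-rising
# height sequence into check_ball_bounce(); only the sample where the vertical
# motion changes sign should be reported as a bounce.
def _example_check_ball_bounce():
    heights = [1.0, 0.6, 0.3, 0.1, 0.3, 0.6]
    return [check_ball_bounce(z) for z in heights]   # expected: True only at the 5th sample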
def gat_ball_stats(ball_name):
ball_state = g_get_state(model_name = ball_name)
"""object_pose.position.x = float(robot_state.pose.position.x)
object_pose.position.y = float(robot_state.pose.position.y)
object_pose.position.z = float(robot_state.pose.position.z)
object_pose.orientation.x = float(robot_state.pose.orientation.x)
object_pose.orientation.y = float(robot_state.pose.orientation.y)
object_pose.orientation.z = float(robot_state.pose.orientation.z)
object_pose.orientation.w = float(robot_state.pose.orientation.w)
angle = qua2eular(object_pose.orientation.x, object_pose.orientation.y,
object_pose.orientation.z, object_pose.orientation.w)"""
return ball_state
def check_ball_exist(ball_name_list):
global t0
t0 = time.time()
if g_get_state(model_name = ball_name_list[0]).success:
return 1
elif g_get_state(model_name = ball_name_list[1]).success:
return 2
else:
return 0
def cal_drag_lift_force(down_motion, drag_force, lift_force, angle_xy, angle_x, cl):
if down_motion == 0 :
if cl < 0:
drag_force_z = drag_force * np.sin(angle_xy)
drag_force_xy = drag_force * np.cos(angle_xy)
drag_force_x = drag_force_xy * np.cos(angle_x)
drag_force_y = drag_force_xy * np.sin(angle_x)
lift_force_z = lift_force * np.sin(angle_xy)
lift_force_xy = lift_force * np.cos(angle_xy)
lift_force_x = -lift_force_xy * np.cos(angle_x)
lift_force_y = lift_force_xy * np.sin(angle_x)
else:
drag_force_z = drag_force * np.sin(angle_xy)
drag_force_xy = drag_force * | np.cos(angle_xy) | numpy.cos |
"""The ``templates`` module allows for fast creation of a few select sample types and diffraction geometries without having to
worry about any of the "under the hood" scripting.
"""
import numpy as np
import pygalmesh
from scipy.spatial.transform import Rotation
from xrd_simulator.detector import Detector
from xrd_simulator.beam import Beam
from xrd_simulator.motion import RigidBodyMotion
from xrd_simulator.polycrystal import Polycrystal
from xrd_simulator.mesh import TetraMesh
from xrd_simulator.phase import Phase
from xrd_simulator import utils
PARAMETER_KEYS = [
"detector_distance",
"number_of_detector_pixels_z",
"number_of_detector_pixels_y",
"detector_center_pixel_z",
"detector_center_pixel_y",
"pixel_side_length_z",
"pixel_side_length_y",
"wavelength",
"beam_side_length_z",
"beam_side_length_y",
"rotation_step",
"rotation_axis"
]
def s3dxrd(parameters):
"""Construct a scaning-three-dimensional-xray diffraction experiment.
This is a helper/utility function for quickly creating an experiment. For full controll
over the diffraction geometry consider custom creation of the primitive quantities:
(:obj:`xrd_simulator.beam.Beam`), and (:obj:`xrd_simulator.detector.Detector`) seperately.
Args:
parameters (:obj:`dict`): Dictionary with fields as \n
``"detector_distance"`` : (:obj:`float`) Distance form sample origin to
detector centre in units of microns. \n
``"number_of_detector_pixels_z"`` : (:obj:`int`) Number of detector pixels
along z-axis. \n
``"number_of_detector_pixels_y"`` : (:obj:`int`) Number of detector pixels
along y-axis. \n
``"detector_center_pixel_z"`` : (:obj:`float`) Intersection pixel coordinate between
beam centroid line and detector along z-axis. \n
``"detector_center_pixel_y"`` : (:obj:`float`) Intersection pixel coordinate between
beam centroid line and detector along y-axis. \n
``"pixel_side_length_z"`` : (:obj:`float`) Detector pixel side length in units
of microns along z-axis. \n
``"pixel_side_length_y"`` : (:obj:`float`) Detector pixel side length in units
of microns along y-axis. \n
``"wavelength"`` : (:obj:`float`) Wavelength in units of Angstrom. \n
``"beam_side_length_z"`` : (:obj:`float`) Beam side length in units
of microns. \n
``"beam_side_length_y"`` : (:obj:`float`) Beam side length in units
of microns. \n
``"rotation_step"`` : (:obj:`float`) Angular frame integration step in
units of radians. \n
``"rotation_axis"`` : (:obj:`numpy array`) Axis around which to
positively rotate the sample by ``rotation_step`` radians. \n
Returns:
(:obj:`xrd_simulator`) objects defining an experiment: (:obj:`xrd_simulator.beam.Beam`),
(:obj:`xrd_simulator.detector.Detector`), (:obj:`xrd_simulator.motion.RigidBodyMotion`).
Examples:
.. literalinclude:: examples/example_s3dxrd.py
"""
for key in PARAMETER_KEYS:
if key not in list(parameters):
raise ValueError(
"No keyword " +
key +
" found in the input parameters dictionary")
detector = _get_detector_from_params(parameters)
beam = _get_beam_from_params(parameters)
motion = _get_motion_from_params(parameters)
return beam, detector, motion
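# Illustrative usage sketch (not part of the original module): the numeric values
# below are made-up example settings, but the dictionary covers every key required
# by PARAMETER_KEYS.
def _example_s3dxrd():
    parameters = {
        "detector_distance": 191023.9,
        "number_of_detector_pixels_z": 1024,
        "number_of_detector_pixels_y": 1024,
        "detector_center_pixel_z": 512.0,
        "detector_center_pixel_y": 512.0,
        "pixel_side_length_z": 75.0,
        "pixel_side_length_y": 75.0,
        "wavelength": 0.285227,
        "beam_side_length_z": 512.0,
        "beam_side_length_y": 512.0,
        "rotation_step": np.radians(1.0),
        "rotation_axis": np.array([0.0, 0.0, 1.0]),
    }
    beam, detector, motion = s3dxrd(parameters)
    return beam, detector, motion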
def polycrystal_from_odf(orientation_density_function,
number_of_crystals,
sample_bounding_cylinder_height,
sample_bounding_cylinder_radius,
unit_cell,
sgname,
path_to_cif_file=None,
maximum_sampling_bin_seperation=np.radians(5.0),
strain_tensor=lambda x: np.zeros((3, 3))):
"""Fill a cylinder with crystals from a given orientation density function.
The ``orientation_density_function`` is sampled by discretizing orientation space over the unit
    quaternions. Each bin is assigned its appropriate probability, assuming the
``orientation_density_function`` is approximately constant over a single bin. Each sampled
orientation is constructed by first drawing a random bin and next drawing uniformly from
within that bin, again assuming that ``orientation_density_function`` is approximately constant
over a bin.
Args:
orientation_density_function (:obj:`callable`): orientation_density_function(x, q) ->
:obj:`float` where input variable ``x`` is a :obj:`numpy array` of shape ``(3,)``
representing a spatial coordinate in the cylinder (x,y,z) and ``q`` is a
:obj:`numpy array` of shape ``(4,)`` representing a orientation in so3 by a unit
quarternion. The format of the quarternion is "scalar last"
(same as in scipy.spatial.transform.Rotation).
number_of_crystals (:obj:`int`): Approximate number of crystal elements to compose the
cylinder volume.
sample_bounding_cylinder_height (:obj:`float`): Height of sample cylinder in units of
microns.
sample_bounding_cylinder_radius (:obj:`float`): Radius of sample cylinder in units of
microns.
unit_cell (:obj:`list` of :obj:`float`): Crystal unit cell representation of the form
[a,b,c,alpha,beta,gamma], where alpha,beta and gamma are in units of degrees while
            a,b and c are in units of angstrom.
        sgname (:obj:`string`): Name of space group, e.g. 'P3221' for quartz (SiO2), for instance.
path_to_cif_file (:obj:`string`): Path to CIF file. Defaults to None, in which case no structure
factors are computed.
        maximum_sampling_bin_seperation (:obj:`float`): Discretization step length of orientation
            space using spherical coordinates over the unit quaternions in units of radians.
            A smaller step length gives more accurate sampling of the input
``orientation_density_function`` but is computationally slower.
strain_tensor (:obj:`callable`): Strain tensor field over sample cylinder.
strain_tensor(x) -> :obj:`numpy array` of shape ``(3,3)`` where input variable ``x`` is
a :obj:`numpy array` of shape ``(3,)`` representing a spatial coordinate in the
cylinder (x,y,z).
Returns:
(:obj:`xrd_simulator.polycrystal.Polycrystal`)
Examples:
.. literalinclude:: examples/example_polycrystal_from_odf.py
"""
# Sample topology
volume_per_crystal = np.pi * (sample_bounding_cylinder_radius**2) * \
sample_bounding_cylinder_height / number_of_crystals
max_cell_circumradius = (3 * volume_per_crystal / (np.pi * 4.))**(1 / 3.)
# Fudge factor 2.6 gives approximately number_of_crystals elements in the
# mesh
max_cell_circumradius = 2.65 * max_cell_circumradius
dz = sample_bounding_cylinder_height / 2.
R = float(sample_bounding_cylinder_radius)
cylinder = pygalmesh.generate_mesh(
pygalmesh.Cylinder(-dz, dz, R, max_cell_circumradius),
max_cell_circumradius=max_cell_circumradius,
max_edge_size_at_feature_edges=max_cell_circumradius,
verbose=False)
mesh = TetraMesh._build_tetramesh(cylinder)
# Sample is uniformly single phase
phases = [Phase(unit_cell, sgname, path_to_cif_file)]
element_phase_map = np.zeros((mesh.number_of_elements,)).astype(int)
# Sample spatial texture
orientation = _sample_ODF(
orientation_density_function,
maximum_sampling_bin_seperation,
mesh.ecentroids)
# Sample spatial strain
strain_lab = np.zeros((mesh.number_of_elements, 3, 3))
for ei in range(mesh.number_of_elements):
# strain in lab/sample-coordinates
strain_lab[ei] = strain_tensor(mesh.ecentroids[ei])
return Polycrystal(
mesh,
orientation,
strain=strain_lab,
phases=phases,
element_phase_map=element_phase_map)
def get_uniform_powder_sample(
sample_bounding_radius,
number_of_grains,
unit_cell,
sgname,
strain_tensor=np.zeros((3, 3)),
path_to_cif_file=None):
"""Generate a polycyrystal with grains overlayed at the origin and orientations drawn uniformly.
Args:
sample_bounding_radius (:obj:`float`): Bounding radius of sample. All tetrahedral crystal
            elements will be overlaid within a sphere of ``sample_bounding_radius`` radius.
number_of_grains (:obj:`int`): Number of grains composing the polycrystal sample.
unit_cell (:obj:`list` of :obj:`float`): Crystal unit cell representation of the form
[a,b,c,alpha,beta,gamma], where alpha,beta and gamma are in units of degrees while
            a,b and c are in units of angstrom.
        sgname (:obj:`string`): Name of space group, e.g. 'P3221' for quartz (SiO2), for instance.
strain_tensor (:obj:`numpy array`): Strain tensor to apply to all tetrahedral crystal
elements contained within the sample. ``shape=(3,3)``.
path_to_cif_file (:obj:`string`): Path to CIF file. Defaults to None, in which case no structure
factors are computed.
Returns:
        (:obj:`xrd_simulator.polycrystal`) A polycrystal sample with ``number_of_grains`` grains.
Examples:
.. literalinclude:: examples/example_get_uniform_powder_sample.py
"""
coord, enod, node_number = [], [], 0
r = sample_bounding_radius
for _ in range(number_of_grains):
coord.append([r / np.sqrt(3.), r / np.sqrt(3.), -r / np.sqrt(3.)])
coord.append([r / | np.sqrt(3.) | numpy.sqrt |
""" Isentropic properties. """
from __future__ import absolute_import, division
import numpy as np
import scipy as sp
from scipy.optimize import bisect, newton
from skaero.util.decorators import implicit
def mach_angle(M):
r"""Returns Mach angle given supersonic Mach number.
.. math::
\mu = \arcsin{\left ( \frac{1}{M} \right )}
Parameters
----------
M : float
Mach number.
Returns
-------
mu : float
Mach angle.
Raises
------
ValueError
If given Mach number is subsonic.
"""
try:
with np.errstate(invalid="raise"):
mu = np.arcsin(1 / M)
except FloatingPointError:
raise ValueError("Mach number must be supersonic")
return mu
def mach_from_area_ratio(A_Astar, fl=None):
"""Computes the Mach number given an area ratio asuming isentropic flow.
Uses the relation between Mach number and area ratio for isentropic flow,
and returns both the subsonic and the supersonic solution.
Parameters
----------
A_Astar : float
        Area ratio A / A*: duct cross-sectional area divided by the critical (sonic throat) area.
fl : IsentropicFlow, optional
Isentropic flow object, default flow with gamma = 7 / 5.
Returns
-------
out : tuple of floats
Subsonic and supersonic Mach number solution of the equation.
Raises
------
ValueError
If the area ratio is less than 1.0 (the critical area is always the
minimum).
"""
if not fl:
fl = IsentropicFlow(gamma=1.4)
eq = implicit(fl.A_Astar)
if A_Astar < 1.0:
raise ValueError("Area ratio must be greater than 1")
elif A_Astar == 1.0:
M_sub = M_sup = 1.0
else:
M_sub = bisect(eq, 0.0, 1.0, args=(A_Astar,))
M_sup = newton(eq, 2.0, args=(A_Astar,))
return M_sub, M_sup
def mach_from_nu(nu, in_radians=True, gamma=1.4):
r"""Computes the Mach number given a Prandtl-Meyer angle, :math:`\nu`.
Uses the relation between Mach number and Prandtl-Meyer angle for
isentropic flow, to iteratively compute and return the Mach number.
Parameters
----------
nu : float
Prandtl-Meyer angle, by default in radians.
in_radians : bool, optional
When set as False, converts nu from degrees to radians.
gamma : float, optional
Specific heat ratio.
Returns
-------
M : float
Mach number corresponding to :math:`\nu`.
Raises
------
ValueError
        If :math:`\nu` is 0 or negative or above the theoretical maximum based on
:math:`\gamma`.
"""
if not in_radians:
nu = np.radians(nu)
nu_max = np.pi / 2.0 * (np.sqrt((gamma + 1.0) / (gamma - 1.0)) - 1)
if nu <= 0.0 or nu >= nu_max:
raise ValueError(
"Prandtl-Meyer angle must be between (0, %f) radians." % nu_max
)
eq = implicit(PrandtlMeyerExpansion.nu)
M = newton(eq, 2.0, args=(nu,))
return M
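# Illustrative usage sketch (not part of the original module): the module-level
# helpers above can be exercised directly; the numbers are example inputs only.
def _example_isentropic_helpers():
    mu = mach_angle(2.0)                      # Mach angle of an M = 2 flow (~0.5236 rad)
    M_sub, M_sup = mach_from_area_ratio(2.0)  # both solutions for A/A* = 2
    M = mach_from_nu(np.radians(26.38))       # Mach number for a 26.38 deg Prandtl-Meyer angle
    return mu, M_sub, M_sup, M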
class IsentropicFlow(object):
"""Class representing an isentropic gas flow.
Isentropic flow is characterized by:
* Viscous and heat conductivity effects are negligible.
* No chemical or radioactive heat production.
"""
def __init__(self, gamma=1.4):
"""Constructor of IsentropicFlow.
Parameters
----------
gamma : float, optional
Specific heat ratio, default 7 / 5.
"""
self.gamma = gamma
def p_p0(self, M):
r"""Pressure ratio from Mach number.
.. math::
\left ( \frac{P}{P_{0}} \right ) = \left ( \frac{T}{T_{0}} \right )^{\frac{\gamma}{(\gamma - 1)}}
Parameters
----------
M : array_like
Mach number.
Returns
-------
p_p0 : array_like
Pressure ratio.
"""
M = np.asanyarray(M)
p_p0 = self.T_T0(M) ** (self.gamma / (self.gamma - 1))
return p_p0
def rho_rho0(self, M):
r"""Density ratio from Mach number.
.. math::
\left ( \frac{\rho}{\rho_{0}} \right ) = \left ( \frac{T}{T_{0}} \right )^{\frac{1}{(\gamma - 1)}}
Parameters
----------
M : array_like
Mach number.
Returns
-------
rho_rho0 : array_like
Density ratio.
"""
M = np.asanyarray(M)
rho_rho0 = self.T_T0(M) ** (1 / (self.gamma - 1))
return rho_rho0
def T_T0(self, M):
r"""Temperature ratio from Mach number.
.. math::
\left ( \frac{T}{T_{0}} \right ) = \left (1 + \frac{\gamma - 1}{2}M^{2} \right )^{-1}
Parameters
----------
M : array_like
Mach number.
Returns
-------
T_T0 : array_like
Temperature ratio.
"""
M = np.asanyarray(M)
T_T0 = 1 / (1 + (self.gamma - 1) * M * M / 2)
return T_T0
def A_Astar(self, M):
"""Area ratio from Mach number.
        Duct area divided by critical area given Mach number.
Parameters
----------
M : array_like
Mach number.
Returns
-------
A_Astar : array_like
Area ratio.
"""
M = np.asanyarray(M)
# If there is any zero entry, NumPy array division gives infinity,
# which is correct.
with np.errstate(divide="ignore"):
A_Astar = (2 / self.T_T0(M) / (self.gamma + 1)) ** (
(self.gamma + 1) / (2 * (self.gamma - 1))
) / M
return A_Astar
def a_a0(self, M):
""" Speed of sound ratio from Mach number.
Parameters
----------
M: array_like
Mach number.
Returns
-------
a_a0: array_like
Speed of sound ratio.
"""
M = | np.asarray(M) | numpy.asarray |
"""
Defines the unit tests for the
:mod:`colour.models.rgb.transfer_functions.linear` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import linear_function
from colour.utilities import ignore_numpy_errors
__author__ = "<NAME>"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"TestLinearFunction",
]
class TestLinearFunction(unittest.TestCase):
"""
Define :func:`colour.models.rgb.transfer_functions.linear.\
linear_function` definition unit tests methods.
"""
def test_linear_function(self):
"""
Test :func:`colour.models.rgb.transfer_functions.linear.\
linear_function` definition.
"""
self.assertEqual(linear_function(0.0), 0.0)
self.assertEqual(linear_function(0.18), 0.18)
self.assertEqual(linear_function(1.0), 1.0)
def test_n_dimensional_linear_function(self):
"""
Test :func:`colour.models.rgb.transfer_functions.linear.\
linear_function` definition n-dimensional arrays support.
"""
a = 0.18
a_p = linear_function(a)
a = np.tile(a, 6)
a_p = np.tile(a_p, 6)
np.testing.assert_almost_equal(linear_function(a), a_p, decimal=7)
a = np.reshape(a, (2, 3))
a_p = | np.reshape(a_p, (2, 3)) | numpy.reshape |
#!/usr/bin/env/python
import argparse
import numpy as np
import os
import pandas as pd
import yaml
import micro_dl.inference.evaluation_metrics as metrics
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.preprocess_utils as preprocess_utils
import micro_dl.utils.image_utils as image_utils
import micro_dl.utils.normalize as normalize
def parse_args():
"""Parse command line arguments
In python namespaces are implemented as dictionaries
:return: namespace containing the arguments passed.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
required=True,
help='Directory containing model weights, config and csv files',
)
parser.add_argument(
'--model_fname',
type=str,
default=None,
help='File name of weights in model dir (.hdf5). If None grab newest.',
)
parser.add_argument(
'--test_data',
dest='test_data',
action='store_true',
help="Use test indices in split_samples.json",
)
parser.add_argument(
'--all_data',
dest='test_data',
action='store_false',
)
parser.set_defaults(test_data=True)
parser.add_argument(
'--image_dir',
type=str,
required=True,
help="Directory containing target images",
)
parser.add_argument(
'--metrics',
type=str,
required=True,
nargs='*',
help='Metrics for model evaluation'
)
parser.add_argument(
'--orientations',
type=str,
default='xyz',
nargs='*',
help='Evaluate metrics along these orientations (xy, xz, yz, xyz)'
)
parser.add_argument(
'--name_parser',
type=str,
default='parse_sms_name',
help="The function in aux_utils that will parse the file name for indices",
)
return parser.parse_args()
def compute_metrics(model_dir,
image_dir,
metrics_list,
orientations_list,
test_data=True,
name_parser='parse_sms_name'):
"""
Compute specified metrics for given orientations for predictions, which
are assumed to be stored in model_dir/predictions. Targets are stored in
image_dir.
Writes metrics csv files for each orientation in model_dir/predictions.
:param str model_dir: Assumed to contain config, split_samples.json and
subdirectory predictions/
:param str image_dir: Directory containing target images with frames_meta.csv
:param list metrics_list: See inference/evaluation_metrics.py for options
:param list orientations_list: Any subset of {xy, xz, yz, xyz}
(see evaluation_metrics)
:param bool test_data: Uses test indices in split_samples.json,
otherwise all indices
:param str name_parser: Type of name parser (default or parse_idx_from_name)
"""
# Load config file
config_name = os.path.join(model_dir, 'config.yml')
with open(config_name, 'r') as f:
config = yaml.safe_load(f)
preprocess_config = preprocess_utils.get_preprocess_config(config['dataset']['data_dir'])
# Load frames metadata and determine indices
frames_meta = pd.read_csv(os.path.join(image_dir, 'frames_meta.csv'))
if isinstance(metrics_list, str):
metrics_list = [metrics_list]
metrics_inst = metrics.MetricsEstimator(metrics_list=metrics_list)
split_idx_name = config['dataset']['split_by_column']
if test_data:
idx_fname = os.path.join(model_dir, 'split_samples.json')
try:
split_samples = aux_utils.read_json(idx_fname)
test_ids = np.sort(split_samples['test'])
except FileNotFoundError as e:
print("No split_samples file. Will predict all images in dir.")
else:
test_ids = np.sort( | np.unique(frames_meta[split_idx_name]) | numpy.unique |
################################################################################
# tabulation.py
#
# The Tabulation class represents a function by a sequence of linear
# interpolations between points defined by arrays of x and y coordinates.
#
# <NAME>, PDS Rings Node, December 2011
################################################################################
from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
import unittest
class Tabulation(object):
"""A class that represents a function by a sequence of linear interpolations
between points defined by arrays of x and y coordinates. The function is
treated as equal to zero outside the range of the x coordinates."""
def __init__(self, x, y):
"""Constructor for a Tabulation object.
Input:
x a 1-D array of x-coordinates, which must be monotonic.
y a 1-D array of y-values, given in the same order as the
x-coordinates.
"""
ignore = self._update(x,y)
########################################
# Private methods
########################################
def _update(self, x, y):
"""Updates a tabulation in place with new x and y arrays."""
x = np.asfarray(x)
y = np.asfarray(y)
sorted = np.sort(x)
if len(x.shape) != 1:
raise ValueError("x array in not 1-dimensional")
if x.shape != y.shape:
raise ValueError("x and y arrays do not have the same size")
if np.all(sorted == x):
self.x = x
self.y = y
elif np.all(sorted == x[::-1]):
self.x = x[::-1]
self.y = y[::-1]
else:
raise ValueError("x-coordinates are not monotonic")
self.func = None
return self
def _update_y(self, new_y):
"""Updates a tabulation in place with a new y array."""
        y = np.asfarray(new_y)
if new_y.shape != self.x.shape:
raise ValueError("x and y arrays do not have the same size")
self.y = y
self.func = None
return self
def _trim(self):
"""Updates the given Tabulation by deleting leading and trailing regions
of the domain that contain nothing but zeros. This is never strictly
necessary but can improve efficiency and reduce memory requirements. It
can be useful because many filter bandpass functions contains strings of
zeros at one end or the other.
NOTE that this function operates in-place, returning the same
Tabulation object.
"""
# Trim the trailing end
(new_x, new_y) = Tabulation._trim1(self.x[::-1], self.y[::-1])
# Trim the leading end
(new_x, new_y) = Tabulation._trim1(new_x[::-1], new_y[::-1])
return self._update(new_x, new_y)
@staticmethod
def _trim1(x,y):
"""Private procedure used by trim() to strip away the leading end of
an (x,y) array pair.
"""
# Define a mask at the low end
mask = np.cumsum(y != 0.) != 0
# Shift left by one to keep last zero
mask[:-1] = mask[1:]
return (x[mask], y[mask])
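    # Worked example for _trim1 (illustrative): for y = [0,0,3,0,5,0],
    # np.cumsum(y != 0) != 0 gives [F,F,T,T,T,T]; shifting left by one keeps the
    # zero immediately before the first nonzero value, so the trimmed arrays keep
    # indices 1..5, i.e. y -> [0,3,0,5,0].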
@staticmethod
def _xmerge(x1,x2):
"""Returns a new array of x-values containing the union of x-values
found in each of the given arrays.
"""
# Confirm overlap
if x1[0] > x2[-1] or x2[0] > x1[-1]:
raise ValueError("domains do not overlap")
# Merge and sort
sorted = np.sort(np.hstack((x1, x2)))
# Locate and remove duplicates
mask = np.hstack((sorted[:-1] != sorted[1:], [True]))
return sorted[mask]
@staticmethod
def _xoverlap(x1,x2):
"""Returns a new array of x-values containing the union of x-values from
each of the given arrays that fall within the intersection of the two
domains.
"""
new_x = Tabulation._xmerge(x1,x2)
mask = (new_x >= max(x1[0],x2[0])) & (new_x <= min(x1[-1],x2[-1]))
return new_x[mask]
########################################
# Standard operators
########################################
def __call__(self, x):
# Fill in the 1-D interpolation if necessary
if self.func is None:
self.func = interp1d(self.x, self.y, kind="linear",
bounds_error=False, fill_value=0.)
if np.shape(x):
return self.func(x)
else:
return float(self.func(x)[()])
def __mul__(self, other):
# Multiplication of two Tabulations
# Note: the new domain is the intersection of the given domains
if type(other) == type(self):
new_x = Tabulation._xoverlap(self.x, other.x)
return Tabulation(new_x, self(new_x) * other(new_x))._trim()
# Otherwise just scale the y-values
elif np.shape(other) == ():
return Tabulation(self.x, self.y * other)
def __truediv__(self, other):
# Division of two Tabulations
# Note: the new domain is the intersection of the given domains
if type(other) == type(self):
new_x = Tabulation._xoverlap(self.x, other.x)
return Tabulation(new_x, self(new_x) / other(new_x))._trim()
# Otherwise just scale the y-values
elif np.shape(other) == ():
return Tabulation(self.x, self.y / other)
def __add__(self, other):
# Addition of two Tabulations
# Note: the new domain is the union of the given domains
if type(other) == type(self):
new_x = Tabulation._xmerge(self.x, other.x)
return Tabulation(new_x, self(new_x) + other(new_x))
# Otherwise just shift the y-values
elif np.shape(other) == ():
return Tabulation(self.x, self.y + other)
# Note that a constant added to a Tabulation will still return zero
# outside the domain.
def __sub__(self, other):
# Subtraction of two Tabulations
# Note: the new domain is the union of the given domains
if type(other) == type(self):
new_x = Tabulation._xmerge(self.x, other.x)
return Tabulation(new_x, self(new_x) - other(new_x))
# Otherwise just shift the y-values
elif np.shape(other) == ():
return Tabulation(self.x, self.y - other)
# Note that a constant subtracted from a Tabulation will still return
# zero outside the domain.
def __imul__(self, other):
# In-place multiplication of two Tabulations
if type(other) == type(self):
new_x = Tabulation._xoverlap(self.x, other.x)
return self._update(new_x, self(new_x) * other(new_x))._trim()
# Otherwise just scale the y-values
elif np.shape(other) == ():
return self._update_y(self.y * other)
def __idiv__(self, other):
# In-place division of two Tabulations
if type(other) == type(self):
new_x = Tabulation._xoverlap(self.x, other.x)
return self._update(new_x, self(new_x) / other(new_x))._trim()
# Otherwise just scale the y-values
elif np.shape(other) == ():
return self._update_y(self.y / other)
def __iadd__(self, other):
# In-place addition of two Tabulations
if type(other) == type(self):
new_x = Tabulation._xmerge(self.x, other.x)
return self._update(new_x, self(new_x) + other(new_x))
# Otherwise just shift the y-values
elif np.shape(other) == ():
return self._update_y(self.y + other)
# Note that a constant added to a Tabulation will still return zero
# outside the domain.
def __isub__(self, other):
# In-place subtraction of two Tabulations
if type(other) == type(self):
new_x = Tabulation._xmerge(self.x, other.x)
return self._update(new_x, self(new_x) - other(new_x))
# Otherwise just shift the y-values
elif np.shape(other) == ():
return self._update_y(self.y - other)
# Note that a constant subtracted from a Tabulation will still return
# zero outside the domain.
########################################
# Additional methods
########################################
def trim(self):
"""Returns a new Tabulation (shallow copy) in which the zero-valued
leading and trailing regions of the domain have been removed."""
# Save the original arrays
x = self.x
y = self.y
# Create a trimmed version
self._trim() # operates in-place
result = Tabulation(self.x, self.y)
# Restore the original
self.x = x
self.y = y
return result
def domain(self):
"""Returns a tuple containing the range of x-values over which the
function is nonzero.
"""
return (self.x[0], self.x[-1])
def clip(self, xmin, xmax):
"""Returns a tuple in which the domain has been redefined as
(xmin,xmax).
"""
new_x = Tabulation._xmerge(self.x, np.array((xmin,xmax)))
mask = (new_x >= xmin) & (new_x <= xmax)
return self.resample(new_x[mask])
def locate(self, yvalue):
"""Returns a list of the x-values where the Tabulation has the given
value of y. Note that the exact ends of the domain are not checked."""
signs = np.sign(self.y - yvalue)
mask = (signs[:-1] * signs[1:]) < 0.
xlo = self.x[:-1][mask]
ylo = self.y[:-1][mask]
xhi = self.x[1:][mask]
yhi = self.y[1:][mask]
xarray = xlo + (yvalue - ylo)/(yhi - ylo) * (xhi - xlo)
xlist = list(xarray) + list(self.x[signs == 0])
xlist.sort()
return xlist
def integral(self):
"""Returns the integral of [y dx].
"""
# Make an array consisting of the midpoints between the x-values
# Begin with an array holding one extra element
dx = np.empty(self.x.size + 1)
dx[1:] = self.x # Load the array shifted right
dx[0] = self.x[0] # Replicate the endpoint
dx[:-1] += self.x # Add the array shifted left
dx[-1] += self.x[-1]
# dx[] is now actually 2x the value at each midpoint.
# The weight on each value is the distance between the adjacent midpoints
dx[:-1] -= dx[1:] # Subtract the midpoints shifted right (not left)
# dx[] is now actually -2x the correct value of each weight. The last
# element is to be ignored.
# The integral is now the sum of the products y * dx
return -0.5 * np.sum(self.y * dx[:-1])
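    # Worked example for integral() (illustrative): for x = [0,1,2], y = [0,1,0]
    # the midpoint weights reduce to the trapezoid rule, giving
    # 0.5*(0+1)*1 + 0.5*(1+0)*1 = 1.0, the exact area under this piecewise-linear
    # function.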
def resample(self, new_x):
"""Re-samples a tabulation at a given list of x-values."""
return Tabulation(new_x, self(new_x))
def subsample(self, new_x):
"""Resamples a tabulation at the given list of x-values, while at the
same time retaining all original x-values."""
new_x = Tabulation._xmerge(new_x, self.x)
return Tabulation(new_x, self(new_x))
def mean(self, dx=None):
"""Returns the mean value of the tabulation. If specified, dx is the
minimum step permitted along the x-axis during integration."""
trimmed = self.trim()
if dx is None:
resampled = Tabulation(self.x, self.y.copy())
# y cannot be a shallow copy...
else:
(x0,x1) = trimmed.domain()
new_x = | np.arange(x0 + dx, x1, dx) | numpy.arange |
from unittest import TestCase
import numpy as np
from numpy import ndarray
from pandas import DataFrame
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from aix360.algorithms.rbm import FeatureBinarizerFromTrees
# noinspection PyPep8Naming
class TestFeatureBinarizerFromTrees(TestCase):
def setUp(self) -> None:
self.random_state = 0
d: dict = load_breast_cancer()
X: DataFrame = DataFrame(d['data'], columns=d['feature_names'])
self.col_ordinal = X.columns.to_list()
np.random.seed(self.random_state)
s = np.array(['a', 'b', 'c'])
X['cat alpha'] = s[np.random.randint(0, 3, len(X))]
X['cat num'] = np.random.randint(0, 3, len(X))
self.col_categorical = ['cat alpha', 'cat num']
s = np.array(['a', 'b'])
X['bin alpha'] = s[np.random.randint(0, 2, len(X))]
X['bin num'] = np.random.randint(0, 2, len(X))
self.col_binary = ['bin alpha', 'bin num']
self.X = X
self.y: ndarray = d['target']
self.X_train, self.X_test, self.y_train, self.y_test = \
train_test_split(self.X, self.y, test_size=0.4, random_state=self.random_state)
def test_init(self):
# colCateg >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees(colCateg=self.col_categorical)
self.assertListEqual(fbt.colCateg, self.col_categorical)
# treeNum >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeNum=None)
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeNum=0)
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeNum=-1)
fbt = FeatureBinarizerFromTrees(treeNum=3)
self.assertEqual(fbt.treeNum, 3)
# treeDepth >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeDepth=0)
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeDepth=-1)
fbt = FeatureBinarizerFromTrees(treeDepth=3)
self.assertEqual(fbt.treeDepth, 3)
self.assertEqual(fbt.treeKwargs['max_depth'], 3)
fbt = FeatureBinarizerFromTrees(treeDepth=None, treeKwargs=dict(max_depth=5))
self.assertEqual(fbt.treeKwargs['max_depth'], 5)
self.assertEqual(fbt.treeDepth, 5)
# treeFeatureSelection >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeFeatureSelection=0)
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeFeatureSelection=-1)
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeFeatureSelection=3)
with self.assertRaises(ValueError):
fbt = FeatureBinarizerFromTrees(treeFeatureSelection='bad string value')
fbt = FeatureBinarizerFromTrees(treeFeatureSelection=0.4)
self.assertEqual(fbt.treeFeatureSelection, 0.4)
self.assertEqual(fbt.treeKwargs['max_features'], 0.4)
fbt = FeatureBinarizerFromTrees(treeFeatureSelection=None)
self.assertTrue(fbt.treeFeatureSelection is None)
self.assertTrue(fbt.treeKwargs['max_features'] is None)
fbt = FeatureBinarizerFromTrees(treeFeatureSelection='log2')
self.assertEqual(fbt.treeFeatureSelection, 'log2')
self.assertEqual(fbt.treeKwargs['max_features'], 'log2')
fbt = FeatureBinarizerFromTrees(treeFeatureSelection=None, treeKwargs=dict(max_features=0.2))
self.assertEqual(fbt.treeKwargs['max_features'], 0.2)
self.assertEqual(fbt.treeFeatureSelection, 0.2)
# threshRound >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees(threshRound=None)
with self.assertRaises(ValueError):
FeatureBinarizerFromTrees(threshRound=-1)
fbt = FeatureBinarizerFromTrees(threshRound=3)
self.assertTrue(fbt.threshRound == 3)
# threshStr > >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees(threshStr=True)
self.assertTrue(fbt.threshStr)
fbt = FeatureBinarizerFromTrees(threshStr=False)
self.assertFalse(fbt.threshStr)
# returnOrd >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees(returnOrd=True)
self.assertTrue(fbt.returnOrd)
fbt = FeatureBinarizerFromTrees(returnOrd=False)
self.assertFalse(fbt.returnOrd)
# randomState >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees(randomState=3)
self.assertEqual(fbt.randomState, 3)
def test_fit_and_transform_exceptions(self):
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
# fit() requires y. The error is raised at 'self.decisionTree.fit(X, y)'
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees()
with self.assertRaises((TypeError, ValueError)):
fbt.fit(self.X_train)
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
# fit() does not allow/support NaN/None
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees()
Xn = self.X_train.copy(True)
Xn.iloc[[31, 27, 80], Xn.columns.get_loc('smoothness error')] = np.NaN
with self.assertRaises(ValueError):
fbt.fit(Xn, self.y_train)
fbt = FeatureBinarizerFromTrees()
Xn = self.X_train.copy(True)
Xn.iloc[[3, 17, 20], Xn.columns.get_loc('bin num')] = np.NaN
with self.assertRaises(ValueError):
fbt.fit(Xn, self.y_train)
fbt = FeatureBinarizerFromTrees(colCateg=self.col_categorical)
Xn = self.X_train.copy(True)
Xn.iloc[[3, 17, 20], Xn.columns.get_loc('cat num')] = np.NaN
with self.assertRaises(ValueError):
fbt.fit(Xn, self.y_train)
def test_fit_and_transform_binary(self):
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
# Test binary features with no categorical or ordinal features.
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=1, randomState=self.random_state)
fbt.fit(self.X_train[self.col_binary], self.y_train)
self.assertListEqual(list(fbt.maps.keys()), ['bin alpha'])
temp = [('bin alpha', '', ''), ('bin alpha', 'not', '')]
self.assertListEqual(fbt.features.to_list(), temp)
# Transform
T = fbt.transform(self.X_test)
self.assertListEqual(T.columns.to_list(), temp)
# Now test taking all available features.
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=None, randomState=self.random_state)
fbt.fit(self.X_train[self.col_binary], self.y_train)
self.assertListEqual(list(fbt.maps.keys()), self.col_binary)
temp = [('bin alpha', '', ''), ('bin alpha', 'not', ''), ('bin num', '', ''), ('bin num', 'not', '')]
self.assertListEqual(fbt.features.to_list(), temp)
# Transform
T = fbt.transform(self.X_test)
self.assertListEqual(fbt.features.to_list(), temp)
a = T[('bin num', '', '')].to_numpy()
b = (self.X_test['bin num'] == 1).astype(int).to_numpy()
self.assertTrue(np.all(a == b))
a = T[('bin num', 'not', '')].to_numpy()
b = 1 - b
self.assertTrue(np.all(a == b))
a = T[('bin alpha', '', '')].to_numpy()
b = (self.X_test['bin alpha'] == 'b').astype(int).to_numpy()
self.assertTrue(np.all(a == b))
a = T[('bin alpha', 'not', '')].to_numpy()
b = 1 - b
self.assertTrue(np.all(a == b))
def test_fit_and_transform_categorical(self):
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
# Test categorical with no binary or ordinal features.
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
# Two features (one feature with == and !=).
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=1, colCateg=self.col_categorical,
randomState=self.random_state)
fbt.fit(self.X_train[self.col_categorical], self.y_train)
self.assertListEqual(list(fbt.enc.keys()), [self.col_categorical[1]])
self.assertTrue(type(list(fbt.enc.values())[0]) is OneHotEncoder)
temp = [('cat num', '!=', 0), ('cat num', '==', 0)]
self.assertListEqual(fbt.features.to_list(), temp)
# Test transform. Categorical values are converted to strings to be like FeatureBinarizer
T = fbt.transform(self.X_test)
self.assertListEqual(T.columns.to_list(), temp)
# Test taking all available features.
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=None, colCateg=self.col_categorical,
randomState=self.random_state)
fbt.fit(self.X_train[self.col_categorical], self.y_train)
self.assertListEqual(self.col_categorical, fbt.colCateg)
self.assertListEqual(self.col_categorical, list(fbt.enc.keys()))
temp = [('cat alpha', '!=', 'a'), ('cat alpha', '!=', 'c'), ('cat alpha', '==', 'a'), ('cat alpha', '==', 'c'),
('cat num', '!=', 0), ('cat num', '!=', 2), ('cat num', '==', 0), ('cat num', '==', 2)]
self.assertListEqual(fbt.features.to_list(), temp)
# Transform
T = fbt.transform(self.X_test)
self.assertListEqual(T.columns.to_list(), temp)
a = T[('cat alpha', '==', 'a')].to_numpy()
b = (self.X_test['cat alpha'] == 'a').astype(int).to_numpy()
self.assertTrue(np.all(a == b))
a = T[('cat alpha', '!=', 'a')].to_numpy()
b = 1 - b
self.assertTrue(np.all(a == b))
a = T[('cat num', '==', 2)].to_numpy()
b = (self.X_test['cat num'] == 2).astype(int).to_numpy()
self.assertTrue(np.all(a == b))
a = T[('cat num', '!=', 2)].to_numpy()
b = 1 - b
self.assertTrue(np.all(a == b))
def test_fit_and_transform_ordinal(self):
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
# Test ordinal with no categorical or binary features.
# >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=1, randomState=self.random_state)
fbt.fit(self.X_train[self.col_ordinal], self.y_train)
temp = [('mean concave points', '<=', 0.04892), ('mean concave points', '>', 0.04892)]
self.assertListEqual(fbt.features.to_list(), temp)
self.assertDictEqual(fbt.thresh, {'mean concave points': np.array([0.04892])})
# Transform
T = fbt.transform(self.X_test)
self.assertListEqual(T.columns.to_list(), temp)
# Test threshStr
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=1, randomState=self.random_state, threshStr=True)
fbt.fit(self.X_train[self.col_ordinal], self.y_train)
self.assertDictEqual(fbt.thresh, {'mean concave points': np.array([0.04892])})
# Transform
T = fbt.transform(self.X_test)
temp = [('mean concave points', '<=', '0.04892'), ('mean concave points', '>', '0.04892')]
self.assertListEqual(T.columns.to_list(), temp)
# Test threshRound
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=1, threshRound=2, randomState=self.random_state)
fbt.fit(self.X_train[self.col_ordinal], self.y_train)
temp = [('mean concave points', '<=', 0.05), ('mean concave points', '>', 0.05)]
self.assertListEqual(fbt.features.to_list(), temp)
self.assertDictEqual(fbt.thresh, {'mean concave points': np.array([0.05])})
# Now test taking all available features.
fbt = FeatureBinarizerFromTrees(treeNum=1, treeDepth=None, randomState=self.random_state)
fbt.fit(self.X_train[self.col_ordinal], self.y_train)
temp = {'area error': np.array([46.315001]), 'concavity error': np.array([0.016965]),
'mean area': np.array([995.5]), 'mean concave points': np.array([0.04892]),
'mean texture': np.array([19.9]), 'smoothness error': | np.array([0.003299, 0.005083]) | numpy.array |
import keras
import keras.backend as K
import tensorflow as tf
import numpy as np
import random
np.random.seed(42)
tf.set_random_seed(42)
random.seed(42)
from osgeo import gdal, gdalconst
import os
from sklearn.model_selection import train_test_split
import skimage.io
import skimage.transform
from osgeo import ogr, osr
from glob import glob
def normalize_raster_locally(x):
mx = x.max()
mn = x.min()
x = (x - mn + np.finfo(float).eps) / (mx - mn + np.finfo(float).eps)
return x
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
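# Usage sketch (illustrative): these metric functions are intended to be passed to
# Keras at compile time, e.g.
#   model.compile(optimizer='adam', loss='binary_crossentropy',
#                 metrics=[precision_m, recall_m, f1_m])
# K.epsilon() guards each ratio against division by zero.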
def create_dataset(input_tif, label_tif, step_size, dim, x_dir, y_dir, threshold=0.15):
ds = gdal.Open(input_tif, gdalconst.GA_ReadOnly)
xsize = ds.RasterXSize
ysize = ds.RasterYSize
for x in range(0, xsize-dim, step_size):
for y in range(0, ysize-dim, step_size):
current_label = gdal.Translate('', label_tif, srcWin=[x, y, dim, dim], format='MEM')
current_array = current_label.ReadAsArray()
if np.count_nonzero(current_array) >= (threshold*dim*dim):
print(x, y, xsize, ysize, np.unique(current_array, return_counts=True))
gdal.Translate(os.path.join(x_dir,'{}_{}.tif').format(x, y), input_tif, srcWin=[x, y, dim, dim])
gdal.Translate(os.path.join(y_dir,'{}_{}.tif').format(x, y), label_tif, srcWin=[x, y, dim, dim])
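# Illustrative call (not part of the original script); all paths are hypothetical.
# Tiles of dim x dim pixels are cut every step_size pixels and kept only when at
# least `threshold` of the label pixels are non-zero.
def _example_create_dataset():
    create_dataset(input_tif='data/scene.tif', label_tif='data/labels.tif',
                   step_size=64, dim=128, x_dir='tiles/x', y_dir='tiles/y',
                   threshold=0.15)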
def get_input_label_pairs(input_dir, filenames, dim, multi_class=False):
batch_size = len(filenames)
xs = np.empty((len(filenames), dim, dim))
ys = np.empty((len(filenames), dim, dim))
for e, file in enumerate(filenames):
im = skimage.io.imread(os.path.join(input_dir, 'x', file), plugin='pil')
label = skimage.io.imread(os.path.join(input_dir, 'y', file), plugin='pil')
if not multi_class:
label[label != 0] = 1
xs[e] = normalize_raster_locally(im)
ys[e] = label
return np.expand_dims(xs, -1), np.expand_dims(ys, -1)
class BinaryData(keras.utils.Sequence):
def __init__(self, input_dir, filenames, batch_size, shuffle, dim=128, return_filenames=False):
self.input_dir = input_dir
self.filenames = filenames
self.batch_size = batch_size
self.shuffle = shuffle
self.dim = dim
self.return_filenames = return_filenames
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.filenames) / self.batch_size))
def __getitem__(self, index):
filenames = self.filenames[index*self.batch_size:(index+1)*self.batch_size]
x, y = get_input_label_pairs(self.input_dir, filenames, self.dim)
if self.return_filenames:
return x, y, filenames
else:
return x, y
def on_epoch_end(self):
if self.shuffle == True:
np.random.shuffle(self.filenames)
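# Illustrative usage sketch (not part of the original script); the directory layout
# (input_dir/x and input_dir/y holding matching GeoTIFF tiles) and file names are
# hypothetical.
def _example_binary_data(input_dir='tiles', batch_size=8):
    filenames = sorted(os.listdir(os.path.join(input_dir, 'x')))
    gen = BinaryData(input_dir, filenames, batch_size, shuffle=True, dim=128)
    x_batch, y_batch = gen[0]          # arrays of shape (batch, 128, 128, 1)
    return x_batch.shape, y_batch.shape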
class MultiData(keras.utils.Sequence):
def __init__(self, input_dir, filenames, batch_size, shuffle, dim=128, return_filenames=False, num_classes=6):
self.input_dir = input_dir
self.filenames = filenames
self.batch_size = batch_size
self.shuffle = shuffle
self.dim = dim
self.return_filenames = return_filenames
        self.num_classes = num_classes
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.filenames) / self.batch_size))
def __getitem__(self, index):
filenames = self.filenames[index*self.batch_size:(index+1)*self.batch_size]
x, y = get_input_label_pairs(self.input_dir, filenames, self.dim, multi_class=True)
if self.return_filenames:
return x, y, filenames
else:
return x, y
def on_epoch_end(self):
if self.shuffle == True:
np.random.shuffle(self.filenames)
def mask_to_polygon_in_memory(original_data, array, field_name='label'):
geotrans = original_data.GetGeoTransform()
proj = original_data.GetProjection()
driver = gdal.GetDriverByName('MEM')
dataset = driver.Create('', array.shape[1], array.shape[0], 1, gdal.GDT_Float32)
dataset.GetRasterBand(1).WriteArray(array)
dataset.SetProjection(proj)
dataset.SetGeoTransform(geotrans)
band = dataset.GetRasterBand(1)
driver_mask = gdal.GetDriverByName('MEM')
ds_mask = driver_mask.Create('', array.shape[1], array.shape[0], 1, gdal.GDT_Float32)
ds_mask.SetGeoTransform(geotrans)
ds_mask.SetProjection(proj)
ds_mask_array = (array>0).astype(np.int32)
ds_mask.GetRasterBand(1).WriteArray( ds_mask_array )
mask_band = ds_mask.GetRasterBand(1)
srs = osr.SpatialReference(wkt=proj)
driver = gdal.GetDriverByName("Memory")
outDatasource = driver.Create('',0,0,0,gdal.GDT_Float32)
# outDatasource = driver.CreateDataSource('')
outLayer = outDatasource.CreateLayer("polygonized", srs=srs)
if field_name is None:
field_name='MyFLD'
newField = ogr.FieldDefn(field_name, ogr.OFTInteger)
outLayer.CreateField(newField)
gdal.Polygonize(band, mask_band, outLayer, 0, [], callback=None )
return outDatasource
def merge_shp_files_from_memory(shp_files, outputMergefn):
driverName = 'ESRI Shapefile'
geometryType = ogr.wkbPolygon
out_driver = ogr.GetDriverByName( driverName )
if os.path.exists(outputMergefn):
out_driver.DeleteDataSource(outputMergefn)
out_ds = out_driver.CreateDataSource(outputMergefn)
out_layer = out_ds.CreateLayer(outputMergefn, geom_type=geometryType)
only_ones = True
for ds in shp_files:
# ds = ogr.Open(file)
lyr = ds.GetLayer()
if only_ones:
lyr_def = lyr.GetLayerDefn ()
for i in range(lyr_def.GetFieldCount()):
out_layer.CreateField (lyr_def.GetFieldDefn(i) )
only_ones=False
for feat in lyr:
out_layer.CreateFeature(feat)
del ds, lyr
del out_ds, out_layer
def make_predictions_and_vectorize_binary(model, test_data, output_shp_file, input_dir, field_name='label'):
list_of_shps = []
batch_counter = 0
for x, y, files in test_data:
predictions = model.predict(x)
predictions = (predictions > 0.5).astype(np.uint8)
for i in range(len(files)):
cur_pred = predictions[i]
cur_pred = np.squeeze(cur_pred, -1)
original_data = gdal.Open(os.path.join(input_dir, 'x', files[i]), gdalconst.GA_ReadOnly)
cur_shp = mask_to_polygon_in_memory(original_data, cur_pred, field_name=field_name)
list_of_shps.append(cur_shp)
if batch_counter % 10 == 0:
print('done with batch: {}'.format(batch_counter))
batch_counter += 1
print('merging shp data!')
merge_shp_files_from_memory(list_of_shps, output_shp_file)
def make_predictions_and_vectorize_multiclass(model, test_data, output_shp_file, input_dir, field_name='label'):
list_of_shps = []
batch_counter = 0
for x, y, files in test_data:
predictions = model.predict(x)
predictions = | np.argmax(predictions, -1) | numpy.argmax |
# -*- coding: utf-8 -*-
"""
Created on Nov 04 2020
@author: <NAME>
@supervisor: <NAME>
Tools to work with Red Clumps
"""
import os
import sys
import copy
import math
import numpy as np
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from scipy.optimize import curve_fit
from scipy.optimize import leastsq
from scipy.signal import find_peaks
# Use LaTex fonts
from matplotlib import rc
rc('text', usetex=True)
plt.rcParams.update({'font.size': 12})
# set comma to dot - Brazilian decimal notation
import locale
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
locale.setlocale(locale.LC_NUMERIC, 'pt_BR.UTF-8')
import matplotlib as mpl
mpl.rcParams['axes.formatter.use_locale'] = True
# my library
sys.path.append('/home/botan/OneDrive/Doutorado/VVV_DATA/my_modules/')
import math_functions
class RedClump(object):
def __init__(self,Rv):
self.gc_distance = 8178 # +- 13 pc https://www.aanda.org/articles/aa/full_html/2019/05/aa35656-19/aa35656-19.html
self.Rv = Rv
self.path = '/home/botan/OneDrive/Doutorado/VVV_DATA'
def cartezian_projections(self,d,gal_l,gal_b):
dx = d*np.cos(math.radians(gal_b))*np.cos(math.radians(gal_l))
rx = dx - self.gc_distance
ry = d*np.cos(math.radians(gal_b))*np.sin(math.radians(gal_l))
rz = d*np.sin(math.radians(gal_b))
return rx,ry,rz
def red_clump_distance(self,Ks_mag,Ks_err,c,c_err):
# Ruiz-Dern et al. (2018) https://ui.adsabs.harvard.edu/abs/2018A%26A...609A.116R/abstract
c_0 = 0.66
c_0_err = 0.02
MKs = -1.605
MKs_err = 0.009
# Minniti (2011) AJ Letters 73:L43
mu = Ks_mag - self.Rv * (c - c_0) - MKs
mu_err = np.sqrt(Ks_err**2 + c_err**2 + c_0_err**2 + MKs_err**2)
dist = 10**((mu + 5)/5)
dist_err = 2**((mu + 5)/5) * 5**(mu/5)*np.log(10) * mu_err
return dist,dist_err
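    # Worked example (illustrative, with made-up numbers): for Ks = 13.0 +- 0.05,
    # c = (J-Ks) = 0.86 +- 0.05 and Rv = 0.689, mu = 13.0 - 0.689*(0.86-0.66) + 1.605
    # ~= 14.47, so dist = 10**((mu+5)/5) ~= 7.8 kpc. The error expression above is
    # algebraically equal to dist*ln(10)/5*mu_err, the usual propagation of the
    # distance modulus uncertainty.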
def find_RC_color_peak(self, color, color_mask, bins=50, show=False):
'''
        Find the RC peak color and its sigma, and
        return the fit parameters for the color peak (Gaussian).
'''
y = color[color_mask]
hist, bin_edges = np.histogram(y,bins=bins)
flor = hist > hist[0]
binSize = bin_edges[1]-bin_edges[0]
x = np.empty(len(bin_edges)-1)
x[0] = bin_edges[0] + binSize/2
i = 1
while i < len(bin_edges)-1:
x[i] = x[i-1] + binSize
i+=1
guess = [hist.max(),y.median(),0.5]
fit = leastsq(func=math_functions.single_gaussian_residuals,
x0=guess,
args=(x[flor],hist[flor]))
if show:
func = math_functions.single_gaussian(x,fit[0])
plt.hist(y,bins=bins)
plt.plot(x,func,'-')
plt.ylabel('\#\ stars')
plt.xlabel('J-Ks')
plt.show()
return fit[0]
def find_RC_mag_peak(self, mag, mag_mask, mu1, mu2, bins=100, show=False):
'''
find RC peaks in magnitudes
        return fit parameters for peaks (two gaussians) and
Luminosity Function (exponential)
'''
hist, bin_edges = np.histogram(mag[mag_mask],bins=bins)
binSize = bin_edges[1]-bin_edges[0]
x = np.empty(len(bin_edges)-1)
x[0] = bin_edges[0] + binSize/2
i = 1
while i < len(bin_edges)-1:
x[i] = x[i-1] + binSize
i+=1
# exponential fit to Luminosity Function
mask2fit = ((x<12.2) | ((x>15.5) & (x<16))) # Mask mag around RC
guess = [-1e4,3e3,0.1]
lum_fit = leastsq( func = math_functions.exponential_residuals,
x0 = guess,
args=(x[mask2fit],hist[mask2fit]))
lum_func = math_functions.exponential(x,lum_fit[0])
# RC peaks
RC_peaks = hist - lum_func
mask2peaks = ((x>12)&(x<14.5))
x_RC_peaks = x[mask2peaks]
y_RC_peaks = RC_peaks[mask2peaks]
guess = [RC_peaks.max(),mu1,0.5,0.7*RC_peaks.max(),mu2,0.2]
peak_fit = leastsq( func=math_functions.double_gaussian_residuals,
x0=guess,
args=(x_RC_peaks,y_RC_peaks))
if show:
y = math_functions.double_gaussian(x,peak_fit[0])
plt.hist(mag[mag_mask],bins=bins)
plt.plot(x,y+lum_func,'-')
plt.plot(x,lum_func,'k--')
plt.ylabel('\#\ stars')
plt.xlabel('J-Ks')
plt.show()
return peak_fit[0],lum_fit[0]
def find_RC_dist_peak(self, distances, bins, show=False):
'''
        find RC peaks in distance
        return fit parameters for peaks (two gaussians)
'''
hist, bin_edges = np.histogram(distances,bins=bins)
binSize = bin_edges[1]-bin_edges[0]
x = np.empty(len(bin_edges)-1)
x[0] = bin_edges[0] + binSize/2
i = 1
while i < len(bin_edges)-1:
x[i] = x[i-1] + binSize
i+=1
# gaussian
guess = [hist.max(), 8000 ,1000 , 0.5*hist.max(), 11000, 2000]
peak_fit = leastsq( func=math_functions.double_gaussian_residuals,
x0=guess,
args=(x,hist))
if show:
y1 = math_functions.single_gaussian(x,peak_fit[0][:3])
y2 = math_functions.single_gaussian(x,peak_fit[0][3:])
plt.hist(distances,bins=bins)
plt.plot(x,y1,'k--')
plt.plot(x,y2,'r--')
plt.ylabel('\#\ stars')
plt.xlabel('d [pc]')
plt.show()
return peak_fit[0]
def red_clump_inclination(self,method='2gaussian',plotHist=False):
'''
method = '1gaussian'
method = '2gaussian'
method = 'polynomial'
'''
# params dict [cmin,cmax,ymin,ymax,xmin,xmax]
params_JKs = { 'b293':[0.85,1.00,11.01,15.49,0.7,2.6],
'b294':[0.86,1.00,11.01,15.49,0.7,2.6],
'b295':[0.95,1.20,11.01,15.49,0.7,2.6],
'b296':[1.05,1.35,11.01,15.49,0.7,2.6],
'b307':[1.00,1.40,11.01,15.49,0.7,2.6],
'b308':[1.19,1.71,11.01,15.49,0.7,2.6],
'b309':[1.19,1.71,11.01,15.49,0.7,2.6],
'b310':[1.45,1.80,11.01,15.49,0.7,2.6]}
params_HKs = { 'b293':[0.19,0.32,11.01,15.49,0.1,0.9],
'b294':[0.19,0.32,11.01,15.49,0.1,0.9],
'b295':[0.23,0.36,11.01,15.49,0.1,0.9],
'b296':[0.29,0.45,11.01,15.49,0.1,0.9],
'b307':[0.22,0.45,11.01,15.49,0.1,0.9],
'b308':[0.30,0.59,11.01,15.49,0.1,0.9],
'b309':[0.32,0.62,11.01,15.49,0.1,0.9],
'b310':[0.45,0.70,11.01,15.49,0.1,0.9]}
params_band = { 'J-Ks':params_JKs,
'H-Ks':params_HKs}
# CMD axes dict
axes_dict = { 'b293':[1,3],
'b294':[1,2],
'b295':[1,1],
'b296':[1,0],
'b307':[0,3],
'b308':[0,2],
'b309':[0,1],
'b310':[0,0]}
for color_band in list(params_band.keys()):#[:1]:
params_dict = params_band[color_band]
plt.rcParams.update({'font.size': 14})
fig, axes = plt.subplots(2, 4, figsize=(16,8))
fig.subplots_adjust(wspace=0.1)
tiles = sorted(os.listdir(f'{self.path}/data/psf_ts/'))
for tile in tiles:#['b309','b310','b296']:#tiles:#[:1]:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'{self.path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{self.path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = pd.concat(tileData)
magCols = [_ for _ in tileData.columns if _[:3] == 'MAG']
errCols = [_ for _ in tileData.columns if _[:3] == 'ERR']
err_msk = ( tileData[errCols] > 0.2).values
f = color_band.split('-')[0]
color = tileData[f'mag_{f}'] - tileData.mag_Ks
msk = ~color.isnull()
mag = tileData.mag_Ks
mag = mag[msk]
color = color[msk]
yRCpeak = []
xRCpeak = []
if method == '1gaussian':
# Single Gaussian fit
num_bins = 20
cmin = params_dict[tile][0]
cmax = params_dict[tile][1]
n = cmin
while n < cmax:
dc = abs(cmax-cmin)/10
cmsk = ((color > n) & (color <= n+dc) & (mag < 14))
hist, bin_edges = np.histogram(mag[cmsk],bins=num_bins)
binSize = bin_edges[1]-bin_edges[0]
x = [bin_edges[0] + binSize/2]
i = 1
while i < len(bin_edges)-1:
x.append(x[i-1] + binSize)
i+=1
guess = [500,13.2,0.5,]
fit = leastsq(math_functions.single_gaussian_residuals,guess,args=(x,hist))
params = fit[0]
yfit = math_functions.single_gaussian(x,params)
if plotHist:
fig,ax=plt.subplots()
ax.hist(mag[cmsk],num_bins)
ax.plot(x,yfit,'-')
plt.show()
yRCpeak.append(params[1])
xRCpeak.append(n)
n+=dc
if method == '2gaussian':
# Double Gaussian fit
num_bins = 80
cmin = params_dict[tile][0]
cmax = params_dict[tile][1]
n = cmin
while n < cmax:
dc = 0.05 #abs(cmax-cmin)/10
cmsk = ((color > n) & (color <= n+dc))# & (mag < 17.5))
hist, bin_edges = np.histogram(mag[cmsk],bins=num_bins)
binSize = bin_edges[1]-bin_edges[0]
x = [bin_edges[0] + binSize/2]
i = 1
while i < len(bin_edges)-1:
x.append(x[i-1] + binSize)
i+=1
mu1 = 13.0 #params_dict[tile][6] # initial guess for first peak mag
mu2 = 13.6 #params_dict[tile][7] # initial guess for second peak mag
peak_fit, lum_fit = self.find_RC_mag_peak(mag, cmsk, mu1, mu2, show=False)
#peak_fit, lum_fit = find_RC_mag_peak(1,mag, cmsk, mu1, mu2, bins=num_bins, show=False)
x = np.arange(11,18,(18-12)/1000)
lum_func = math_functions.exponential(x,lum_fit)
RC_fit = math_functions.double_gaussian(x,peak_fit)
fitted_curve = RC_fit + lum_func
crop = x < 14.5
mag_peak = x[crop][np.where(fitted_curve[crop] == fitted_curve[crop].max())[0][0]]
if plotHist:
yaxis_ref = np.histogram(mag[cmsk],bins=num_bins)[0].max()
fig,ax=plt.subplots(figsize=[6,4])
ax.hist(x=mag[cmsk],
bins=num_bins,
histtype='barstacked',
lw=0.5,
color='dodgerblue',
edgecolor='w',
alpha=0.6)
ax.plot(x,RC_fit+lum_func,'r-',lw=1)
ax.plot(x,lum_func,'k--',lw=1)
ptxt = '{:#.3n}'.format(mag_peak)
ax.axvline(mag_peak,lw=0.8,c='gray')
ax.text(s=ptxt,x=mag_peak+0.2,y=0.95*yaxis_ref,ha='left')
title = '{:#.3n}'.format(n) + ' < J-Ks < ' + '{:#.3n}'.format(n+dc)
ax.text(s=f'Tile: {tile} | {title}', x=0.5, y=1.02, ha='center', transform=ax.transAxes)
ax.set_ylabel('Número de estrelas')
ax.set_xlabel('Ks [mag]')
ax.set_ylim(-yaxis_ref*0.01,yaxis_ref+yaxis_ref*0.04)
plt.tight_layout()
plt.savefig(f'{self.path}/figuras_tese/RC_peaks_{tile}_{n}.png',dpi=300)
plt.show()
plt.close()
yRCpeak.append(mag_peak)
xRCpeak.append(n)
n+=dc
if method == 'polynomial':
# Polynomial fit
num_bins = 100
cmin = params_dict[tile][0]
cmax = params_dict[tile][1]
n = cmin
while n < cmax:
dc = (cmax-cmin)/8
cmsk = ((color > n) & (color <= n+dc) & (mag < 17.5))
hist, bin_edges = np.histogram(mag[cmsk],bins=num_bins)
binSize = bin_edges[1]-bin_edges[0]
x = [bin_edges[0] + binSize/2]
i = 1
while i < len(bin_edges)-1:
x.append(x[i-1] + binSize)
i+=1
x = np.array(x)
fit = np.polyfit(x, hist, 200)
yp = np.poly1d(fit)
x2 = np.arange(mag[cmsk].min(),mag[cmsk].max(),(mag[cmsk].max() - mag[cmsk].min())/1000)
msk = ((x2>12.5)&(x2<14))
peaks,_ = find_peaks(yp(x2[msk]))
if plotHist:
fig,ax=plt.subplots()
ax.hist(mag[cmsk],num_bins)
ax.plot(x,yp(x),'-')
ax.plot(x2[msk][peaks],yp(x2[msk][peaks]),"*")
ax.plot(x2[msk][peaks[0]],yp(x2[msk][peaks[0]]),"*")
plt.show()
yRCpeak.append(x2[msk][peaks[0]])
xRCpeak.append(n)
n+=dc
# CMD plot
y = np.array(yRCpeak)
x = np.array(xRCpeak)
xlim= params_dict[tile][4:6]
ylim= params_dict[tile][2:4]
xlabel= color_band
ylabel='Ks [mag]'
guess = [0.6,13]
c,cov = curve_fit( f = math_functions.linear,
xdata = x,
ydata = y,
p0 = guess,
sigma = y*.01,
absolute_sigma = False)
xfit = np.array(xlim)
yfit = math_functions.linear(xfit,c[0],c[1])
bins=(600,400)
cmap = copy.copy(mpl.cm.get_cmap("jet"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("jet")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
clip = ~color.isnull()
N, xedges, yedges = np.histogram2d(color[clip],mag[clip],bins=bins)
ax1 = axes_dict[tile][0]
ax2 = axes_dict[tile][1]
img = axes[ax1,ax2].imshow(np.log10(N.T), origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto', interpolation='nearest', cmap=cmap)
red_inc = '{:#.3n}'.format(c[0])
if ax1==0:
if ax2==1 or ax2==2:
axes[ax1,ax2].plot(x,y,'k.')
axes[ax1,ax2].plot(xfit,yfit,'k-')
axes[ax1,ax2].text( s=f'Tile: {tile} | AKs/E({color_band}) = {red_inc}',
x=0.5,
y=1.025,
ha='center',
transform=axes[ax1,ax2].transAxes)
else:
axes[ax1,ax2].text( s=f'Tile: {tile}',
x=0.025,
y=1.025,
ha='left',
transform=axes[ax1,ax2].transAxes)
else:
axes[ax1,ax2].text( s=f'Tile: {tile}',
x=0.025,
y=1.025,
ha='left',
transform=axes[ax1,ax2].transAxes)
axes[ax1,ax2].set_xlim(xlim)
axes[ax1,ax2].set_ylim(ylim)
axes[ax1,ax2].set_xlabel(xlabel)
axes[ax1,ax2].set_ylabel(ylabel)
axes[ax1,ax2].invert_yaxis()
for im in plt.gca().get_images():
im.set_clim(0, 3)
for ax in fig.get_axes():
ax.label_outer()
cbar_ax = plt.axes([0.92, 0.2, 0.01, 0.6])
cb = fig.colorbar(img,
ticks=[0, 1, 2, 3],
format=r'$10^{%i}$',
shrink=0.6 ,
cax=cbar_ax)
cb.set_label('Número por pixel',rotation=90)
#cb.set_label(r'$\mathrm{number\ in\ pixel}$',rotation=90)
#plt.tight_layout()
plt.savefig(f'{self.path}/figuras_tese/red_clump_reddening_{color_band}.png',dpi=200)
plt.show()
plt.rcParams.update({'font.size': 12})
plt.close()
def find_RC_peaks(self,plot=False,show=False):
# params dict [ymin,ymax,xmin,xmaxc,cmin,cmax,RC_peak1,RC_peak2]
params_dict = { 'b293':[11,17.9,0.0,1.4,0.65,1.10,13.0,13.8],
'b294':[11,17.9,0.0,1.5,0.70,1.20,13.0,13.8],
'b295':[11,17.9,0.2,1.5,0.75,1.30,13.0,13.9],
'b296':[11,17.9,0.2,1.7,0.85,1.64,13.0,14.1],
'b307':[11,17.9,0.1,2.0,0.85,1.50,13.1,13.8],
'b308':[11,17.9,0.1,2.3,1.00,1.60,13.2,14.0],
'b309':[11,17.9,0.1,2.3,1.00,2.00,13.2,14.2],
'b310':[11,17.9,0.3,2.6,1.20,2.00,13.2,14.3]}
tiles = sorted(os.listdir(f'{self.path}/data/psf_ts/'))
cols = ['RC_peak1_Ks_mag','RC_peak1_Ks_sigma',
'RC_peak1_color' ,'RC_peak1_color_sigma',
'RC_peak1_dist' ,'RC_peak1_dist_sigma',
'RC_peak2_Ks_mag','RC_peak2_Ks_sigma',
'RC_peak2_color' ,'RC_peak2_color_sigma',
'RC_peak2_dist' ,'RC_peak2_dist_sigma',
'tile_central_l' ,'tile_central_b']
RC_info = pd.DataFrame(index=tiles,columns=cols)
for tile in tiles:#[:1]:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'{self.path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{self.path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = pd.concat(tileData)
ra = tileData.RA
dec = tileData.DEC
c_icrs = SkyCoord(ra=ra, dec=dec,unit=(u.deg, u.deg))
c_gal = c_icrs.galactic
tileData.loc[tileData.index,'gal_l'] = c_gal.l.deg
tileData.loc[tileData.index,'gal_b'] = c_gal.b.deg
color = tileData.mag_J - tileData.mag_Ks
msk = ~color.isnull()
color = color[msk]
mag = tileData.mag_Ks[msk]
color_min = params_dict[tile][4]
# get RC peaks magnitudes
mag_mask = ((color > color_min))
mu1 = params_dict[tile][6] # initial guess for first peak mag
mu2 = params_dict[tile][7] # initial guess for second peak mag
peak_fit, lum_fit = self.find_RC_mag_peak(mag, mag_mask, mu1, mu2, show=False)
# get RC peaks colors
color_masks = []
peak_colors = []
i = 1
while i < 6:
peak_mag, peak_sigma = peak_fit[i], peak_fit[i+1]
# RC peaks color and color sigma
color_mask = (((color > color_min) & (color < 2.6)) & ((mag > peak_mag - abs(peak_sigma)) & (mag < peak_mag + abs(peak_sigma))))
color_fit = self.find_RC_color_peak(color, color_mask, show=False)
peak_colors += [color_fit[1], abs(color_fit[2])]
color_masks.append(color_mask)
i+=3
# calculate distances
dist1,dist1_sigma = self.red_clump_distance(peak_fit[1],peak_fit[2],peak_colors[0],abs(peak_colors[1]))
dist2,dist2_sigma = self.red_clump_distance(peak_fit[4],peak_fit[5],peak_colors[2],abs(peak_colors[3]))
# tile central l and b
tile_l = (tileData.gal_l.max() - tileData.gal_l.min())/2 + tileData.gal_l.min()
tile_b = (tileData.gal_b.max() - tileData.gal_b.min())/2 + tileData.gal_b.min()
# save peaks info into a pandas DataFrame
info = list(peak_fit[1:3]) + peak_colors[:2] + [dist1,dist1_sigma] + list(peak_fit[4:6]) + peak_colors[2:] + [dist2,dist2_sigma,tile_l,tile_b]
RC_info.loc[tile,cols] = info
if plot:
# Plot CMD
xlim = params_dict[tile][2:4]
ylim = params_dict[tile][:2]
xlabel='J-Ks'
ylabel='Ks [mag]'
bins=(600,400)
cmap = copy.copy(mpl.cm.get_cmap("jet"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("jet")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
clip = ~color.isnull()
N, xedges, yedges = np.histogram2d(color[clip],mag[clip],bins=bins)
fig, axes = plt.subplots(1, 2, figsize=(10,4))
img = axes[0].imshow( np.log10(N.T),
origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto',
interpolation='nearest',
cmap=cmap)
axes[0].errorbar( x=info[2],
y=info[0],
xerr=info[3],
yerr=info[1],
marker="o",
mfc='k',
mec='k',
ecolor='k',
ms=3,
lw=.8,
capsize=3)
axes[0].errorbar( x=info[6],
y=info[4],
xerr=info[7],
yerr=info[5],
marker="o",
mfc='k',
mec='k',
ecolor='k',
ms=3,
lw=.8,
capsize=3)
axes[0].set_xlim(xlim)
axes[0].set_ylim(ylim)
axes[0].set_xlabel(xlabel)
axes[0].set_ylabel(ylabel)
axes[0].invert_yaxis()
axes[0].axvline(color_min,c='k',lw=1)
axes[0].text(s=f'Tile: {tile}',x=0.5,y=1.02,ha='center',transform=axes[0].transAxes)
cb = fig.colorbar( img,
ax=axes[0],
ticks=[0, 1, 2, 3],
format=r'$10^{%i}$',
shrink=0.6,
orientation='vertical')
cb.set_label(r'$\mathrm{Número\ por\ pixel}$',rotation=90)
# to plot the luminosity function and the RC peak fits
x = np.arange(11,18,(18-12)/1000)
lum_func = math_functions.exponential(x,lum_fit)
RC_fit = math_functions.double_gaussian(x,peak_fit)
# mask test:
#axes[0].plot(color[color_masks[0]],mag[color_masks[0]],'b.',ms=.8,alpha=.01)
#axes[0].plot(color[color_masks[1]],mag[color_masks[1]],'b.',ms=.8,alpha=.01)
yaxis_ref = np.histogram(mag[mag_mask],bins=100)[0].max() # reference value
axes[1].hist( x=mag[mag_mask],
bins=100,
histtype='barstacked',
lw=.5,
color='dodgerblue',
edgecolor='w',
alpha=0.6)#,range=range)
axes[1].plot(x,RC_fit+lum_func,'r-',lw=1)
axes[1].plot(x,lum_func,'k--',lw=1)
axes[1].axvline(x=peak_fit[1],
ls='--',
c='gray',
lw=1)
m1 = '{:#.4n}'.format(peak_fit[1])
axes[1].text( s=f'{m1}',
x=peak_fit[1],
y=.9*yaxis_ref)
axes[1].axvline(x=peak_fit[4],
ls='--',
c='gray',
lw=1)
m2 = '{:#.4n}'.format(peak_fit[4])
axes[1].text( s=f'{m2}',
x=peak_fit[4],
y=.8*yaxis_ref)
axes[1].set_xlabel(ylabel)
axes[1].set_ylabel('Número de estrelas')
a = '{:#.2n}'.format(color_min)
axes[1].text(s=f'J-Ks > {a}',x=0.5,y=1.02,ha='center',transform=axes[1].transAxes)
axes[1].yaxis.set_label_position("right")
axes[1].yaxis.tick_right()
axes[1].set_ylim(-yaxis_ref*.01,yaxis_ref+yaxis_ref*.04)
plt.tight_layout()
plt.savefig(f'{self.path}/figuras_tese/{tile}_RC_bumps.png',dpi=200)
if show:
plt.show()
plt.close()
return RC_info
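# Typical call (illustrative only; the output file name is hypothetical):
# rc = RedClump(Rv=0.689)
# rc_table = rc.find_RC_peaks(plot=True, show=False)
# rc_table.to_csv('RC_peaks_by_tile.csv')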
''' ======================= WORK IN PROGRESS ========================'''
def RC_peak_distance_distribution(self,plot=False,show=False):
path = '/home/botan/OneDrive/Doutorado/VVV_DATA'
params_dict = { 'b293':[11,17.9,0.0,1.4,0.65,1.10,13.0,13.8],
'b294':[11,17.9,0.0,1.5,0.70,1.20,13.0,13.8],
'b295':[11,17.9,0.2,1.5,0.75,1.30,13.0,13.9],
'b296':[11,17.9,0.2,1.7,0.85,1.64,13.0,14.1],
'b307':[11,17.9,0.1,2.0,0.85,1.50,13.1,13.8],
'b308':[11,17.9,0.1,2.3,1.00,1.60,13.2,14.0],
'b309':[11,17.9,0.1,2.3,1.00,2.00,13.2,14.2],
'b310':[11,17.9,0.3,2.6,1.20,2.00,13.2,14.3]}
tiles = sorted(os.listdir(f'{path}/data/psf_ts/'))
cols = ['mag_peak1','mag_err_peak1',
'color_peark1','color_err_peark1',
'distance1','distance_err1',
'x1','y1','z1',
'mag_peak2','err_peak2',
'color_peark2','color_err_peark2',
'distance2','distance_err2',
'x2','y2','z2',
'tile_l','tile_b']
RC_info = pd.DataFrame(index=tiles,columns=cols)
for tile in tiles:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'{path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = pd.concat(tileData)
ra = tileData.RA
dec = tileData.DEC
c_icrs = SkyCoord(ra=ra, dec=dec,unit=(u.deg, u.deg))
c_gal = c_icrs.galactic
tileData.loc[tileData.index,'gal_l'] = c_gal.l.deg
tileData.loc[tileData.index,'gal_b'] = c_gal.b.deg
color = tileData.mag_J - tileData.mag_Ks
color_err = np.sqrt((tileData.er_J)**2 + (tileData.er_Ks)**2)
msk = ~color.isnull()
color = color[msk]
color_err = color_err[msk]
mag = tileData.mag_Ks[msk]
err = tileData.er_Ks[msk]
color_min = params_dict[tile][4]
mag_max = 14.5
#CMD_crop = ((color > color_min) & (mag < mag_max))
# get RC peaks magnitudes
mag_mask = ((color > color_min))
mu1 = params_dict[tile][6] # initial guess for first peak mag
mu2 = params_dict[tile][7] # initial guess for second peak mag
peak_fit, lum_fit = self.find_RC_mag_peak(mag, mag_mask, mu1, mu2, show=False)
peak_sigma = 2
peaks_lim = [peak_fit[1] - peak_sigma*peak_fit[2], peak_fit[4] + peak_sigma*peak_fit[5]]
CMD_crop = ((color > color_min) & ((mag > peaks_lim[0])&(mag < peaks_lim[1])))
# get RC peaks colors
color_masks = []
peak_colors = []
i = 1
while i < 6:
peak_mag, peak_sigma = peak_fit[i], peak_fit[i+1]
# RC peaks color and color sigma
color_mask = (((color > color_min) & (color < 2.6)) & ((mag > peak_mag - abs(peak_sigma)) & (mag < peak_mag + abs(peak_sigma))))
color_fit = self.find_RC_color_peak(color, color_mask, show=False)
peak_colors += list(abs(color_fit[1:]))
color_masks.append(color_mask)
i+=3
# get peaks distances
Rv = 0.689 # kept for reference; red_clump_distance uses self.Rv internally
binsize = 50
dist, dist_sigma = self.red_clump_distance(mag,err,color,color_err)
#gc_dist = self.gc_distance
dist_peaks = self.find_RC_dist_peak(distances=dist[CMD_crop],bins=binsize)
# distance using peak in mag
dist2,dist_sigma2 = self.red_clump_distance(peak_fit[1],peak_fit[2],peak_colors[0],abs(peak_colors[1]))
dist3,dist_sigma3 = self.red_clump_distance(peak_fit[4],peak_fit[5],peak_colors[2],abs(peak_colors[3]))
# tile centre in galactic coordinates
tile_l = (tileData.gal_l.max() - tileData.gal_l.min())/2 + tileData.gal_l.min()
tile_b = (tileData.gal_b.max() - tileData.gal_b.min())/2 + tileData.gal_b.min()
cartesian2 = self.cartezian_projections(dist2,tile_l,tile_b)
cartesian3 = self.cartezian_projections(dist3,tile_l,tile_b)
params = (list(peak_fit[1:3]) + peak_colors[:2] + [dist2,dist_sigma2] + list(cartesian2)
+ list(peak_fit[4:]) + peak_colors[2:] + [dist3,dist_sigma3] + list(cartesian3) +
[tile_l, tile_b])
RC_info.loc[tile,cols] = params
if plot:
# Plot CMD and distance histogram
xlim = params_dict[tile][2:4]
ylim = params_dict[tile][:2]
xlabel='J-Ks'
ylabel='Ks [mag]'
bins=(600,400)
cmap = copy.copy(mpl.cm.get_cmap("jet"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("jet")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
N, xedges, yedges = np.histogram2d(color,mag,bins=bins)
fig, axes = plt.subplots(1, 2, figsize=(10,4))
img = axes[0].imshow( np.log10(N.T),
origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto',
interpolation='nearest',
cmap=cmap)
axes[0].add_patch(Rectangle(xy=(color_min,mag_max),
width=xlim[1],
height=peaks_lim[0]-peaks_lim[1],
fc ='red',
ec ='none',
lw = 0,
alpha=0.3) )
axes[0].set_xlim(xlim)
axes[0].set_ylim(ylim)
axes[0].set_xlabel(xlabel)
axes[0].set_ylabel(ylabel)
axes[0].invert_yaxis()
axes[0].text(s=f'Tile: {tile}',x=0.5,y=1.02,ha='center',transform=axes[0].transAxes)
cb = fig.colorbar( img,
ax=axes[0],
ticks=[0, 1, 2, 3],
format=r'$10^{%i}$',
shrink=0.6,
orientation='vertical')
cb.set_label(r'$\mathrm{Número\ por\ pixel}$',rotation=90)
x = np.arange(dist[CMD_crop].min(),dist[CMD_crop].max(),(dist[CMD_crop].max() - dist[CMD_crop].min())/1000)
y1 = math_functions.single_gaussian(x,dist_peaks[:3])
y2 = math_functions.single_gaussian(x,dist_peaks[3:])
axes[1].hist( x=dist[CMD_crop]/1000,
bins=binsize,
histtype='barstacked',
lw=.5,
color='dodgerblue',
edgecolor='w',
alpha=0.6)#,range=range)
axes[1].plot(x/1000,y1,'r--',lw=.8)
axes[1].plot(x/1000,y2,'r--',lw=.8)
axes[1].plot(x/1000,y1+y2,'k-',lw=.8)
axes[1].axvline(dist2/1000,ls='--',lw=.8,c='gray')
axes[1].axvline(dist3/1000,ls='--',lw=.8,c='gray')
axes[1].set_xlabel('d [kpc]')
axes[1].set_ylabel('Número de estrelas')
axes[1].yaxis.set_label_position("right")
axes[1].yaxis.tick_right()
plt.tight_layout()
plt.savefig(f'{path}/figuras_tese/{tile}_RC_dist_hist.png',dpi=200)
if show:
plt.show()
plt.close()
return RC_info
def cmd(self):
#params dict [cmin,cmax,ymin,ymax,xmin,xmax]
params_dict = { 'b293':[0.85,1.00,11,17.9,0.7,2.6],
'b294':[0.86,1.00,11,17.9,0.7,2.6],
'b295':[0.95,1.20,11,17.9,0.7,2.6],
'b296':[1.05,1.40,11,17.9,0.7,2.6],
'b307':[1.00,1.40,11,17.9,0.7,2.6],
'b308':[1.19,1.71,11,17.9,0.7,2.6],
'b309':[1.19,1.71,11,17.9,0.7,2.6],
'b310':[1.45,2.00,11,17.9,0.7,2.6]}
params_HKs = { 'b293':[0.17,0.29,11,17.9,0.01,1.0],
'b294':[0.19,0.32,11,17.9,0.01,1.0],
'b295':[0.23,0.36,11,17.9,0.01,1.0],
'b296':[0.27,0.45,11,17.9,0.01,1.0],
'b307':[0.18,0.39,11,17.9,0.01,1.0],
'b308':[0.28,0.59,11,17.9,0.01,1.0],
'b309':[0.28,0.62,11,17.9,0.01,1.0],
'b310':[0.42,0.70,11,17.9,0.01,1.0]}
# CMD axes dict
axes_dict = { 'b293':[1,3],
'b294':[1,2],
'b295':[1,1],
'b296':[1,0],
'b307':[0,3],
'b308':[0,2],
'b309':[0,1],
'b310':[0,0]}
filters = ['mag_H','mag_J']
for band in filters:
fig, axes = plt.subplots(2, 4, figsize=(16,8))
tiles = sorted(os.listdir('/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/'))
for tile in tiles:#[:1]:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = pd.concat(tileData)
color = tileData[band] - tileData.mag_Ks
msk = ~color.isnull()
mag = tileData.mag_Ks
mag = mag[msk]
color = color[msk]
xlim= params_dict[tile][4:6]
ylim= params_dict[tile][2:4]
xlabel=f'{band[-1]}-Ks'
ylabel='Ks [mag]'
bins=(600,400)
cmap = copy.copy(mpl.cm.get_cmap("jet"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("jet")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
N, xedges, yedges = np.histogram2d(color,mag,bins=bins)
import copy
import numpy as np
# sympy order matters; it overrides scipy (???)
import sympy as sym
import scipy
import scipy.signal
import scipy.integrate
from scipy.linalg import solve_continuous_are
from matplotlib import pyplot as plt
# define constants
M = sym.Symbol("M")
m1 = sym.Symbol("m1")
m2 = sym.Symbol("m2")
l1 = sym.Symbol("l1")
l2 = sym.Symbol("l2")
g = sym.Symbol("g")
constant_values = [(g,9.81),(M,1000),(m1,100),(m2,100),(l1,20),(l2,10)]
# core state variables
t = sym.Symbol("t")
x = sym.Function("x")(t)
dx = x.diff(t)
t1 = sym.Function("theta1")(t)
dt1 = t1.diff(t)
t2 = sym.Function("theta2")(t)
dt2 = t2.diff(t)
states = [x,dx,t1,dt1,t2,dt2]
# other variables
ddx = dx.diff(t)
ddt1 = dt1.diff(t)
ddt2 = dt2.diff(t)
F = sym.Symbol("F")
# Energy Equations (basic inputs)
T = 0.5 * (M + m1 + m2)*dx**2 - m1*l1*dx*dt1*sym.cos(t1) + 0.5*m1*l1**2*dt1**2 - m2*l2*dx*dt2*sym.cos(t2) + 0.5*m2*l2**2*dt2**2
V = -m1*g*l1*sym.cos(t1) - m2*g*l2*sym.cos(t2)
# Simulation and Control Parameters
Times = np.arange(0,60,1e-3)
IC = np.array([0,1e-3,0,5e-5,0,1e-4])
# Q = np.diag([1,100,1,10,1,10])
# R = 0.001
Q = np.diag([1,0.5,1000,500,1000,500])
R = 1.0e-4
STEP = 0.1
if __name__ == "__main__":
###############################################################################################
######################################## Part A ###############################################
###############################################################################################
# compute Lagrangian
L = T - V
# compose equations of motion from Lagrangian
EOM = [
sym.diff(sym.diff(L,dx),t) - sym.diff(L,x) - F,
sym.diff(sym.diff(L,dt1),t) - sym.diff(L,t1),
sym.diff(sym.diff(L,dt2),t) - sym.diff(L,t2)
]
G = {k:v.simplify() for k,v in sym.solve(EOM, [ddx,ddt1,ddt2]).items()}
print("Equations of motion:")
sym.pprint(G)
###############################################################################################
######################################## Part B ###############################################
###############################################################################################
# construct Jacobian
J_orig = sym.Matrix([
[sym.diff(G[ddx],v) for v in states],
[sym.diff(G[ddt1],v) for v in states],
[sym.diff(G[ddt2],v) for v in states]
])
# substitute our origin conditions (x,dx,t1,dt1,t2,dt2) = 0
J = J_orig.subs([(v,0) for v in states]).doit()
print("Jacobian at origin:")
sym.pprint(J)
# Convert to our linearized state space representation
A = sym.Matrix([sym.zeros(1,6), J[0,:], sym.zeros(1,6), J[1,:], sym.zeros(1,6), J[2,:]])
A[0,1] = A[2,3] = A[4,5] = sym.Rational(1)
B = sym.Matrix([0,1/M,0,1/(l1*M),0,1/(l2*M)])
print("State Space Representation (A,B):")
sym.pprint(A)
sym.pprint(B)
###############################################################################################
######################################## Part C ###############################################
###############################################################################################
# Determine Controllability Conditions
Controllability = B
for i in range(1,6):
Controllability = Controllability.row_join(A**i * B)
print("Controllability matrix: ")
sym.pprint(Controllability)
# Controllability conditions:
print("System is controllable iff the following is satisfied: ")
print(Controllability.det())
###############################################################################################
######################################## Part D ###############################################
###############################################################################################
# check conditions in part D
Controllability_subs = Controllability.subs(constant_values)
# sanity check rank
assert(Controllability_subs.rank() == 6)
print("Part D: Rank {}".format(Controllability_subs.rank()))
sym.pprint(Controllability_subs)
# update A,B matrices with these values
A = np.array(A.subs(constant_values),dtype=np.float32)
B = np.array(B.subs(constant_values),dtype=np.float32)
# solve the Riccati equation:
P = solve_continuous_are(A, B, Q, R)
K = (B.T@P)/R
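# solve_continuous_are returns the stabilizing solution P of the continuous-time
# algebraic Riccati equation A'P + PA - PB R^{-1} B'P + Q = 0; the optimal
# state-feedback gain is then K = R^{-1} B'P, which reduces to (B.T @ P)/R here
# because R is scalar.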
# plot the response to an initial offset from 0
# closed loop system
SYS_CL = scipy.signal.StateSpace(A-B*K,B,np.eye(6))
# simulate the nonlinear system
G_subs = {k:sym.lambdify(states,v.subs(constant_values).subs(F,0).evalf(),"numpy") for k,v in G.items()}
def ODE(time, y):
state = [
y[1],
G_subs[ddx](*y),
y[3],
G_subs[ddt1](*y),
y[5],
G_subs[ddt2](*y)
]
command = (B*K)@np.array(y)
return state - command
# integrate
T,Y,X = scipy.signal.lsim(SYS_CL, None, Times, IC)
# plot response
plt.figure("Closed Loop Response")
plt.plot(T,X[:,0], 'b')
plt.plot(T,X[:,2], 'r')
plt.plot(T,X[:,4], 'k')
nonlinear = scipy.integrate.solve_ivp(ODE, [Times[0],Times[-1]], IC)
assert(nonlinear.success)
# plot the results
plt.plot(nonlinear.t, nonlinear.y[0,:], '--b')
plt.plot(nonlinear.t, nonlinear.y[2,:], '--r')
plt.plot(nonlinear.t, nonlinear.y[4,:], '--k')
plt.grid(True)
plt.legend(["Linear X","Linear Theta1","Linear Theta2", "Nonlinear X", "Nonlinear Theta1", "Nonlinear Theta2"])
plt.show()
# check that eigenvalues of the linearized closed loop system are all in the LHP
eigenvalues = np.linalg.eig(A-B*K)[0]
assert(np.all(eigenvalues.real < 0)) # closed-loop poles must lie in the left half-plane
print("The linearized closed loop system is locally asymptotically stable.")
###############################################################################################
######################################## Part E ###############################################
###############################################################################################
# examine the observability of various different C matrices
Potentials = [
sym.Matrix([
[1,0,0,0,0,0], # x
]),
sym.Matrix([
[0,0,1,0,0,0], # t1
[0,0,0,0,1,0] # t2
]),
sym.Matrix([
[1,0,0,0,0,0], # x
[0,0,0,0,1,0] # t2
]),
sym.Matrix([
[1,0,0,0,0,0], # x
[0,0,1,0,0,0], # t1
[0,0,0,0,1,0] # t2
])
]
Observables = []
for i,c in enumerate(Potentials):
# check determinant of full system
A_temp = sym.Matrix(A)
obs = sym.Matrix(c)
for j in range(1,6):
obs = obs.col_join(c*(A_temp)**j)
if obs.rank() == 6:
print("Potential C matrix #{} is observable. Rank: {}".format(i+1, obs.rank()))
Observables.append(sym.Matrix(c))
else:
print("Potential C matrix #{} is NOT observable. Rank: {}".format(i+1, obs.rank()))
###############################################################################################
######################################## Part F ###############################################
###############################################################################################
# construct desired poles (order of magnitude larger than controller eigenvalues)
observer_poles = np.array([complex(10*sym.re(eig), sym.im(eig)) for eig in eigenvalues])
# obtain observers for each output vector
for i,C in enumerate(Observables):
# initialize plots
fig = plt.figure("Closed Loop Observer Response C_{}".format(i+1))
# place poles of A-LC an order of magnitude farther left than the controller poles
C = np.array(C, dtype=np.float32)
L = scipy.signal.place_poles(A.T, C.T, observer_poles).gain_matrix.T
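# Observer design uses duality: placing the poles of (A - LC) is equivalent to a
# state-feedback placement on the pair (A.T, C.T), so the gain returned for the
# transposed system is transposed back to obtain L.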
print("Found observer for potential C matrix #{}".format(i+1))
# simulate the linear response
Ao = np.block([[A-B@K, B@K],[np.zeros(A.shape),A-L@C]])
Bo = np.block([[B],[np.zeros(B.shape)]])
Co = np.block([[C, np.zeros(C.shape)]])
import os, os.path as osp
import json
import shutil
import numpy as np
from deeply.metrics import tversky_index
from deeply.model.transfer.backbone import BackBone
import tensorflow as tf
from tensorflow.data import Dataset
from tensorflow.keras.backend import int_shape
from tensorflow.keras.layers import (
Layer,
Conv2D,
MaxPooling2D,
Activation,
Cropping2D,
Conv2DTranspose,
Dropout,
Concatenate,
Add,
Multiply,
BatchNormalization
)
from tensorflow.keras.callbacks import ModelCheckpoint
import imgaug.augmenters as iaa
from deeply.util.model import get_checkpoint_prefix, get_input
from deeply.model.base import BaseModel
from deeply.model.layer import ActivationBatchNormDropout
from deeply.generators import BaseDataGenerator
from deeply.callbacks import GeneralizedEarlyStopping, PlotHistoryCallback
from deeply.metrics import jaccard_index, dice_coefficient, tversky_index
from bpyutils.util.array import sequencify
from bpyutils.util.system import make_archive, make_temp_dir
from bpyutils.util.datetime import get_timestamp_str
# verify with paper...
def kernel_initializer(shape, dtype = None):
n = np.prod(shape[:3])
stddev = np.sqrt(2 / n)
return tf.random.normal(shape, stddev = stddev, dtype = dtype)
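# He-style initialization: the standard deviation sqrt(2/n) is computed from the
# fan-in n = kernel_height * kernel_width * input_channels. For example, a 3x3
# convolution reading 64 channels gives n = 3*3*64 = 576 and stddev ~ 0.059.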
class ConvBlock(Layer):
def __init__(self, filters, kernel_size = 3, activation = "relu", width = 2, batch_norm = True,
dropout_rate = 0.2, kernel_initializer = kernel_initializer, padding = "valid", *args, **kwargs):
self._super = super(ConvBlock, self)
self._super.__init__(*args, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.activation = activation
self.batch_norm = batch_norm
self.dropout_rate = dropout_rate
self.padding = padding
self.kernel_initializer = kernel_initializer
self.convs = [ ]
self.batch_norms = [ ]
self.activations = [ ]
self.dropouts = [ ]
for _ in range(width):
conv = Conv2D(filters = filters, kernel_size = kernel_size,
kernel_initializer = kernel_initializer, padding = padding)
self.convs.append(conv)
activation = ActivationBatchNormDropout(activation = activation,
batch_norm = batch_norm, dropout_rate = dropout_rate)
self.activations.append(activation)
self.width = width
def call(self, inputs, training = False):
x = inputs
for i in range(self.width):
x = self.convs[i](x, training = training)
x = self.activations[i](x, training = training)
return x
def get_config(self):
return {
"filters": self.filters,
"kernel_size": self.kernel_size,
"activation": self.activation,
"width": self.width,
"batch_norm": self.batch_norm,
"dropout_rate": self.dropout_rate,
"padding": self.padding,
"kernel_initializer": self.kernel_initializer
}
@classmethod
def from_config(cls, config):
return cls(**config)
def get_crop_length(a, b):
c = a - b
assert (c >= 0)
if c % 2 != 0:
c1, c2 = int(c/2), int(c/2) + 1
else:
c1, c2 = int(c/2), int(c/2)
return (c1, c2)
def get_crop_shape(a, b):
a_shape = int_shape(a)
b_shape = int_shape(b)
cw1, cw2 = get_crop_length(a_shape[2], b_shape[2])
ch1, ch2 = get_crop_length(a_shape[1], b_shape[1])
return (ch1, ch2), (cw1, cw2)
def copy_crop_concat_block(x, skip_layer, **kwargs):
ch, cw = get_crop_shape(skip_layer, x)
skip_layer_cropped = Cropping2D(cropping = (ch, cw))(skip_layer)
x = Concatenate()([x, skip_layer_cropped])
return x
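# Example of the crop arithmetic above: concatenating a 64x64 skip tensor with a
# 56x56 up-sampled tensor gives a difference of 8 pixels per axis, so the skip
# connection is cropped by ((4, 4), (4, 4)) before the channel-wise concatenation.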
class UNetModel(BaseModel):
def __init__(self, *args, **kwargs):
self._super = super(UNetModel, self)
self._super.__init__(*args, **kwargs)
def compile(self, *args, **kwargs):
kwargs["optimizer"] = kwargs.get("optimizer", "sgd")
kwargs["loss"] = kwargs.get("loss", "categorical_crossentropy")
metrics = sequencify(kwargs.get("metrics", []))
if kwargs["loss"] == "categorical_crossentropy" and not metrics:
metrics.append("categorical_accuracy")
metrics.append(dice_coefficient)
metrics.append(jaccard_index)
metrics.append(tversky_index)
kwargs["metrics"] = metrics
return self._super.compile(*args, **kwargs)
def UNet(
x = None,
y = None,
channels = 1,
n_classes = 2,
layer_depth = 4,
n_conv = 2,
kernel_size = 3,
init_filters = 64,
filter_growth_rate = 2,
activation = "relu",
padding = "valid",
batch_norm = True, # recommendation, don't use batch norm and dropout at the same time.
dropout_rate = 0,
pool_size = 2,
mp_strides = 2,
up_conv_size = 2,
final_conv_size = 1,
final_activation = "softmax",
kernel_initializer = kernel_initializer,
name = "unet",
attention_gate = None,
backbone = None,
backbone_weights = "imagenet",
freeze_backbone = False,
weights = None,
):
"""
Constructs a U-Net.
:param x: Input image width.
:param y: Input image height.
:param channels: Number of channels for input image.
:param layer_depth: Depth of the U-Net.
:param n_conv: Number of convolutions in each layer.
:param kernel_size: Size of kernel in a convolution.
:param init_filters: Number of filters in initial convolution.
:param filter_growth_rate: Growth rate of filter over convolutions.
:param activation: Activation function after each convolution.
:param batch_norm: Batch Normalization after each convolution.
:param dropout_rate: Dropout rate after each convolution.
:param pool_size: Size of max pooling layer.
:param mp_strides: Size of strides of max pooling layer.
:param up_conv_size: Size of upsampling layer.
:param final_activation: Activation function on final layer.
:param final_conv_size: Kernel size of final convolution.
:param kernel_initializer: Weight initializer for each convolution block.
:param attention_gate: Use a custom attention gate.
:param name: Name of the model.
:param backbone: Optional backbone (encoder) network used to build the contracting path.
:param backbone_weights: Weights to initialize the backbone with (e.g. "imagenet").
:param freeze_backbone: Whether to freeze the backbone weights.
:param weights: Optional path to pre-trained model weights to load.
References
[1]. Ronneberger, Olaf, et al. “U-Net: Convolutional Networks for Biomedical Image Segmentation.” ArXiv:1505.04597 [Cs], May 2015. arXiv.org, http://arxiv.org/abs/1505.04597.
>>> from deeply.model.unet import UNet
>>> model = UNet()
"""
input_shape = (x, y, channels)
input_ = get_input(*input_shape)
filters = init_filters
conv_block_args = dict(kernel_size = kernel_size,
activation = activation, dropout_rate = dropout_rate, width = n_conv,
kernel_initializer = kernel_initializer, padding = padding, batch_norm = batch_norm)
contracting_layers = [ ]
if backbone:
backbone = BackBone(backbone, input_tensor = input_, input_shape = input_shape, weights = backbone_weights)
input_ = backbone._model.input
m = backbone._model.output
for feature_layer in backbone.get_feature_layers():
contracting_layers.append(feature_layer.output)
filters = filters * filter_growth_rate
else:
m = input_
# contracting path
for _ in range(layer_depth):
m = ConvBlock(filters = filters, **conv_block_args)(m)
contracting_layers.append(m)
m = MaxPooling2D(pool_size = pool_size, strides = mp_strides)(m)
filters = filters * filter_growth_rate
m = ConvBlock(filters = filters, **conv_block_args)(m)
# expanding path
for skip_layer in reversed(contracting_layers):
filters = filters // filter_growth_rate
m = Conv2DTranspose(filters = filters, kernel_size = up_conv_size,
strides = pool_size, padding = padding,
kernel_initializer = kernel_initializer)(m)
m = Activation(activation = activation)(m)
if attention_gate:
skip_layer = attention_gate(skip_layer, m)
m = copy_crop_concat_block(m, skip_layer)
m = ConvBlock(filters = filters, **conv_block_args)(m)
m = Conv2D(filters = n_classes, kernel_size = final_conv_size, padding = padding,
kernel_initializer = kernel_initializer)(m)
output_layer = Activation(activation = final_activation, name = "outputs")(m)
model = UNetModel(inputs = [input_], outputs = [output_layer], name = name)
if weights:
model.load_weights(weights)
return model
class AttentionGate(Layer):
def __init__(self, batch_norm = True, dropout_rate = 0, *args, **kwargs):
self.super = super(AttentionGate, self)
self.super.__init__(*args, **kwargs)
self.batch_norm = batch_norm
self.dropout_rate = dropout_rate
def call(self, input_, gating_signal, training = False):
t = Conv2D(filters = 1, kernel_size = (1, 1))(input_)
g = Conv2D(filters = 1, kernel_size = (1, 1))(gating_signal)
x = Add()([t, g])
if training and self.batch_norm:
x = BatchNormalization()(x)
x = Activation("relu")(x)
if training and self.dropout_rate:
x = Dropout(rate = self.dropout_rate)(x)
x = Conv2D(filters = 1, kernel_size = (1, 1))(x)
x = Activation("sigmoid")(x)
x = Multiply()([input_, x])
return x
def AttentionUNet(*args, **kwargs):
"""
Constructs an Attention U-Net.
References
[1]. Oktay, Ozan, et al. “Attention U-Net: Learning Where to Look for the Pancreas.” ArXiv:1804.03999 [Cs], May 2018. arXiv.org, http://arxiv.org/abs/1804.03999.
>>> from deeply.model.unet import AttentionUNet
>>> model = AttentionUNet()
"""
batch_norm = kwargs.get("batch_norm", True)
dropout_rate = kwargs.get("dropout_rate", 0)
_attention_gate = kwargs.pop("attention_gate", AttentionGate(
batch_norm = batch_norm, dropout_rate = dropout_rate))
unet = UNet(
name = "attention-unet",
attention_gate = _attention_gate,
**kwargs
)
return unet
def UnetPP(*args, **kwargs):
"""
Constructs a U-Net++.
>>> from deeply.model.unet import UnetPP
>>> model = UnetPP()
"""
layer_depth = kwargs.get("layer_depth", 4)
unets = [ ]
for i in range(layer_depth):
unet = UNet(
name = "unet++-%s" % i,
layer_depth = i,
**kwargs
)
unets.append(unet)
unetpp = None
return unetpp
def UNet3D(*args, **kwargs):
"""
Constructs a 3D U-Net.
>>> from deeply.model.unet import UNet3D
>>> model = UNet3D()
"""
unet = UNet(
name = "unet-3d",
**kwargs
)
return unet
def _center_crop(arr, shape):
arr_shape = arr.shape
diff_x = (arr_shape[0] - shape[0])
diff_y = (arr_shape[1] - shape[1])
assert diff_x >= 0
assert diff_y >= 0
if diff_x == 0 and diff_y == 0:
return arr
off_lx = diff_x // 2
off_ly = diff_y // 2
off_rx = diff_x - off_lx
off_ry = diff_y - off_ly
cropped = arr[ off_lx : arr_shape[0] - off_rx, off_ly : arr_shape[1] - off_ry ]
return cropped
# augmentor = iaa.Sequential([
# iaa.CenterCropToFixedSize(
# width = shape[0],
# height = shape[1]
# )
# ])
# from_, to = (0, 1, 2), (1, 0, 2)
# arr = arr.numpy()
# arr = np.moveaxis(arr, from_, to)
# aug = augmentor(images = [arr])
# aug = squash(aug)
# aug = np.moveaxis(aug, to, from_)
# return aug
def _crop(shape):
def crop(x, y):
dtype = y.dtype
label = tf.py_function(_center_crop, [y, shape], dtype)
return x, label
return crop
def _format_dataset(ds, mapper = None, target_shape = None, batch_size = 1, **kwargs):
if isinstance(ds, Dataset):
if mapper:
ds = ds.map(mapper)
if target_shape:
ds = ds.map(_crop(target_shape))
ds = ds.batch(batch_size)
return ds
class Trainer:
def __init__(self, artifacts_path = None):
self.artifacts_path = osp.abspath(artifacts_path or get_timestamp_str('%Y%m%d%H%M%S'))
def fit(self, model, train, val = None, batch_size = 32, early_stopping = True, monitor = "loss", **kwargs):
target_shape = model.output_shape[1:]
mapper = kwargs.pop("mapper", None)
format_args = dict(target_shape = target_shape, mapper = mapper,
batch_size = batch_size)
train = _format_dataset(train, **format_args)
if val:
val = _format_dataset(val, **format_args)
if isinstance(train, BaseDataGenerator):
kwargs["steps_per_epoch"] = train.n_samples // batch_size
if isinstance(val, BaseDataGenerator):
kwargs["validation_steps"] = val.n_samples // batch_size
callbacks = sequencify(kwargs.get("callbacks", []))
prefix = get_checkpoint_prefix(model)
if val:
monitor = "val_%s" % monitor
history = None
with make_temp_dir() as tmp_dir:
filepath = osp.join(tmp_dir, "%s.hdf5" % prefix)
checkpoint = ModelCheckpoint(
filepath = filepath,
monitor = monitor,
save_best_only = True,
save_weights_only = True
)
callbacks.append(checkpoint)
plothistory = PlotHistoryCallback(fpath = osp.join(tmp_dir, "history.png"))
callbacks.append(plothistory)
# if early_stopping:
# gen_early_stop = GeneralizedEarlyStopping(baseline = 0.05)
# callbacks.append(gen_early_stop)
kwargs["callbacks"] = callbacks
history = model.fit(train, validation_data = val, **kwargs)
filepath = osp.join(tmp_dir, "%s.json" % prefix)
with open(filepath, mode = "w") as f:
json.dump(history.history, f)
make_archive(self.artifacts_path, "zip", tmp_dir)
return history
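# Minimal training sketch (assumes a tf.data.Dataset of (image, mask) pairs named
# `train_ds`/`val_ds`; the names are illustrative, not part of this module):
# model = UNet(x=572, y=572, channels=1, n_classes=2)
# model.compile(optimizer="adam", loss="categorical_crossentropy")
# trainer = Trainer(artifacts_path="artifacts/unet-run")
# trainer.fit(model, train_ds, val=val_ds, batch_size=8, epochs=10)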
def _generate_samples(x = 200, y = None, channels = 1, n_samples = 100,
r_min_f = 1, r_max_f = 10, seg_min_f = 1, seg_max_f = 5):
if not y:
y = x
features, labels = np.empty((n_samples, y, x, channels)), \
np.empty((n_samples, y, x))
size = min(x, y)
compute_factor = lambda f: (f / 100) * size
min_radius = compute_factor(r_min_f)
max_radius = compute_factor(r_max_f)
min_segs = compute_factor(seg_min_f)
max_segs = compute_factor(seg_max_f)
for i in range(n_samples):
feature = np.ones((y, x, channels))
mask = np.zeros((y, x), dtype = bool)
n_segs = np.random.randint(min_segs, max_segs)
for _ in range(n_segs):
from_ = np.random.randint(0, x)
to = np.random.randint(0, y)
radius = np.random.randint(min_radius, max_radius)
cx, cy = np.ogrid[-to:y-to, -from_:x-from_]
circle = cx*cx + cy*cy <= radius*radius
color = np.random.randint(1, 255)
mask = np.logical_or(mask, circle)
import pandas as pd
import numpy as np
import csv
import urllib.request
import json
from datetime import datetime
from datetime import timedelta
from sklearn.preprocessing import MinMaxScaler
import web_scrapers
import os
def load_real_estate_data(filename, state_attr, state):
df = pd.read_csv(filename, encoding="ISO-8859-1")
df = df.loc[df[state_attr] == state]
return df
def load_data(filenames):
df_list=[]
for i in range(0, len(filenames)):
df = pd.read_csv(filenames[i], encoding="ISO-8859-1")
df_list.append(df)
return df_list
def create_zipcode_list(filenames):
zipcodes = {} # structured with within 5, 10 miles from another zipcode
zip_list = []
for i in range(0, len(filenames)):
with open(filenames[i], 'r', encoding='utf-8-sig') as f:
reader = csv.reader(f)
your_list = list(reader)
for z in range(0, len(your_list)):
zipcodes[your_list[z][0]] = [], []
zip_list.append(your_list[z][0])
return zipcodes, zip_list
def wrangle_real_estate_data(df, zip_list, drop_columns):
df = df[df['RegionName'].isin(zip_list)]
df = df.drop(drop_columns, axis=1)
return df
def wrangle_IPO_data(df, zip_list):
df['Date Filed'] = pd.to_datetime(df['Date Filed'], format='%Y-%m-%d')
df['Lockup Expiration Date'] = pd.to_datetime(df['Lockup Expiration Date'], errors='coerce', format='%m/%d/%Y')
df = df[df['Zipcode'].isin(zip_list)]
df = df.drop(['Lockup Expiration Date'], axis=1)
df['Lockup Expiration Date'] = df['Date Filed'] + timedelta(days=180)
df = df[df['Date Filed']> df['Date Filed'].min()+ timedelta(days=366)]
return df
def wrangle_census_data(df_census_econ, df_census_dem, zip_list, census_econ_columns, census_dem_columns):
df_census_econ.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_econ.rename(
columns={'Percent; EMPLOYMENT STATUS - Civilian labor force - Unemployment Rate': 'Unemployment Rate'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Less than $10,000': 'l10000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $10,000 to $14,999': 'l15000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $15,000 to $24,999': 'l25000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; COMMUTING TO WORK - Mean travel time to work (minutes)': 'Mean Travel Time to Work Estimate (minutes)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $200,000 or more': 'Percent of Households with Income Greater than $200,000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Median household income (dollars)': 'Median Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Mean household income (dollars)': 'Mean Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Per capita income (dollars)': 'Per Capita Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; HEALTH INSURANCE COVERAGE - Civilian noninstitutionalized population - No health insurance coverage': 'Percent of Population with no Health Insurance Coverage'},
inplace=True)
df_census_econ.rename(columns={
'Percent; PERCENTAGE OF FAMILIES AND PEOPLE WHOSE INCOME IN THE PAST 12 MONTHS IS BELOW THE POVERTY LEVEL - All people': 'Percent of People whose Income in the Past 12 months has been Below Poverty Level'},
inplace=True)
df_census_econ['l10000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l10000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l10000'] = df_census_econ['l10000'].astype(float)
df_census_econ['l15000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l15000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l15000'] = df_census_econ['l15000'].astype(float)
df_census_econ['l25000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l25000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l25000'] = df_census_econ['l25000'].astype(float)
df_census_econ["Percent of Households With Income Less Than $24,999"] = df_census_econ['l10000'] + df_census_econ[
'l15000'] + df_census_econ['l25000']
df_census_econ = df_census_econ.filter(census_econ_columns)
df_census_dem.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_dem.rename(columns={'Estimate; SEX AND AGE - Median age (years)': 'Median Age'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - Under 18 years': 'Percent of People under 18 years of age'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 65 years and over': 'Percent of People 65 years and over'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Male': 'Percent of Males'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Female': 'Percent of Females'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race)': 'Percent of People who are Hispanic'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - White alone': 'Percent of People who are White'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Black or African American alone': 'Percent of People who are Black or African American'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Asian alone': 'Percent of People who are Asian'},
inplace=True)
df_census_dem = df_census_dem.filter(census_dem_columns)
# filter data to only Silicon Valley + San Francisco Zip Codes
df_census_dem = df_census_dem[df_census_dem['Zipcode'].isin(zip_list)]
df_census_econ = df_census_econ[df_census_econ['Zipcode'].isin(zip_list)]
return df_census_econ, df_census_dem
def wrangle_real_estate_headers(df):
'''
run before joining dataframes so keys match
df_sale_counts_by_zip_silicon_valley.columns = df_sale_counts_by_zip_silicon_valley.columns.str.replace('Sales Counts ', '')
df_sale_counts_by_zip_silicon_valley = df_sale_counts_by_zip_silicon_valley.add_prefix('Sales Counts ')
df_sale_counts_by_zip_silicon_valley.rename(columns = {'Sales Counts RegionName':'Zipcode'}, inplace=True)
'''
df.columns = df.columns.str.replace('All Homes ', '')
df = df.add_prefix('All Homes ')
df.rename(columns={'All Homes RegionName': 'Zipcode'}, inplace=True)
return df
def wrangle_ipo_headers(df):
df.rename(columns={'Ticker': 'Symbol'}, inplace=True)
df["Found"] = df["Found"].astype(dtype=np.int64)
return df
def join_data(df1, df2, key, join_type):
df = df1.set_index(key).merge(df2, on=key, how=join_type)
return df
def merge_data(df1, df2, key):
df = pd.merge(df1, df2, on=key, how='inner')
return df
def df_replace(df, replace_list):
for i in range(0, len(replace_list)):
df = df.replace([replace_list[i]], [''], regex=True)
return df
def drop_columns_and_nans(df, drop_columns, nan_columns):
df = df.drop(['IPO Name', 'Offer date', 'CUSIP', 'PERM'], axis=1)
for i in range(0, len(nan_columns)):
df.drop_duplicates(subset=nan_columns[i], keep='first', inplace=True)
return df
def calculate_distance_between_zips(zipcode, min_radius, max_radius):
# api-endpoint
URL_base = "https://api.zip-codes.com/ZipCodesAPI.svc/1.0/FindZipCodesInRadius?zipcode="
URL = URL_base + zipcode + '&minimumradius=' + min_radius + '&maximumradius=' + max_radius + '&key=<KEY>'
# sending get request and saving the response as response object
contents = urllib.request.urlopen(URL).read()
# printing the output
zipcodes_nearby = []
print(json.loads(contents))
for i in range(1, len(json.loads(contents)['DataList'])):
zipcodes_nearby.append(json.loads(contents)['DataList'][i]['Code'])
return zipcodes_nearby
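# The parsing above assumes the ZipCodesAPI response has the shape
# {"DataList": [{"Code": "94105", ...}, {"Code": "94107", ...}, ...]};
# the loop starts at index 1, presumably to skip the query zipcode itself.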
def create_zipcode_distances_dictionary(zipcodes, zip_list):
'''
***DONT RUN IF THESE ARE ALREADY CREATED***
currently stored as data/processed/zipcodes_within_radius.txt
'''
print(len(zip_list))
for i in range(0, len(zip_list)):
zipcodes[zip_list[i]] = calculate_distance_between_zips(zip_list[i], '0', '5'), calculate_distance_between_zips(
zip_list[i], '5', '10')
return zipcodes
def create_text_file_from_dictionary(filename, dictionary):
'''
with open('data/processed/zipcodes_within_radius.txt', 'w') as json_file:
json.dump(zipcodes, json_file)
'''
with open(filename, 'w') as json_file:
json.dump(dictionary, json_file)
return dictionary
def export_dataframe_to_dictionary(df, name):
filename = 'data/processed/' + name + '.csv'
export_csv = df.to_csv(filename, index=True, header=True) # Don't forget to add '.csv' at the end of the path
def update_zipcodes_dict(zipcodes, zip_list):
exists = os.path.isfile('../data/processed/zipcodes_within_radius.txt')
if not exists:
zipcodes = create_zipcode_distances_dictionary(zipcodes, zip_list)
create_text_file_from_dictionary('../data/processed/zipcodes_within_radius.txt', zipcodes)
else:
zipcodes = {}
with open('../data/processed/zipcodes_within_radius.txt', 'r') as f:
zipcodes = json.load(f)
return zipcodes
def create_IPO_an_Zipcode_dataframe(census_econ_cols, census_dem_cols, df_ipo, df_zip, zipcodes):
if 'Zipcode' in census_econ_cols:
census_econ_cols.remove('Zipcode')
if 'Zipcode' in census_dem_cols:
census_dem_cols.remove('Zipcode')
ipo_header_list = list(df_ipo.columns.values) +census_dem_cols+census_econ_cols + ['All Homes Date Filed',
'All Homes Lockup Expiration Date',
'All Homes 1 Year Before Date Filed',
'All Homes 2 Years After Date Filed']
'''
Distance from IPO = estimate is .2 if in the same zipcode as IPO
= estimate is 0.5 if not in same zip code as IPO and less than 5 miles from zipcode to IPO
= estimate is 1 if greater than 5 and less than 10 miles from zipcode to IPO
'''
new_df_list = []
for index, row in df_ipo.iterrows():
ipo_zipcode = str(row['Zipcode'])
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(ipo_zipcode)]
headerList = join_IPO_and_Zip_Data(row['Date Filed'], row['Lockup Expiration Date'], census_econ_cols,census_dem_cols)
data = np.concatenate((np.array(row.values),
import sandz.ndft as ndft
import sandz.util as util
import sandz.arma as arma
import numpy as np
import numpy.testing as npt
import sys
def test_00():
np.random.seed(0) # fix randomness
ts = np.array([0,.2,.4,.5])
data = np.array([2,-2,-1j,1j])
ans_acf = np.array([1,-.4,-.8-.4j,.8j])
npt.assert_allclose(([0, .1, .2, .3], ans_acf), util.acf_with_lags(times=ts, data=data, ndft_fun=ndft.exe_ndft, max_lag=.4, max_freq=10, os=100))
if 'nfft' in sys.modules: # python version
npt.assert_allclose(ans_acf, ndft.calc_acf(times=ts, data=data, max_lag=.4, ndft_fun=ndft.exe_nfft_py, max_freq=10))
if 'pynfft' in sys.modules: # c version
npt.assert_allclose(ans_acf, ndft.calc_acf(times=ts, data=data, max_lag=.4, ndft_fun=ndft.exe_nfft_c, max_freq=10))
npt.assert_allclose([1,.25,.5,.25], ndft.lag_strength(times=10*ts, max_lag=4, ndft_fun=ndft.exe_ndft))
data_padded = np.array([2,0,-2,0,-1j,1j])
ans_psd_embed = np.abs(np.fft.fft(data_padded))**2/4
ans_spec_win = np.array([1,1/16,1/16,.25,1/16,1/16])
mask = np.array([True, False, True, False, True, True])
""" Tests of the command-line interface
:Author: <NAME> <<EMAIL>>
:Date: 2021-07-07
:Copyright: 2020, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from biosimulators_ginsim import __main__
from biosimulators_ginsim import core
from biosimulators_ginsim.data_model import KISAO_ALGORITHM_MAP
from biosimulators_ginsim.utils import read_model
from biosimulators_utils.combine import data_model as combine_data_model
from biosimulators_utils.combine.io import CombineArchiveWriter
from biosimulators_utils.config import get_config
from biosimulators_utils.report import data_model as report_data_model
from biosimulators_utils.report.io import ReportReader
from biosimulators_utils.simulator.exec import exec_sedml_docs_in_archive_with_containerized_simulator
from biosimulators_utils.simulator.specs import gen_algorithms_from_specs
from biosimulators_utils.sedml import data_model as sedml_data_model
from biosimulators_utils.sedml.io import SedmlSimulationWriter
from biosimulators_utils.sedml.utils import append_all_nested_children_to_doc
from biosimulators_utils.warnings import BioSimulatorsWarning
from kisao.exceptions import AlgorithmCannotBeSubstitutedException
from unittest import mock
import datetime
import dateutil.tz
import json
import numpy
import numpy.testing
import os
import shutil
import tempfile
import unittest
import yaml
class CliTestCase(unittest.TestCase):
EXAMPLE_SBML_MODEL_FILENAME = os.path.join(os.path.dirname(__file__), 'fixtures', 'example-model.xml')
EXAMPLE_ZGINML_MODEL_FILENAME = os.path.join(os.path.dirname(__file__), 'fixtures', 'SuppMat_Model_Master_Model.zginml')
SPECIFICATIONS_FILENAME = os.path.join(os.path.dirname(__file__), '..', 'biosimulators.json')
DOCKER_IMAGE = 'ghcr.io/biosimulators/biosimulators_ginsim/ginsim:latest'
NAMESPACES = {
None: 'http://sed-ml.org/sed-ml/level1/version3',
'sbml': 'http://www.sbml.org/sbml/level3/version1/core',
'qual': 'http://www.sbml.org/sbml/level3/version1/qual/version1',
}
def setUp(self):
self.dirname = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.dirname)
def test_exec_sbml_sed_task_successfully(self):
task = sedml_data_model.Task(
model=sedml_data_model.Model(
source=self.EXAMPLE_SBML_MODEL_FILENAME,
language=sedml_data_model.ModelLanguage.SBML.value,
),
simulation=sedml_data_model.UniformTimeCourseSimulation(
initial_time=0,
output_start_time=0,
output_end_time=10,
number_of_points=10,
algorithm=sedml_data_model.Algorithm(
kisao_id='KISAO_0000449',
),
),
)
variables = [
sedml_data_model.Variable(
id='Time',
symbol=sedml_data_model.Symbol.time,
task=task),
sedml_data_model.Variable(
id='G0',
target="/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='G0']/@level",
target_namespaces=self.NAMESPACES,
task=task),
sedml_data_model.Variable(
id='G1',
target="/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='G1']/@level",
target_namespaces=self.NAMESPACES,
task=task),
]
# synchronous method
task.simulation.algorithm.kisao_id = 'KISAO_0000449'
variable_results, log = core.exec_sed_task(task, variables)
self.assertEqual(set(variable_results.keys()), set(['Time', 'G0', 'G1']))
for variable_result in variable_results.values():
self.assertFalse(numpy.any(numpy.isnan(variable_result)))
numpy.testing.assert_allclose(variable_results['Time'], numpy.linspace(0, 10, 10 + 1))
# asynchronous method
task.simulation.algorithm.kisao_id = 'KISAO_0000450'
task.simulation.algorithm.changes.append(sedml_data_model.AlgorithmParameterChange(
kisao_id='KISAO_0000574',
))
variable_results, log = core.exec_sed_task(task, variables)
self.assertEqual(set(variable_results.keys()), set(['Time', 'G0', 'G1']))
for variable_result in variable_results.values():
self.assertFalse(numpy.any(numpy.isnan(variable_result)))
import h5py
import pickle
import numpy as np
def load_weights():
fff = h5py.File('Mybase/mask_rcnn_coco.h5','r') #打开h5文件
#print(list(f.keys()))
mydict = {}
mydict['global_step:0'] = 1000
########res1########
dset = fff['conv1']
a = dset['conv1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn_conv1']
a = dset['bn_conv1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
########res2########
dset = fff['res2a_branch1']
a = dset['res2a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch1']
a = dset['bn2a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2a']
a = dset['res2a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2a']
a = dset['bn2a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2b']
a = dset['res2a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2b']
a = dset['bn2a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2c']
a = dset['res2a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2c']
a = dset['bn2a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res2b_branch2a']
a = dset['res2b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2a']
a = dset['bn2b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2b']
a = dset['res2b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2b']
a = dset['bn2b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2c']
a = dset['res2b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2c']
a = dset['bn2b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res2c_branch2a']
a = dset['res2c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2a']
a = dset['bn2c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2b']
a = dset['res2c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2b']
a = dset['bn2c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2c']
a = dset['res2c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2c']
a = dset['bn2c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res3########
dset = fff['res3a_branch1']
a = dset['res3a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch1']
a = dset['bn3a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2a']
a = dset['res3a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2a']
a = dset['bn3a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2b']
a = dset['res3a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2b']
a = dset['bn3a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2c']
a = dset['res3a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2c']
a = dset['bn3a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res3b_branch2a']
a = dset['res3b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2a']
a = dset['bn3b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2b']
a = dset['res3b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2b']
a = dset['bn3b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2c']
a = dset['res3b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2c']
a = dset['bn3b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3c_branch2a']
a = dset['res3c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2a']
a = dset['bn3c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2b']
a = dset['res3c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2b']
a = dset['bn3c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2c']
a = dset['res3c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2c']
a = dset['bn3c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3d_branch2a']
a = dset['res3d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2a']
a = dset['bn3d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2b']
a = dset['res3d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2b']
a = dset['bn3d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2c']
a = dset['res3d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2c']
a = dset['bn3d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res4########
dset = fff['res4a_branch1']
a = dset['res4a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch1']
a = dset['bn4a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2a']
a = dset['res4a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2a']
a = dset['bn4a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2b']
a = dset['res4a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2b']
a = dset['bn4a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2c']
a = dset['res4a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2c']
a = dset['bn4a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res4b_branch2a']
a = dset['res4b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2a']
a = dset['bn4b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2b']
a = dset['res4b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2b']
a = dset['bn4b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2c']
a = dset['res4b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2c']
a = dset['bn4b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4c_branch2a']
a = dset['res4c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2a']
a = dset['bn4c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2b']
a = dset['res4c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2b']
a = dset['bn4c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2c']
a = dset['res4c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2c']
a = dset['bn4c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4d_branch2a']
a = dset['res4d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2a']
a = dset['bn4d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2b']
a = dset['res4d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2b']
a = dset['bn4d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2c']
a = dset['res4d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2c']
a = dset['bn4d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4e_branch2a']
a = dset['res4e_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2a']
a = dset['bn4e_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2b']
a = dset['res4e_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2b']
a = dset['bn4e_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2c']
a = dset['res4e_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2c']
a = dset['bn4e_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4f_branch2a']
a = dset['res4f_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2a']
a = dset['bn4f_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2b']
a = dset['res4f_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2b']
a = dset['bn4f_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2c']
a = dset['res4f_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2c']
a = dset['bn4f_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4g_branch2a']
a = dset['res4g_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2a']
a = dset['bn4g_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2b']
a = dset['res4g_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2b']
a = dset['bn4g_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2c']
a = dset['res4g_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2c']
a = dset['bn4g_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4h_branch2a']
a = dset['res4h_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2a']
a = dset['bn4h_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2b']
a = dset['res4h_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2b']
a = dset['bn4h_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2c']
a = dset['res4h_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2c']
a = dset['bn4h_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4i_branch2a']
a = dset['res4i_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2a']
a = dset['bn4i_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2b']
a = dset['res4i_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2b']
a = dset['bn4i_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2c']
a = dset['res4i_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2c']
a = dset['bn4i_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4j_branch2a']
a = dset['res4j_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2a']
a = dset['bn4j_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2b']
a = dset['res4j_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2b']
a = dset['bn4j_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2c']
a = dset['res4j_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2c']
a = dset['bn4j_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4k_branch2a']
a = dset['res4k_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2a']
a = dset['bn4k_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2b']
a = dset['res4k_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2b']
a = dset['bn4k_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2c']
a = dset['res4k_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2c']
a = dset['bn4k_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4l_branch2a']
a = dset['res4l_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2a']
a = dset['bn4l_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2b']
a = dset['res4l_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2b']
a = dset['bn4l_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2c']
a = dset['res4l_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2c']
a = dset['bn4l_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4m_branch2a']
a = dset['res4m_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2a']
a = dset['bn4m_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2b']
a = dset['res4m_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2b']
a = dset['bn4m_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2c']
a = dset['res4m_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2c']
a = dset['bn4m_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
import numpy as np
import random
import pandas as pd
from matplotlib import pyplot as plt
'''
Perceptron algorithm that finds the ideal weights for a given data set and labels
Parameters
----------
coords: data points in 2 dimensions ([x1, x2])
labels: binary target output for each data point (-1 or 1)
epochs: number of training passes to run
misclassifiedList: list that collects the number of misclassified points per epoch
Returns
-------
w: resulting weight vector (bias at index 0) after learning the data points
'''
def perceptron_algorithm(coords, labels, epochs, misclassifiedList):
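# w holds the bias at index 0 and the feature weights at indices 1 onward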
w = np.zeros((coords.shape[1]) + 1)
for i in range(epochs):
misclassified = 0
for x, y in zip(coords, labels):
dotProd = np.dot(x, w[1:]) + w[0]
target = 1.0 if (dotProd > 0.0) else -1.0
if(target != y):
print(y, target, "classified incorrectly")
misclassified += 1
w[1:] += y * x
w[0] += y # bias update: the standard perceptron rule adds the label
else:
print(y, target, "classified correctly")
print("PRINTING W", w)
misclassifiedList.append(misclassified)
return w
if __name__ == "__main__":
# Set up the random generator seed
np.random.seed(121232141)
# Total number of random points
total = 1000
# Create two clusters of random points (total//2 each) that are linearly separable
A = 2 * np.random.random_sample((total//2, 2)) + 0
B = 2 * np.random.random_sample((total//2, 2)) + 3 # offset assumed (original constant truncated) so the two clusters are separable
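# A minimal sketch of how the driver could continue (labels, training call, convergence
# plot); the epoch count and plotting choices here are assumptions, not original values:
coords = np.concatenate((A, B), axis=0)
labels = np.concatenate((-np.ones(total//2), np.ones(total//2)))
misclassifiedList = []
w = perceptron_algorithm(coords, labels, 20, misclassifiedList)
plt.plot(misclassifiedList)
plt.xlabel('epoch')
plt.ylabel('misclassified points')
plt.show()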
import glob, math, cv2
import numpy as np
from scipy import misc
from scipy import linalg
EPS = 1e-6
XMIN = -32 # right (neg is left)
XMAX = 32.0 # right
YMIN = -16.0 # down (neg is up)
YMAX = 16.0 # down
ZMIN = -32 # forward
ZMAX = 32 # forward
def print_stats(name, tensor):
print('%s min = %.2f, mean = %.2f, max = %.2f' % (name, np.min(tensor), np.mean(tensor), np.max(tensor)))
def reduce_masked_mean(x, mask, axis=None, keepdims=False):
# x and mask are the same shape
# returns shape-1
# axis can be a list of axes
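# e.g. reduce_masked_mean(np.array([1., 2., 3.]), np.array([1., 0., 1.])) ~= 2.0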
prod = x*mask
numer = np.sum(prod, axis=axis, keepdims=keepdims)
denom = EPS+np.sum(mask, axis=axis, keepdims=keepdims)
mean = numer/denom
return mean
def reduce_masked_sum(x, mask, axis=None, keepdims=False):
# x and mask are the same shape
# returns shape-1
# axis can be a list of axes
prod = x*mask
numer = np.sum(prod, axis=axis, keepdims=keepdims)
return numer
def get_nFiles(path):
return len(glob.glob(path))
def get_file_list(path):
return glob.glob(path)
def rotm2eul(R):
# R is 3x3
sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
if sy > 1e-6: # non-singular case (the else branch handles the gimbal-lock singularity)
x = math.atan2(R[2,1] , R[2,2])
y = math.atan2(-R[2,0], sy)
z = math.atan2(R[1,0], R[0,0])
else:
x = math.atan2(-R[1,2], R[1,1])
y = math.atan2(-R[2,0], sy)
z = 0
return x, y, z
def rad2deg(rad):
return rad*180.0/np.pi
def deg2rad(deg):
return deg/180.0*np.pi
def eul2rotm(rx, ry, rz):
# copy of matlab, but order of inputs is different
# R = [ cy*cz sy*sx*cz-sz*cx sy*cx*cz+sz*sx
# cy*sz sy*sx*sz+cz*cx sy*cx*sz-cz*sx
# -sy cy*sx cy*cx]
sinz = np.sin(rz)
siny = np.sin(ry)
sinx = np.sin(rx)
cosz = np.cos(rz)
cosy = np.cos(ry)
cosx = np.cos(rx)
r11 = cosy*cosz
r12 = sinx*siny*cosz - cosx*sinz
r13 = cosx*siny*cosz + sinx*sinz
r21 = cosy*sinz
r22 = sinx*siny*sinz + cosx*cosz
r23 = cosx*siny*sinz - sinx*cosz
r31 = -siny
r32 = sinx*cosy
r33 = cosx*cosy
r1 = np.stack([r11,r12,r13],axis=-1)
r2 = np.stack([r21,r22,r23],axis=-1)
r3 = np.stack([r31,r32,r33],axis=-1)
r = np.stack([r1,r2,r3],axis=-2)
return r
def wrap2pi(rad_angle):
# puts the angle into the range [-pi, pi]
return np.arctan2(np.sin(rad_angle), np.cos(rad_angle))
def rot2view(rx,ry,rz,x,y,z):
# takes rot angles and 3d position as input
# returns viewpoint angles as output
# (all in radians)
# it will perform strangely if z <= 0
az = wrap2pi(ry - (-np.arctan2(z, x) - 1.5*np.pi))
el = -wrap2pi(rx - (-np.arctan2(z, y) - 1.5*np.pi))
th = -rz
return az, el, th
def invAxB(a,b):
"""
Compute the relative 3D transformation between a and b.
Input:
a -- first pose (homogeneous 4x4 matrix)
b -- second pose (homogeneous 4x4 matrix)
Output:
Relative 3D transformation from a to b.
"""
return np.dot(np.linalg.inv(a),b)
def merge_rt(r, t):
# r is 3 x 3
# t is 3 or maybe 3 x 1
t = np.reshape(t, [3, 1])
rt = np.concatenate((r,t), axis=1)
# rt is 3 x 4
br = np.reshape(np.array([0,0,0,1], np.float32), [1, 4])
# br is 1 x 4
rt = np.concatenate((rt, br), axis=0)
# rt is 4 x 4
return rt
def merge_rts(r, t):
# r is S x 3 x 3
# t is S x 3 or maybe S x 3 x 1
S, D1, D2 = r.shape
assert(D1 == 3 and D2 == 3)
t = np.reshape(t, [S, 3, 1])
rt = np.concatenate((r,t), axis=-1)
# rt is S x 3 x 4
br = np.reshape(np.tile(np.array([0,0,0,1], np.float32), (S, 1)), [S, 1, 4])
# br is S x 1 x 4
rt = np.concatenate((rt, br), axis=1)
# rt is S x 4 x 4
return rt
def split_rt(rt):
r = rt[:3,:3]
t = rt[:3,3]
r = np.reshape(r, [3, 3])
t = np.reshape(t, [3, 1])
return r, t
def split_rts(rt):
N, _, _ = rt.shape
r = rt[:, :3, :3]
t = rt[:, :3, 3]
r = np.reshape(r, [N, 3, 3])
t = np.reshape(t, [N, 3, 1])
return r, t
def split_lrtlist(lrtlist):
# splits a BN x 19 tensor
# into N x 3 (lens)
# and N x 4 x 4 (rts)
N, D = list(lrtlist.shape)
assert(D==19)
lenlist = lrtlist[:,:3]
ref_T_objs_list = lrtlist[:,3:].reshape(N, 4, 4)
return lenlist, ref_T_objs_list
def merge_lrtlist(lenlist, rtlist):
# lenlist is N x 3
# rtlist is N x 4 x 4
# merges these into a N x 19 tensor
N, D = list(lenlist.shape)
assert(D==3)
N2, E, F = list(rtlist.shape)
assert(N==N2)
assert(E==4 and F==4)
rtlist = rtlist.reshape(N, 16)
lrtlist = np.concatenate([lenlist, rtlist], axis=1)
return lrtlist
def split_intrinsics(K):
# K is 3 x 4 or 4 x 4
fx = K[0,0]
fy = K[1,1]
x0 = K[0,2]
y0 = K[1,2]
return fx, fy, x0, y0
def merge_intrinsics(fx, fy, x0, y0):
# inputs are shaped []
K = np.eye(4)
K[0,0] = fx
K[1,1] = fy
K[0,2] = x0
K[1,2] = y0
# K is shaped 4 x 4
return K
def scale_intrinsics(K, sx, sy):
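# rescale the intrinsics for an image resized by factors (sx, sy)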
fx, fy, x0, y0 = split_intrinsics(K)
fx *= sx
fy *= sy
x0 *= sx
y0 *= sy
return merge_intrinsics(fx, fy, x0, y0)
# def meshgrid(H, W):
# x = np.linspace(0, W-1, W)
# y = np.linspace(0, H-1, H)
# xv, yv = np.meshgrid(x, y)
# return xv, yv
def compute_distance(transform):
"""
Compute the distance of the translational component of a 4x4 homogeneous matrix.
"""
return np.linalg.norm(transform[0:3,3])
def radian_l1_dist(e, g):
# if our angles are in [0, 360] we can follow this stack overflow answer:
# https://gamedev.stackexchange.com/questions/4467/comparing-angles-and-working-out-the-difference
# wrap2pi brings the angles to [-180, 180]; adding pi puts them in [0, 360]
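# e.g. radian_l1_dist(deg2rad(350.0), deg2rad(10.0)) -> ~0.35 rad (i.e. 20 degrees)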
e = wrap2pi(e)+np.pi
g = wrap2pi(g)+np.pi
l = np.abs(np.pi - np.abs(np.abs(e-g) - np.pi))
return l
def apply_4x4(RT, XYZ):
# RT is 4 x 4
# XYZ is N x 3
# put into homogeneous coords
X, Y, Z = np.split(XYZ, 3, axis=1)
ones = np.ones_like(X)
XYZ1 = np.concatenate([X, Y, Z, ones], axis=1)
# XYZ1 is N x 4
XYZ1_t = np.transpose(XYZ1)
# this is 4 x N
XYZ2_t = np.dot(RT, XYZ1_t)
# this is 4 x N
XYZ2 = np.transpose(XYZ2_t)
# this is N x 4
XYZ2 = XYZ2[:,:3]
# this is N x 3
return XYZ2
def apply_4x4s(RT, XYZ):
# RT is B x 4 x 4
# XYZ is B x N x 3
# put into homogeneous coords
X, Y, Z = np.split(XYZ, 3, axis=2)
ones = np.ones_like(X)
XYZ1 = np.concatenate([X, Y, Z, ones], axis=2)
# XYZ1 is B x N x 4
XYZ1_t = np.transpose(XYZ1, (0, 2, 1))
# this is B x 4 x N
XYZ2_t = np.matmul(RT, XYZ1_t)
# this is B x 4 x N
XYZ2 = np.transpose(XYZ2_t, (0, 2, 1))
# this is B x N x 4
XYZ2 = XYZ2[:, :, :3]
# this is B x N x 3
return XYZ2
def apply_4x4_to_lrtlist(Y_T_X, lrtlist_X):
N, D = list(lrtlist_X.shape)
assert (D == 19)
N2, E, F = list(Y_T_X.shape)
assert (N2 == N)
assert (E == 4 and F == 4)
lenlist, rtlist_X = split_lrtlist(lrtlist_X)
# rtlist_X is N x 4 x 4
rtlist_Y = np.matmul(Y_T_X, rtlist_X)
lrtlist_Y = merge_lrtlist(lenlist, rtlist_Y)
return lrtlist_Y
def apply_pix_T_cam(pix_T_cam, xyz):
fx, fy, x0, y0 = split_intrinsics(pix_T_cam)
# xyz is shaped B x H*W x 3
# returns xy, shaped B x H*W x 2
N, C = xyz.shape
x, y, z = np.split(xyz, 3, axis=-1)
EPS = 1e-4
z = np.clip(z, EPS, None)
x = (x*fx)/(z)+x0
y = (y*fy)/(z)+y0
xy = np.concatenate([x, y], axis=-1)
return xy
def Ref2Mem(xyz, Z, Y, X):
# xyz is N x 3, in ref coordinates
# transforms ref coordinates into mem coordinates
N, C = xyz.shape
assert(C==3)
mem_T_ref = get_mem_T_ref(Z, Y, X)
xyz = apply_4x4(mem_T_ref, xyz)
return xyz
# def Mem2Ref(xyz_mem, MH, MW, MD):
# # xyz is B x N x 3, in mem coordinates
# # transforms mem coordinates into ref coordinates
# B, N, C = xyz_mem.get_shape().as_list()
# ref_T_mem = get_ref_T_mem(B, MH, MW, MD)
# xyz_ref = utils_geom.apply_4x4(ref_T_mem, xyz_mem)
# return xyz_ref
def get_mem_T_ref(Z, Y, X):
# sometimes we want the mat itself
# note this is not a rigid transform
# for interpretability, let's construct this in two steps...
# translation
center_T_ref = np.eye(4, dtype=np.float32)
center_T_ref[0,3] = -XMIN
center_T_ref[1,3] = -YMIN
center_T_ref[2,3] = -ZMIN
VOX_SIZE_X = (XMAX-XMIN)/float(X)
VOX_SIZE_Y = (YMAX-YMIN)/float(Y)
VOX_SIZE_Z = (ZMAX-ZMIN)/float(Z)
# scaling
mem_T_center = np.eye(4, dtype=np.float32)
mem_T_center[0,0] = 1./VOX_SIZE_X
mem_T_center[1,1] = 1./VOX_SIZE_Y
mem_T_center[2,2] = 1./VOX_SIZE_Z
mem_T_ref = np.dot(mem_T_center, center_T_ref)
return mem_T_ref
def safe_inverse(a):
r, t = split_rt(a)
t = np.reshape(t, [3, 1])
r_transpose = r.T
inv = np.concatenate([r_transpose, -np.matmul(r_transpose, t)], 1)
bottom_row = a[3:4, :] # this is [0, 0, 0, 1]
inv = np.concatenate([inv, bottom_row], 0)
return inv
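# Hedged usage sketch (illustrative): for a rigid 4x4 transform, safe_inverse
# composed with the original gives the identity. Assumes split_rt (used inside
# safe_inverse) is defined elsewhere in this module; the rotation here is a
# simple rotation about z plus a translation.
def _demo_safe_inverse():
    theta = 0.3
    a = np.eye(4, dtype=np.float32)
    a[0, 0], a[0, 1] = np.cos(theta), -np.sin(theta)
    a[1, 0], a[1, 1] = np.sin(theta), np.cos(theta)
    a[:3, 3] = [1.0, 2.0, 3.0]
    assert np.allclose(np.dot(safe_inverse(a), a), np.eye(4), atol=1e-6)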
def get_ref_T_mem(Z, Y, X):
mem_T_ref = get_mem_T_ref(Z, Y, X)
# note safe_inverse is inapplicable here,
# since the transform is nonrigid
ref_T_mem = np.linalg.inv(mem_T_ref)
return ref_T_mem
def voxelize_xyz(xyz_ref, Z, Y, X):
# xyz_ref is N x 3
xyz_mem = Ref2Mem(xyz_ref, Z, Y, X)
# this is N x 3
voxels = get_occupancy(xyz_mem, Z, Y, X)
voxels = np.reshape(voxels, [Z, Y, X, 1])
return voxels
def get_inbounds(xyz, Z, Y, X, already_mem=False):
# xyz is N x 3, in ref coordinates unless already_mem is True
if not already_mem:
xyz = Ref2Mem(xyz, Z, Y, X)
x_valid = np.logical_and(
np.greater_equal(xyz[:,0], -0.5),
np.less(xyz[:,0], float(X)-0.5))
y_valid = np.logical_and(
np.greater_equal(xyz[:,1], -0.5),
np.less(xyz[:,1], float(Y)-0.5))
z_valid = np.logical_and(
np.greater_equal(xyz[:,2], -0.5),
np.less(xyz[:,2], float(Z)-0.5))
inbounds = np.logical_and(np.logical_and(x_valid, y_valid), z_valid)
return inbounds
def sub2ind3D_zyx(depth, height, width, d, h, w):
# same as sub2ind3D, but inputs in zyx order
# when gathering/scattering with these inds, the tensor should be Z x Y x X
return d*height*width + h*width + w
def sub2ind3D_yxz(height, width, depth, h, w, d):
return h*width*depth + w*depth + d
def get_occupancy(xyz_mem, Z, Y, X):
# xyz_mem is N x 3
# we want to fill a voxel tensor with 1's at these inds
inbounds = get_inbounds(xyz_mem, Z, Y, X, already_mem=True)
inds = np.where(inbounds)
xyz_mem = np.reshape(xyz_mem[inds], [-1, 3])
# xyz_mem is N x 3
# this is more accurate than a cast/floor, but runs into issues when Y==0
xyz_mem = np.round(xyz_mem).astype(np.int32)
x = xyz_mem[:,0]
y = xyz_mem[:,1]
z = xyz_mem[:,2]
voxels = np.zeros([Z, Y, X], np.float32)
voxels[z, y, x] = 1.0
return voxels
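# Hedged usage sketch (illustrative): get_occupancy marks one voxel per
# in-bounds point that is already expressed in memory coordinates;
# out-of-bounds points are silently dropped by get_inbounds.
def _demo_get_occupancy():
    Z, Y, X = 4, 4, 4
    xyz_mem = np.array([[1.0, 2.0, 3.0],   # in bounds -> voxel (z=3, y=2, x=1)
                        [9.0, 0.0, 0.0]])  # x out of bounds -> dropped
    vox = get_occupancy(xyz_mem, Z, Y, X)
    assert vox.shape == (Z, Y, X)
    assert vox.sum() == 1.0 and vox[3, 2, 1] == 1.0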
def Pixels2Camera(x,y,z,fx,fy,x0,y0):
# x and y are locations in pixel coordinates, z is a depth image in meters
# their shapes are H x W
# fx, fy, x0, y0 are scalar camera intrinsics
# returns xyz, shaped H*W x 3
H, W = z.shape
fx = np.reshape(fx, [1,1])
fy = np.reshape(fy, [1,1])
x0 = np.reshape(x0, [1,1])
y0 = np.reshape(y0, [1,1])
# unproject
x = ((z+EPS)/fx)*(x-x0)
y = ((z+EPS)/fy)*(y-y0)
x = np.reshape(x, [-1])
y = np.reshape(y, [-1])
z = np.reshape(z, [-1])
xyz = np.stack([x,y,z], axis=1)
return xyz
def depth2pointcloud(z, pix_T_cam):
H = z.shape[0]
W = z.shape[1]
y, x = meshgrid2D(H, W)
z = np.reshape(z, [H, W])
fx, fy, x0, y0 = split_intrinsics(pix_T_cam)
xyz = Pixels2Camera(x, y, z, fx, fy, x0, y0)
return xyz
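# Hedged usage sketch (illustrative): backprojecting a constant-depth image
# yields H*W points that all lie on the plane z = depth. Assumes EPS is the
# small module-level constant that Pixels2Camera relies on; the intrinsics
# below are made up.
def _demo_depth2pointcloud():
    H, W = 4, 6
    depth = np.full((H, W), 2.0, dtype=np.float32)
    pix_T_cam = merge_intrinsics(fx=100.0, fy=100.0, x0=W / 2.0, y0=H / 2.0)
    xyz = depth2pointcloud(depth, pix_T_cam)
    assert xyz.shape == (H * W, 3)
    assert np.allclose(xyz[:, 2], 2.0)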
def meshgrid2D(Y, X):
grid_y = np.linspace(0.0, Y-1, Y)
grid_y = np.reshape(grid_y, [Y, 1])
grid_y = np.tile(grid_y, [1, X])
grid_x = np.linspace(0.0, X-1, X)
grid_x = np.reshape(grid_x, [1, X])
grid_x = np.tile(grid_x, [Y, 1])
# outputs are Y x X
return grid_y, grid_x
def gridcloud3D(Y, X, Z):
x_ = np.linspace(0, X-1, X)
y_ = np.linspace(0, Y-1, Y)
z_ = np.linspace(0, Z-1, Z)
y, x, z = np.meshgrid(y_, x_, z_, indexing='ij')
x = np.reshape(x, [-1])
y = np.reshape(y, [-1])
z = np.reshape(z, [-1])
xyz = np.stack([x,y,z], axis=1).astype(np.float32)
return xyz
def gridcloud2D(Y, X):
x_ = | np.linspace(0, X-1, X) | numpy.linspace |
import re
import numpy as np
import torch
import torchvision
import torch.nn as nn
import torchvision.datasets as dset
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from hanja import hangul
from scipy import signal
from scipy.io import wavfile
# just a plain one_hot ...
def one_hot(i, length):
if i == -75:
i += 1
pass
array = | np.zeros(length) | numpy.zeros |
# import required libraries
import numpy as np
import cv2
print('OpenCV version: '+cv2.__version__)
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import os
from collections import Counter
# Set source folder
SRC_FOLDER = "C:/Users/raksh/OneDrive - The Pennsylvania State University/PhD Research/Paper-4/SysID Experiment/OL Test 3/"
# open and read file containing start and end timestamps of the videos
df_vidTimes = pd.read_excel(SRC_FOLDER + "Video_Timestamps_1.xlsx")
df_vidTimes.drop(df_vidTimes.columns[0],axis=1,inplace=True)
################ ALL FUNCTIONS DEFINITIONS ################
def perspCorrection(img,pt1,pt2,pt3,pt4,scale_width,scale_height):
# Create a copy of the image
img_copy = np.copy(img)
# Convert to RGB so as to display via matplotlib
# Using Matplotlib we can easily find the coordinates of the 4 points that are essential for finding the transformation matrix
#img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
# to calculate the transformation matrix
input_pts = np.float32([pt1,pt2,pt3,pt4])
output_pts = np.float32([[0,0],[scale_width-1,0],[0,scale_height-1],[scale_width-1,scale_height-1]])
# Compute the perspective transform M
M = cv2.getPerspectiveTransform(input_pts,output_pts)
# Apply the perspective transformation to the image
imgPersp = cv2.warpPerspective(img,M,(scale_width, scale_height)) #,flags=cv2.INTER_LINEAR) cv2.INTER_CUBIC is also an option
imgGrayPersp = cv2.cvtColor(imgPersp, cv2.COLOR_BGR2GRAY)
# visualize corners using cv2 circles
for x in range (0,4):
cv2.circle(img_copy,(round(input_pts[x][0]),round(input_pts[x][1])),5,(0,0,255),cv2.FILLED)
return [img_copy,imgPersp,imgGrayPersp]
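# Hedged usage sketch (illustrative, not part of the original pipeline): the
# corner points below are made up; in the real workflow they are read off a
# matplotlib display of the frame before computing the homography.
def demo_persp_correction():
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    pt1, pt2, pt3, pt4 = (100, 50), (540, 50), (100, 430), (540, 430)
    annotated, warped, warped_gray = perspCorrection(
        frame, pt1, pt2, pt3, pt4, scale_width=400, scale_height=300)
    assert warped.shape[:2] == (300, 400) and warped_gray.shape == (300, 400)
    return annotated, warped, warped_gray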
def extractTopBottom(img,tStart,tEnd,bStart,bEnd):
img_top = img[tStart[1]:tEnd[1],tStart[0]:tEnd[0]]
img_bottom = img[bStart[1]:bEnd[1],bStart[0]:bEnd[0]]
return [img_top,img_bottom]
def gaussianBlur(img,fsize):
# gaussian blur
gblur = cv2.GaussianBlur(img,(fsize,fsize),0)
return gblur
def medianBlur(img,fsize=3):
# median blur - effective at removing salt and pepper noise
mblur = cv2.medianBlur(img,fsize)
return mblur
def bilateralFilter(img):
# Bilateral filter preserves edges while removing noise
bfblur = cv2.bilateralFilter(img,9,75,75)
return bfblur
def gAdaptiveThresholding(img):
# median filtering
adaptive_gaussian = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
return adaptive_gaussian
def morphOps(img,kernel1,kernel2,k1_num_passes=2):
# Closing = Dilation + Erosion
# dilation
mask_dil = cv2.dilate(img,kernel1,iterations = k1_num_passes)
# erosion
mask_erode = cv2.erode(mask_dil,kernel2,iterations = 1)
return mask_erode
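# Hedged usage sketch (illustrative): one plausible ordering of the helpers
# above - smoothing, adaptive thresholding, then closing - applied to a
# grayscale frame. The 3x3 kernels are made-up defaults, not tuned values
# from this experiment.
def demo_preprocess_frame(img_gray):
    blurred = medianBlur(img_gray, fsize=3)
    binary = gAdaptiveThresholding(blurred)
    kernel1 = np.ones((3, 3), np.uint8)
    kernel2 = np.ones((3, 3), np.uint8)
    cleaned = morphOps(binary, kernel1, kernel2, k1_num_passes=2)
    return cleaned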
def computeW_Rev(img,img_debug):
avg_num_pixels = 159
scaling_factor = 1.0
mm_per_pixel = ((1/32)*25.4)/(scaling_factor*avg_num_pixels)
edge_length_threshold = 55
min_L_edge_threshold = False
min_R_edge_threshold = False
# Predefine arrays for data storage
approx_edges = 10
num_edges = np.zeros(img.shape[0]) #,dtype=np.uint16)
edge_start = np.zeros([img.shape[0],approx_edges])#,dtype=np.uint16)
edge_end = np.zeros([img.shape[0],approx_edges])#,dtype=np.uint16)
edge_count = 0
k=0
sse = False
tse = False
# start scanning from (0,0) until black pixel is found
# go across columns first
for i in range(img.shape[0]):
found_edge = False
temp_edge_count = 0
k=0
for j in range(img.shape[1]):
if(img[i,j]<=50):
# Black pixel found - edge
if(found_edge==False):
found_edge = True
temp_edge_count += 1
num_edges[i] = temp_edge_count
edge_start[i][k] = j
k += 1
else:
if(found_edge):
edge_end[i][k-1] = j-1
found_edge = False
x = Counter(num_edges)
y = {z:count for z, count in x.items() if count >= edge_length_threshold and z > 1}
#print(y)
if(len(y)!=0):
edge_condition = sorted(y,key=y.get)[0]
else:
print('num_edges > 1 and length(num_edges) >= threshold not satisfied . . . Lowering threshold to identify matches')
w = {z:count for z, count in x.items() if count < edge_length_threshold and z > 1}
if(len(w)!=0):
print('Found num_edges > 1 and length(num_edges) < threshold!')
edge_condition = sorted(w,key=w.get)[0]
else:
print('Unable to find edge condition . . . check image')
edge_condition = -1
if img_debug:
print('edge condition: ' + str(edge_condition))
if edge_condition == 2: #max(num_edges)==2:
# max num_edges = 2
L1_edge_start = edge_start[:,0][np.argwhere(num_edges==2)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==2)]>60,edge_start[:,0][np.argwhere(num_edges==2)]<300)]
L1_edge_end = edge_end[:,0][np.argwhere(num_edges==2)][np.logical_and(edge_end[:,0][np.argwhere(num_edges==2)]>60,edge_end[:,0][np.argwhere(num_edges==2)]<300)]
if(np.max(L1_edge_start)-np.min(L1_edge_start)>13):
L1_edge_start = L1_edge_start[L1_edge_start >= (np.max(L1_edge_start)-10)]
if(np.max(L1_edge_end)-np.min(L1_edge_end)>15):
L1_edge_end = L1_edge_end[L1_edge_end >= (np.max(L1_edge_end)-10)]
trueLedge_start = L1_edge_start
trueLedge_end = L1_edge_end
R1_edge_start = edge_start[:,1][np.argwhere(num_edges==2)][edge_start[:,1][np.argwhere(num_edges==2)]>350]
R1_edge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350]
if(np.max(R1_edge_start)-np.min(R1_edge_start)>13):
R1_edge_start = R1_edge_start[R1_edge_start <= ( | np.min(R1_edge_start) | numpy.min |
#!/usr/bin/env python
from builtins import str
import unittest
from math import sqrt
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.generic_domain import Generic_Domain
from anuga.abstract_2d_finite_volumes.tag_region import *
#from anuga.config import epsilon
import numpy as num
"""
This is what the mesh in these tests looks like:
3---7
|5 /|
| /6|
2---6
|3 /|
| /2|
1---5
|1 /|
| /0|
0---4
"""
def add_x_y(x, y):
return x+y
def give_me_23(x, y):
return 23.0
class Test_tag_region(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_region_tags(self):
"""get values based on triangle lists."""
#Create basic mesh
points, vertices, boundary = rectangular(1, 3)
#Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.build_tagged_elements_dictionary({'bottom': [0,1],
'top': [4,5],
'all': [0,1,2,3,4,5]})
#Set friction
manning = 0.07
domain.set_quantity('friction', manning)
a = Set_tag_region('bottom', 'friction', 0.09)
b = Set_tag_region('top', 'friction', 1.0)
domain.set_tag_region([a, b])
expected = [[ 0.09, 0.09, 0.09],
[ 0.09, 0.09, 0.09],
[ 0.07, 0.07, 0.07],
[ 0.07, 0.07, 0.07],
[ 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0]]
msg = ("\ndomain.quantities['friction']=%s\nexpected value=%s"
% (str(domain.quantities['friction'].get_values()),
str(expected)))
assert num.allclose(domain.quantities['friction'].get_values(),
expected), msg
#c = Add_Value_To_region('all', 'friction', 10.0)
domain.set_tag_region(Add_value_to_region('all', 'friction', 10.0))
#print domain.quantities['friction'].get_values()
assert num.allclose(domain.quantities['friction'].get_values(),
[[ 10.09, 10.09, 10.09],
[ 10.09, 10.09, 10.09],
[ 10.07, 10.07, 10.07],
[ 10.07, 10.07, 10.07],
[ 11.0, 11.0, 11.0],
[ 11.0, 11.0, 11.0]])
# trying a function
domain.set_tag_region(Set_tag_region('top', 'friction', add_x_y))
#print domain.quantities['friction'].get_values()
assert num.allclose(domain.quantities['friction'].get_values(),
[[ 10.09, 10.09, 10.09],
[ 10.09, 10.09, 10.09],
[ 10.07, 10.07, 10.07],
[ 10.07, 10.07, 10.07],
[ 5./3, 2.0, 2./3],
[ 1.0, 2./3, 2.0]])
domain.set_quantity('elevation', 10.0)
domain.set_quantity('stage', 10.0)
domain.set_tag_region(Add_value_to_region('top', 'stage', 1.0,initial_quantity='elevation'))
#print domain.quantities['stage'].get_values()
assert num.allclose(domain.quantities['stage'].get_values(),
[[ 10., 10., 10.],
[ 10., 10., 10.],
[ 10., 10., 10.],
[ 10., 10., 10.],
[ 11.0, 11.0, 11.0],
[ 11.0, 11.0, 11.0]])
domain.set_quantity('elevation', 10.0)
domain.set_quantity('stage', give_me_23)
#this works as well, (is cleaner, but doesn't work for regions)
#domain.set_quantity('stage',
# domain.quantities['stage'].vertex_values+ \
# domain.quantities['elevation'].vertex_values)
domain.set_tag_region(Add_quantities('top', 'elevation','stage'))
#print domain.quantities['stage'].get_values()
assert num.allclose(domain.quantities['elevation'].get_values(),
[[ 10., 10., 10.],
[ 10., 10., 10.],
[ 10., 10., 10.],
[ 10., 10., 10.],
[ 33., 33.0, 33.],
[ 33.0, 33., 33.]])
def test_unique_vertices(self):
"""get values based on triangle lists."""
#Create basic mesh
points, vertices, boundary = rectangular(1, 3)
#Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.build_tagged_elements_dictionary({'bottom':[0,1],
'top':[4,5],
'all':[0,1,2,3,4,5]})
#Set friction
manning = 0.07
domain.set_quantity('friction', manning)
a = Set_tag_region('bottom', 'friction', 0.09, location = 'unique vertices')
domain.set_tag_region(a)
assert num.allclose(domain.quantities['friction'].get_values(),
[[ 0.09, 0.09, 0.09],
[ 0.09, 0.09, 0.09],
[ 0.09, 0.07, 0.09],
[ 0.07, 0.09, 0.07],
[ 0.07, 0.07, 0.07],
[ 0.07, 0.07, 0.07]])
def test_unique_verticesII(self):
"""
get values based on triangle lists.
"""
#Create basic mesh
points, vertices, boundary = rectangular(1, 3)
#Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.build_tagged_elements_dictionary({'bottom':[0,1],
'top':[4,5],
'all':[0,1,2,3,4,5]})
#Set friction
manning = 0.07
domain.set_quantity('friction', manning)
domain.set_tag_region(Add_value_to_region('bottom', 'friction', 1.0,initial_quantity='friction', location = 'unique vertices'))
#print domain.quantities['friction'].get_values()
assert num.allclose(domain.quantities['friction'].get_values(),\
[[ 1.07, 1.07, 1.07],
[ 1.07, 1.07, 1.07],
[ 1.07, 0.07, 1.07],
[ 0.07, 1.07, 0.07],
[ 0.07, 0.07, 0.07],
[ 0.07, 0.07, 0.07]])
def test_unique_vertices_average_loc_vert(self):
"""Get values based on triangle lists."""
#Create basic mesh
points, vertices, boundary = rectangular(1, 3)
#Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.build_tagged_elements_dictionary({'bottom': [0, 1],
'top': [4, 5],
'not_bottom': [2, 3, 4, 5]})
#Set friction
domain.set_quantity('friction', add_x_y)
av_bottom = 2.0 / 3.0
add = 60.0
calc_frict = av_bottom + add
domain.set_tag_region(Add_value_to_region('bottom', 'friction', add,
initial_quantity='friction',
location='vertices',
average=True))
frict_points = domain.quantities['friction'].get_values()
expected = [calc_frict, calc_frict, calc_frict]
msg = ('frict_points[0]=%s\nexpected=%s'
% (str(frict_points[0]), str(expected)))
assert num.allclose(frict_points[0], expected), msg
msg = ('frict_points[1]=%s\nexpected=%s'
% (str(frict_points[1]), str(expected)))
assert num.allclose(frict_points[1], expected), msg
def test_unique_vertices_average_loc_unique_vert_1_5(self):
"""
get values based on triangle lists.
"""
#Create basic mesh
points, vertices, boundary = rectangular(1, 3)
#Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.set_flow_algorithm('1_5')
domain.build_tagged_elements_dictionary({'bottom':[0,1],
'top':[4,5],
'not_bottom':[2,3,4,5]})
#Set friction
domain.set_quantity('friction', add_x_y)
av_bottom = 2.0/3.0
add = 60.0
calc_frict = av_bottom + add
domain.set_tag_region(Add_value_to_region('bottom', 'friction', add,
initial_quantity='friction',
location='unique vertices',
average=True
))
#print domain.quantities['friction'].get_values()
frict_points = domain.quantities['friction'].get_values()
assert num.allclose(frict_points[0],\
[ calc_frict, calc_frict, calc_frict])
assert num.allclose(frict_points[1],\
[ calc_frict, calc_frict, calc_frict])
assert num.allclose(frict_points[2],\
[ calc_frict, 1.0 + 2.0/3.0, calc_frict])
assert num.allclose(frict_points[3],\
[ 2.0/3.0,calc_frict, 1.0 + 2.0/3.0])
def test_unique_vertices_average_loc_unique_vert_de0(self):
"""
get values based on triangle lists.
"""
#Create basic mesh
points, vertices, boundary = rectangular(1, 3)
#Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.build_tagged_elements_dictionary({'bottom':[0,1],
'top':[4,5],
'not_bottom':[2,3,4,5]})
#Set friction
domain.set_quantity('friction', add_x_y)
av_bottom = 2.0/3.0
add = 60.0
calc_frict = av_bottom + add
domain.set_tag_region(Add_value_to_region('bottom', 'friction', add,
initial_quantity='friction',
location='unique vertices',
average=True
))
#print domain.quantities['friction'].get_values()
frict_points = domain.quantities['friction'].get_values()
expected0 = [ 60.77777778, 60.77777778, 60.77777778]
expected1 = [ 60.77777778, 60.77777778, 60.77777778]
expected2 = [ 60.77777778, 1.66666667, 60.77777778]
expected3 = [ 0.66666667, 60.77777778, 1.66666667]
assert num.allclose(frict_points[0],expected0)
assert | num.allclose(frict_points[1],expected1) | numpy.allclose |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements common utility functions and classes for the star
finders.
"""
import math
import warnings
from astropy.stats import gaussian_fwhm_to_sigma
import numpy as np
from .peakfinder import find_peaks
from ..utils._convolution import _filter_data
from ..utils.exceptions import NoDetectionsWarning
class _StarFinderKernel:
"""
Class to calculate a 2D Gaussian density enhancement kernel.
The kernel has negative wings and sums to zero. It is used by both
`DAOStarFinder` and `IRAFStarFinder`.
Parameters
----------
fwhm : float
The full-width half-maximum (FWHM) of the major axis of the
Gaussian kernel in units of pixels.
ratio : float, optional
The ratio of the minor and major axis standard deviations of the
Gaussian kernel. ``ratio`` must be strictly positive and less
than or equal to 1.0. The default is 1.0 (i.e., a circular
Gaussian kernel).
theta : float, optional
The position angle (in degrees) of the major axis of the
Gaussian kernel, measured counter-clockwise from the positive x
axis.
sigma_radius : float, optional
The truncation radius of the Gaussian kernel in units of sigma
(standard deviation) [``1 sigma = FWHM /
2.0*sqrt(2.0*log(2.0))``]. The default is 1.5.
normalize_zerosum : bool, optional
Whether to normalize the Gaussian kernel to have zero sum, The
default is `True`, which generates a density-enhancement kernel.
Notes
-----
The class attributes include the dimensions of the elliptical kernel
and the coefficients of a 2D elliptical Gaussian function expressed
as:
``f(x,y) = A * exp(-g(x,y))``
where
``g(x,y) = a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2``
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
def __init__(self, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5,
normalize_zerosum=True):
if fwhm < 0:
raise ValueError('fwhm must be positive.')
if ratio <= 0 or ratio > 1:
raise ValueError('ratio must be positive and less or equal '
'than 1.')
if sigma_radius <= 0:
raise ValueError('sigma_radius must be positive.')
self.fwhm = fwhm
self.ratio = ratio
self.theta = theta
self.sigma_radius = sigma_radius
self.xsigma = self.fwhm * gaussian_fwhm_to_sigma
self.ysigma = self.xsigma * self.ratio
theta_radians = np.deg2rad(self.theta)
cost = np.cos(theta_radians)
sint = np.sin(theta_radians)
xsigma2 = self.xsigma**2
ysigma2 = self.ysigma**2
self.a = (cost**2 / (2.0 * xsigma2)) + (sint**2 / (2.0 * ysigma2))
# CCW
self.b = 0.5 * cost * sint * ((1.0 / xsigma2) - (1.0 / ysigma2))
self.c = (sint**2 / (2.0 * xsigma2)) + (cost**2 / (2.0 * ysigma2))
# find the extent of an ellipse with radius = sigma_radius*sigma;
# solve for the horizontal and vertical tangents of an ellipse
# defined by g(x,y) = f
self.f = self.sigma_radius**2 / 2.0
denom = (self.a * self.c) - self.b**2
# nx and ny are always odd
self.nx = 2 * int(max(2, math.sqrt(self.c * self.f / denom))) + 1
self.ny = 2 * int(max(2, math.sqrt(self.a * self.f / denom))) + 1
self.xc = self.xradius = self.nx // 2
self.yc = self.yradius = self.ny // 2
# define the kernel on a 2D grid
yy, xx = np.mgrid[0:self.ny, 0:self.nx]
self.circular_radius = np.sqrt((xx - self.xc)**2 + (yy - self.yc)**2)
self.elliptical_radius = (self.a * (xx - self.xc)**2
+ 2.0 * self.b * (xx - self.xc)
* (yy - self.yc)
+ self.c * (yy - self.yc)**2)
self.mask = np.where(
(self.elliptical_radius <= self.f)
| (self.circular_radius <= 2.0), 1, 0).astype(int)
self.npixels = self.mask.sum()
# NOTE: the central (peak) pixel of gaussian_kernel has a value of 1.
self.gaussian_kernel_unmasked = np.exp(-self.elliptical_radius)
self.gaussian_kernel = self.gaussian_kernel_unmasked * self.mask
# denom = variance * npixels
denom = ((self.gaussian_kernel**2).sum()
- (self.gaussian_kernel.sum()**2 / self.npixels))
self.relerr = 1.0 / np.sqrt(denom)
# normalize the kernel to zero sum
if normalize_zerosum:
self.data = ((self.gaussian_kernel
- (self.gaussian_kernel.sum() / self.npixels))
/ denom) * self.mask
else:
self.data = self.gaussian_kernel
self.shape = self.data.shape
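# Hedged usage sketch (illustrative, not part of the original module): with
# normalize_zerosum=True the density-enhancement kernel sums to (numerically)
# zero over its footprint, which is what makes the detection insensitive to a
# constant background level. The parameter values are arbitrary.
def _demo_starfinder_kernel():
    kernel = _StarFinderKernel(fwhm=3.0, ratio=0.8, theta=30.0)
    assert kernel.data.shape == (kernel.ny, kernel.nx)
    assert abs(np.sum(kernel.data)) < 1e-8
    return kernel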
class _StarCutout:
"""
Class to hold a 2D image cutout of a single star for the star finder
classes.
Parameters
----------
data : 2D array_like
The cutout 2D image from the input unconvolved 2D image.
convdata : 2D array_like
The cutout 2D image from the convolved 2D image.
slices : tuple of two slices
A tuple of two slices representing the minimal box of the cutout
from the original image.
xpeak, ypeak : float
The (x, y) pixel coordinates of the peak pixel.
kernel : `_StarFinderKernel`
The convolution kernel. The shape of the kernel must match that
of the input ``data``.
threshold_eff : float
The absolute image value above which to select sources. This
threshold should be the threshold value input to the star finder
class multiplied by the kernel relerr.
"""
def __init__(self, data, convdata, slices, xpeak, ypeak, kernel,
threshold_eff):
self.data = data
self.convdata = convdata
self.slices = slices
self.xpeak = xpeak
self.ypeak = ypeak
self.kernel = kernel
self.threshold_eff = threshold_eff
self.shape = data.shape
self.nx = self.shape[1] # always odd
self.ny = self.shape[0] # always odd
self.cutout_xcenter = int(self.nx // 2)
self.cutout_ycenter = int(self.ny // 2)
self.xorigin = self.slices[1].start # in original image
self.yorigin = self.slices[0].start # in original image
self.mask = kernel.mask # kernel mask
self.npixels = kernel.npixels # unmasked pixels
self.data_masked = self.data * self.mask
def _find_stars(data, kernel, threshold_eff, min_separation=None,
mask=None, exclude_border=False):
"""
Find stars in an image.
Parameters
----------
data : 2D array_like
The 2D array of the image.
kernel : `_StarFinderKernel`
The convolution kernel.
threshold_eff : float
The absolute image value above which to select sources. This
threshold should be the threshold input to the star finder class
multiplied by the kernel relerr.
min_separation : float, optional
The minimum separation for detected objects in pixels.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are ignored when searching for stars.
exclude_border : bool, optional
Set to `True` to exclude sources found within half the size of
the convolution kernel from the image borders. The default is
`False`, which is the mode used by IRAF's `DAOFIND`_ and
`starfind`_ tasks.
Returns
-------
objects : list of `_StarCutout`
A list of `_StarCutout` objects containing the image cutout for
each source.
.. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind
.. _starfind: https://iraf.net/irafhelp.php?val=starfind
"""
convolved_data = _filter_data(data, kernel.data, mode='constant',
fill_value=0.0, check_normalization=False)
# define a local footprint for the peak finder
if min_separation is None: # daofind
footprint = kernel.mask.astype(bool)
else:
# define a circular footprint
idx = np.arange(-min_separation, min_separation + 1)
xx, yy = np.meshgrid(idx, idx)
footprint = | np.array((xx**2 + yy**2) <= min_separation**2, dtype=int) | numpy.array |
import numpy as np
import tensorflow as tf
import glob
import os
import re
from multiprocessing import Value
from src.readers.file_reader import NReader, AerReader
from src.readers.event_reader import EventReader
from src.readers.file_reader import FileReader
class FileAnnotationsReader(FileReader):
def __init__(self, **kargs):
super().__init__(**kargs)
def read_annotation(self, filename):
bboxes = np.load(filename)
return bboxes
def read_example_and_annotation(self, events_filename):
filename, ext = os.path.splitext(events_filename)
bboxes_filename = os.path.join(os.path.dirname(filename), "annotations", os.path.basename(filename) + ".npy")
l, x, y, ts, p = self.read_example(events_filename)
bboxes = self.read_annotation(bboxes_filename)
return l, x, y, ts, p, bboxes
class DetectionReader(EventReader, FileAnnotationsReader):
"""
A modified version of the reader that considers labels as features allowing padding. The main difference is
the fact that next_batch_** functions will return [length, labels, features...] instead of
[labels, length, features...], and the fact that preprocessing function have (length, label, x, y, ts, p) arguments
"""
def _init(self, path, validation_size=None, test_size=None, tmp_dir=None, seed=1234):
# Sets the seed
np.random.seed(seed)
self._seed = seed
self._path = path
self._tmp_dir = tmp_dir
# Loads the filenames of the whole dataset
test_filenames = glob.glob(os.path.join(path, 'test', '*.*'))
train_filenames = glob.glob(os.path.join(path, 'train', '*.*'))
validation_filenames = glob.glob(os.path.join(path, 'validation', '*.*'))
params_filename = os.path.join(path, 'params.npz')
if len(test_filenames) == 0 or len(train_filenames) == 0 or len(validation_filenames) == 0 or \
not os.path.exists(params_filename):
raise Exception("The provided path does not contain any data or the directory structure is not valid.")
params_values= np.load(params_filename)
num_classes = np.asscalar(params_values['num_classes'])
# Computes a mapping between directory names and integer labels
self._labels = list(params_values['label_to_idx'])
self._dir_to_label = dict(zip(params_values['label_to_idx'], np.arange(num_classes)))
# Shuffles the filenames in-place
| np.random.shuffle(train_filenames) | numpy.random.shuffle |
#!/usr/bin/python3
#coding=utf-8
import os
import sys
sys.path.insert(0, '../')
sys.dont_write_bytecode = True
import cv2
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import dataset
from net import F3Net
from transform import *
import pydensecrf.densecrf as dcrf
class Test(object):
def __init__(self, Dataset, Network, path):
## dataset
self.cfg = Dataset.Config(datapath=path, snapshot=args.model, mode='test')
self.data = Dataset.Data(self.cfg)
self.loader = DataLoader(self.data, batch_size=1, shuffle=False, num_workers=8)
## network
self.net = Network(self.cfg)
self.net.train(False)
self.net.to('cuda:1')
self.path = path
def save(self):
with torch.no_grad():
import datetime
#start = datetime.datetime.now()
cnt = 1
total = datetime.datetime(1999,1,1)
for image, mask, shape, name in self.loader:
#image.shape (1,3,352,352)
#shape: init img shape ,which is for pre_mask to match the size of init img
image = image.to('cuda:1').float()
start = datetime.datetime.now()
out1u, out2u, out2r, out3r, out4r, out5r = self.net(image, shape)
total += datetime.datetime.now()-start
print("inference time: ",(total-datetime.datetime(1999,1,1))/cnt)
out = out2u
pred = (torch.sigmoid(out[0,0])*255).cpu().numpy()
#
# Q = None
# if args.crf:
# Q = self.dense_crf(user_img.numpy().astype(np.uint8),pred)
head = '../eval/maps/F3Net/'+ self.cfg.datapath.split('/')[-1]
if not os.path.exists(head):
os.makedirs(head)
cv2.imwrite(head+'/'+name[0]+'.png', | np.round(pred) | numpy.round |
import argparse
import numpy as np
def solve(*args, **kwargs):
return None
def main(args):
print('Solving')
print('Input: {0}'.format(args.input))
print('Output: {0}'.format(args.output))
with open(args.input, 'r') as in_file:
import argparse
import numpy as np
import matplotlib.pyplot as plt
import utils.parallel as parallel
def preprocess(idx, photo1: tuple, all_photos: list):
n = len(all_photos)
intersections = np.zeros(n, np.int16)
diffsizehalf = np.zeros(n, np.int16)
u = np.zeros(n, np.int16)
tags1 = photo1[3]
for i, photo2 in enumerate(all_photos):
if idx == i:
continue
tags2 = photo2[3]
num_intersections = len(tags1.intersection(tags2))
if num_intersections == 0:
continue
h = int(np.abs(len(tags1) - len(tags2))) // 2
intersections[i] = num_intersections
diffsizehalf[i] = h
if photo2[1]:
u[i] = intersections[i]
else:
u[i] = np.maximum(intersections[i] - diffsizehalf[i], 0)
return idx, intersections, diffsizehalf, u
def merge(v1, v2):
assert v1[1] == v2[1] == True
merged_tags = v1[3].union(v2[3])
return [v1[0], v2[0]], False, len(merged_tags), merged_tags
#def select_vertical(photos):
def solve(photos):
tags_set = set()
n = len(photos)
intersections = np.zeros((n, n), dtype=np.int16)
diffsizehalf = np.zeros((n, n), dtype=np.int16)
"""
for id, is_vertical, num_tags, tags in photos:
tags_set.intersection_update(tags)
for t in tags:
tags_set.add(t)
"""
sorted_by_num_tags = sorted(photos, key=lambda v: v[2])
"""
p = parallel.ParallelExecutor('thread')
with p:
for i in range(n - 1):
p.dispatch(preprocess, i, sorted_by_num_tags[i], sorted_by_num_tags)
for res in p.iterate_results():
idx, intersections_local, diffsizehalf_local = res
intersections[idx, :] = intersections_local
diffsizehalf[idx, :] = diffsizehalf_local
"""
results = []
start_idx = 0
#horizontal = list(filter(lambda p: p[1] == False, sorted_by_num_tags))
#vertical = filter(lambda p: p[1] == True, sorted_by_num_tags)
merged = []
vertical = list(filter(lambda p: p[1], sorted_by_num_tags))
while True:
if len(vertical) == 1:
vertical.remove(vertical[0])
if len(vertical) == 0:
break
v1 = vertical[np.random.choice(len(vertical))]
_, v1_intersections, _, _ = preprocess(v1[0], v1, vertical)
non_zero_indices = | np.nonzero(v1_intersections) | numpy.nonzero |
import numpy as np
import matplotlib.pyplot as plt
from utils.unit_conversions import db_to_lin
from utils import constants
from .aoa import make_gain_functions
def crlb(snr, M, g, g_dot, psi_samples, psi_true):
"""
Computes the CRLB for a directional antenna with amplitude measurements taken at a series of angles. Supports M
measurements from each of N different angles.
If there are multiple true angles of arrival provided (psi_true), then the CRLB is computed independently for each
one.
Ported from MATLAB Code.
<NAME>
14 January 2021
:param snr: Signal-to-Noise ratio [dB]
:param M: Number of samples for each antenna position
:param g: Function handle to g(psi)
:param g_dot: Function handle to g_dot(psi)
:param psi_samples: The sampled steering angles (radians)
:param psi_true: The true angle of arrival (radians)
:return crlb: Lower bound on the Mean Squared Error of an unbiased estimation of psi (radians)
"""
# Convert SNR from dB to linear units
snr_lin = db_to_lin(snr)
# Evaluate the antenna pattern and antenna gradient at each of the steering angles sampled.
g_vec = np.array([g(psi-psi_true) for psi in psi_samples])
g_dot_vec = np.array([g_dot(psi-psi_true) for psi in psi_samples])
# Pre-compute steering vector inner products
g_g = np.sum(g_vec * g_vec, axis=0)
g_dot_g = np.sum(g_vec * g_dot_vec, axis=0)
g_dot_g_dot = np.sum(g_dot_vec * g_dot_vec, axis=0)
# Compute CRLB for each true angle theta
jacobian = 2*M*snr_lin*(g_dot_g_dot-g_dot_g**2 / g_g) # Eq 7.25
return 1./jacobian # 1 x numel(th)
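# Hedged usage sketch (illustrative): evaluating the CRLB for a small Adcock
# array over a few candidate true bearings; this mirrors the setup used in
# run_example below and assumes make_gain_functions returns vectorized g, g_dot.
def _demo_crlb():
    g, g_dot = make_gain_functions(type='adcock', d_lam=0.25, psi_0=0.0)
    psi_samples = np.deg2rad(np.linspace(-180, 180, 10, endpoint=False))
    psi_true = np.deg2rad(np.array([0.0, 5.0, 10.0]))
    bound = crlb(snr=10.0, M=100, g=g, g_dot=g_dot,
                 psi_samples=psi_samples, psi_true=psi_true)
    assert bound.shape == psi_true.shape  # one variance bound per true angle
    return bound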
def compute_df(s, psi_samples, g, psi_res = 0.1, min_psi = -np.pi, max_psi = np.pi):
"""
Computes an estimate of the angle of arrival psi (in radians) for a set of amplitude measurements s, taken at
various steering angles psi_samples
Ported from MATLAB Code.
<NAME>
14 January 2021
:param s: Set of num_samples measurements taken at each of num_steering steering angles.
:param psi_samples: Steering angles at which measurements were taken [radians]
:param g: Function handle to gain equation, g(psi)
:param psi_res: Desired resolution for output estimate [default = .1]
:param min_psi: Lower bound on valid region for psi [default = -pi]
:param max_psi: Upper bound on valid region for psi [default = pi]
:return: Estimated angle of arrival [radians]
"""
# Determine how many samples exist
num_steering, num_samples = np.shape(s)
# Initialize the outer loop to .1 radians
this_psi_res = .1
psi_vec = np.arange(start=min_psi, stop=max_psi + this_psi_res, step=this_psi_res) # Set up search vector
psi = 0. # Initialize output
# Iteratively search for the optimal index, until the sample spacing is less than the desired resolution
if psi_res > this_psi_res:
psi_res = this_psi_res
while this_psi_res >= psi_res:
# Find the difference between each possible AoA (psi_vec) and the measurement steering angles
psi_diff = np.expand_dims(psi_samples, axis=1) - np.expand_dims(psi_vec, axis=0)
# Compute the expected gain pattern for each candidate AoA value
g_vec = np.reshape(np.asarray([g(psi) for psi in psi_diff]), newshape=psi_diff.shape)
# Find the candidate AoA value that minimizes the MSE between the
# expected and received gain patterns.
sse = np.sum(np.sum((np.reshape(s, [num_steering, 1, num_samples])-np.expand_dims(g_vec, axis=2))**2,
axis=2), axis=0)
idx_opt = np.argmin(sse)
psi = psi_vec[idx_opt]
# Set up the bounds and resolution for the next iteration
this_psi_res = this_psi_res / 10
min_psi = psi_vec[np.maximum(0, idx_opt - 2)]
max_psi = psi_vec[np.minimum(np.size(psi_vec)-1, idx_opt + 2)]
psi_vec = np.arange(start=min_psi, stop=max_psi + this_psi_res, step=this_psi_res)
return psi
def run_example():
"""
Example evaluation of an Adcock and Rectangular-aperture DF receiver
Ported from MATLAB code.
<NAME>
14 January 2021
:return: None
"""
# ================================ Adcock Test Script ================================
# Create the antenna pattern generating function
# --- NOTE --- g,g_dot take radian inputs (psi, not theta)
d_lam = .25
[g,g_dot] = make_gain_functions(type='adcock', d_lam=d_lam, psi_0=0.)
# Generate the angular samples and true gain values
th_true = 5. # degrees
psi_true = np.deg2rad(th_true) # radians
psi_res = .001 # desired resolution for the multi-stage search; see compute_df() for details
num_angles = 10 # Number of angular samples
th = np.linspace(start=-180, stop=180, num=num_angles, endpoint=False) # evenly spaced across unit circle
psi = np.deg2rad(th)
x = g(psi-psi_true) # Actual gain values
# Set up the parameter sweep
num_samples_vec = | np.array([1, 10, 100]) | numpy.array |