| prompt (string, 19–879k chars) | completion (string, 3–53.8k chars) | api (string, 8–59 chars) |
|---|---|---|
import time
import numpy as np
import pdb
import math
import pandas as pd
import json
import sys
import torch
from collections import defaultdict
import random
import copy
from query_representation.utils import *
from evaluation.eval_fns import *
from .dataset import QueryDataset, pad_sets, to_variable,\
mscn_collate_fn,mscn_collate_fn_together
from .nets import *
from evaluation.flow_loss import FlowLoss, \
get_optimization_variables, get_subsetg_vectors
from torch.utils import data
from torch.nn.utils.clip_grad import clip_grad_norm_
from torch.optim.swa_utils import AveragedModel, SWALR
import wandb
import torch.nn.functional as F  # needed by ranknet_loss below
import pickle
QERR_MIN_EPS=0.0
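# Q-error is the symmetric multiplicative error max(true/est, est/true); with
# log-normalized targets, minimizing MSE closely tracks minimizing q-error.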
def qloss_torch(yhat, ytrue):
assert yhat.shape == ytrue.shape
epsilons = to_variable([QERR_MIN_EPS]*len(yhat)).float()
ytrue = torch.max(ytrue, epsilons)
yhat = torch.max(yhat, epsilons)
errors = torch.max( (ytrue / yhat), (yhat / ytrue))
return errors
def mse_pos(yhat, ytrue):
assert yhat.shape == ytrue.shape
errors = torch.nn.MSELoss(reduction="none")(yhat, ytrue)
for i,err in enumerate(errors):
if yhat[i] < ytrue[i]:
errors[i] *= 10
return errors
def mse_ranknet(yhat, ytrue):
mseloss = torch.nn.MSELoss(reduction="mean")(yhat, ytrue)
rloss = ranknet_loss(yhat, ytrue)
return mseloss + 0.1*rloss
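# ranknet_loss below is the standard RankNet pairwise objective: pairwise
# score differences are turned into probabilities with a sigmoid and scored by
# cross-entropy against clamped {-1, 0, 1} label differences (upper triangle
# only, mean-reduced).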
def ranknet_loss(batch_pred, batch_label):
'''
    :param batch_pred: [batch, ranking_size] each row holds the relevance predictions for documents within a ltr_adhoc query
    :param batch_label: [batch, ranking_size] each row holds the ground-truth relevance grades for documents within a ltr_adhoc query
:return:
'''
batch_pred = batch_pred.unsqueeze(0)
batch_label = batch_label.unsqueeze(0)
# batch_pred = batch_pred.T
# batch_label = batch_label.T
sigma = 1.0
batch_s_ij = torch.unsqueeze(batch_pred, dim=2) - torch.unsqueeze(batch_pred, dim=1) # computing pairwise differences w.r.t. predictions, i.e., s_i - s_j
batch_p_ij = 1.0 / (torch.exp(-sigma * batch_s_ij) + 1.0)
batch_std_diffs = torch.unsqueeze(batch_label, dim=2) - torch.unsqueeze(batch_label, dim=1) # computing pairwise differences w.r.t. standard labels, i.e., S_{ij}
batch_Sij = torch.clamp(batch_std_diffs, min=-1.0, max=1.0) # ensuring S_{ij} \in {-1, 0, 1}
batch_std_p_ij = 0.5 * (1.0 + batch_Sij)
# about reduction, both mean & sum would work, mean seems straightforward due to the fact that the number of pairs differs from query to query
batch_loss = F.binary_cross_entropy(input=torch.triu(batch_p_ij, diagonal=1), target=torch.triu(batch_std_p_ij, diagonal=1), reduction='mean')
return batch_loss
class CardinalityEstimationAlg():
def __init__(self, *args, **kwargs):
# TODO: set each of the kwargs as variables
pass
def train(self, training_samples, **kwargs):
pass
def test(self, test_samples, **kwargs):
'''
@test_samples: [sql_rep objects]
@ret: [dicts]. Each element is a dictionary with cardinality estimate
for each subset graph node (subplan). Each key should be ' ' separated
list of aliases / table names
'''
pass
def get_exp_name(self):
name = self.__str__()
if not hasattr(self, "rand_id"):
self.rand_id = str(random.getrandbits(32))
print("Experiment name will be: ", name + self.rand_id)
name += self.rand_id
return name
def num_parameters(self):
'''
    Size of the model's parameters, so sizes can be compared across algorithms.
'''
return 0
def __str__(self):
return self.__class__.__name__
def save_model(self, save_dir="./", suffix_name=""):
pass
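# Minimal sketch of plugging in a custom estimator (hypothetical subclass,
# shown only to illustrate the interface; the oracle reuses get_true_ests()
# defined just below):
#
#   class TrueCardinalities(CardinalityEstimationAlg):
#       def test(self, test_samples, **kwargs):
#           return get_true_ests(test_samples, featurizer=None)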
def get_true_ests(samples, featurizer):
all_ests = []
query_idx = 0
for sample in samples:
ests = {}
node_keys = list(sample["subset_graph"].nodes())
if SOURCE_NODE in node_keys:
node_keys.remove(SOURCE_NODE)
node_keys.sort()
for subq_idx, node in enumerate(node_keys):
cards = sample["subset_graph"].nodes()[node]["cardinality"]
alias_key = node
est_card = cards["actual"]
# idx = query_idx + subq_idx
# est_card = featurizer.unnormalize(pred[idx], cards["total"])
# assert est_card > 0
ests[alias_key] = est_card
all_ests.append(ests)
query_idx += len(node_keys)
return all_ests
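# The format_model_test_output* helpers below map the flat prediction vector
# produced by _eval_ds back to per-query dicts (one entry per subplan node or
# join edge), unnormalized into cardinality space by the featurizer.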
def format_model_test_output_joinkey(pred, samples, featurizer):
all_ests = []
query_idx = 0
for si, sample in enumerate(samples):
ests = {}
edge_keys = list(sample["subset_graph"].edges())
edge_keys.sort(key = lambda x: str(x))
subq_idx = 0
for _, edge in enumerate(edge_keys):
# cards = sample["subset_graph"].nodes()[node]["cardinality"]
edgek = edge
idx = query_idx + subq_idx
est_card = featurizer.unnormalize(pred[idx], None)
assert est_card >= 0
ests[edgek] = est_card
subq_idx += 1
all_ests.append(ests)
query_idx += subq_idx
return all_ests
def format_model_test_output(pred, samples, featurizer):
all_ests = []
query_idx = 0
# print("len pred: ", len(pred))
for si, sample in enumerate(samples):
ests = {}
node_keys = list(sample["subset_graph"].nodes())
if SOURCE_NODE in node_keys:
node_keys.remove(SOURCE_NODE)
node_keys.sort()
subq_idx = 0
for _, node in enumerate(node_keys):
if featurizer.max_num_tables != -1 and \
featurizer.max_num_tables < len(node):
# dummy estimate
ests[node] = 1.0
continue
cards = sample["subset_graph"].nodes()[node]["cardinality"]
alias_key = node
idx = query_idx + subq_idx
if "total" in cards:
est_card = featurizer.unnormalize(pred[idx], cards["total"])
else:
est_card = featurizer.unnormalize(pred[idx], None)
assert est_card > 0
ests[alias_key] = est_card
subq_idx += 1
all_ests.append(ests)
# query_idx += len(node_keys)
query_idx += subq_idx
return all_ests
class NN(CardinalityEstimationAlg):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
for k, val in kwargs.items():
self.__setattr__(k, val)
        # when estimates are log-normalized, optimizing MSE is essentially
        # equivalent to optimizing q-error
self.num_workers = 8
if self.loss_func_name == "qloss":
self.loss_func = qloss_torch
self.load_query_together = False
elif self.loss_func_name == "mse":
self.loss_func = torch.nn.MSELoss(reduction="none")
self.load_query_together = False
elif self.loss_func_name == "mse_pos":
self.loss_func = mse_pos
self.load_query_together = False
elif self.loss_func_name == "flowloss":
self.loss_func = FlowLoss.apply
self.load_query_together = True
if self.mb_size > 16:
self.mb_size = 1
self.num_workers = 1
# self.collate_fn = None
elif self.loss_func_name == "mse+ranknet":
self.loss_func = mse_ranknet
self.load_query_together = True
if self.mb_size > 16:
self.mb_size = 1
else:
assert False
if self.load_query_together:
self.collate_fn = mscn_collate_fn_together
else:
if hasattr(self, "load_padded_mscn_feats"):
if self.load_padded_mscn_feats:
self.collate_fn = None
else:
self.collate_fn = mscn_collate_fn
else:
self.collate_fn = None
self.eval_fn_handles = []
for efn in self.eval_fns.split(","):
if efn in ["planloss", "ppc2"]:
print("skipping eval fn: ", efn)
continue
self.eval_fn_handles.append(get_eval_fn(efn))
def init_net(self, sample):
net = self._init_net(sample)
print(net)
if self.optimizer_name == "ams":
optimizer = torch.optim.Adam(net.parameters(), lr=self.lr,
amsgrad=True, weight_decay=self.weight_decay)
elif self.optimizer_name == "adam":
optimizer = torch.optim.Adam(net.parameters(), lr=self.lr,
amsgrad=False, weight_decay=self.weight_decay)
elif self.optimizer_name == "adamw":
optimizer = torch.optim.Adam(net.parameters(), lr=self.lr,
amsgrad=False, weight_decay=self.weight_decay)
elif self.optimizer_name == "sgd":
optimizer = torch.optim.SGD(net.parameters(),
lr=self.lr, momentum=0.9, weight_decay=self.weight_decay)
else:
assert False
# if self.use_wandb:
# wandb.watch(net)
return net, optimizer
def periodic_eval(self):
if not self.use_wandb:
return
start = time.time()
curerrs = {}
for st, ds in self.eval_ds.items():
if st == "train":
continue
samples = self.samples[st]
preds, _ = self._eval_ds(ds, samples)
if self.featurizer.card_type == "joinkey":
preds1 = format_model_test_output_joinkey(preds,
samples, self.featurizer)
preds = joinkey_cards_to_subplan_cards(samples, preds1,
"actual", 2)
# def joinkey_cards_to_subplan_cards(samples, joinkey_cards,
# basecard_type, basecard_tables):
else:
preds = format_model_test_output(preds,
samples, self.featurizer)
# do evaluations
for efunc in self.eval_fn_handles:
if "Constraint" in str(efunc):
continue
if "PostgresPlanCost-C" == str(efunc):
if self.true_costs[st] == 0:
truepreds = get_true_ests(samples, self.featurizer)
truecosts = efunc.eval(samples, truepreds,
args=None, samples_type=st,
result_dir=None,
query_dir = None,
user = self.featurizer.user,
db_name = self.featurizer.db_name,
db_host = self.featurizer.db_host,
port = self.featurizer.port,
num_processes = 16,
alg_name = self.__str__(),
save_pdf_plans=False,
use_wandb=False)
self.true_costs[st] = np.sum(truecosts)
truecost = np.sum(truecosts)
else:
truecost = self.true_costs[st]
errors = efunc.eval(samples, preds,
args=None, samples_type=st,
result_dir=None,
user = self.featurizer.user,
query_dir = None,
db_name = self.featurizer.db_name,
db_host = self.featurizer.db_host,
port = self.featurizer.port,
num_processes = 16,
alg_name = self.__str__(),
save_pdf_plans=False,
use_wandb=False)
if "PostgresPlanCost-C" == str(efunc):
assert truecost != 0.0
totcost = np.sum(errors)
relcost = totcost / truecost
key = str(efunc)+"-Relative-"+st
wandb.log({key: relcost, "epoch":self.epoch})
curerrs[key] = round(relcost,4)
else:
err = np.mean(errors)
wandb.log({str(efunc)+"-"+st: err, "epoch":self.epoch})
curerrs[str(efunc)+"-"+st] = round(err,4)
if self.early_stopping == 2:
self.all_errs.append(curerrs)
print("Epoch ", self.epoch, curerrs)
print("periodic_eval took: ", time.time()-start)
def update_flow_training_info(self):
fstart = time.time()
# precompute a whole bunch of training things
self.flow_training_info = []
# farchive = klepto.archives.dir_archive("./flow_info_archive",
# cached=True, serialized=True)
# farchive.load()
new_seen = False
for sample in self.training_samples:
qkey = deterministic_hash(sample["sql"])
# if qkey in farchive:
if False:
subsetg_vectors = farchive[qkey]
assert len(subsetg_vectors) == 10
else:
new_seen = True
subsetg_vectors = list(get_subsetg_vectors(sample,
self.cost_model))
true_cards = np.zeros(len(subsetg_vectors[0]),
dtype=np.float32)
nodes = list(sample["subset_graph"].nodes())
if SOURCE_NODE in nodes:
nodes.remove(SOURCE_NODE)
nodes.sort()
for i, node in enumerate(nodes):
true_cards[i] = \
sample["subset_graph"].nodes()[node]["cardinality"]["actual"]
trueC_vec, dgdxT, G, Q = \
get_optimization_variables(true_cards,
subsetg_vectors[0], self.featurizer.min_val,
self.featurizer.max_val,
self.featurizer.ynormalization,
subsetg_vectors[4],
subsetg_vectors[5],
subsetg_vectors[3],
subsetg_vectors[1],
subsetg_vectors[2],
subsetg_vectors[6],
subsetg_vectors[7],
self.cost_model, subsetg_vectors[-1])
Gv = to_variable(np.zeros(len(subsetg_vectors[0]))).float()
Gv[subsetg_vectors[-2]] = 1.0
trueC_vec = to_variable(trueC_vec).float()
dgdxT = to_variable(dgdxT).float()
G = to_variable(G).float()
Q = to_variable(Q).float()
trueC = torch.eye(len(trueC_vec)).float().detach()
for i, curC in enumerate(trueC_vec):
trueC[i,i] = curC
invG = torch.inverse(G)
v = invG @ Gv
left = (Gv @ torch.transpose(invG,0,1)) @ torch.transpose(Q, 0, 1)
right = Q @ (v)
left = left.detach().cpu()
right = right.detach().cpu()
opt_flow_loss = left @ trueC @ right
del trueC
# print(opt_flow_loss)
# pdb.set_trace()
self.flow_training_info.append((subsetg_vectors, trueC_vec,
opt_flow_loss))
print("precomputing flow info took: ", time.time()-fstart)
def train(self, training_samples, **kwargs):
self.all_errs = []
self.best_model_epoch = -1
self.model_weights = []
self.true_costs = {}
self.true_costs["val"] = 0.0
self.true_costs["test"] = 0.0
# self.true_costs["job"] = 0.0
# self.true_costs["jobm"] = 0.0
assert isinstance(training_samples[0], dict)
self.featurizer = kwargs["featurizer"]
self.training_samples = training_samples
self.seen_subplans = set()
for sample in training_samples:
for node in sample["subset_graph"].nodes():
self.seen_subplans.add(str(node))
self.trainds = self.init_dataset(training_samples,
self.load_query_together)
self.trainloader = data.DataLoader(self.trainds,
batch_size=self.mb_size, shuffle=True,
collate_fn=self.collate_fn,
# num_workers=self.num_workers
)
self.eval_ds = {}
self.samples = {}
self.eval_ds["train"] = self.trainds
if "valqs" in kwargs and len(kwargs["valqs"]) > 0:
self.eval_ds["val"] = self.init_dataset(kwargs["valqs"], False)
self.samples["val"] = kwargs["valqs"]
if self.eval_epoch < self.max_epochs:
# if "valqs" in kwargs and len(kwargs["valqs"]) > 0:
# pass
if "testqs" in kwargs and len(kwargs["testqs"]) > 0:
if len(kwargs["testqs"]) > 400:
ns = int(len(kwargs["testqs"]) / 10)
random.seed(42)
testqs = random.sample(kwargs["testqs"], ns)
else:
testqs = kwargs["testqs"]
self.eval_ds["test"] = self.init_dataset(testqs,
False)
self.samples["test"] = testqs
if "evalqs" in kwargs and len(kwargs["eval_qdirs"]) > 0:
eval_qdirs = kwargs["eval_qdirs"]
for ei, cur_evalqs in enumerate(kwargs["evalqs"]):
evalqname = eval_qdirs[ei]
if "job" in evalqname:
evalqname = "JOB"
elif "imdb" in evalqname:
evalqname = "CEB-IMDb"
if len(cur_evalqs) > 400:
ns = int(len(cur_evalqs) / 10)
random.seed(42)
cur_evalqs = random.sample(cur_evalqs, ns)
self.eval_ds[evalqname] = self.init_dataset(cur_evalqs,
False)
self.true_costs[evalqname] = 0.0
self.samples[evalqname] = cur_evalqs
# self.true_costs["jobm"] = 0.0
# TODO: initialize self.num_features
self.net, self.optimizer = self.init_net(self.trainds[0])
model_size = self.num_parameters()
print("""Training samples: {}, Model size: {}""".
format(len(self.trainds), model_size))
if "flow" in self.loss_func_name:
self.update_flow_training_info()
if self.training_opt == "swa":
self.swa_net = AveragedModel(self.net)
# self.swa_start = self.swa_start
self.swa_scheduler = SWALR(self.optimizer, swa_lr=self.opt_lr)
if self.max_epochs == -1:
total_epochs = 1000
else:
total_epochs = self.max_epochs
if self.early_stopping:
eplosses = []
pct_chngs = []
for self.epoch in range(0,total_epochs):
if self.epoch % self.eval_epoch == 0 \
and self.epoch != 0:
self.periodic_eval()
self.train_one_epoch()
self.model_weights.append(copy.deepcopy(self.net.state_dict()))
# TODO: needs to decide if we should stop training
if self.early_stopping == 1:
if "val" in self.eval_ds:
ds = self.eval_ds["val"]
else:
ds = self.eval_ds["train"]
preds, ys = self._eval_ds(ds)
losses = self.loss_func(torch.from_numpy(preds), torch.from_numpy(ys))
eploss = torch.mean(losses).item()
if len(eplosses) >= 1:
pct = 100* ((eploss-eplosses[-1])/eplosses[-1])
pct_chngs.append(pct)
eplosses.append(eploss)
if len(pct_chngs) > 5:
trailing_chng = np.mean(pct_chngs[-5:-1])
if trailing_chng > -0.1:
print("Going to exit training at epoch: ", self.epoch)
break
elif self.early_stopping == 2:
self.periodic_eval()
ppc_rel = self.all_errs[-1]['PostgresPlanCost-C-Relative-val']
if len(eplosses) >= 1:
pct = 100* ((ppc_rel-eplosses[-1])/eplosses[-1])
pct_chngs.append(pct)
eplosses.append(ppc_rel)
if self.epoch > 2 and pct_chngs[-1] > 1:
print(eplosses)
print(pct_chngs)
# print(eplosses[-5:-1])
# print(pct_chngs[-5:-1])
# revert to model before this epoch's training
print("Going to exit training at epoch: ", self.epoch)
self.best_model_epoch = self.epoch-1
break
if self.training_opt == "swa":
torch.optim.swa_utils.update_bn(self.trainloader, self.swa_net)
if self.best_model_epoch != -1:
print("""training done, will update our model based on validation set""")
assert len(self.model_weights) > 0
self.net.load_state_dict(self.model_weights[self.best_model_epoch])
# self.nets[0].load_state_dict(self.best_model_dict)
# self.nets[0].eval()
def _eval_ds(self, ds, samples=None):
torch.set_grad_enabled(False)
if self.training_opt == "swa":
net = self.swa_net
else:
net = self.net
        # important not to shuffle the data, so the correct order is preserved!
loader = data.DataLoader(ds,
batch_size=5000, shuffle=False,
# collate_fn=self.collate_fn
)
allpreds = []
allys = []
for (xbatch,ybatch,info) in loader:
ybatch = ybatch.to(device, non_blocking=True)
if self.mask_unseen_subplans:
start = time.time()
pf_mask = torch.from_numpy(self.featurizer.pred_onehot_mask).float()
jf_mask = torch.from_numpy(self.featurizer.join_onehot_mask).float()
tf_mask = torch.from_numpy(self.featurizer.table_onehot_mask).float()
for ci,curnode in enumerate(info["node"]):
                    if curnode not in self.seen_subplans:
if self.featurizer.pred_features:
xbatch["pred"][ci] = xbatch["pred"][ci] * pf_mask
if self.featurizer.join_features:
xbatch["join"][ci] = xbatch["join"][ci] * jf_mask
if self.featurizer.table_features:
xbatch["table"][ci] = xbatch["table"][ci] * tf_mask
# print("masking unseen subplans took: ", time.time()-start)
if self.subplan_level_outputs:
pred = net(xbatch).squeeze(1)
idxs = torch.zeros(pred.shape,dtype=torch.bool)
for i, nt in enumerate(info["num_tables"]):
if nt >= 10:
nt = 10
nt -= 1
idxs[i,nt] = True
pred = pred[idxs]
else:
pred = net(xbatch).squeeze(1)
allpreds.append(pred)
allys.append(ybatch)
preds = torch.cat(allpreds).detach().cpu().numpy()
ys = torch.cat(allys).detach().cpu().numpy()
torch.set_grad_enabled(True)
if self.heuristic_unseen_preds == "pg" and samples is not None:
newpreds = []
query_idx = 0
for sample in samples:
node_keys = list(sample["subset_graph"].nodes())
if SOURCE_NODE in node_keys:
node_keys.remove(SOURCE_NODE)
node_keys.sort()
for subq_idx, node in enumerate(node_keys):
cards = sample["subset_graph"].nodes()[node]["cardinality"]
idx = query_idx + subq_idx
est_card = preds[idx]
                    # were all columns in this subplan + constants seen in the
                    # training set? The fallback heuristic is unfinished, so
                    # keep the model's estimate instead of halting in pdb.
                    newpreds.append(est_card)
                query_idx += len(node_keys)
            preds = np.array(newpreds)
return preds, ys
def _get_onehot_mask(self, vec):
        tmask = ~np.array(vec, dtype="bool")  # api: numpy.array
import os
import numpy as np
from itertools import chain
from pathlib import Path
from functools import wraps, partial
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from scipy.optimize import curve_fit
from astropy.stats import mad_std
from astropy.wcs import WCS
from astropy.table import Table
from astropy.io import fits
from .database.api import get_scan
from .kiss_data import KissData
from .kids_plots import show_contmap
from kidsdata.settings import CALIB_DIR
plt.ion()
__all__ = ["beammap", "contmap", "contmap_coadd", "check_pointing", "skydip"]
def read_scan(scan, array=None, extra_data=None):
"""Read all common and some additionnal extra
Parameters
----------
scan : int
the scan number
array : str (A|B|None)
read only one array (default: all)
extra_data : list of str
list of extra data to read
Returns
-------
kd : KissRawData
the corresponding object
"""
kd = KissData(get_scan(scan))
list_data = kd.names.DataSc + kd.names.DataUc
# Do not use F_sky_* from file....
remove_Uc = ["F_sky_Az", "F_sky_El"]
remove_Ud = ["F_tel_Az", "F_tel_El"]
for item in chain(remove_Uc, remove_Ud):
if item in list_data:
list_data.remove(item)
# Add extra data at read time directly... otherwise there is a core dump...
if extra_data is not None:
list_data = list_data + extra_data
list_detector = kd.get_list_detector(array, flag=0, typedet=1)
# Read data
kd.read_data(list_data=list_data, list_detector=list_detector, silent=True)
return kd
# Following David Beazley's generic decorator pattern
# https://rednafi.github.io/digressions/python/2020/05/13/python-decorators.html
def kd_or_scan(func=None, array=None, extra_data=None):
"""Decorator to allow functions to be call with a scan number or kd object """
if func is None:
return partial(kd_or_scan, array=array, extra_data=extra_data)
@wraps(func)
def wrapper(scan, *args, **kwargs):
# If scan number given, read the scan into the object and pass it to function
        if isinstance(scan, (int, np.integer)):  # np.int is removed in modern numpy
scan = read_scan(scan, array=array, extra_data=extra_data)
return func(scan, *args, **kwargs)
return wrapper
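# Example usage (scan number 123 is hypothetical); both forms are equivalent,
# the decorator reads the scan on demand:
#   kd, figs = beammap(123)   # scan number -> read first, then plotted
#   kd, figs = beammap(kd)    # an already-read KissData passes straight through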
@kd_or_scan(array=None, extra_data=["I", "Q"])
def beammap(kd: KissData):
"""Display a beammap.
Parameters
----------
kd : `kissdata.KissRawData` or int
the KissRawData object to check or scan number to read
Returns
-------
    kd, (fig_beammap, fig_geometry, fig_coadd)
        the read `kissdata.KissRawData`, plus the beammap, geometry, and coadded-map figures
"""
kd._KissRawData__check_attributes(["R0", "P0", "calfact", "F_sky_Az", "F_sky_El", "A_hours", "A_time_pps"])
# Compute & plot beammap
fig_beammap, (_, _, kidpar) = kd.plot_beammap(coord="pdiff", flatfield=None)
# Apply kidpar to dataset
kd._extended_kidpar = kidpar
# plot geometry
fig_geometry, fwhm = kd.plot_kidpar()
# Plot the combined map
# with default KIDs selection
kid_mask = kd._kids_selection()
ikid = np.where(kid_mask)[0]
if len(ikid) > 5:
fig_coadd, _ = kd.plot_contmap(coord="pdiff", ikid=ikid, cdelt=0.05)
else:
fig_coadd = None
return kd, (fig_beammap, fig_geometry, fig_coadd)
@kd_or_scan(array=None, extra_data=["I", "Q"])
def contmap(kd, e_kidpar="e_kidpar_median.fits", cm_func="kidsdata.common_mode.pca_filtering", **kwargs):
"""Display a continuum map.
Parameters
----------
kd : `kissdata.KissRawData` or int
the KissRawData object to check or scan number to read
Returns
-------
    kd, fig
        the read `kissdata.KissRawData` and the continuum-map figure
"""
kd._KissRawData__check_attributes(
["R0", "P0", "calfact", "mask_tel", "F_sky_Az", "F_sky_El", "A_hours", "A_time_pps"]
)
kd._extended_kidpar = Table.read(Path(CALIB_DIR) / e_kidpar)
# kids selection
kid_mask = kd._kids_selection(std_dev=0.3)
ikid_KA = np.where(kid_mask & np.char.startswith(kd.list_detector, "KA"))[0]
ikid_KB = np.where(kid_mask & np.char.startswith(kd.list_detector, "KB"))[0]
ikid_KAB = np.concatenate([ikid_KA, ikid_KB])
# Compute & plot continuum map
fig, _ = kd.plot_contmap(
ikid=[ikid_KA, ikid_KB, ikid_KAB], coord="pdiff", flatfield="amplitude", cm_func=cm_func, **kwargs
)
return kd, fig
def contmap_coadd(scans, e_kidpar="e_kidpar_median.fits", cm_func="kidsdata.common_mode.pca_filtering", **kwargs):
"""Continuum coaddition of several scans.
Parameters
----------
scans : list of int
the list of scans to be coadd
e_kidpar: str
the extended kidpar filename to be used
cm_func : str
the continuum pipeline function to be used...
**kwargs:
        ... and its additional keywords
Returns
-------
fake_kd : namedtuple
a fake KissRawData to be used with kids_plot.show_contmap
fig : matplotlib.figure
the displayed figure
combined_map, combined_weights : astropy.io.fits.ImageHDU
the combined data and weights
Notes
-----
the resulting arguments can be displayed with show_contmap
>>> from kidsdata.kids_plots import show_contmap
>>> show_contmap(fake_kd, [combined_map], [combined_weights], None)
"""
# Define a common wcs/shape
cdelt = kwargs.get("cdelt", 0.01)
wcs = WCS(naxis=2)
wcs.wcs.ctype = ("OLON-SFL", "OLAT-SFL")
wcs.wcs.cdelt = (cdelt, cdelt)
wcs.wcs.cunit = ["deg", "deg"]
wcs.wcs.crpix = (100, 100)
shape = (200, 200)
results = []
for scan in scans:
kd = read_scan(scan, extra_data=["I", "Q"])
kd._KissRawData__check_attributes(["R0", "P0", "calfact", "F_sky_Az", "F_sky_El", "A_hours", "A_time_pps"])
kd._extended_kidpar = Table.read(Path(CALIB_DIR) / e_kidpar)
# kids selection
kid_mask = kd._kids_selection(std_dev=0.3)
ikid_KA = np.where(kid_mask & np.char.startswith(kd.list_detector, "KA"))[0]
ikid_KB = np.where(kid_mask & np.char.startswith(kd.list_detector, "KB"))[0]
        ikid_KAB = np.concatenate([ikid_KA, ikid_KB])  # api: numpy.concatenate
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 60
"""
Particles orbiting around a central point (with traces)
"""
import numpy as np
from vispy import gloo
from vispy import app
from vispy.gloo import gl
from vispy.util.transforms import perspective, translate, rotate
n, p = 250, 50
T = np.random.uniform(0, 2 * np.pi, n)
dT = np.random.uniform(50, 100, n) / 3000
position = np.zeros((n, 2), dtype=np.float32)
position[:, 0] = np.cos(T)
position[:, 1] = np.sin(T)
rot = np.random.uniform(0, 2 * np.pi, (n, 4)).astype(np.float32)
color = np.ones((n, 4), dtype=np.float32) * (1, 1, 1, 1)
u_size = 6
data = np.zeros(n * p, [('a_position', np.float32, 2),
('a_color', np.float32, 4),
('a_rot', np.float32, 4)])
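# Each of the n particles owns p consecutive vertices (its trace), so the
# per-particle attributes are repeated p times below.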
data['a_position'] = np.repeat(position, p, axis=0)
data['a_color'] = np.repeat(color, p, axis=0)  # api: numpy.repeat
import numpy as np
from numpy.linalg import cholesky
import matplotlib.pyplot as plt
# Make dataset
# ground truth label: 0 or 1
# predict probs: (0, 1)
# logistic loss
def gen_sample_data():
sampleNo = 1000
mu = np.array([[1, 5]])
    sigma = np.array([[2, 0], [0, 3]])  # api: numpy.array
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import process_tc as pt
def get_quantiles(values):
stats = {}
stats["mean"], stats["median"] = | np.mean(values) | numpy.mean |
import numpy as np
import pytest
from ansys import dpf
from ansys.dpf.core import TimeFreqSupport, Model
from ansys.dpf.core import examples
from ansys.dpf.core import fields_factory
from ansys.dpf.core.common import locations
from ansys.dpf.core.check_version import meets_version, get_server_version
SERVER_VERSION_HIGHER_THAN_3_0 = meets_version(get_server_version(dpf.core._global_server()), "3.0")
@pytest.fixture()
def vel_acc_model(velocity_acceleration):
return dpf.core.Model(velocity_acceleration)
def test_get_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
assert res.n_sets == 5
assert res.get_frequency(0, 0) == 0.02
assert res.get_frequency(0, 1) == 0.04
assert res.get_frequency(cumulative_index=2) == 0.06
assert res.get_cumulative_index(0, 0) == 0
assert res.get_cumulative_index(freq=0.06) == 2
def test_model_time_freq_support(vel_acc_model):
timefreq = vel_acc_model.metadata.time_freq_support
assert str(timefreq.n_sets) in str(timefreq)
assert len(timefreq.time_frequencies.data) == timefreq.n_sets
expected_data = [0.02, 0.04, 0.06, 0.08, 0.1]
assert np.allclose(expected_data, timefreq.time_frequencies.data)
def test_get_frequencies_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
freq = res.time_frequencies
assert np.allclose(freq.data, [0.02, 0.04, 0.06, 0.08, 0.1])
assert freq.scoping.ids == [1]
def test_print_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
assert "Number of sets: 5" in str(res)
assert "Time (s)" in str(res)
assert "LoadStep" in str(res)
assert "Substep" in str(res)
def test_delete_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
res.__del__()
with pytest.raises(Exception):
        res.get_frequency(0, 0)
def test_delete_auto_timefreqsupport(simple_rst):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(simple_rst)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
res1 = dpf.core.TimeFreqSupport(res._message)
res.__del__()
with pytest.raises(Exception):
res1.n_sets
def test_create_time_freq_support():
tfq = TimeFreqSupport()
assert tfq is not None
def test_update_time_freq_support_real_freq():
tfq = TimeFreqSupport()
frequencies = fields_factory.create_scalar_field(3)
frequencies.data = [0.1, 0.32, 0.4]
tfq.time_frequencies = frequencies
frequencies_check = tfq.time_frequencies
assert np.allclose(frequencies.data, frequencies_check.data)
assert tfq.rpms is None
assert tfq.complex_frequencies is None
def test_update_time_freq_support_im_freq():
tfq = TimeFreqSupport()
frequencies = fields_factory.create_scalar_field(3)
frequencies.data = [0.1, 0.32, 0.4]
tfq.complex_frequencies = frequencies
frequencies_check = tfq.complex_frequencies
assert np.allclose(frequencies.data, frequencies_check.data)
assert tfq.rpms is None
assert tfq.time_frequencies is None
def test_update_time_freq_support_rpms():
tfq = TimeFreqSupport()
rpm = fields_factory.create_scalar_field(3)
rpm.data = [0.1, 0.32, 0.4]
tfq.rpms = rpm
rpm_check = tfq.rpms
assert np.allclose(rpm.data, rpm_check.data)
assert tfq.time_frequencies is None
assert tfq.complex_frequencies is None
def test_update_time_freq_support_harmonic_indices():
tfq = TimeFreqSupport()
harm = fields_factory.create_scalar_field(3)
harm.data = [0.1, 0.32, 0.4]
tfq.set_harmonic_indices(harm)
harm_check = tfq.get_harmonic_indices()
assert np.allclose(harm.data, harm_check.data)
assert tfq.time_frequencies is None
assert tfq.complex_frequencies is None
assert tfq.rpms is None
def test_update_time_freq_support_harmonic_indices_with_num_stage():
tfq = TimeFreqSupport()
harm = fields_factory.create_scalar_field(3)
harm.data = [0.12, 0.32, 0.8]
tfq.set_harmonic_indices(harm, 2)
harm_check = tfq.get_harmonic_indices(2)
assert np.allclose(harm.data, harm_check.data)
assert tfq.time_frequencies is None
assert tfq.complex_frequencies is None
assert tfq.rpms is None
harm_check_2 = tfq.get_harmonic_indices(3)
assert harm_check_2 is None
harm_check_3 = tfq.get_harmonic_indices(0)
assert harm_check_3 is None
harm_check_4 = tfq.get_harmonic_indices()
assert harm_check_4 is None
def test_update_time_freq_support_real_freq_with_ds(velocity_acceleration):
model = Model(velocity_acceleration)
disp = model.results.displacement()
tfq = disp.outputs.fields_container().time_freq_support
assert tfq.time_frequencies is not None
frequencies = fields_factory.create_scalar_field(3)
frequencies.data = [0.1, 0.32, 0.4]
tfq.time_frequencies = frequencies
frequencies_check = tfq.time_frequencies
assert np.allclose(frequencies.data, frequencies_check.data)
def test_append_step_1():
tfq = TimeFreqSupport()
frequencies = [0.1, 0.21, 1.0]
tfq.append_step(1, frequencies, rpm_value=2.0)
assert len(tfq.rpms.data) == 1
assert len(tfq.time_frequencies.data) == 3
assert tfq.rpms.location == locations.time_freq_step
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose(frequencies, tfq.time_frequencies.data)
assert np.allclose(2.0, tfq.rpms.data)
assert tfq.complex_frequencies is None
assert tfq.get_harmonic_indices() is None
frequencies2 = [1.1, 2.0]
tfq.append_step(1, frequencies2, rpm_value=2.0)
assert len(tfq.rpms.data) == 2
assert len(tfq.time_frequencies.data) == 5
assert tfq.rpms.location == locations.time_freq_step
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose(frequencies + frequencies2, tfq.time_frequencies.data)
assert np.allclose(2.0, tfq.rpms.data)
assert tfq.complex_frequencies is None
assert tfq.get_harmonic_indices() is None
def test_append_step_2():
tfq = TimeFreqSupport()
tfq.append_step(
1, [0.1, 0.21, 1.0], rpm_value=2.0, step_harmonic_indices=[1.0, 2.0, 3.0]
)
tfq.append_step(2, [1.1, 2.0], rpm_value=2.3, step_harmonic_indices=[1.0, 2.0])
tfq.append_step(3, [0.23, 0.25], rpm_value=3.0, step_harmonic_indices=[1.0, 2.0])
assert len(tfq.rpms.data) == 3
assert len(tfq.time_frequencies.data) == 7
assert len(tfq.get_harmonic_indices().data) == 7
assert tfq.rpms.location == locations.time_freq_step
assert tfq.get_harmonic_indices().location == locations.time_freq
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose(
[0.1, 0.21, 1.0, 1.1, 2.0, 0.23, 0.25], tfq.time_frequencies.data
)
assert np.allclose([2.0, 2.3, 3.0], tfq.rpms.data)
assert tfq.complex_frequencies is None
def test_append_step_3():
tfq = TimeFreqSupport()
tfq.append_step(
1,
[0.1, 0.21],
rpm_value=2.0,
step_harmonic_indices={1: [1.0, 2.0], 2: [3.0, 3.1]},
)
assert len(tfq.rpms.data) == 1
assert len(tfq.time_frequencies.data) == 2
assert len(tfq.get_harmonic_indices(1).data) == 2
assert len(tfq.get_harmonic_indices(2).data) == 2
assert tfq.get_harmonic_indices() is None
assert tfq.rpms.location == locations.time_freq_step
assert tfq.get_harmonic_indices(1).location == locations.time_freq
assert tfq.get_harmonic_indices(2).location == locations.time_freq
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose([1.0, 2.0], tfq.get_harmonic_indices(1).data)
assert np.allclose([3.0, 3.1], tfq.get_harmonic_indices(2).data)
assert tfq.complex_frequencies is None
def test_deep_copy_time_freq_support(velocity_acceleration):
model = Model(velocity_acceleration)
tf = model.metadata.time_freq_support
copy = tf.deep_copy()
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
def test_deep_copy_time_freq_support_harmonic():
model = Model(examples.download_multi_harmonic_result())
tf = model.metadata.time_freq_support
copy = tf.deep_copy()
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
assert tf.time_frequencies.unit == copy.time_frequencies.unit
    assert np.allclose(tf.complex_frequencies.data, copy.complex_frequencies.data)  # api: numpy.allclose
# !usr/bin/env python
# coding:utf-8
"""
Train linear models
author: prucehuang
email: <EMAIL>
date: 2018/12/27
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge, Lasso, LogisticRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
X = 2 * np.random.rand(100, 1)
X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance
m = len(X_b)
y = 1 + 2 * X + np.random.randn(100, 1)
# Solve with the normal equation
def normal_equation():
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([0, 2, 0, 15])
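    # Normal equation: theta_hat = (X^T X)^{-1} X^T y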
    # Method 1: numpy
    theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)  # the formula above
print(theta_best) # [[3.95336514] [3.07080187]]
    # Method 2: sklearn
lin_reg = LinearRegression()
lin_reg.fit(X, y)
print(lin_reg.intercept_, lin_reg.coef_) # [3.95336514] [[3.07080187]]
    # Predict two points and draw the segment connecting them
y_predict = X_new_b.dot(theta_best)
print(y_predict)
plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")
plt.legend(loc="upper left", fontsize=14)
plt.show()
# Batch gradient descent implementation
def plot_batch_gradient_descent(theta, eta, n_iterations=50, theta_path=None):
plt.plot(X, y, "b.")
for iteration in range(n_iterations):
        if iteration < 10: # only plot the curves of the first few iterations
y_predict = X_new_b.dot(theta)
style = "b-" if iteration > 0 else "r--"
plt.plot(X_new, y_predict, style)
gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
theta = theta - eta * gradients
if theta_path is not None:
theta_path.append(theta)
plt.axis([0, 2, 0, 15])
plt.title(r"BGD $\eta = {}$".format(eta), fontsize=12)
def learning_schedule(t):
t0, t1 = 100, 2000
return t0 / (t + t1)
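# A decaying step size of this t0/(t + t1) form is what lets SGD converge;
# the plotting helpers below inline their own variants of this schedule.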
# Stochastic gradient descent implementation; plot the first ten prediction curves of round one
def plot_stochastic_gradient_descent(theta, eta_init=0.1, n_iterations=50, theta_path=None):
plt.plot(X, y, "b.")
for iteration in range(n_iterations):
for i in range(m):
if i==0:
y_predict = X_new_b.dot(theta)
style = "b-" if iteration>0 else "r--"
plt.plot(X_new, y_predict, style)
random_index = np.random.randint(m)
xi = X_b[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
            eta = eta_init*m/(iteration * m + i + m) # decaying learning rate; eta == eta_init at the start of round one
theta = theta - eta * gradients
if theta_path is not None:
theta_path.append(theta)
plt.axis([0, 2, 0, 15])
plt.title(r"SGD $\eta = {}$".format(eta_init), fontsize=12)
# Mini-batch gradient descent
def plot_mine_batch_gradient_descent(theta, eta_init, n_iterations=80, minibatch_size=20, theta_path=None):
plt.plot(X, y, "b.")
t = 0
for iteration in range(n_iterations):
shuffled_indices = np.random.permutation(m)
X_b_shuffled = X_b[shuffled_indices]
y_shuffled = y[shuffled_indices]
for i in range(0, m, minibatch_size):
if i==0:
y_predict = X_new_b.dot(theta)
style = "b-" if iteration > 0 else "r--"
plt.plot(X_new, y_predict, style)
xi = X_b_shuffled[i:i + minibatch_size]
yi = y_shuffled[i:i + minibatch_size]
gradients = 2 / minibatch_size * xi.T.dot(xi.dot(theta) - yi)
            eta = 100 * eta_init / (t + 1000) # decaying learning rate schedule
t += 1
theta = theta - eta * gradients
if theta_path is not None:
theta_path.append(theta)
plt.xlabel("$x_1$", fontsize=18)
plt.axis([0, 2, 0, 15])
plt.title(r"MBGD $\eta = {}$".format(eta_init), fontsize=12)
# Compare and analyze the three gradient descent variants
def gradient_descent():
plt.figure(figsize=(14, 12))
theta = np.random.randn(2, 1)
    # Plot how different learning rates affect training
    # Batch Gradient Descent
theta_path_bgd = []
theta_bgd = theta.copy()
plt.subplot(331)
plot_batch_gradient_descent(theta_bgd, eta=0.02)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(332)
plot_batch_gradient_descent(theta_bgd, eta=0.1, theta_path=theta_path_bgd)
plt.subplot(333)
plot_batch_gradient_descent(theta_bgd, eta=0.5)
    # Stochastic Gradient Descent
theta_path_sgd = []
theta_sgd = theta.copy()
plt.subplot(334)
plot_stochastic_gradient_descent(theta_sgd, eta_init=0.02)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(335)
plot_stochastic_gradient_descent(theta_sgd, eta_init=0.1, theta_path=theta_path_sgd)
plt.subplot(336)
plot_stochastic_gradient_descent(theta_sgd, eta_init=0.5)
    # Solve SGD directly with sklearn's SGDRegressor
# sgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=0.1, random_state=42, tol=1e-3)
# sgd_reg.fit(X, y.ravel())
# print(sgd_reg.intercept_, sgd_reg.coef_)
    # Mini-batch Gradient Descent
theta_path_mbgd = []
theta_mbgd = theta.copy()
plt.subplot(337)
plot_mine_batch_gradient_descent(theta_mbgd, eta_init=0.02)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(338)
plot_mine_batch_gradient_descent(theta_mbgd, eta_init=0.1, theta_path=theta_path_mbgd)
plt.subplot(339)
plot_mine_batch_gradient_descent(theta_mbgd, eta_init=0.5)
    # Plot the path of the theta updates
theta_path_bgd = np.array(theta_path_bgd)
theta_path_sgd = np.array(theta_path_sgd)
theta_path_mbgd = np.array(theta_path_mbgd)
plt.figure(figsize=(7, 4))
plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], "b-o", linewidth=3, label="Batch")
plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], "r-s", linewidth=1, label="Stochastic")
plt.plot(theta_path_mbgd[:, 0], theta_path_mbgd[:, 1], "g-+", linewidth=2, label="Mini-batch")
plt.legend(loc="upper left", fontsize=16)
plt.xlabel(r"$\theta_0$", fontsize=20)
plt.ylabel(r"$\theta_1$ ", fontsize=20, rotation=0)
plt.axis([2.5, 4.5, 2.3, 3.9])
plt.show()
# Polynomial regression: use a Pipeline to compare models of different complexity
def polynomial_regression():
m = 100
X = 6 * np.random.rand(m, 1) - 3
X_new = np.linspace(-3, 3, 100).reshape(100, 1)
y = 0.5 * X ** 2 + X + 2 + np.random.randn(m, 1)
for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)):
polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
std_scaler = StandardScaler()
lin_reg = LinearRegression()
polynomial_regression = Pipeline([
("poly_features", polybig_features), # x0, a, a**2
("std_scaler", std_scaler),
("lin_reg", lin_reg),
])
polynomial_regression.fit(X, y)
y_newbig = polynomial_regression.predict(X_new)
plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)
plt.plot(X, y, "b.", linewidth=3)
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([-3, 3, 0, 10])
plt.show()
# Plot training/validation error as a function of training-set size
def plot_learning_curves(model, X, y):
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
train_errors, val_errors = [], []
for m in range(1, len(X_train)):
model.fit(X_train[:m], y_train[:m])
y_train_predict = model.predict(X_train[:m])
y_val_predict = model.predict(X_val)
train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
val_errors.append(mean_squared_error(y_val, y_val_predict))
plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
plt.legend(loc="upper right", fontsize=14) # not shown in the book
plt.xlabel("Training set size", fontsize=14) # not shown
plt.ylabel("RMSE", fontsize=14) # not shown
# Diagnose the model's fit (under/overfitting) from its learning curves
def learning_curves():
plt.figure(figsize=(10, 4))
plt.subplot(121)
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)
plt.axis([0, 80, 0, 3])
plt.title(r"underfitting_learning_curves_plot", fontsize=12)
plt.subplot(122)
std_scaler = StandardScaler()
polynomial_regression = Pipeline([
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("std_scaler", std_scaler),
("lin_reg", LinearRegression()),
])
plot_learning_curves(polynomial_regression, X, y)
plt.axis([0, 80, 0, 3])
plt.title(r"overfitting_learning_curves_plot", fontsize=12)
plt.show()
# Plot model prediction curves to compare the effect of adding a regularization term
def plot_model(X, y, X_new, model_class, polynomial, alphas, **model_kargs):
for alpha, style in zip(alphas, ("b-", "g--", "r:")):
model = model_class(alpha, **model_kargs) if alpha > 0 else LinearRegression()
if polynomial:
model = Pipeline([
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("std_scaler", StandardScaler()),
("regul_reg", model),
])
model.fit(X, y)
y_new_regul = model.predict(X_new)
lw = 3 if alpha > 0 else 2
plt.plot(X_new, y_new_regul, style, linewidth=lw, label=r"$\alpha = {}$".format(alpha))
plt.plot(X, y, "b.", linewidth=3)
plt.legend(loc="upper left", fontsize=15)
plt.axis([0, 3, 0, 4])
# Compare the effect of regularization terms on the model
def regularized_models():
m = 50
X = 3 * np.random.rand(m, 1)
y = 1 + 0.5 * X + np.random.randn(m, 1) / 1.5
X_new = np.linspace(0, 3, 100).reshape(100, 1)
plt.figure(figsize=(16, 8))
# Ridge Regularized
plt.subplot(221)
plot_model(X, y, X_new, Ridge, polynomial=False, alphas=(0, 10, 100), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.title('Ridge')
plt.subplot(222)
plot_model(X, y, X_new, Ridge, polynomial=True, alphas=(0, 10**-5, 1), random_state=42)
plt.title('Ridge Polynomial')
# Lasso Regularized
plt.subplot(223)
plot_model(X, y, X_new, Lasso, polynomial=False, alphas=(0, 0.1, 1), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.xlabel("$x_1$", fontsize=18)
plt.title('Lasso')
plt.subplot(224)
plot_model(X, y, X_new, Lasso, polynomial=True, alphas=(0, 10**-5, 1), tol=1, random_state=42)
plt.title('Lasso Polynomial')
plt.xlabel("$x_1$", fontsize=18)
plt.show()
'''
# L2 penalty implementations
# Method 1
ridge_reg = Ridge(alpha=1, solver="cholesky", random_state=42)
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
# Method 2
sgd_reg = SGDRegressor(max_iter=5, penalty="l2", random_state=42)
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])
# Method 3
ridge_reg = Ridge(alpha=1, solver="sag", random_state=42)
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
# L1 penalty implementation
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
# Elastic net penalty implementation
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5, random_state=42)
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
'''
# Early stopping: cap the number of epochs and keep the model at the point where validation error has stopped decreasing and is about to rise (avoids overfitting from further training)
def early_stopping():
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 2 + X + 0.5 * X**2 + np.random.randn(m, 1)
X_train, X_val, y_train, y_val = train_test_split(X[:50], y[:50].ravel(), test_size=0.5, random_state=10)
poly_scaler = Pipeline([
("poly_features", PolynomialFeatures(degree=90, include_bias=False)),
("std_scaler", StandardScaler()),
])
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
X_val_poly_scaled = poly_scaler.transform(X_val)
    sgd_reg = SGDRegressor(max_iter=2, # controls how many passes each fit() call makes; combined with the outer epoch loop this lets us plot the epoch-RMSE curve
tol=1e-3,
penalty=None,
eta0=0.0005,
warm_start=True,
learning_rate="constant",
random_state=42)
n_epochs = 500
train_errors, val_errors = [], []
    # Train for n_epochs, recording training and validation errors
for epoch in range(n_epochs):
sgd_reg.fit(X_train_poly_scaled, y_train)
y_train_predict = sgd_reg.predict(X_train_poly_scaled)
y_val_predict = sgd_reg.predict(X_val_poly_scaled)
train_errors.append(mean_squared_error(y_train, y_train_predict))
val_errors.append(mean_squared_error(y_val, y_val_predict))
    best_epoch = np.argmin(val_errors)  # index of the minimum value
best_val_rmse = np.sqrt(val_errors[int(best_epoch)])
plt.annotate('Best model',
xy=(best_epoch, best_val_rmse),
xytext=(best_epoch, best_val_rmse + 1),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.05),
fontsize=16,
)
best_val_rmse -= 0.03 # just to make the graph look better
plt.plot([0, n_epochs], [best_val_rmse, best_val_rmse], "k:", linewidth=2)
plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="Validation set")
    plt.plot(np.sqrt(train_errors)  # api: numpy.sqrt
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""Contains tests of the functions in summa.py.
"""
import functools
import jax
import jax.numpy as jnp
from jax import lax
import numpy as np
import pytest
from distla_core.utils import pops
from distla_core.blas.summa import summa
DTYPE = jnp.float32
AXIS_NAME = pops.AXIS_NAME
NROW = pops.NROWS
NCOL = pops.NCOLS
matrix_shapes = [(16, 16), (32, 16), (16, 32), (128, 128)]
p_szs = [3, 4, 8, 16]
precisions = [lax.Precision.DEFAULT, lax.Precision.HIGH, lax.Precision.HIGHEST]
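# The test-name suffixes encode the transpose pattern passed to summa
# (TT: both A and B transposed, which the test below expects to raise
# NotImplementedError; TN: A transposed only, by the usual BLAS convention).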
@pytest.mark.parametrize("matrix_shape", matrix_shapes)
@pytest.mark.parametrize("p_sz", p_szs)
@pytest.mark.parametrize("precision", precisions)
def test_summa_TT(matrix_shape, p_sz, precision):
np.random.seed(10)
A = np.random.randn(*matrix_shape).astype(DTYPE)
B = np.random.randn(*matrix_shape).astype(DTYPE)
Ap = pops.distribute(A)
Bp = pops.distribute(B)
summa_f = functools.partial(
summa.summa,
p_sz=p_sz,
transpose_A=True,
transpose_B=True,
precision=precision)
with pytest.raises(NotImplementedError):
_ = jax.pmap(summa_f, axis_name=AXIS_NAME)(Ap, Bp)
@pytest.mark.parametrize("matrix_shape", matrix_shapes)
@pytest.mark.parametrize("p_sz", p_szs)
@pytest.mark.parametrize("precision", precisions)
def test_summa_TN(matrix_shape, p_sz, precision):
    np.random.seed(10)  # api: numpy.random.seed
# note: documentation not written yet
import time
import numba
import numpy as np
from scipy import optimize
from scipy import interpolate
from types import SimpleNamespace
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
class AiyagariModel:
############
# 1. setup #
############
def __init__(self, name="", **kwargs):
self.name = name
self.setup_parameters()
self.update_parameters(kwargs)
self.setup_primitive_functions()
self.setup_misc()
def setup_parameters(self):
# a. model parameters
self.beta = 0.96 # discount factor
self.delta = 0.08 # depreciation rate
self.sigma = 4 # crra coefficient
self.alpha = 1 / 3 # cobb-douglas coeffient
# b. solution
self.tol_cfunc_inf = 1e-6 # tolerance for consumption function
        self.cfunc_maxiter = 2000  # maximum number of iterations when finding consumption function
# income
self.unemp_p = 0.05 # unemployment probability
self.unemp_b = 0.15 # unemployment benefits
self.Nz = 2 # number of productivity states
self.grid_z = np.array([0.90, 1.10]) # productivity values
self.trans_p_z = np.array(
[[0.95, 0.05], [0.05, 0.95]]
) # transition probabilities
# end-of-period assets grid
self.Na = 200
self.a_min = 0
self.a_max = 20
self.a_phi = 1.1
# cash-on-hand grid
self.Nm = 500
self.m_max = 20
self.m_phi = 1.1
# c. simulation
self.seed = 2018
# d. steady state
self.ss_R_tol = 1e-7 # tolerance for finding interest rate
self.ss_a0 = 4.0 # initial cash-on-hand (homogenous)
self.ss_simN = 50000 # number of households
self.ss_simT = 2000 # number of time-periods
self.ss_sim_burnin = 1000 # burn-in periods before calculating average savings
# e. transition path
self.transN = 50000 # number of households
self.transT = 200 # number of periods
self.trans_maxiter = 200
self.trans_tol = 1e-4 # tolerance for convergence
def update_parameters(self, kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def setup_primitive_functions(self):
eps = 1e-8
# a. utility function
if self.sigma == 1:
self.u = lambda c: np.log(np.fmax(c, eps))
else:
self.u = lambda c: np.fmax(c, eps) ** (1 - self.sigma) / (1 - self.sigma)
self.u_prime = lambda c: np.fmax(c, eps) ** (-self.sigma)
self.u_prime_inv = lambda x: x ** (-1 / self.sigma)
# b. production function
self.f = lambda k: np.fmax(k, eps) ** self.alpha
self.f_prime = lambda k: self.alpha * np.fmax(k, eps) ** (self.alpha - 1)
self.f_prime_inv = lambda x: (np.fmax(x, eps) / self.alpha) ** (
1 / (self.alpha - 1)
)
def setup_misc(self):
        def nonlinspace(min_val, max_val, num, phi):  # higher phi puts more points close to min_val
x = np.zeros(num)
x[0] = min_val
for i in range(1, num):
x[i] = x[i - 1] + (max_val - x[i - 1]) / (num - i) ** phi
return x
# a. grids
self.grid_a = nonlinspace(self.a_min, self.a_max, self.Na, self.a_phi)
self.grid_m = nonlinspace(0, self.m_max, self.Nm, self.m_phi)
# b. initial distribution of z
        z_diag = np.diag(np.linalg.matrix_power(self.trans_p_z, 1000))  # matrix power (not elementwise **) gives the long-run transition matrix
self.ini_p_z = z_diag / np.sum(z_diag)
avg_z = np.sum(self.grid_z * self.ini_p_z)
self.grid_z = self.grid_z / avg_z # force mean one
        # c. bounds on interest factor
self.R_high = 1 / self.beta + 0.005
self.R_low = 1 / self.beta - 0.005
# d. misc
self.c_transition_path = np.empty((1, 1, 1)) # raw allocate
######################
# 2. model functions #
######################
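    # Prices implied by aggregate capital k under Cobb-Douglas production with
    # depreciation: R = 1 + f'(k) - delta and w = f(k) - f'(k) * k.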
def R_func(self, k):
return 1 + self.f_prime(k) - self.delta
def w_func(self, k):
return self.f(k) - self.f_prime(k) * k
def w_from_R_func(self, R):
k = self.f_prime_inv(R - 1 + self.delta)
return self.w_func(k)
############
# 3. solve #
############
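    # solve_step performs one backward step of the endogenous-grid method:
    # average next-period marginal utility over income/unemployment states,
    # invert u' via the Euler equation to get consumption on the asset grid,
    # then re-interpolate the policy onto the common cash-on-hand grid.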
def solve_step(self, c_plus_interp, R, w):
c_func = []
for i_z in range(self.Nz):
# a. find next-period average marginal utility
avg_marg_u_plus = np.zeros(self.Na)
for i_zplus in range(self.Nz):
for u in [0, 1]:
# i. future cash-on-hand
if u == 0:
m_plus = R * self.grid_a + w * (
self.grid_z[i_zplus] - self.unemp_p * self.unemp_b
) / (1 - self.unemp_p)
else:
m_plus = R * self.grid_a + w * self.unemp_b
# ii. future consumption
c_plus = c_plus_interp[i_zplus](m_plus)
# iii. future marginal utility
marg_u_plus = self.u_prime(c_plus)
# iv. accumulate average marginal utility
weight = self.trans_p_z[i_z, i_zplus]
if u == 0:
weight *= 1 - self.unemp_p
else:
weight *= self.unemp_p
avg_marg_u_plus += weight * marg_u_plus
# b. find current consumption and cash-on-hand
c = self.u_prime_inv(R * self.beta * avg_marg_u_plus)
m = self.grid_a + c
m = np.insert(m, 0, 0) # add 0 in beginning
c = np.insert(c, 0, 0) # add 0 in beginning
# c. interpolate to common grid
c_raw_func = interpolate.RegularGridInterpolator(
[m], c, method="linear", bounds_error=False, fill_value=None
)
# d. construct interpolator at common grid
c_func_now = interpolate.RegularGridInterpolator(
[self.grid_m],
c_raw_func(self.grid_m),
method="linear",
bounds_error=False,
fill_value=None,
)
c_func.append(c_func_now)
return c_func
def solve_inf_horizon(self):
# a. initial guess (consume everything)
c_func_inf = []
for i_z in range(self.Nz):
# i. consume everything
m = self.grid_m
c = m
# ii. create linear interpolator
interp = interpolate.RegularGridInterpolator(
[m], c, method="linear", bounds_error=False, fill_value=None
)
# iii. append
c_func_inf.append(interp)
# b. solve household problem
diff_cfunc = np.inf
it = 0
while diff_cfunc > self.tol_cfunc_inf:
it += 1
# i. remember previous
c_func_inf_old = c_func_inf
# ii. solve one step further
c_func_inf = self.solve_step(c_func_inf_old, self.R_ss, self.w_ss)
# iii. maximum absolute difference
diff_cfunc = []
for i_z in range(self.Nz):
diff_cfunc.append(
np.amax(np.abs(c_func_inf_old[i_z].values - c_func_inf[i_z].values))
)
diff_cfunc = max(diff_cfunc)
# iv. do not reach 2000 iterations
if it > self.cfunc_maxiter:
break
# c. save interpolators
self.c_func_inf = c_func_inf
# d. save values
self.c_inf = np.empty((self.Nz, self.Nm))
for z in range(self.Nz):
self.c_inf[z, :] = c_func_inf[z].values
def solve_transition_path(self):
# a. allocate memory
self.c_func_transition_path = [None] * self.transT
self.c_transition_path = np.empty((self.transT, self.Nz, self.Nm))
# b. solve backwards along transition path
for t in reversed(range(self.transT)):
# i. solve
if t == self.transT - 1:
c_plus_func = self.c_func_inf
self.c_func_transition_path[t] = self.solve_step(
c_plus_func, self.R_ss, self.w_ss
)
else:
c_plus_func = self.c_func_transition_path[t + 1]
self.c_func_transition_path[t] = self.solve_step(
c_plus_func, self.sim_R[t + 1], self.sim_w[t + 1]
)
# ii. save values
for z in range(self.Nz):
self.c_transition_path[t, z, :] = self.c_func_transition_path[t][
z
].values
#############################
# 4. stationary equilibrium #
#############################
def check_supply_and_demand(self, R_ss_guess, a0, z0, print_results=False):
# a. prices
self.R_ss = R_ss_guess
self.w_ss = self.w_from_R_func(self.R_ss)
# b. solve infinite horizon problem
t0 = time.time()
self.solve_inf_horizon()
time_sol = time.time() - t0
# c. simulate
t0 = time.time()
# prices
self.ss_sim_R = self.R_ss * np.ones(self.ss_simT)
self.ss_sim_w = self.w_ss * np.ones(self.ss_simT)
# simulate
self.ss_sim_k, self.ss_sim_a, self.ss_sim_z = simulate(
a0,
z0,
self.ss_sim_R,
self.ss_sim_w,
self.ss_simN,
self.ss_simT,
self.grid_z,
self.grid_m,
self.c_inf,
self.trans_p_z,
self.unemp_p,
self.unemp_b,
self.c_transition_path,
0,
self.seed,
)
time_sim = time.time() - t0
# d. calculate difference
self.k_ss = np.mean(self.ss_sim_k[self.ss_sim_burnin :])
R_ss_implied = self.R_func(self.k_ss)
diff = R_ss_implied - R_ss_guess
# e. print results
if print_results:
print(
f" guess on R = {R_ss_guess:.5f} -> implied R = {R_ss_implied:.5f} (diff = {diff:8.5f})"
)
# print(f' time to solve = {time_sol:.1f}, time to simulate = {time_sim:.1f}')
return diff
def find_stationary_equilibrium(self, print_results=True):
print(f"find stationary equilibrium (R in [{self.R_low:.5f};{self.R_high:.5f}]")
# a. initial values
a0 = self.ss_a0 * np.ones(self.ss_simN)
z0 = np.zeros(self.ss_simN, dtype=np.int32)
z0[np.linspace(0, 1, self.ss_simN) > self.ini_p_z[0]] = 1
# b. find R_ss (first go)
self.R_ss = optimize.bisect(
self.check_supply_and_demand,
self.R_low,
self.R_high,
args=(a0, z0, print_results),
xtol=self.ss_R_tol * 100,
)
self.check_supply_and_demand(self.R_ss, a0, z0)
print(f" update initial distribution")
# b. find R_ss (second go)
a0 = np.copy(self.ss_sim_a)
z0 = np.copy(self.ss_sim_z)
self.R_ss = optimize.bisect(
self.check_supply_and_demand,
self.R_low,
self.R_high,
args=(a0, z0, print_results),
xtol=self.ss_R_tol,
)
self.check_supply_and_demand(self.R_ss, a0, z0)
print(f"steady state R = {self.R_ss:.5f} with k = {self.k_ss:.2f}")
######################
# 5. transition path #
######################
def find_transition_path(self, mu, **kwargs):
print("finding transition path")
# a. guess on interest rate
self.sim_R = np.zeros(self.transT)
for t in range(self.transT):
if t == 0:
self.sim_R[0] = self.R_func(np.mean(self.trans_sim_a0))
elif t < self.transT / 2:
self.sim_R[t] = self.sim_R[t - 1] + mu * (self.R_ss - self.sim_R[t - 1])
else:
self.sim_R[t] = self.R_ss
# b. update guess
count = 0
while True:
# i. implied wage path
self.sim_w = self.w_from_R_func(self.sim_R)
# ii. solve
self.solve_transition_path()
# iii. simulate
self.sim_k, self.sim_a, self.sim_z = simulate(
self.trans_sim_a0,
self.trans_sim_z0,
self.sim_R,
self.sim_w,
self.transN,
self.transT,
self.grid_z,
self.grid_m,
self.c_inf,
self.trans_p_z,
self.unemp_p,
self.unemp_b,
self.c_transition_path,
self.transT,
self.seed,
)
# iv. new R path
R_old = self.sim_R
R_new = self.R_func(self.sim_k)
# v. done or update
max_diff = np.amax(np.abs(R_new - R_old))
            if max_diff < self.trans_tol:  # converged
                break
            if count >= self.trans_maxiter:
                # raise Exception('transition path has not converged')
                break  # give up after trans_maxiter iterations without converging
else: # update
self.sim_R = 0.9 * R_old + 0.1 * R_new
if count == 1 or count % 5 == 0:
print(f"{count:3d}: {max_diff:.8f}")
count += 1
# c. save
self.sim_R = R_new
self.sim_w = self.w_from_R_func(self.sim_R)
@numba.njit
def simulate(
a0,
z0,
sim_R,
sim_w,
simN,
simT,
grid_z,
grid_m,
c_inf,
trans_p_z,
unemp_p,
unemp_b,
c_transition_path,
transT,
seed,
):
np.random.seed(seed)
# 1. allocate
sim_a = np.zeros(simN)
sim_z = np.zeros(simN, np.int32)
sim_k = np.zeros(simT)
# 2. simulate
for t in range(simT):
draw = np.linspace(0, 1, simN)
        np.random.shuffle(draw)
#!/usr/bin/env python
from collections import defaultdict
import numpy
from numpy import array
from cogent3 import make_tree
from cogent3.cluster.UPGMA import (
UPGMA_cluster,
condense_matrix,
condense_node_order,
find_smallest_index,
inputs_from_dict_array,
upgma,
)
from cogent3.core.tree import PhyloNode
from cogent3.util.dict_array import DictArray, DictArrayTemplate, convert2DDict
from cogent3.util.unit_test import TestCase, main
Float = numpy.core.numerictypes.sctype2char(float)
import numpy as np
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
################################################################
N = 1000
################################################################
import matplotlib
matplotlib.rcParams.update({'text.usetex': False, 'font.family': 'stixgeneral', 'mathtext.fontset': 'stix',})
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
################################################################
anatomy_categories = ['Self', u'\u24D8\u2194\u24D9',u'\u24D8\u2192\u24D9', u'\u24D8\u2192\u24de\u2192\u24D9', u'\u24D8\u2190\u24de\u2192\u24D9', 'None']
################################################################
ttot = 1800
twindow, tbin = 0.5*1000, 0.001*1000
timecc = np.arange(-twindow, twindow+tbin, tbin)
##############################################################
def plot_cc(i, j, ax, direction):
    position = i * (1999 - i) // 2 + j  # integer index into the flattened upper-triangular pair list
    cc = ccorr[position]
if direction=='f':ax.plot(timecc, cc, color = 'k')
if direction=='r':ax.plot(timecc, cc[::-1], color = 'k')
###############################################################
for net in ['small', 'random']:
    ccorr = np.load('cc_%s.npy'%net, allow_pickle = True)
z = np.loadtxt('../../simulation_files/network_matrix_%s.dat'%net).astype(int)
rates = np.loadtxt('../%s/rates_and_cv.dat'%(net))[0]
isflat = np.loadtxt('data/Isflat_%s'%net)
anatomy = np.loadtxt('data/anatomy_categories_%s.txt'%net)
################################################################
################################################################
plt.close('all')
fig = plt.figure(figsize = [6,7])
gm = gridspec.GridSpec(185, 200, figure = fig)
ax = plt.subplot(gm[:,:])
axes = [plt.subplot(gm[j*40:j*40+25, i*100:i*100+80]) for j in range(5) for i in range(2)]
tuples = np.where(anatomy==1)
ntuples = len(tuples[0])
n=0
for k in range(100):
indx = np.random.randint(ntuples) #random couple
a,b = tuples[0][indx], tuples[1][indx]
i,j = np.min([a,b]), np.max([a,b])
if (isflat[i, j]==1):
print(n, i, j, isflat[i,j])
if rates[i] > rates[j]: plot_cc(i,j, axes[n], 'f')
else: plot_cc(i,j, axes[n], 'r')
n+=1
if n==10:break
for k, ax in enumerate(axes):
ax.axvline(x = 0, linestyle = '--', linewidth = 0.2, color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticklabels([])
ax.set_xlim(-100, 100)
if k%2==0: ax.set_ylabel('Rate (Hz)', fontsize = 9)
if k in [8, 9]:
ax.set_xlabel('Time (ms)', fontsize = 9)
ax.set_xticklabels([-100, -50, 0, 50, 100])
plt.figtext(0.5, 0.96, net + ' %s'%anatomy_categories[1], fontsize = 24, ha = 'center')
fig.subplots_adjust(left = 0.12, bottom = 0.06, right = 0.98, top = 0.9)
plt.savefig('CC_examples_%s_mutual.png'%net, dpi = 300)
################################################################
#################################################################
plt.close('all')
fig = plt.figure(figsize = [6,7])
gm = gridspec.GridSpec(185, 200, figure = fig)
ax = plt.subplot(gm[:,:])
axes = [plt.subplot(gm[j*40:j*40+25, i*100:i*100+80]) for j in range(5) for i in range(2)]
tuples = np.where(anatomy==2)
ntuples = len(tuples[0])
n=0
for k in range(100):
indx = np.random.randint(ntuples) #random couple
a,b = tuples[0][indx], tuples[1][indx]
i,j = np.min([a,b]), np.max([a,b])
if (isflat[i, j]==1):
print(n, i, j, isflat[i,j])
if z[i,j]==1: plot_cc(i,j, axes[n], 'f')
if z[j,i]==1: plot_cc(i,j, axes[n], 'r')
n+=1
if n==10:break
for k, ax in enumerate(axes):
ax.axvline(x = 0, linestyle = '--', linewidth = 0.2, color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticklabels([])
ax.set_xlim(-100, 100)
if k%2==0: ax.set_ylabel('Rate (Hz)', fontsize = 9)
if k in [8, 9]:
ax.set_xlabel('Time (ms)', fontsize = 9)
ax.set_xticklabels([-100, -50, 0, 50, 100])
plt.figtext(0.5, 0.96, net + ' %s'%anatomy_categories[2], fontsize = 24, ha = 'center')
fig.subplots_adjust(left = 0.12, bottom = 0.06, right = 0.98, top = 0.9)
plt.savefig('CC_examples_%s_unidirectional.png'%net, dpi = 300)
#################################################################
#################################################################
plt.close('all')
fig = plt.figure(figsize = [6,7])
gm = gridspec.GridSpec(185, 200, figure = fig)
ax = plt.subplot(gm[:,:])
axes = [plt.subplot(gm[j*40:j*40+25, i*100:i*100+80]) for j in range(5) for i in range(2)]
tuples = np.where(anatomy==3)
ntuples = len(tuples[0])
n=0
for k in range(300):
indx = np.random.randint(ntuples) #random couple
a,b = tuples[0][indx], tuples[1][indx]
i,j = np.min([a,b]), np.max([a,b])
if (isflat[i, j]==1):
print(n, i, j, isflat[i,j])
if np.sum(z[i,:]*z[:,j])>=1:
axes[n].text(0.1, 1.00, '#(i>o>j) = %d'%np.sum(z[i,:]*z[:,j]), transform=axes[n].transAxes)
if np.sum(z[j,:]*z[:,i])>=1:
axes[n].text(0.6, 1.00, '#(j>o>i) = %d'%np.sum(z[j,:]*z[:,i]), transform=axes[n].transAxes)
if np.sum(z[:,j]*z[:,i])>=1:
axes[n].text(0.35, 1.24, '#(j<o>i) = %d'%np.sum(z[:,j]*z[:,i]), transform=axes[n].transAxes)
plot_cc(i,j, axes[n], 'f')
n+=1
if n==10:break
for k, ax in enumerate(axes):
ax.axvline(x = 0, linestyle = '--', linewidth = 0.2, color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticklabels([])
ax.set_xlim(-100, 100)
if k%2==0: ax.set_ylabel('Rate (Hz)', fontsize = 9)
if k in [8, 9]:
ax.set_xlabel('Time (ms)', fontsize = 9)
ax.set_xticklabels([-100, -50, 0, 50, 100])
plt.figtext(0.5, 0.96, net + ' %s'%anatomy_categories[3], fontsize = 24, ha = 'center')
fig.subplots_adjust(left = 0.12, bottom = 0.06, right = 0.98, top = 0.9)
plt.savefig('CC_examples_%s_two_synapses_non_flat.png'%net, dpi = 300)
#################################################################
#################################################################
plt.close('all')
fig = plt.figure(figsize = [6,7])
gm = gridspec.GridSpec(185, 200, figure = fig)
ax = plt.subplot(gm[:,:])
axes = [plt.subplot(gm[j*40:j*40+25, i*100:i*100+80]) for j in range(5) for i in range(2)]
tuples = np.where(anatomy==4)
ntuples = len(tuples[0])
n=0
for k in range(3000):
indx = np.random.randint(ntuples) #random couple
a,b = tuples[0][indx], tuples[1][indx]
i,j = np.min([a,b]), np.max([a,b])
if (isflat[i, j]==1):
print(n, i, j, isflat[i,j])
            if np.sum(z[:,j]*z[:,i])>=1:
                axes[n].text(0.35, 1.00, '#(j<o>i) = %d'%np.sum(z[:,j]*z[:,i]), transform=axes[n].transAxes)
            plot_cc(i,j, axes[n], 'f')
            n+=1
        if n==10:break
    for k, ax in enumerate(axes):
        ax.axvline(x = 0, linestyle = '--', linewidth = 0.2, color = 'k')
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_xticklabels([])
        ax.set_xlim(-100, 100)
        if k%2==0: ax.set_ylabel('Rate (Hz)', fontsize = 9)
        if k in [8, 9]:
            ax.set_xlabel('Time (ms)', fontsize = 9)
            ax.set_xticklabels([-100, -50, 0, 50, 100])
    plt.figtext(0.5, 0.96, net + ' %s'%anatomy_categories[4], fontsize = 24, ha = 'center')
    fig.subplots_adjust(left = 0.12, bottom = 0.06, right = 0.98, top = 0.9)
    # reconstructed from the parallel blocks above; the output filename is an assumption
    plt.savefig('CC_examples_%s_common_input.png'%net, dpi = 300)
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from fitstools import mask_fits, row_avg, manage_dtype, common_header, pad_array, display
from spectra import spectrum, interp_add, rmean_spectra, scale_spectra
from astropy.io import fits
class fibers:
def __init__(self, init_spectra={}, init_header=None):
self.spectra = dict(init_spectra)
self.header = init_header
def add_spectrum(self, fiber_num, spec):
self.spectra[fiber_num] = spec
def get_spectra(self, as_dict=False):
if as_dict:
return self.spectra
fiber_nums = sorted(self.spectra.keys())
return [self.spectra[fiber_num] for fiber_num in fiber_nums]
def get_spectrum(self, fiber_num):
return self.spectra[fiber_num]
def set_spectrum(self, fiber_num, spec):
self.spectra[fiber_num] = spec
def scale_spectra(self):
fiber_nums = sorted(self.spectra.keys())
sp_list = [self.spectra[fnum] for fnum in fiber_nums]
scaled_sp_list = scale_spectra(sp_list)
self.spectra = {fnum:sp for fnum,sp in zip(fiber_nums, scaled_sp_list)}
def get_fiber_numbers(self):
return sorted(self.spectra.keys())
def __getitem__(self, i):
return self.get_spectra()[i]
def __setitem__(self, i, new_spec):
fiber_nums = sorted(self.spectra.keys())
fiber_num = fiber_nums[i]
self.spectra[fiber_num] = new_spec
a = np.array(new_spec.get_flux())
def generate_header(self, headers):
self.header = common_header(headers)
        if self.header is None:
self.header = fits.PrimaryHDU(np.array([])).header
for i, fnum in enumerate(sorted(self.spectra.keys())):
try:
apid = self.header['SLFIB'+str(int(fnum))]
if len(apid.split(' ')) >= 6:
apid = ' '.join(apid.split(' ')[4:])
apid = 'SLFIB'+str(int(fnum))+", "+apid
except TypeError:
apid = ''
self.header['APID'+str(int(i+1))] = apid
def save(self, savepath):
length = max([len(spec.get_flux()) for spec in self.get_spectra()])
flux_dat = np.asarray([pad_array(spec.get_flux(), np.nan, length) for spec in self.get_spectra()], dtype='float64')
flux = fits.PrimaryHDU(flux_dat, self.header)
flux.header['EXTNAME'] = 'FLUX'
wavelength_dat = np.asarray([pad_array(spec.get_wavelength(), np.nan, length) for spec in self.get_spectra()])
wavelength = fits.ImageHDU(wavelength_dat, self.header)
wavelength.header['EXTNAME'] = 'WAVELENGTH'
flux_err_dat = np.asarray([pad_array(spec.get_flux_err(), np.nan, length) for spec in self.get_spectra()])
flux_err = fits.ImageHDU(flux_err_dat, self.header)
flux_err.header['EXTNAME'] = 'FLUX_ERR'
f = fits.HDUList([flux, wavelength, flux_err])
        f.writeto(savepath, overwrite=True)  # astropy renamed 'clobber' to 'overwrite'
def extract_counts(img, fiber_mask, fiber_num):
'''
Function that extracts a 1D list of counts from a fiber.
ARGUMENTS
----------------------------------------------------------------------------
img: A 2D array containing count information for each fiber.
fiber_mask: A 2D array that specifies the locations of fibers on the image,
img.
fiber_num: The integer ID of the fiber to be extracted. This should be an
existing fiber in the fiber_mask.
'''
fiber = mask_fits(img, fiber_mask, fiber_num, reshape=True)
#display(img)
counts = row_avg(fiber)
return counts
def simple_extraction(fiber_mask, fiber_num, img, wvlsol):
'''
Function that extracts a 1D spectrum for a specified fiber.
ARGUMENTS:
----------------------------------------------------------------------------
fiber_mask: A 2D array that specifies the locations of fibers on the image,
img.
fiber_num: The integer ID of the fiber to be extracted. This should be an
existing fiber in the fiber_mask.
img: A 2D array containing count information for each fiber.
wvlsol: A 2D array containing wavelength information for each fiber.
'''
#Extract the fiber from both the wavelength solution and the image.
fiber_counts = mask_fits(img, fiber_mask, fiber_num, reshape=True)
fiber_wvlsol = mask_fits(wvlsol, fiber_mask, fiber_num, reshape=True)
#Use the center of the fiber as the wavelength domain.
center_i = fiber_wvlsol.shape[1]//2
wavelength = fiber_wvlsol[:,center_i]
if wavelength[0] > wavelength[-1]:
wavelength = wavelength[::-1]
#After interpolating to the central wavelength domain, add up counts
# from each fiber slice.
wvlsol_slices = [fiber_wvlsol[:,i] for i in range(len(fiber_wvlsol[0]))]
counts_slices = [fiber_counts[:,i] for i in range(len(fiber_counts[0]))]
wavelength, flux = interp_add(*zip(wvlsol_slices, counts_slices), x_interp_i=center_i)
return spectrum(wavelength, flux)
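# Illustrative call of simple_extraction (a sketch; the mask and wavelength
# solution arrays come from earlier pipeline steps not shown in this file):
#   spec = simple_extraction(fiber_mask, 3, img_data, wvlsol_data)
#   plt.plot(spec.get_wavelength(), spec.get_flux())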
@manage_dtype(with_header=[0])
def optimal_extraction(image, fiber_mask, profile_map, wvlsol=None, use_fibers=None):
    if use_fibers is None:
use_fibers = list({n for row in fiber_mask for n in row if n != 0})
#Get image header and data
header = image[1]
image = image[0]
#Take info from the header and use it to find out dark noise.
rn = header['RDNOISE']
gain = header['GAIN']
exptime = header['EXPTIME']
dark_noise = fits.open('calib/master_calib/dark_err.fits')[0].data
dark_noise /= gain
dark_noise *= exptime
#Make copy of header without SLFIB keywords.
h = header.copy()
SLFIB_keywords = [k for k in h.keys() if 'SLFIB' in k]
APINFO = {}
for kw in SLFIB_keywords:
try:
fnum = int(kw.split('SLFIB')[-1])
if fnum in use_fibers:
APINFO[fnum] = h[kw]
except ValueError:
pass
del h[kw]
res = fibers()
for fnum in use_fibers:
D = mask_fits(image, fiber_mask, maskval=fnum, reshape=True)
P = mask_fits(profile_map, fiber_mask, maskval=fnum, reshape=True)
dn = mask_fits(dark_noise, fiber_mask, maskval=fnum, reshape=True)
        one = np.ones_like(D)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 18 14:44:30 2022
@author: jstra
"""
import numpy as np
class Vector:
def __init__(self, components, units=None):
        self.components = np.array(components)
        self.units = units  # assumption: keep the constructor's units argument; the file is truncated here
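# Minimal usage sketch for the Vector class above (only the constructor
# survives in this file, so nothing beyond attribute access is assumed):
#   v = Vector([3.0, 4.0], units="m")
#   np.linalg.norm(v.components)  # -> 5.0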
"""
Copyright [2021] [<NAME>]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import numpy as np
import actor as act
import pygame
import random
# controls:
# w/s (while mousing over circle): increase/decrease circle radius
# a/d (while mousing over circle): decrease/increase circle speed
# up arrow/down arrow: increase/decrease all circle radii
# left arrow/right arrow: decrease/increase all circle speeds
# q/e: decrease/increase step size
act.h = 2
n = 200
screen_width, screen_height = 1920, 1080
res = (screen_width, screen_height)
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
done = False
clock = pygame.time.Clock()
msElapsed = clock.tick(30)
positions = act.actor_positions(n, res)
rand = [random.random() * 5 for i in range(n)]
rand_s = [random.random() * 5 ** 0.5 for i in range(n)]
actors = [
act.Actor(screen, np.array(positions[i]), act.actor_directions(positions, res)[i], s=1 + rand_s[i],
chars={'r': 10 + rand[i] ** 0.5, 'm': 1 + rand[i]},
res=res) for i in range(n)]
while not done:
pressed = pygame.key.get_pressed()
if pressed[pygame.K_RIGHT]:
for i in actors: i.v *= 1.05
if pressed[pygame.K_LEFT]:
for i in actors: i.v /= 1.05
if pressed[pygame.K_q]:
act.Actor.h /= 1.05
if pressed[pygame.K_e]:
act.Actor.h *= 1.05
if pressed[pygame.K_w]:
for i in actors:
if np.linalg.norm(i.x - np.array(pygame.mouse.get_pos())) < i.chars['r']:
i.chars['m'] *= 1.05
i.chars['r'] *= 1.05 ** 0.5
if pressed[pygame.K_s]:
for i in actors:
if np.linalg.norm(i.x - np.array(pygame.mouse.get_pos())) < i.chars['r']:
i.chars['m'] /= 1.05
i.chars['r'] /= 1.05 ** 0.5
if pressed[pygame.K_a]:
for i in actors:
if np.linalg.norm(i.x - np.array(pygame.mouse.get_pos())) < i.chars['r']:
i.v /= 1.05
if pressed[pygame.K_d]:
for i in actors:
if np.linalg.norm(i.x - np.array(pygame.mouse.get_pos())) < i.chars['r']:
i.v *= 1.05
if pressed[pygame.K_UP]:
for i in actors:
i.chars['m'] *= 1.05
i.chars['r'] *= 1.05 ** 0.5
if pressed[pygame.K_DOWN]:
for i in actors:
i.chars['m'] /= 1.05
i.chars['r'] /= 1.05 ** 0.5
screen.fill((10, 10, 10))
for i in actors: i.update()
    a = np.array([i.x for i in actors])
    # completion below is an assumption: standard pygame frame update and quit handling
    pygame.display.flip()
    msElapsed = clock.tick(30)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
import tensorflow as tf
import datetime
import os
import h5py
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# decode the data
class DataDecoder():
def __init__(self, trainset, testset):
self.trainset = trainset
self.testset = testset
print('DataDecoder Initialized:\nEncoded train data: '+self.trainset+'\nEncoded test data: '+self.testset+'\n')
def read_file(self, filepath):
f = h5py.File(filepath, "r")
color_codes, rgb, seg = f['color_codes'][:], f['rgb'][:], f['seg'][:]
return f, color_codes, rgb, seg
def decode_train_rgb(self):
_, _, rgb_train, _ = self.read_file(self.trainset)
num_train = rgb_train.shape[0]
for i in range(num_train):
image = Image.fromarray(rgb_train[i])
image.save('./Train_rgb/' + str(i) + '_rgb.png')
def decode_train_seg(self):
_, _, _, seg_train = self.read_file(self.trainset)
num_train = seg_train.shape[0]
for i in range(num_train):
            image = Image.fromarray(np.squeeze(seg_train[i])) # You have to squeeze it !
image.save('./Train_seg/' + str(i) + '_seg.png')
def decode_test_rgb(self):
_, _, rgb_test, _ = self.read_file(self.testset)
num_test = rgb_test.shape[0]
for i in range(num_test):
image = Image.fromarray(rgb_test[i])
image.save('./Test_rgb/' + str(i) + '_rgb.png')
def decode_test_seg(self):
_, _, _, seg_test = self.read_file(self.testset)
num_test = seg_test.shape[0]
for i in range(num_test):
            image = Image.fromarray(np.squeeze(seg_test[i]))
            image.save('./Test_seg/' + str(i) + '_seg.png')
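# Illustrative usage of DataDecoder (file paths are placeholders):
#   decoder = DataDecoder('train_data.h5', 'test_data.h5')
#   decoder.decode_train_rgb()
#   decoder.decode_train_seg()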
import re
import numpy as np
from django.core.mail import send_mail
from django.db.models.functions import Lower
from django.template import loader
import slack
from djangoProject.settings import SLACK_TOKEN
from .models import BadgeCategory, Reward
from django.db.models import Count, Sum
import datetime
from django.utils import timezone
def collect_titles(badges):
titles = []
for badge in badges:
titles.append(badge.badges.title)
return set(titles)
def collect_badges(user):
badge = user.reward_set.all()
titles = collect_titles(badges=badge)
rewards = []
count = []
for title in titles:
rewards.append(user.reward_set.filter(badges__title=title).first())
count.append(user.reward_set.filter(badges__title=title).count())
return rewards, count
def get_house_points(house):
points = 0
for team in house.teams.all():
for member in team.members.all():
points += member.get_point()
return points
def get_house_data(houses):
for house in houses:
house.points = get_house_points(house)
house.save()
def get_team_points(teams):
points = 0
for member in teams.members.all():
points += member.get_point()
return points
def get_team_data(teams):
for team in teams:
team.points = get_team_points(team)
team.save()
def email_check(email):
    regex = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
if re.search(regex, email):
return True
else:
return None
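# Behavior of email_check (illustrative):
#   email_check("user@example.com")  # -> True
#   email_check("not-an-email")      # -> None (no match)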
def send_reward_mail(array):
email = array[0]
timestamp = array[1]
awarded_by = array[2]
description = array[3]
badge = array[4]
name = array[5]
logo = array[6]
html_message = loader.render_to_string(
'email/message.html',
{
'image': logo,
'name': name,
'badge': badge,
'awarded': awarded_by,
'reason': description,
})
subject = f'A {badge} Badge from {awarded_by}'
comment = f'''
Dear {name},
Congratulations.
You have been awarded with {badge} by {awarded_by} for {description}. Please visit your profile page on
Sushiksha Website to see the badge. Badges are an amazing way to express your feelings to fellow sophists.
Congrats once again,
Best Wishes,
Convener
Sushiksha
Alumni Mentoring Programme
World Konkani Centre
'''
send_mail(subject, comment, None, [email], html_message=html_message)
print("email sent")
def send_reward_slack(array):
timestamp = array[1]
awarded_by = array[2]
description = array[3]
badge = array[4]
name = array[5]
image = array[6]
message = {
'channel': '#sushiksha-badges',
"blocks": [
{
"type": "divider"
},
{
"type": "header",
"text": {
"type": "plain_text",
"text": "Congratulations " + name + " 🎉"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*From:* " + awarded_by + "\n*Badge Given:* " + badge + "\n*Message:*\n" + description
},
"accessory": {
"type": "image",
"image_url": image,
"alt_text": badge
}
},
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": ":wkc-badge1: <https://sushiksha.konkanischolarship.com/user/rewards/|Sushiksha Badges>"
}
]
},
]
}
client_obj = slack.WebClient(token=SLACK_TOKEN)
client_obj.chat_postMessage(**message)
print("slack message sent")
def format_result(result, headers):
output = []
    array = np.array(result)
#!/usr/bin/env python
import sys, os
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
from Proj import rd
title = ""
ofstr = 'metar_map'
# x1=west, x2=east, y1=south, y2=north
x1 = -122
x2 = -45
y1 = 22
y2 = 50
lon, lat, step = -100.0, 50.0, 20
fig = plt.figure()
ax = plt.subplot(111)
plt.title(title)
m = Basemap(resolution='l',projection='laea',
llcrnrlat=y1,urcrnrlat=y2,llcrnrlon=x1,urcrnrlon=x2,
lon_0=lon,lat_0=lat,lat_ts=(y1+y2)/2)
#m.shadedrelief()
#m.bluemarble()
m.drawmapboundary(fill_color='#76A6CC')
m.fillcontinents(color='tan', lake_color='#76A6CC')
#m.drawrivers(color='#76A6CC')
m.drawcoastlines()
m.drawcountries()
m.drawparallels(np.arange(15,70,5.))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020. Triad National Security, LLC. All rights reserved.
This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
Department of Energy/National Nuclear Security Administration. All rights in the program are
reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
Security Administration. The Government is granted for itself and others acting on its behalf a
nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
others to do so.
LANL software release C19112
Author: <NAME>
"""
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.pyplot as plt
from itertools import combinations, chain
from scipy.special import comb
from collections import namedtuple
from pathos.multiprocessing import ProcessingPool as Pool
import time
def abline(slope, intercept):
"""Plot a line from slope and intercept"""
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, '--', color='red')
pos = lambda a: (abs(a) + a) / 2 # same as max(0,a)
def const(signs, knots):
"""Get max value of BASS basis function, assuming 0-1 range of inputs"""
cc = np.prod(((signs + 1) / 2 - signs * knots))
if cc == 0:
return 1
return cc
def makeBasis(signs, vs, knots, xdata):
"""Make basis function using continuous variables"""
cc = const(signs, knots)
temp1 = pos(signs * (xdata[:, vs] - knots))
if len(signs) == 1:
return temp1 / cc
temp2 = np.prod(temp1, axis=1) / cc
return temp2
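# Example (illustrative): a single two-way interaction basis function.
#   xdata = np.random.rand(100, 3)
#   b = makeBasis(np.array([1, -1]), np.array([0, 2]), np.array([0.3, 0.7]), xdata)
#   b.shape  # -> (100,)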
def normalize(x, bounds):
"""Normalize to 0-1 scale"""
return (x - bounds[:, 0]) / (bounds[:, 1] - bounds[:, 0])
def unnormalize(z, bounds):
"""Inverse of normalize"""
return z * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
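# Round-trip example (illustrative); bounds holds one [min, max] row per input:
#   bounds = np.array([[0.0, 10.0]])
#   z = normalize(np.array([5.0]), bounds)  # -> array([0.5])
#   unnormalize(z, bounds)                  # -> array([5.0])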
def comb_index(n, k):
"""Get all combinations of indices from 0:n of length k"""
# https://stackoverflow.com/questions/16003217/n-d-version-of-itertools-combinations-in-numpy
count = comb(n, k, exact=True)
index = np.fromiter(chain.from_iterable(combinations(range(n), k)),
int, count=count * k)
return index.reshape(-1, k)
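# Example (illustrative):
#   comb_index(3, 2)  # -> array([[0, 1], [0, 2], [1, 2]])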
def dmwnchBass(z_vec, vars_use):
"""Multivariate Walenius' noncentral hypergeometric density function with some variables fixed"""
alpha = z_vec[vars_use - 1] / sum(np.delete(z_vec, vars_use))
j = len(alpha)
ss = 1 + (-1) ** j * 1 / (sum(alpha) + 1)
for i in range(j - 1):
idx = comb_index(j, i + 1)
temp = alpha[idx]
ss = ss + (-1) ** (i + 1) * sum(1 / (temp.sum(axis=1) + 1))
return ss
Qf = namedtuple('Qf', 'R bhat qf')
def getQf(XtX, Xty):
"""Get the quadratic form y'X solve(X'X) X'y, as well as least squares beta and cholesky of X'X"""
try:
R = sp.linalg.cholesky(XtX, lower=False) # might be a better way to do this with sp.linalg.cho_factor
except np.linalg.LinAlgError as e:
return None
dr = np.diag(R)
if len(dr) > 1:
if max(dr[1:]) / min(dr) > 1e3:
return None
bhat = sp.linalg.solve_triangular(R, sp.linalg.solve_triangular(R, Xty, trans=1))
qf = np.dot(bhat, Xty)
return Qf(R, bhat, qf)
def logProbChangeMod(n_int, vars_use, I_vec, z_vec, p, maxInt):
"""Get reversibility factor for RJMCMC acceptance ratio, and also prior"""
if n_int == 1:
out = (np.log(I_vec[n_int - 1]) - np.log(2 * p) # proposal
+ np.log(2 * p) + np.log(maxInt))
else:
x = np.zeros(p)
x[vars_use] = 1
lprob_vars_noReplace = np.log(dmwnchBass(z_vec, vars_use))
out = (np.log(I_vec[n_int - 1]) + lprob_vars_noReplace - n_int * np.log(2) # proposal
+ n_int * np.log(2) + np.log(comb(p, n_int)) + np.log(maxInt)) # prior
return out
CandidateBasis = namedtuple('CandidateBasis', 'basis n_int signs vs knots lbmcmp')
def genCandBasis(maxInt, I_vec, z_vec, p, xdata):
"""Generate a candidate basis for birth step, as well as the RJMCMC reversibility factor and prior"""
n_int = int(np.random.choice(range(maxInt), p=I_vec) + 1)
signs = np.random.choice([-1, 1], size=n_int, replace=True)
# knots = np.random.rand(n_int)
knots = np.zeros(n_int)
if n_int == 1:
vs = np.random.choice(p)
knots = np.random.choice(xdata[:, vs], size=1)
else:
vs = np.sort(np.random.choice(p, size=n_int, p=z_vec, replace=False))
for i in range(n_int):
knots[i] = np.random.choice(xdata[:, vs[i]], size=1)
basis = makeBasis(signs, vs, knots, xdata)
lbmcmp = logProbChangeMod(n_int, vs, I_vec, z_vec, p, maxInt)
return CandidateBasis(basis, n_int, signs, vs, knots, lbmcmp)
BasisChange = namedtuple('BasisChange', 'basis signs vs knots')
def genBasisChange(knots, signs, vs, tochange_int, xdata):
"""Generate a condidate basis for change step"""
knots_cand = knots.copy()
signs_cand = signs.copy()
signs_cand[tochange_int] = np.random.choice([-1, 1], size=1)
knots_cand[tochange_int] = np.random.choice(xdata[:, vs[tochange_int]], size=1) # np.random.rand(1)
basis = makeBasis(signs_cand, vs, knots_cand, xdata)
return BasisChange(basis, signs_cand, vs, knots_cand)
class BassPrior:
"""Structure to store prior"""
def __init__(self, maxInt, maxBasis, npart, g1, g2, s2_lower, h1, h2, a_tau, b_tau, w1, w2):
self.maxInt = maxInt
self.maxBasis = maxBasis
self.npart = npart
self.g1 = g1
self.g2 = g2
self.s2_lower = s2_lower
self.h1 = h1
self.h2 = h2
self.a_tau = a_tau
self.b_tau = b_tau
self.w1 = w1
self.w2 = w2
return
class BassData:
"""Structure to store data"""
def __init__(self, xx, y):
self.xx_orig = xx
self.y = y
self.ssy = sum(y * y)
self.n = len(xx)
self.p = len(xx[0])
self.bounds = np.zeros([self.p, 2])
for i in range(self.p):
self.bounds[i, 0] = np.min(xx[:, i])
self.bounds[i, 1] = np.max(xx[:, i])
self.xx = normalize(self.xx_orig, self.bounds)
return
Samples = namedtuple('Samples', 's2 lam tau nbasis nbasis_models n_int signs vs knots beta')
Sample = namedtuple('Sample', 's2 lam tau nbasis nbasis_models n_int signs vs knots beta')
class BassState:
"""The current state of the RJMCMC chain, with methods for getting the log posterior and for updating the state"""
def __init__(self, data, prior):
self.data = data
self.prior = prior
self.s2 = 1.
self.nbasis = 0
self.tau = 1.
self.s2_rate = 1.
self.R = 1
self.lam = 1
self.I_star = np.ones(prior.maxInt) * prior.w1
self.I_vec = self.I_star / np.sum(self.I_star)
self.z_star = np.ones(data.p) * prior.w2
self.z_vec = self.z_star / np.sum(self.z_star)
self.basis = np.ones([data.n, 1])
self.nc = 1
self.knots = np.zeros([prior.maxBasis, prior.maxInt])
self.signs = np.zeros([prior.maxBasis, prior.maxInt],
dtype=int) # could do "bool_", but would have to transform 0 to -1
self.vs = np.zeros([prior.maxBasis, prior.maxInt], dtype=int)
self.n_int = np.zeros([prior.maxBasis], dtype=int)
self.Xty = np.zeros(prior.maxBasis + 2)
self.Xty[0] = np.sum(data.y)
self.XtX = np.zeros([prior.maxBasis + 2, prior.maxBasis + 2])
self.XtX[0, 0] = data.n
self.R = np.array([[np.sqrt(data.n)]]) # np.linalg.cholesky(self.XtX[0, 0])
self.R_inv_t = np.array([[1 / np.sqrt(data.n)]])
self.bhat = np.mean(data.y)
self.qf = pow(np.sqrt(data.n) * np.mean(data.y), 2)
self.count = np.zeros(3)
self.cmod = False # has the state changed since the last write (i.e., has a birth, death, or change been accepted)?
return
def log_post(self): # needs updating
"""get current log posterior"""
lp = (
- (self.s2_rate + self.prior.g2) / self.s2
- (self.data.n / 2 + 1 + (self.nbasis + 1) / 2 + self.prior.g1) * np.log(self.s2)
+ np.sum(np.log(abs(np.diag(self.R)))) # .5*determinant of XtX
+ (self.prior.a_tau + (self.nbasis + 1) / 2 - 1) * np.log(self.tau) - self.prior.a_tau * self.tau
- (self.nbasis + 1) / 2 * np.log(2 * np.pi)
+ (self.prior.h1 + self.nbasis - 1) * np.log(self.lam) - self.lam * (self.prior.h2 + 1)
) # curr$nbasis-1 because poisson prior is excluding intercept (for curr$nbasis instead of curr$nbasis+1)
# -lfactorial(curr$nbasis) # added, but maybe cancels with prior
self.lp = lp
return
def update(self):
"""Update the current state using a RJMCMC step (and Gibbs steps at the end of this function)"""
move_type = np.random.choice([1, 2, 3])
if self.nbasis == 0:
move_type = 1
if self.nbasis == self.prior.maxBasis:
move_type = np.random.choice(np.array([2, 3]))
if move_type == 1:
## BIRTH step
cand = genCandBasis(self.prior.maxInt, self.I_vec, self.z_vec, self.data.p, self.data.xx)
if (cand.basis > 0).sum() < self.prior.npart: # if proposed basis function has too few non-zero entries, dont change the state
return
ata = np.dot(cand.basis, cand.basis)
Xta = np.dot(self.basis.T, cand.basis)
aty = np.dot(cand.basis, self.data.y)
self.Xty[self.nc] = aty
self.XtX[0:self.nc, self.nc] = Xta
self.XtX[self.nc, 0:(self.nc)] = Xta
self.XtX[self.nc, self.nc] = ata
qf_cand = getQf(self.XtX[0:(self.nc + 1), 0:(self.nc + 1)], self.Xty[0:(self.nc + 1)])
            fullRank = qf_cand is not None
if not fullRank:
return
alpha = .5 / self.s2 * (qf_cand.qf - self.qf) / (1 + self.tau) + np.log(self.lam) - np.log(self.nc) + np.log(
1 / 3) - np.log(1 / 3) - cand.lbmcmp + .5 * np.log(self.tau) - .5 * np.log(1 + self.tau)
if np.log(np.random.rand()) < alpha:
self.cmod = True
# note, XtX and Xty are already updated
self.nbasis = self.nbasis + 1
self.nc = self.nbasis + 1
self.qf = qf_cand.qf
self.bhat = qf_cand.bhat
self.R = qf_cand.R
self.R_inv_t = sp.linalg.solve_triangular(self.R, np.identity(self.nc))
self.count[0] = self.count[0] + 1
self.n_int[self.nbasis - 1] = cand.n_int
self.knots[self.nbasis - 1, 0:(cand.n_int)] = cand.knots
self.signs[self.nbasis - 1, 0:(cand.n_int)] = cand.signs
self.vs[self.nbasis - 1, 0:(cand.n_int)] = cand.vs
self.I_star[cand.n_int - 1] = self.I_star[cand.n_int - 1] + 1
self.I_vec = self.I_star / sum(self.I_star)
self.z_star[cand.vs] = self.z_star[cand.vs] + 1
self.z_vec = self.z_star / sum(self.z_star)
self.basis = np.append(self.basis, cand.basis.reshape(self.data.n, 1), axis=1)
elif move_type == 2:
## DEATH step
tokill_ind = np.random.choice(self.nbasis)
ind = list(range(self.nc))
del ind[tokill_ind + 1]
qf_cand = getQf(self.XtX[np.ix_(ind, ind)], self.Xty[ind])
fullRank = qf_cand != None
if not fullRank:
return
I_star = self.I_star.copy()
I_star[self.n_int[tokill_ind] - 1] = I_star[self.n_int[tokill_ind] - 1] - 1
I_vec = I_star / sum(I_star)
z_star = self.z_star.copy()
z_star[self.vs[tokill_ind, 0:self.n_int[tokill_ind]]] = z_star[self.vs[tokill_ind,
0:self.n_int[tokill_ind]]] - 1
z_vec = z_star / sum(z_star)
lbmcmp = logProbChangeMod(self.n_int[tokill_ind], self.vs[tokill_ind, 0:self.n_int[tokill_ind]], I_vec,
z_vec, self.data.p, self.prior.maxInt)
alpha = .5 / self.s2 * (qf_cand.qf - self.qf) / (1 + self.tau) - np.log(self.lam) + np.log(self.nbasis) + np.log(
1 / 3) - np.log(1 / 3) + lbmcmp - .5 * np.log(self.tau) + .5 * np.log(1 + self.tau)
if np.log(np.random.rand()) < alpha:
self.cmod = True
self.nbasis = self.nbasis - 1
self.nc = self.nbasis + 1
self.qf = qf_cand.qf
self.bhat = qf_cand.bhat
self.R = qf_cand.R
self.R_inv_t = sp.linalg.solve_triangular(self.R, np.identity(self.nc))
self.count[1] = self.count[1] + 1
self.Xty[0:self.nc] = self.Xty[ind]
                self.XtX[0:self.nc, 0:self.nc] = self.XtX[np.ix_(ind, ind)]
"""
Contains functions for generating prior pseudo-samples and maximizing weighted log likelihood in logistic regression examples
Uses scipy optimize for gradient descent
"""
import numpy as np
import copy
import scipy as sp
from scipy.stats import bernoulli
def sampleprior(x,N_data,D_covariate,T_trunc,B_postsamples): #sample prior pseudo-samples
ind_x = np.random.randint(low = 0, high = N_data, size = (B_postsamples,T_trunc)) #sample x indices with replacement
x_prior = x[ind_x]
y_prior = bernoulli.rvs(0.5, size = (B_postsamples,T_trunc))
return y_prior, x_prior
def func(beta,weights,y,x,a,b,gamma): #calculate weighted loss
N_data = np.shape(y)[0]
D_covariate = np.shape(x)[1]
z = np.dot(x,beta[0:D_covariate])+ beta[D_covariate]
logeta = -np.logaddexp(0,-z)
lognegeta = -np.logaddexp(0,z)
loglik_i = y*logeta + (1-y)*lognegeta
loglik = np.sum(loglik_i * weights)
k = -gamma * ((2*a + 1)/2)*np.sum(np.log(1+ (1/(2*b))*beta[0:D_covariate]**2))
return -(loglik +k)
def grad(beta,weights,y,x,a,b,gamma): #calculate weighted loss gradient
N_data = np.shape(y)[0]
D_covariate = np.shape(x)[1]
z = np.dot(x,beta[0:D_covariate])+ beta[D_covariate]
err = ((y - sp.special.expit(z))*weights)
gradient = np.zeros(D_covariate+1)
    gradient[0:D_covariate] = np.dot(err,x)
    # remaining terms reconstructed to mirror func above (assumption):
    gradient[0:D_covariate] += -gamma * ((2*a + 1)/2) * (beta[0:D_covariate]/b) / (1 + (1/(2*b))*beta[0:D_covariate]**2)
    gradient[D_covariate] = np.sum(err)  # intercept carries no shrinkage term in func
    return -gradient  # negated, consistent with func returning the negative objective
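# Quick finite-difference sanity check of grad against func (illustrative):
#   eps = 1e-6; e_j = np.eye(D_covariate + 1)[j]
#   (func(beta + eps*e_j, w, y, x, a, b, g) - func(beta - eps*e_j, w, y, x, a, b, g)) / (2*eps)
# should approximate grad(beta, w, y, x, a, b, g)[j] for each coordinate j.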
from numpy import hstack
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.ensemble import GradientBoostingRegressor as GBR
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor as MLP
from .EnsembleSearch import EnsembleSearch
class MMFFBlending:
def __init__(self,X_train, y_train, X_test, y_test,
models=[('mlp', MLP()), ('rfr', RFR()), ('gbr', GBR()), ('svm', SVR())],
blender = DecisionTreeRegressor()):
self._X_train = X_train
self._y_train = y_train
self._X_test = X_test
self._y_test = y_test
self._models = models
self._blender = blender
def set_models(self, models=[]):
self._models = models
# get a list of base models
def get_models(self):
return self._models
# fit the blending ensemble
def fit_ensemble(self, models):
# fit all models on the training set and predict on hold out set
meta_X = list()
for _, model in models:
# fit in training set
model.fit(self._X_train, self._y_train)
# predict on hold out set
yhat = model.predict(self._X_train)
# reshape predictions into a matrix with one column
yhat = yhat.reshape(len(yhat), 1)
# store predictions as input for blending
meta_X.append(yhat)
# create 2d array from predictions, each set is an input feature
        meta_X = hstack(meta_X)
        # completion is an assumption: fit the blender on the stacked base-model predictions
        self._blender.fit(meta_X, self._y_train)
        return self._blender
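# Illustrative use of the class above (assumes pre-split train/test arrays):
#   blend = MMFFBlending(X_train, y_train, X_test, y_test)
#   blender = blend.fit_ensemble(blend.get_models())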
import numpy as np
import math
import sys
from timeit import default_timer as timer
sys.path.append("../../")
from core import wnn
from encoding import mushroom
from encoding import hamming_code
#Load Mushroom data
base_path = "../../dataset/mushroom/"
#2/3 Test
train_data, train_label, test_data, test_label = mushroom.load_3data(base_path)
nominal_length = [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8]
nominal_length2 = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
train_bin = []
test_bin = []
i = 0
for data in train_data:
train_bin.append(np.array([], dtype=bool))
for a in range(len(data)):
#binarr = hamming_code.get_code(data[a], nominal_length[a])
p = data[a] * nominal_length2[a]
binarr = np.zeros(nominal_length2[a]*len(mushroom.nominal_att[a]), dtype=bool)
for b in range(nominal_length2[a]):
binarr[p] = 1
p += 1
train_bin[i] = np.append(train_bin[i], binarr)
i += 1
i = 0
for data in test_data:
test_bin.append(np.array([], dtype=bool))
for a in range(len(data)):
#binarr = hamming_code.get_code(data[a], nominal_length[a])
p = data[a] * nominal_length2[a]
binarr = np.zeros(nominal_length2[a]*len(mushroom.nominal_att[a]), dtype=bool)
for b in range(nominal_length2[a]):
binarr[p] = 1
p += 1
test_bin[i] = np.append(test_bin[i], binarr)
i += 1
#K-fold
folds_train_bin = []
folds_test_bin = []
folds_train_label = []
folds_test_label = []
k = 10
for i in range(k):
aux_train_data, aux_train_label, aux_test_data, aux_test_label = mushroom.load_fold(base_path, i)
folds_train_label.append(aux_train_label)
folds_test_label.append(aux_test_label)
folds_train_bin.append([])
folds_test_bin.append([])
j = 0
for data in aux_train_data:
folds_train_bin[i].append([])
folds_train_bin[i][j].append(np.array([], dtype=bool))
for a in range(len(data)):
binarr = hamming_code.get_code(data[a], nominal_length[a])
#p = data[a] * nominal_length2[a]
#binarr = np.zeros(nominal_length2[a]*len(mushroom.nominal_att[a]), dtype=bool)
#for b in range(nominal_length2[a]):
# binarr[p] = 1
# p += 1
folds_train_bin[i][j] = np.append(folds_train_bin[i][j], binarr)
j += 1
j = 0
for data in aux_test_data:
folds_test_bin[i].append([])
folds_test_bin[i][j].append(np.array([], dtype=bool))
for a in range(len(data)):
binarr = hamming_code.get_code(data[a], nominal_length[a])
#p = data[a] * nominal_length2[a]
#binarr = np.zeros(nominal_length2[a]*len(mushroom.nominal_att[a]), dtype=bool)
#for b in range(nominal_length2[a]):
# binarr[p] = 1
# p += 1
folds_test_bin[i][j] = np.append(folds_test_bin[i][j], binarr)
j += 1
#Parameters
num_classes = 2
tuple_bit = 20
test_length = len(test_label)
num_runs = 20
acc_list = []
training_time = []
testing_time = []
dacc_list = []
dtraining_time = []
dtesting_time = []
bacc_list = []
btraining_time = []
btesting_time = []
entry_size = len(train_bin[0])
#Wisard
for r in range(num_runs):
wisard = wnn.Wisard(entry_size, tuple_bit, num_classes)
#Training
start = timer()
wisard.train(train_bin, train_label)
training_time.append(timer() - start)
#Testing
start = timer()
rank_result = wisard.rank(test_bin)
testing_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == test_label[i]:
if not (rank_result[i] ^ test_label[i]):
num_hits += 1
acc_list.append(float(num_hits)/float(test_length))
wisard_stats = wisard.stats()
del wisard
#DictWisard
for r in range(num_runs):
dwisard = wnn.DictWisard(entry_size, tuple_bit, num_classes)
#Training
start = timer()
dwisard.train(train_bin, train_label)
dtraining_time.append(timer() - start)
#Testing
start = timer()
rank_result = dwisard.rank(test_bin)
dtesting_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == test_label[i]:
if not (rank_result[i] ^ test_label[i]):
num_hits += 1
dacc_list.append(float(num_hits)/float(test_length))
dwisard_stats = dwisard.stats()
del dwisard
#Bloom Wisard
#capacity = len(train_label)
capacity = 100
error = 0.1
errors = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
b_stats = []
b_training_time = []
b_testing_time = []
b_acc = []
b_error = []
for e in range(len(errors)):
btraining_time = []
btesting_time = []
bacc_list = []
for r in range(num_runs):
bwisard = wnn.BloomWisard(entry_size, tuple_bit, num_classes, capacity, error=errors[e])
#Training
start = timer()
bwisard.train(train_bin, train_label)
btraining_time.append(timer() - start)
#Testing
start = timer()
rank_result = bwisard.rank(test_bin)
btesting_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == test_label[i]:
if not (rank_result[i] ^ test_label[i]):
num_hits += 1
bacc_list.append(float(num_hits)/float(test_length))
b_training_time.append(btraining_time)
b_testing_time.append(btesting_time)
b_acc.append(bacc_list)
b_stats.append(bwisard.stats())
b_error.append(bwisard.error())
#bwisard_stats = bwisard.stats()
#berror = bwisard.error()
del bwisard
#K-fold cross validation ---------------------------------------------------------
#Wisard
test_length = len(folds_test_label[0])
kf_training_time = []
kf_testing_time = []
kf_wacc_list = []
for r in range(num_runs):
for f in range(k):
wisard = wnn.Wisard(entry_size, tuple_bit, num_classes)
#Training
start = timer()
wisard.train(folds_train_bin[f], folds_train_label[f])
kf_training_time.append(timer() - start)
#Testing
start = timer()
rank_result = wisard.rank(folds_test_bin[f])
kf_testing_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == folds_test_label[f][i]:
if not (rank_result[i] ^ folds_test_label[f][i]):
num_hits += 1
kf_wacc_list.append(float(num_hits)/float(test_length))
kf_wisard_stats = wisard.stats()
del wisard
#DictWisard
kf_dtraining_time = []
kf_dtesting_time = []
kf_dacc_list = []
for r in range(num_runs):
for f in range(k):
dwisard = wnn.DictWisard(entry_size, tuple_bit, num_classes)
#Training
start = timer()
dwisard.train(folds_train_bin[f], folds_train_label[f])
kf_dtraining_time.append(timer() - start)
#Testing
start = timer()
rank_result = dwisard.rank(folds_test_bin[f])
kf_dtesting_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == folds_test_label[f][i]:
if not (rank_result[i] ^ folds_test_label[f][i]):
num_hits += 1
kf_dacc_list.append(float(num_hits)/float(test_length))
kf_dwisard_stats = dwisard.stats()
del dwisard
#Bloom Wisard
#capacity2 = len(folds_train_label[0])
capacity2 = 100
error = 0.1
kf_btraining_time = []
kf_btesting_time = []
kf_bacc_list = []
kb_stats = []
kb_training_time = []
kb_testing_time = []
kb_acc = []
kb_error = []
for e in range(len(errors)):
kf_btraining_time = []
kf_btesting_time = []
kf_bacc_list = []
for r in range(num_runs):
for f in range(k):
bwisard = wnn.BloomWisard(entry_size, tuple_bit, num_classes, capacity2, error=errors[e])
#Training
start = timer()
bwisard.train(folds_train_bin[f], folds_train_label[f])
kf_btraining_time.append(timer() - start)
#Testing
start = timer()
rank_result = bwisard.rank(folds_test_bin[f])
kf_btesting_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == folds_test_label[f][i]:
if not (rank_result[i] ^ folds_test_label[f][i]):
num_hits += 1
kf_bacc_list.append(float(num_hits)/float(test_length))
kb_training_time.append(kf_btraining_time)
kb_testing_time.append(kf_btesting_time)
kb_acc.append(kf_bacc_list)
kb_stats.append(bwisard.stats())
kb_error.append(bwisard.error())
#kf_bwisard_stats = bwisard.stats()
#kf_berror = bwisard.error()
del bwisard
#Writing output file
with open("stats.csv", "w") as out:
out.write("WNN; Entry size; Tuple size; # Rams; Capacity; Error; # Hashes; Ram size; # Discriminators; Total Bits; Acc(%); Acc Std; Training(s); Training Std; Testing(s); Testing Std; Runs;\n")
out.write("Wisard;" + str(entry_size) + ";" + str(tuple_bit) + ";" + str(wisard_stats[0]) + ";;;;" + str(wisard_stats[1]) + ";" + str(num_classes) + ";" + str(wisard_stats[3]) + ";")
    out.write(str(np.mean(acc_list)) + ";" + str(np.std(acc_list)) + ";" + str(np.mean(training_time)) + ";" + str(np.std(training_time)) + ";" + str(np.mean(testing_time)) + ";" + str(np.std(testing_time)) + ";" + str(num_runs) + ";\n")
#!/usr/bin/env python
######################################################
# Python script for analysis of dynamic rheology
# from Cognac Calculations
# 2007/2/20 <NAME>, <NAME>
# 2007/9/01 <NAME>
# 2016/4/18 H.Sasaki
######################################################
import numpy
from math import sin, cos, tan, pi, sqrt
import os
import gnuplot2
from time import sleep
#========== parameters ======================================================
# number of cycles to be omitted from the analysis
skip_cycles = 4
# make this smaller for higher accuracy
epsilon = 0.01
# maximum number of iteration
max_iter = 1000000
#========== functions =======================================================
#---------- private functions ----------
#---- initialize
def _initialize(udf,verbose=0):
    global uobj, num_data, dt, interval, amp, freq, N, num_cycles, dphi
uobj = udf
# read simulation conditions
loc = "Simulation_Conditions.Dynamics_Conditions."
[dt, totalSteps, interval] = uobj.get(loc + "Time")
[amp, freq] = uobj.get(loc + "Deformation.Lees_Edwards.Dynamic")
# N = number of output per cycle
num_data = uobj.totalRecord() - 1 # no stress in record 0
tmp = 1.0/(interval*dt*freq)
N = int(round(tmp))
if abs(tmp - N) > 1e-3:
print("1/(interval*dt*freq) =",tmp,"should be (almost) integer")
if (num_data % N != 0):
print("num_data =",num_data,"should be multipe of N =",N)
return -1
num_cycles = int(num_data/N)
dphi = 2.0*pi/N
if verbose:
print("total_output:", uobj.totalRecord() - 1)
print("output_per_cycle:",N)
print("num_cycles:",num_cycles)
print("skip_cycles:",skip_cycles)
return 0
#---- read stress from UDF
def _read_stress():
global stress, strain, time
stress = [0.0]*num_data
strain = [0.0]*num_data
time = [0.0]*num_data
# read stress and strain
loc = "Statistics_Data.Stress."
for rec in range(1,uobj.totalRecord()): # no Stress in rec=0
uobj.jump(rec)
stress[rec-1] = uobj.get(loc+"Total.Batch_Average.xy")
strain_i = uobj.get("Structure.Unit_Cell.Shear_Strain")
if abs(strain_i - amp*sin(dphi*rec)) > 1e-5:
print("strain mismatch at rec =",rec)
return -1
for i in range(num_data):
time[i] = dt*(i + 0.5 + 0.5/interval)
strain[i] = amp*sin(dphi*(i + 0.5 + 0.5/interval))
return 0
def _average_stress():
global sxy, fit, shift, ss
# allocate arrays
sxy = numpy.zeros(N,float)
fit = numpy.zeros(N,float)
# calculate average stress sxy[]
for i in range(skip_cycles*N,num_data):
sxy[i % N] = sxy[i % N] + stress[i]
sxy = sxy/(num_cycles - skip_cycles)
# calculate average and amplitude of sxy
shift = numpy.add.reduce(sxy)/N
ss = numpy.dot(sxy-shift,sxy-shift)
#----- calculate sigma_0 and set fit[]
def _set_fit(delta):
global fit, sigma_0
for i in range(N):
fit[i] = 0.
for j in range(interval):
fit[i] = fit[i] + sin(dphi*(i+(j+1.)/interval)+delta)
fit /= interval
sigma_0 = sqrt(ss/numpy.dot(fit,fit))
fit = sigma_0*fit + shift
#----- find the best vallue of delta
def _find_delta(verbose=0):
global delta, err
# start from delta = 0
delta = 0.0
_set_fit(delta)
    err = numpy.dot(sxy-fit,sxy-fit)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Mie solution
Calculates the electric field component (Ex) for a plane wave that
is scattered by a dielectric sphere.
Some of this code is a partial translation of the Matlab code from
<NAME>
http://www.mathworks.de/matlabcentral/fileexchange/30162-cylinder-scattering
"""
from __future__ import division
from __future__ import print_function
import numpy as np
from ._Classes import *
from ._Functions import *
__all__ = ["GetFieldSphere", "io_GetCartesianField2D" ]
def GetFieldSphere(radius, nmed, nsphe, lD, size, res):
sphere = DielectricMaterial(nsphe**2,0.0)
background = DielectricMaterial(nmed**2,0.0)
reference = DielectricMaterial(nmed**2,0.0) #HHH reference is medium...
lambref = reference.getElectromagneticWavelength(1.0)
xmax = size / res / 2.0
# the detector resolution is not dependent on the medium
detector = np.linspace(-xmax, xmax, size, endpoint=True) * lambref
sensor_location = np.zeros((3,size))
sensor_location[2] = lD*lambref # optical path length to detector
sensor_location[1] = detector
sensor_location[0] = detector #HHH 3D experience
return getDielectricSphereFieldUnderPlaneWave(radius*lambref,
sphere, background, sensor_location)
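# Illustrative call (all values are assumptions; radius and lD are expressed
# in units of the medium wavelength, per the body above):
#   fields = GetFieldSphere(radius=5.0, nmed=1.333, nsphe=1.45, lD=10.0, size=64, res=2.0)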
def getDielectricSphereFieldUnderPlaneWave(radius, sphere, background,
sensor_location, frequency=1):
"""
Calculate the field scattered by a dielectric sphere centered at
the origine due to an incident x-polarized plane wave. The
scattered field is in (11-239) in [Balanis1989]. see the notes
on 2008-05-24 for the coefficients, a_n, b_n, and c_n.
See Fig. 11-25 in [Balanis1989] for the exact geometry.
Input:
radius scalar to denote the radius of the sphere (m)
sphere object of DielectricMaterial
background object of DielectricMaterial
sensor_location 3x1 vector in the form of [x; y; z] (m)
frequency Nx1 vector in (Hz)
Output:
E_r Nx1 vector (V/m)
E_phi Nx1 vector (V/m)
E_theta Nx1 vector (V/m)
H_r Nx1 vector (A/m)
H_phi Nx1 vector (A/m)
H_theta Nx1 vector (A/m)
This function is a translation to Python from a matlab script
by <NAME>.
"""
#p = inputParser
#p.addRequired[int('radius')-1,int(isnumeric)-1]
#p.addRequired[int('sphere')-1,int(isobject)-1]
#p.addRequired[int('background')-1,int(isobject)-1]
#p.addRequired[int('sensor_location')-1,int(isvector)-1]
#p.addRequired[int('frequency')-1,int(isnumeric)-1]
#p.addParamValue[int('debug')-1,-1,lambda x: x == 0. or x == 1.]
#p.parse[int(radius)-1,int(sphere)-1,int(background)-1,int(sensor_location)-1,int(frequency)-1,varargin.cell[:]]
#if p.Results.debug:
# np.disp((p.Results))
# Compute all intrinsic variables
EPS_O = 8.8541878176*1e-12; #HHH ....this was not here before. changes results BY A LOT hmm.........
MU_O = 4*np.pi*1e-7; #HHH as above
#omega = 2.*np.pi*frequency
eta = background.getIntrinsicImpedance(frequency)
k = background.getElectromagneticWaveNumber(frequency)
mu = background.getComplexPermeability(frequency) * MU_O #HHH as mentioned, this multiplication was not being done.....
eps = background.getComplexPermittivity(frequency) * EPS_O #HHH as above
eta_d = sphere.getIntrinsicImpedance(frequency)
k_d = sphere.getElectromagneticWaveNumber(frequency)
mu_d = sphere.getComplexPermeability(frequency) * MU_O #HHH as above above
eps_d = sphere.getComplexPermittivity(frequency) * EPS_O #HHH as above above above
N = getN_max(radius, sphere, background, frequency)
#N = getN_max((p.Results.radius), cellarray(np.hstack((p.Results.sphere))), (p.Results.background), (p.Results.frequency))
#N = matcompat.max(N)
nu = np.arange(N) + 1
[r, theta, phi] = cart2sph(sensor_location[0], sensor_location[1], sensor_location[2])
#r.resize(len(r),1) #HHH This is used to flatten to a column. Deprecated because I changed cart2sph to build the variables in column already.
#theta.resize(len(theta),1) #HHH as above
#phi.resize(len(phi),1) #HHH as above above
# Compute coefficients
a_n = 1j**(-nu) * (2*nu+1) / (nu*(nu+1))
#a_n = np.dot(np.ones(nFreq, 1.), a_n)
# temp2 denotes the expression
# kzlegendre(nu,1,cos(theta))/sin(theta). Here I am using a
# recursive relation to compute temp2, which avoids the numerical
# difficulty when theta == 0 or PI.
temp2 = np.zeros((len(theta),len(nu))) #HHH matlab original: temp2 = np.zeros((len(nu), len(theta))) ###### changed to akin to matlab
temp2[:,0] = -1 #HHH all in first column
temp2[:,1] = (-3*np.cos(theta)).T #HHH matlab original: temp2(2) = -3*cos(theta) ##### Transverse or it doens't work. You need to replace a column with a row, figure that.
# if N = 10, then nu = [1,2,3,4,5,6,7,8,9,19]
for n in np.arange(len(nu)-2)+1:
# matlab: [2,3,4,5,6,7,8,9]
# python: [1,2,3,4,5,6,7,8]
temp2[:,n+1] = (2*n+1)/n * np.cos(theta).T*temp2[:,n] - (n+1)/n * temp2[:,n-1] #HHH matlab original: temp2(n+1) = (2*n+1)/n*cos(theta)*temp2(n) - (n+1)/n*temp2(n-1) ####selecting whole columns, using transverses properly
# temp1 denotes the expression
# sin(theta)*kzlegendre_derivative(nu,1,cos(theta)). Here I am
# also using a recursive relation to compute temp1 from temp2,
# which avoids numerical difficulty when theta == 0 or PI.
temp1 = np.zeros((len(theta), len(nu))) #HHH changed to keep matlab's structure.
temp1[:,0] = np.cos(theta).T
for n in np.arange(len(nu)-1)+1:
# matlab: [2,3,4,5,6,7,8,9,10] (index starts at 1)
# python: [1,2,3,4,5,6,7,8,9] (index starts at 0)
temp1[:,n-1] = (n+1) * temp2[:,n-1]-n*np.cos(theta).T*temp2[:,n]
#temp1 = np.dot(np.ones(nFreq, 1.), temp1)
#temp2 = np.dot(np.ones(nFreq, 1.), temp2)
#iNU = 10
#if p.Results.debug:
# A = np.array(np.vstack((np.hstack((ric_besselh_derivative(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-np.sqrt(np.dot(eps, mu)), np.sqrt(np.dot(eps_d, mu_d))), ric_besselj_derivative(iNU, np.dot(k_d, radius))))), np.hstack((ric_besselh(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-mu, mu_d), ric_besselj(iNU, np.dot(k_d, radius))))))))
# rhs = np.dot(-a_n[int(iNU)-1], np.array(np.vstack((np.hstack((ric_besselj_derivative(iNU, np.dot(k, radius)))), np.hstack((ric_besselj(iNU, np.dot(k, radius))))))))
# x = linalg.solve(A, rhs)
# np.disp(np.array(np.hstack(('b_n ', num2str(x[0]), d_n, num2str(x[1])))))
# A = np.array(np.vstack((np.hstack((ric_besselh(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-np.sqrt(np.dot(eps, mu)), np.sqrt(np.dot(eps_d, mu_d))), ric_besselj(iNU, np.dot(k_d, radius))))), np.hstack((ric_besselh_derivative(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-mu, mu_d), ric_besselj_derivative(iNU, np.dot(k_d, radius))))))))
# rhs = np.dot(-a_n[int(iNU)-1], np.array(np.vstack((np.hstack((ric_besselj(iNU, np.dot(k, radius)))), np.hstack((ric_besselj_derivative(iNU, np.dot(k, radius))))))))
# x = linalg.solve(A, rhs)
# np.disp(np.array(np.hstack(('c_n ', num2str(x[0]), e_n, num2str(x[1])))))
# np.disp('------')
#alpha = np.zeros((len(theta),len(nu))) #HHH In matlab, alpha is a row, with nu number values. since here r,theta,phi is a column, alpha has to be an array the size of (theta,nu), so it can include all the nus (in row) per value of r (in colum)
#print("alpha shape",np.shape(alpha))
#HHH initializing final result, and adding 0j so it's imaginary from the start
E_r = np.zeros(np.shape(r)) + 0j
E_theta = np.zeros(np.shape(r)) + 0j
E_phi = np.zeros(np.shape(r)) + 0j
H_r = np.zeros(np.shape(r)) + 0j
H_theta = np.zeros(np.shape(r)) + 0j
H_phi = np.zeros(np.shape(r)) + 0j
for elem in range(0,np.size(r)): #HHH gotta evaluate element by element in r (which is a column array)
if r[elem] < radius:
#num = j.*mu_d/sqrt(mu)*sqrt(eps_d);
num = 1j*mu_d/np.sqrt(mu)*np.sqrt(eps_d)
#print("num",num)
#den = - sqrt(mu. *eps_d) *ones(1,N). *transpose(ric_besselj(nu,k_d*radius)). *transpose(ric_besselh_derivative(nu,2,k*radius))...
# + sqrt(mu_d.*eps) *ones(1,N). *transpose(ric_besselh(nu,2,k*radius)). *transpose(ric_besselj_derivative(nu,k_d*radius));
den = ( - (np.sqrt(mu * eps_d)*np.ones((1,N))) * np.transpose(ric_besselj(nu,k_d*radius)) * np.transpose(ric_besselh_derivative(nu,2,k*radius))
+ (np.sqrt(mu_d * eps )*np.ones((1,N))) * np.transpose(ric_besselh(nu,2,k*radius)) * np.transpose(ric_besselj_derivative(nu,k_d*radius)) )
#print("den",den)
#d_n = num*ones(1,N)./den.*a_n;
d_n = num*np.ones((1, N))/den*a_n
#den = + sqrt(mu.*eps_d) *ones(1,N). *transpose(ric_besselh(nu,2,k*radius)). *transpose(ric_besselj_derivative(nu,k_d*radius))...
# - sqrt(mu_d.*eps) *ones(1,N). *transpose(ric_besselj(nu,k_d*radius)). *transpose(ric_besselh_derivative(nu,2,k*radius));
den = ( + (np.sqrt(mu * eps_d)*np.ones((1,N))) * np.transpose(ric_besselh(nu,2,k*radius)) * np.transpose(ric_besselj_derivative(nu,k_d*radius))
- (np.sqrt(mu_d * eps )*np.ones((1,N))) * np.transpose(ric_besselj(nu,k_d*radius)) * np.transpose(ric_besselh_derivative(nu,2,k*radius)) )
#e_n = num*ones(1,N)./den.*a_n;
e_n = num*np.ones((1, N))/den*a_n
x = k_d * r[elem] #HHH x of the current r[elem]
x=x[0] #HHH x should be integer... or problems
## Implement (11-239a) in [Balanis1989]
#alpha = (transpose(ric_besselj_derivative(nu,x,2))+transpose(ric_besselj(nu,x)))...
# .*transpose(kzlegendre(nu,1,cos(theta))*ones(1,nFreq));
alpha = ( (np.transpose(ric_besselh_derivative(nu, 2, x, 2)) + np.transpose(ric_besselh(nu, 2, x))) *
np.transpose(kzlegendre(nu, 1, np.cos(theta[elem]))) ) #HHH obviously, specific theta[elem] is used for alpha
# E_r = -j*cos(phi)*sum(d_n.*alpha, 2);
E_r[elem] = (-1j*np.cos(phi[elem]) * np.sum(d_n*alpha, 1))[0] #HHH use specific row of phi to get a single number
print("elem:",elem,"/",np.size(r), "is r:",r[elem],"/",radius,"with E_r:",E_r[elem])
#H_r = -j*sin(phi)*sum(e_n.*alpha, 2)./eta_d;
H_r[elem] = (-1j*np.sin(phi[elem]) * np.sum(e_n*alpha, 1)/eta_d)[0] #HHH use specific row of phi to get a single number
## Implement (11-239b) in [Balanis1989]
#alpha = transpose(ric_besselj_derivative(nu,x)).*temp1;
alpha = np.transpose(ric_besselj_derivative(nu, x))*temp1[elem]
#beta = transpose(ric_besselj(nu,x)).*temp2;
beta = np.transpose(ric_besselj(nu, x))*temp2[elem]
# summation = j*d_n.*alpha - e_n.*beta;
summation = 1j*d_n*alpha-e_n*beta
# E_theta = cos(phi)./x.*sum(summation,2);
E_theta[elem] = (np.cos(phi[elem])/x*np.sum(summation, 1))[0]
# summation = j*e_n.*alpha - d_n.*beta;
summation = 1j*e_n*alpha - d_n*beta
# H_theta = sin(phi)./x.*sum(summation,2)./eta_d;
H_theta[elem] = (np.sin(phi[elem])/x*np.sum(summation, 1)/eta_d)[0]
## Implement (11-239c) in [Balanis1989]
# alpha = transpose(ric_besselj_derivative(nu,x)).*temp2;
alpha = np.transpose(ric_besselj_derivative(nu, x))*temp2[elem]
# beta = transpose(ric_besselj(nu,x)).*temp1;
beta = np.transpose(ric_besselj(nu, x))*temp1[elem]
# summation = j*d_n.*alpha - e_n.*beta;
summation = 1j*d_n*alpha - e_n*beta
# E_phi = sin(phi)./x.*sum(summation,2);
E_phi[elem] = (np.sin(phi[elem])/x*np.sum(summation, 1))[0]
# summation = j*e_n.*alpha - d_n.*beta;
summation = 1j*e_n*alpha - d_n*beta
# H_phi =-cos(phi)./x.*sum(summation,2)./eta_d;
H_phi[elem] = (-np.cos(phi[elem])/x*np.sum(summation, 1)/eta_d)[0]
else:
# num = + sqrt(mu_d.*eps)*ones(1,N). *transpose(ric_besselj(nu,k*radius)) . *transpose(ric_besselj_derivative(nu,k_d*radius)) ...
# - sqrt(mu.*eps_d)*ones(1,N). *transpose(ric_besselj(nu,k_d*radius)). *transpose(ric_besselj_derivative(nu,k*radius));
num = ( (np.sqrt(mu_d*eps)*np.ones((1, N))) * np.transpose(ric_besselj(nu, k*radius)) *np.transpose(ric_besselj_derivative(nu, k_d*radius))
-(np.sqrt(mu*eps_d)*np.ones((1, N))) * np.transpose(ric_besselj(nu, k_d*radius))*np.transpose(ric_besselj_derivative(nu, k*radius)) )
#den = + sqrt(mu.*eps_d)*ones(1,N). *transpose(ric_besselj(nu,k_d*radius)) *transpose(ric_besselh_derivative(nu,2,k*radius))...
# - sqrt(mu_d.*eps)*ones(1,N). *transpose(ric_besselh(nu,2,k*radius)). *transpose(ric_besselj_derivative(nu,k_d*radius));
den = ( (np.sqrt(mu*eps_d)*np.ones((1, N))) * np.transpose(ric_besselj(nu, k_d*radius)) * np.transpose(ric_besselh_derivative(nu, 2, k*radius))
-(np.sqrt(mu_d*eps)*np.ones((1, N))) * np.transpose(ric_besselh(nu, 2, k*radius)) * np.transpose(ric_besselj_derivative(nu, k_d*radius)))
#b_n = num./den.*a_n;
b_n = num/den*a_n
#num = + sqrt(mu_d.*eps)*ones(1,N). *transpose(ric_besselj(nu,k_d*radius)). *transpose(ric_besselj_derivative(nu,k*radius))...
# - sqrt(mu.*eps_d)*ones(1,N). *transpose(ric_besselj(nu,k*radius)) . *transpose(ric_besselj_derivative(nu,k_d*radius));
num = ( (np.sqrt(mu_d*eps)*np.ones((1, N))) * np.transpose(ric_besselj(nu, k_d*radius)) * np.transpose(ric_besselj_derivative(nu, k*radius))
-(np.sqrt(mu*eps_d)*np.ones((1, N))) * np.transpose(ric_besselj(nu, k*radius)) * np.transpose(ric_besselj_derivative(nu, k_d*radius)) )
#den = + sqrt(mu.*eps_d)*ones(1,N). *transpose(ric_besselh(nu,2,k*radius)). *transpose(ric_besselj_derivative(nu,k_d*radius))...
# - sqrt(mu_d.*eps)*ones(1,N). *transpose(ric_besselj(nu,k_d*radius)). *transpose(ric_besselh_derivative(nu,2,k*radius));
den = ( (np.sqrt(mu*eps_d)*np.ones((1, N))) * np.transpose(ric_besselh(nu, 2, k*radius)) * np.transpose(ric_besselj_derivative(nu, k_d*radius))
-(np.sqrt(mu_d*eps)*np.ones((1, N))) * np.transpose(ric_besselj(nu, k_d*radius)) * np.transpose(ric_besselh_derivative(nu, 2, k*radius)) )
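# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the Riccati-Bessel helpers
# ric_besselj / ric_besselh and their derivatives are used above but never
# defined in this excerpt. A minimal scipy-based implementation could look
# like the following; the real helpers may differ in argument order and in
# how they broadcast over frequency, so treat this only as a reference.
import numpy as np
from scipy import special

def ric_besselj_sketch(nu, x):
    # Riccati-Bessel function of the first kind: S_n(x) = x * j_n(x),
    # evaluated for each order in nu.
    return np.array([x * special.spherical_jn(int(n), x) for n in np.atleast_1d(nu)])

def ric_besselj_derivative_sketch(nu, x):
    # d/dx [x * j_n(x)] = j_n(x) + x * j_n'(x)
    return np.array([special.spherical_jn(int(n), x)
                     + x * special.spherical_jn(int(n), x, derivative=True)
                     for n in np.atleast_1d(nu)])

def ric_besselh_sketch(nu, kind, x):
    # Riccati-Bessel function built on the spherical Hankel function of the
    # second kind: xi_n(x) = x * h_n^(2)(x), with h_n^(2) = j_n - 1j * y_n.
    assert kind == 2, "only the second kind is used above"
    return np.array([x * (special.spherical_jn(int(n), x)
                          - 1j * special.spherical_yn(int(n), x))
                     for n in np.atleast_1d(nu)])
# ---------------------------------------------------------------------------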
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, func, *args, **kwargs):
# Pass the mutating callable (plus its arguments); it must raise
# TypeError with the immutability message.
with pytest.raises(TypeError, match=self.mutable_regex.pattern):
func(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
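# Hedged usage sketch (not in the original module): these mixins are meant to
# be combined with a concrete fixture class elsewhere, for example:
#
#   from pandas.core.indexes.frozen import FrozenList
#
#   class TestFrozenList(CheckImmutable, CheckStringMixin):
#       klass = FrozenList
#       mutable_methods = ("extend", "pop", "remove", "insert")
#
#       def setup_method(self, _):
#           self.lst = [1, 2, 3, 4, 5]
#           self.container = FrozenList(self.lst)
#
# pytest then drives test_no_mutable_funcs / test_slicing_maintains_type
# against self.container.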
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except AttributeError:
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because it's not
# defined; we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
"""
this file contains the ugly code hacked together by overworked PhD students
to hide boring visualisation boilerplate code from users and keep the course interesting
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
def horizontal_pane(I_list, cmap=None):
Z, axarr = plt.subplots(1, len(I_list))
i = 0
if isinstance(I_list, dict):
for name, value in I_list.items():
axarr[i].set_title(name)
if isinstance(value, dict):
I = value.get('image', None)
cmap = value.get('cmap', None)
if cmap:
axarr[i].imshow(I, plt.get_cmap(cmap))
else:
axarr[i].imshow(I, plt.get_cmap('gray'))
else:
axarr[i].imshow(value, plt.get_cmap(cmap))
i += 1
else:
for I in I_list:
if cmap is None:
cmap = 'gray'
axarr[i].imshow(I, plt.get_cmap(cmap))
i += 1
plt.show()
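# Hedged usage sketch: horizontal_pane accepts either a list of images or a
# dict mapping pane titles to images (optionally {'image': ..., 'cmap': ...}):
#
#   I = mpimg.imread('photo.png')   # path is illustrative only
#   horizontal_pane([I, I[..., 0]])
#   horizontal_pane({'original': I,
#                    'red channel': {'image': I[..., 0], 'cmap': 'viridis'}})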
def overlay_RGB(I_list):
Z, axarr = plt.subplots(1, len(I_list)-1)
for i in range(len(I_list)-1):
overlay = numpy.zeros((I_list[i].shape) + (3,), dtype=float)
overlay[:, :, 0] = I_list[i].astype(float)
overlay[:, :, 1] = I_list[i+1].astype(float)
overlay[:, :, 2] = I_list[i].astype(
float)*0.5 + I_list[i+1].astype(float)*0.5
if (len(I_list)-1) == 1:
axarr.imshow(overlay/numpy.max(overlay))
from __future__ import print_function, division
import collections
from copy import deepcopy
import numpy as np
from astropy import log as logger
from ..densities import FlaredDisk, AlphaDisk, PowerLawEnvelope, UlrichEnvelope, AmbientMedium
from ..util.interpolate import interp1d_fast_loglog
from ..util.constants import pi, sigma, c, G
from ..sources import SphericalSource, SpotSource
from ..util.functions import FreezableClass, virtual_file
from ..grid import SphericalPolarGrid, CylindricalPolarGrid
from . import Model
def _min_none(*args):
"Minimum of several arguments, ignoring None values"
return min(x for x in args if x is not None)
def _max_none(*args):
"Maximum of several arguments, ignoring None values"
return max(x for x in args if x is not None)
class Star(FreezableClass):
def __init__(self):
self.sources = {}
self.sources['star'] = SphericalSource(name='star')
self.mass = None
self.radius = None
self.limb = False
self._freeze()
@classmethod
def read(self, filename):
raise Exception("Can only call ``read`` for Model, not AnalyticalYSOModel")
def add_spot(self, *args, **kwargs):
self.sources['star'].add_spot(SpotSource(*args, **kwargs))
def __setattr__(self, attribute, value):
if self.isfinal():
raise Exception("Attribute %s can no longer be changed" % attribute)
if attribute in ['luminosity', 'temperature', 'spectrum']:
self.sources['star'].__setattr__(attribute, value)
return
elif attribute in ['radius', 'limb']:
for source in self.sources:
self.sources[source].__setattr__(attribute, value)
FreezableClass.__setattr__(self, attribute, value)
def __getattr__(self, attribute):
if attribute in ['luminosity', 'temperature', 'spectrum', 'radius', 'limb']:
return getattr(self.sources['star'], attribute)
else:
return FreezableClass.__getattr__(self, attribute)
def total_luminosity(self):
"Return the total luminosity of the star, including accretion"
ltot = 0.
for source in self.sources:
if self.sources[source].luminosity is not None:
ltot += self.sources[source].luminosity
return ltot
def effective_temperature(self):
"Return the effective temperature of the star, including accretion"
return (self.total_luminosity() / (4. * pi * self.radius ** 2. * sigma)) ** 0.25
def total_spectrum(self, bnu_range=None):
'''
Return the total spectrum of the star, including accretion
Parameters
----------
bnu_range : tuple
Range of frequencies to cover for sources that have Planck spectra
'''
# Retrieve all the spectra for the sources of emission
nu_all, fnu_all = [], []
for source in self.sources:
if self.sources[source].temperature is not None:
if bnu_range is None:
raise ValueError("bnu_range is needed for sources with Planck spectra")
nu, fnu = self.sources[source].get_spectrum(nu_range=bnu_range)
else:
nu, fnu = self.sources[source].get_spectrum()
nu_all.append(nu)
fnu_all.append(fnu)
# Find common minimum and maximum for all spectra
nu_min = np.min([nu.min() for nu in nu_all])
nu_max = np.max([nu.max() for nu in nu_all])
# Find common frequencies
nu_common = np.unique(np.sort(np.hstack(nu_all)))
nu_common = nu_common[(nu_common >= nu_min) & (nu_common <= nu_max)]
# Compute total spectrum
fnu_total = np.zeros(nu_common.shape)
for i in range(len(self.sources)):
fnu_total += interp1d_fast_loglog(nu_all[i], fnu_all[i], nu_common, bounds_error=False, fill_value=0.)
return nu_common, fnu_total
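# Hedged usage sketch (values are illustrative): once radius plus a
# luminosity/temperature pair or a spectrum is set, the combined SED of all
# stellar sources follows from total_spectrum(); a bnu_range is only needed
# when some source has a Planck spectrum.
#
#   from hyperion.util.constants import rsun, lsun
#   s = Star()
#   s.radius = 2. * rsun
#   s.luminosity = lsun
#   s.temperature = 4000.
#   nu, fnu = s.total_spectrum(bnu_range=[1.e8, 1.e16])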
class AnalyticalYSOModel(Model):
def __init__(self, name=None):
"Initialize an analytical YSO model"
self.star = Star()
self.disks = []
self.envelopes = []
self.ambients = []
Model.__init__(self, name=name)
def add_density_grid(self, *args, **kwargs):
raise NotImplementedError("add_density_grid cannot be used for AnalyticalYSOModel")
def use_quantities(self, *args, **kwargs):
raise NotImplementedError("use_quantities cannot be used for AnalyticalYSOModel")
def use_geometry(self, *args, **kwargs):
raise NotImplementedError("use_geometry cannot be used for AnalyticalYSOModel")
# DENSITY COMPONENTS
def add_ambient_medium(self, subtract=[]):
'''
Add an ambient density medium to the model
Parameters
----------
subtract : list
Components to subtract from the ambient density medium (see
notes below).
Returns
-------
ambient : :class:`~hyperion.densities.AmbientMedium`
An :class:`~hyperion.densities.AmbientMedium` instance.
Examples
--------
To add an ambient medium to the model, you can do::
>>> ambient = m.add_ambient_medium()
then set the ambient medium properties using e.g.::
>>> from hyperion.util.constants import au, pc
>>> ambient.rho = 1.e-20 # cgs
>>> ambient.rmin = 0.1 * au # cm
>>> ambient.rmax = pc # cm
See the :class:`~hyperion.densities.AmbientMedium` documentation
to see which parameters can be set.
Notes
-----
By default, the ambient medium simply adds a constant density ``rho`` of
dust to the whole model between the inner and outer radius. However, it
is possible to pass components that should be subtracted from the
constant density using the ``subtract=`` argument. In the following
example::
>>> e = m.add_power_law_envelope()
>>> m.add_ambient_medium(subtract=[e])
the ambient medium does not simply add a constant density ``rho`` of
dust everywhere, but it adds dust such that the density never falls
below ``rho`` between ``rmin`` and ``rmax`` - that is, it subtracts the
density of component ``e`` from the ``rho``, with a minimum of zero. In
regions where the density of component of ``e`` is larger than ``rho``,
no dust is added.
'''
ambient = AmbientMedium()
ambient.star = self.star
ambient.subtract = subtract
self.ambients.append(ambient)
return ambient
def add_flared_disk(self):
'''
Add a flared disk to the model
Returns
-------
disk : :class:`~hyperion.densities.FlaredDisk`
A :class:`~hyperion.densities.FlaredDisk` instance.
Examples
--------
To add a flared disk to the model, you can do::
>>> disk = m.add_flared_disk()
then set the disk properties using e.g.::
>>> disk.mass = 1.e30 # g
>>> disk.rmin = 1e10 # cm
>>> disk.rmax = 1e14 # cm
See the :class:`~hyperion.densities.FlaredDisk` documentation
to see which parameters can be set.
'''
disk = FlaredDisk()
disk.star = self.star
self.disks.append(disk)
return disk
def add_alpha_disk(self):
'''
Add an alpha disk to the geometry
This is similar to a flared disk, but with accretion luminosity. See
:class:`~hyperion.densities.AlphaDisk` for more details.
Returns
-------
disk : :class:`~hyperion.densities.AlphaDisk`
A :class:`~hyperion.densities.AlphaDisk` instance.
Examples
--------
To add an alpha disk to the model, you can do::
>>> disk = m.add_alpha_disk()
then set the disk properties using e.g.::
>>> disk.mass = 1.e30 # g
>>> disk.rmin = 1e10 # cm
>>> disk.rmax = 1e14 # cm
See the :class:`~hyperion.densities.AlphaDisk` documentation
to see which parameters can be set.
'''
disk = AlphaDisk()
disk.star = self.star
self.disks.append(disk)
return disk
def add_settled_disks(self, reference_disk, reference_size, eta=0.,
sizes=[], dust_files=[]):
'''
Automatically create disks with varying degrees of settling
.. warning:: this function is still experimental, and will be documented once stable
'''
exists = False
for disk in self.disks:
if disk is reference_disk:
logger.warn("Reference disk already exists, not re-adding")
exists = True
if not exists:
logger.warn("Reference disk does not exist, adding")
self.disks.append(reference_disk)
for i, size in enumerate(sizes):
disk = deepcopy(reference_disk)
disk.star = self.star
disk.h_0 *= (size / reference_size) ** (-eta)
disk.dust = dust_files[i]
self.disks.append(disk)
def add_ulrich_envelope(self):
'''
Add an infalling rotationally flatted envelope to the model
Returns
-------
env : :class:`~hyperion.densities.UlrichEnvelope`
An :class:`~hyperion.densities.UlrichEnvelope` instance.
Examples
--------
To add an infalling envelope to the model, you can do::
>>> env = m.add_ulrich_envelope()
then set the envelope properties using e.g.::
>>> from hyperion.util.constants import msun, yr, au
>>> env.mdot = 1.e-6 * msun / yr # g/s
>>> env.rmin = 0.1 * au # cm
>>> env.rmax = 10000. * au # cm
See the :class:`~hyperion.densities.UlrichEnvelope` documentation
to see which parameters can be set.
'''
envelope = UlrichEnvelope()
envelope.star = self.star
self.envelopes.append(envelope)
return envelope
def add_power_law_envelope(self):
'''
Add a spherically symmetric power-law envelope to the model
Returns
-------
env : :class:`~hyperion.densities.PowerLawEnvelope`
A :class:`~hyperion.densities.PowerLawEnvelope` instance.
Examples
--------
To add a power-law envelope to the model, you can do::
>>> env = m.add_power_law_envelope()
then set the envelope properties using e.g.::
>>> from hyperion.util.constants import msun, au
>>> env.mass = 0.1 * msun # g/s
>>> env.rmin = 0.1 * au # cm
>>> env.rmax = 10000. * au # cm
See the :class:`~hyperion.densities.PowerLawEnvelope` documentation
to see which parameters can be set.
'''
envelope = PowerLawEnvelope()
envelope.star = self.star
self.envelopes.append(envelope)
return envelope
def _check_all_set(self):
for disk in self.disks:
disk._check_all_set()
for envelope in self.envelopes:
envelope._check_all_set()
for ambient in self.ambients:
ambient._check_all_set()
# MIDPLANE OPTICAL DEPTH
def print_midplane_tau(self, wavelength):
for i, disk in enumerate(self.disks):
if disk.mass > 0.:
tau = (disk.midplane_cumulative_density(np.array([disk.rmax]))
* disk.dust.interp_chi_wav(wavelength))
print("Disk %i: %.5e" % (i + 1, tau))
def get_midplane_tau(self, r):
self._check_all_set()
# Find the total combined optical depth through the midplane
tau_midplane = np.zeros(r.shape)
# IMPLEMENT: PAHs
for i, disk in enumerate(self.disks):
if disk.mass > 0.:
if disk.dust is None:
raise Exception("Disk %i dust not set" % i)
nu_min = disk.dust.optical_properties.nu[0]
nu_max = disk.dust.optical_properties.nu[-1]
nu, fnu = self.star.total_spectrum(bnu_range=[nu_min, nu_max])
if np.any(fnu > 0.):
tau_midplane += (disk.midplane_cumulative_density(r)
* disk.dust.optical_properties.chi_planck_spectrum(nu, fnu))
for i, envelope in enumerate(self.envelopes):
if envelope.exists():
if envelope.dust is None:
raise Exception("envelope %i dust not set" % i)
nu_min = envelope.dust.optical_properties.nu[0]
nu_max = envelope.dust.optical_properties.nu[-1]
nu, fnu = self.star.total_spectrum(bnu_range=[nu_min, nu_max])
if np.any(fnu > 0.):
tau_midplane += (envelope.midplane_cumulative_density(r)
* envelope.dust.optical_properties.chi_planck_spectrum(nu, fnu))
return tau_midplane
def plot_midplane_tau(self, filename):
import matplotlib.pyplot as plt
tau_midplane = self.get_midplane_tau(self.grid.r_wall[1:])
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.loglog(self.grid.r[1:] / self.grid.r[1] - 1.,
tau_midplane[1:] - tau_midplane[:-1],
drawstyle='steps-mid')
fig.savefig(filename)
# COORDINATE GRID
def radial_range(self):
if (len(self.disks) == 0 and
len(self.envelopes) == 0 and
len(self.ambients) == 0):
rmin = self.star.radius
else:
rmin_values = ([disk.rmin for disk in self.disks] +
[envelope.rmin for envelope in self.envelopes] +
[ambient.rmin for ambient in self.ambients])
rmin = _min_none(*rmin_values)
rmax_values = [self.star.radius]
rmax_values += ([disk.rmax for disk in self.disks] +
[envelope.rmax for envelope in self.envelopes] +
[ambient.rmax for ambient in self.ambients])
rmax = _max_none(*rmax_values)
return rmin, rmax
def set_cylindrical_polar_grid_auto(self, n_w, n_z, n_phi,
wmax=None, zmax=None, min_spacing=1.e-8):
'''
Set the grid to be cylindrical polar with automated resolution.
Parameters
----------
n_w, n_z, n_phi : int
Number of cells to use in the radial, vertical, and azimuthal
directions.
wmax : float, optional
The maximum radius to extend out to. If not specified, this is
set to the maximum cylindrical radius of the dust geometry in the
mid-plane.
zmax : float, optional
The maximum height above and below the midplane to extend to. If
not specified, this is set to the maximum cylindrical radius of
the dust geometry.
min_spacing : float, optional
The minimum spacing (in relative terms) for the inner cell walls.
The spacing from rmin to the next cell wall cannot be smaller than
rmin * (1 + min_spacing).
'''
self.grid = {'grid_type': 'cylindrical',
'n1': n_w, 'n2': n_z, 'n3': n_phi,
'rmax': wmax, 'zmax': zmax, 'min_spacing':min_spacing}
def set_spherical_polar_grid_auto(self, n_r, n_theta, n_phi,
rmax=None, min_spacing=1.e-8):
'''
Set the grid to be spherical polar with automated resolution.
Parameters
----------
n_r, n_theta, n_phi : int
Number of cells to use in the radial, theta, and azimuthal
directions.
rmax : float, optional
The maximum radius to extend out to. If not specified, this is
set to the maximum spherical radius of the dust geometry in the
mid-plane. Note that if you are including a disk with a
cylindrical outer edge, this should be set to a value larger
than the disk radius, otherwise the disk will be truncated with
a spherical edge.
min_spacing : float, optional
The minimum spacing (in relative terms) for the inner cell walls.
The spacing from rmin to the next cell wall cannot be smaller than
rmin * (1 + min_spacing).
'''
self.grid = {'grid_type': 'spherical',
'n1': n_r, 'n2': n_theta, 'n3': n_phi,
'rmax': rmax, 'min_spacing':min_spacing}
def _set_polar_grid_auto(self, n1=None, n2=None, n3=None, grid_type=None,
zmax=None, rmax=None, min_spacing=1.e-8):
if self.star.radius is None:
raise Exception("The central source radius need to be defined "
"before the grid can be set up")
if grid_type == 'spherical':
n_r, n_theta, n_phi = n1, n2, n3
elif grid_type == 'cylindrical':
n_r, n_z, n_phi = n1, n2, n3
else:
raise Exception("Unknown grid type: %s" % grid_type)
# Find minimum and maximum radius
if len(self.disks) == 0 and len(self.envelopes) == 0:
rmin = self.star.radius
else:
rmin_values = ([disk.rmin for disk in self.disks] +
[envelope.rmin for envelope in self.envelopes] +
[ambient.rmin for ambient in self.ambients])
rmin = _min_none(*rmin_values)
if not rmax:
rmax_values = [2. * self.star.radius]
rmax_values += ([disk.rmax for disk in self.disks] +
[envelope.rmax for envelope in self.envelopes] +
[ambient.rmax for ambient in self.ambients])
rmax = _max_none(*rmax_values)
if rmax < rmin:
logger.warn("Grid rmax < rmin, model will consist only of central star")
rmin = self.star.radius
rmax = 2. * self.star.radius
if np.isnan(rmin):
raise Exception("R_min is NaN")
if np.isnan(rmax):
raise Exception("R_max is NaN")
if rmin == 0:
raise ValueError("R_min is 0, so cannot set up the grid cell "
"walls automatically. Use set_%s_polar_grid()"
" instead to specify the cell wall positions"
"." % grid_type)
# RADIAL WALLS
# Set first wall to be at half the stellar radius to avoid any
# numerical problems. Set second wall to be the disk inner radius.
r_wall = [self.star.radius / 2., rmin]
# Define a radial grid to compute the midplane column density on
r = np.logspace(-20., np.log10((rmax - rmin) / rmin), 100000) * rmin + rmin
# We need the first point to be exactly at the inner radius
r[0] = rmin
# Get cumulative midplane optical depth
tau_midplane = self.get_midplane_tau(r)
# Find where the second wall after rmin would be if grid cells were
# spaced equally
r_next_real = rmin * ((rmax / rmin) ** (1. / n_r) - 1.)
# Find where the second wall would be if we put it at tau=0.1
if tau_midplane[-1] <= 0.1:
r_next_tau = rmax - rmin
else:
r_next_tau = np.interp(0.1, tau_midplane, r) - rmin
# Pick the smallest
rnext = min(r_next_real, r_next_tau)
# Make sure rnext isn't too small
if rmin * (1. + min_spacing) > rnext + rmin:
logger.warn("Spacing of inner radial cells is too small, resetting to {0}".format(min_spacing))
rnext = rmin * min_spacing
# Define wall positions
# NOTE: the tail of this statement is reconstructed by analogy with the
# radial sampling grid defined above; the source line was truncated.
r_wall = np.hstack([0., np.logspace(np.log10(rnext / rmin),
np.log10((rmax - rmin) / rmin),
n_r)]) * rmin + rmin
import numpy as np
import os
import re
from PIL import Image
from sklearn.model_selection import train_test_split
CURDIR = os.path.dirname(os.path.abspath(__file__))
def split_data(X,y,test_size=0.5,random_state=None):
"""
Split the data into training and test sets.
test_size is the fraction of samples held out for the test set.
random_state seeds the shuffle; fixing it makes the split reproducible.
"""
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=test_size,random_state=random_state)
X_all=np.concatenate([X_train,X_test],axis=0)
y_all=np.concatenate([y_train,y_test],axis=0)
X_train=np.array(X_train).astype(float)
y_train=np.array(y_train).astype(float)
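# Hedged usage sketch, assuming split_data goes on to return the four split
# arrays (the rest of the function is truncated in this excerpt):
#
#   X = np.random.rand(100, 8)
#   y = np.random.randint(0, 2, size=100)
#   X_train, X_test, y_train, y_test = split_data(X, y, test_size=0.5,
#                                                 random_state=0)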
# This file is modified from https://github.com/traveller59/second.pytorch
import numpy as np
import numba
from ...utils import common_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
import torch
import warnings
try:
from numba.errors import NumbaPerformanceWarning
warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
except:
pass
@numba.njit
def corner_to_standup_nd_jit(boxes_corner):
num_boxes = boxes_corner.shape[0]
ndim = boxes_corner.shape[-1]
result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)
for i in range(num_boxes):
for j in range(ndim):
result[i, j] = np.min(boxes_corner[i, :, j])
for j in range(ndim):
result[i, j + ndim] = np.max(boxes_corner[i, :, j])
return result
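# Hedged example: for an axis-aligned unit square given by its 4 corners,
# corner_to_standup_nd_jit returns the standup box [xmin, ymin, xmax, ymax]:
#
#   boxes = np.array([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])
#   corner_to_standup_nd_jit(boxes)   # -> array([[0., 0., 1., 1.]])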
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
N = boxes.shape[0]
K = qboxes.shape[0]
ret = np.zeros((N, K), dtype=np.bool_)
slices = np.array([1, 2, 3, 0])
lines_boxes = np.stack(
(boxes, boxes[:, slices, :]), axis=2) # [N, 4, 2(line), 2(xy)]
lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2)
# vec = np.zeros((2,), dtype=boxes.dtype)
boxes_standup = corner_to_standup_nd_jit(boxes)
qboxes_standup = corner_to_standup_nd_jit(qboxes)
for i in range(N):
for j in range(K):
# calculate standup first
iw = (min(boxes_standup[i, 2], qboxes_standup[j, 2]) - max(
boxes_standup[i, 0], qboxes_standup[j, 0]))
if iw > 0:
ih = (min(boxes_standup[i, 3], qboxes_standup[j, 3]) - max(
boxes_standup[i, 1], qboxes_standup[j, 1]))
if ih > 0:
for k in range(4):
for l in range(4):
A = lines_boxes[i, k, 0]
B = lines_boxes[i, k, 1]
C = lines_qboxes[j, l, 0]
D = lines_qboxes[j, l, 1]
acd = (D[1] - A[1]) * (C[0] - A[0]) > (
C[1] - A[1]) * (D[0] - A[0])
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (
C[1] - B[1]) * (D[0] - B[0])
if acd != bcd:
abc = (C[1] - A[1]) * (B[0] - A[0]) > (
B[1] - A[1]) * (C[0] - A[0])
abd = (D[1] - A[1]) * (B[0] - A[0]) > (
B[1] - A[1]) * (D[0] - A[0])
if abc != abd:
ret[i, j] = True # collision.
break
if ret[i, j]:
break
if not ret[i, j]:
# now check complete overlap.
# box overlap qbox:
box_overlap_qbox = True
for l in range(4): # point l in qboxes
for k in range(4): # corner k in boxes
vec = boxes[i, k] - boxes[i, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (
boxes[i, k, 0] - qboxes[j, l, 0])
cross -= vec[0] * (
boxes[i, k, 1] - qboxes[j, l, 1])
if cross >= 0:
box_overlap_qbox = False
break
if not box_overlap_qbox:
break
if not box_overlap_qbox:
qbox_overlap_box = True
for l in range(4): # point l in boxes
for k in range(4): # corner k in qboxes
vec = qboxes[j, k] - qboxes[j, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (
qboxes[j, k, 0] - boxes[i, l, 0])
cross -= vec[0] * (
qboxes[j, k, 1] - boxes[i, l, 1])
if cross >= 0: #
qbox_overlap_box = False
break
if not qbox_overlap_box:
break
if qbox_overlap_box:
ret[i, j] = True # collision.
else:
ret[i, j] = True # collision.
return ret
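# Hedged usage sketch: both arguments are (N, 4, 2) corner arrays, e.g. built
# with box2d_to_corner_jit (defined below) from [x, y, w, l, angle] boxes:
#
#   corners_a = box2d_to_corner_jit(np.array([[0., 0., 2., 2., 0.]]))
#   corners_b = box2d_to_corner_jit(np.array([[1., 1., 2., 2., 0.3]]))
#   box_collision_test(corners_a, corners_b)   # -> array([[ True]])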
@numba.njit
def _rotation_box2d_jit_(corners, angle, rot_mat_T):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
corners[:] = corners @ rot_mat_T
@numba.jit(nopython=True)
def box2d_to_corner_jit(boxes):
num_box = boxes.shape[0]
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(
1, 4, 2)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
for i in range(num_box):
rot_sin = np.sin(boxes[i, -1])
rot_cos = np.cos(boxes[i, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
return box_corners
@numba.njit
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
"""
Created on April, 2018
@author: <NAME>
visualize code for SUNRGBD
"""
import matplotlib.pyplot as plt
import numpy as np
from random import random as rand
from mpl_toolkits.mplot3d import Axes3D
def show_2dboxes(im, bdbs, color_list=[], random_color=True, scale=1.0):
"""
Visualize the bounding boxes with the image
Parameters
----------
im : numpy array (W, H, 3)
bdbs : list of dicts
Keys: {'x1', 'y1', 'x2', 'y2', 'classname'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
color_list: list of colors
"""
plt.cla()
plt.axis('off')
plt.imshow(im)
for i, bdb in enumerate(bdbs):
if bdb is None:
continue
bbox = np.array([bdb['x1'], bdb['y1'], bdb['x2'], bdb['y2']]) * scale
if random_color is False:
color = color_list[i]
else:
color = (rand(), rand(), rand())
rect = plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor=color, linewidth=2.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1], '{:s}'.format(bdb['classname']), bbox=dict(facecolor=color, alpha=0.5), fontsize=9, color='white')
plt.show()
return im
# def show_2dboxes_8pts(im, points, bdbs)
#
def plot_world_point(ax, p1, p2, color='r-'):
ax.plot([p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]], color)
def plot_cuboid(ax, p1, p2, p3, p4, p5, p6, p7, p8, color='r-'):
plot_world_point(ax, p1, p2, color)
plot_world_point(ax, p2, p3, color)
plot_world_point(ax, p3, p4, color)
plot_world_point(ax, p4, p1, color)
plot_world_point(ax, p5, p6, color)
plot_world_point(ax, p6, p7, color)
plot_world_point(ax, p7, p8, color)
plot_world_point(ax, p8, p5, color)
plot_world_point(ax, p1, p5, color)
plot_world_point(ax, p2, p6, color)
plot_world_point(ax, p3, p7, color)
plot_world_point(ax, p4, p8, color)
return p1, p2, p3, p4, p5, p6, p7, p8
def show_3d_box(boxes):
"""
:param box: 8 x 3 numpy array
"""
fig = plt.figure()
ax = Axes3D(fig)
for box in boxes:
plot_cuboid(ax, box[0], box[1], box[2], box[3], box[4], box[5], box[6], box[7], 'r-')
plt.show()
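# Hedged usage sketch: show_3d_box expects an iterable of 8x3 corner arrays,
# ordered as bottom ring (p1..p4) followed by top ring (p5..p8):
#
#   box = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
#                   [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]], dtype=float)
#   show_3d_box([box])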
def hex_to_rgb(hex):
hex = hex.lstrip('#')
hlen = len(hex)
return tuple(float(int(hex[i:i + hlen // 3], 16)) / 255 for i in range(0, hlen, hlen // 3))
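# Hedged example: hex_to_rgb('#ff8000') -> (1.0, ~0.502, 0.0)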
def object_color(obj_id, if_rgb, if_random):
obj_color = ["#1f77b4", "#aec7e8", "#ff7f0e", "#ffbb78", "#2ca02c", "#98df8a", "#d62728", "#ff9896", "#9467bd", "#c5b0d5",
"#8c564b", "#c49c94", "#e377c2", "#f7b6d2", "#7f7f7f", "#c7c7c7", "#bcbd22", "#dbdb8d", "#17becf", "#9edae5",
"#8dd3c7", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5", "#d9d9d9", "#bc80bd", "#ccebc5",
"#ffed6f", "#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#a65628", "#f781bf", "#999999", "#621e15",
"#e59076", "#128dcd", "#083c52", "#64c5f2", "#61afaf", "#0f7369", "#9c9da1", "#365e96", "#983334", "#77973d",
"#5d437c", "#36869f", "#d1702f", "#8197c5", "#c47f80", "#acc484", "#9887b0", "#2d588a", "#58954c", "#e9a044",
"#c12f32", "#723e77", "#7d807f", "#9c9ede", "#7375b5", "#4a5584", "#cedb9c", "#b5cf6b", "#8ca252", "#637939",
"#e7cb94", "#e7ba52", "#bd9e39", "#8c6d31", "#e7969c", "#d6616b", "#ad494a", "#843c39", "#de9ed6", "#ce6dbd",
"#a55194", "#7b4173", "#000000", "#0000FF"]
length = len(obj_color)
if if_random:
obj_id = np.random.randint(length)
from typing import Callable
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given
from numpy.testing import assert_allclose, assert_array_equal
import mygrad as mg
from mygrad import Tensor
from tests.custom_strategies import tensors
# Make sure we actually test the correctness of the
# in-place syntaxes, e.g. `x += y`, and not just
# `x.__iadd__(y)`
#
# Also, make sure that augmented updates on tensors
# match behavior of numpy
def test_iadd_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an += 2.0
bt = at
vt = at[...]
at += 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_isub_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an -= 2.0
bt = at
vt = at[...]
at -= 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_imul_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an *= 2.0
bt = at
vt = at[...]
at *= 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_idiv_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an /= 2.0
bt = at
vt = at[...]
at /= 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_ipow_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an **= 2.1
bt = at
vt = at[...]
at **= 2.1
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_isqr_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an **= 2
bt = at
vt = at[...]
at **= 2
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_ipow_1_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an **= 1
bt = at
vt = at[...]
at **= 1
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
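# Hedged refactor sketch (not in the original): the mirrors-numpy tests above
# all follow one pattern and could be collapsed into a single parametrized
# test, e.g.:
#
#   @pytest.mark.parametrize("op", [lambda t: t.__iadd__(2.0),
#                                   lambda t: t.__isub__(2.0),
#                                   lambda t: t.__imul__(2.0)])
#   def test_inplace_mirrors_numpy(op): ...
#
# They are kept expanded here so each augmented-assignment *syntax* form
# (x += y, x -= y, ...) is exercised literally, which lambdas cannot do.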
@pytest.mark.parametrize("inplace_on_view", [True, False])
def test_raising_during_in_place_op_doesnt_corrupt_graph(inplace_on_view: bool):
x = mg.arange(1.0, 5.0)
y_base = 2 * x
y = y_base[...] if inplace_on_view else y_base
w = y[...]
with pytest.raises(ValueError):
y[:2] = y # shape mismatch
(2 * w).backward()
assert (y.base is y_base) if inplace_on_view else (y.base is None)
assert w.base is y_base
assert np.shares_memory(w, y)
assert_allclose(w.grad, 2 * np.ones_like(y))
assert_allclose(y_base.grad, 2 * np.ones_like(y_base))
assert_allclose(y.grad, 2 * np.ones_like(y))
assert_allclose(x.grad, 4 * np.ones_like(y))
@pytest.mark.parametrize("x_constant", [False, True])
@pytest.mark.parametrize("y_constant", [False, True])
@pytest.mark.parametrize("z_constant", [False, True])
def test_inplace_update_constant_dictated_by_target(
x_constant: bool, y_constant: bool, z_constant: bool
):
x = mg.tensor([1.0], constant=x_constant)
y = mg.tensor([1.0], constant=y_constant)
z = mg.tensor([1.0], constant=z_constant)
assert np.multiply(x, y, out=z).constant is z_constant
@pytest.mark.parametrize("inplace_on_view", [False, True])
@pytest.mark.parametrize("x_constant", [False, True])
@pytest.mark.parametrize("y_constant", [False, True])
# distinct name so pytest collects this test instead of shadowing the one above
def test_inplace_update_constant_with_views(
inplace_on_view: bool, x_constant: bool, y_constant: bool
):
x = mg.arange(1.0, 5.0, constant=x_constant)
y = mg.zeros_like(x, constant=y_constant)
if inplace_on_view:
x = x[...]
dangling_view = x[:2]
assert x.constant is x_constant
assert dangling_view.constant is x_constant
x[...] = y
assert x.constant is x_constant
assert dangling_view.constant is x.constant
@pytest.mark.parametrize("inplace_on_view", [True, False])
@pytest.mark.parametrize("constant", [True, False])
def test_in_place_op_propagates_to_views(constant: bool, inplace_on_view: bool):
x = mg.arange(1.0, 5.0, constant=constant)
y_base = +x
y = y_base[...] if inplace_on_view else y_base
view1 = y[...]
view2 = view1[...] # view of view
y[:2] = -1 # should mutate all views
assert y_base.base is None
if inplace_on_view:
assert y.base is y_base
assert view1.base is y_base
assert view2.base is y_base
assert_array_equal(x, mg.arange(1.0, 5.0))
assert_array_equal(y_base, [-1.0, -1.0, 3.0, 4.0])
assert_array_equal(y_base, y)
assert_array_equal(y_base, view1)
assert_array_equal(y_base, view2)
@given(tensors(shape=(4,), constant=False))
@pytest.mark.parametrize("inplace_on_view", [True, False])
def test_simple_backprop_from_view_post_upstream_mutation(
inplace_on_view: bool, x: Tensor
):
y_base = +x
y = y_base[...] if inplace_on_view else y_base
z = y[...]
y[:2] = 0 # base is mutated
# downstream view should carry appropriate info
# for backprop post-mutation
w = mg.ones_like(z)
(w * z).backward()
assert_array_equal(y, y_base)
assert_array_equal(z, y_base)
assert_array_equal(w.grad, [0.0, 0.0, *y_base.data[-2:]])
assert_array_equal(z.grad, np.ones_like(y_base))
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from sklearn import preprocessing
from . import utils
from . import timeseries
from . import pos
from . import txn
from .utils import APPROX_BDAYS_PER_MONTH
from functools import wraps
def plotting_context(func):
"""Decorator to set plotting context during function call."""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop('set_context', True)
if set_context:
with context():
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
def context(context='notebook', font_scale=1.5, rc=None):
"""Create pyfolio default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by factor font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
is being used and will be added to any
rc passed in, unless explicitly overridden.
Returns
-------
seaborn plotting context
Example
-------
>>> with pyfolio.plotting.context(font_scale=2):
>>> pyfolio.create_full_tear_sheet()
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale,
rc=rc)
def plot_rolling_fama_french(
returns,
factor_returns=None,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best',
ax=None, **kwargs):
"""Plots rolling Fama-French single factor betas.
Specifically, plots SMB, HML, and UMD vs. date with a legend.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
rolling_window : int, optional
The days window over which to compute the beta.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
num_months_str = '%.0f' % (rolling_window / APPROX_BDAYS_PER_MONTH)
ax.set_title(
"Rolling Fama-French Single Factor Betas (" +
num_months_str +
'-month)')
ax.set_ylabel('beta')
rolling_beta = timeseries.rolling_fama_french(
returns,
factor_returns=factor_returns,
rolling_window=rolling_window)
rolling_beta.plot(alpha=0.7, ax=ax, **kwargs)
ax.axhline(0.0, color='black')
ax.legend(['Small-Caps (SMB)',
'High-Growth (HML)',
'Momentum (UMD)'],
loc=legend_loc)
ax.set_ylim((-2.0, 2.0))
ax.yaxis.set_major_formatter(FuncFormatter(utils.one_dec_places))
ax.axhline(0.0, color='black')
ax.set_xlabel('')
return ax
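# Hedged usage sketch (names are illustrative): `returns` is a daily,
# noncumulative pd.Series and the factor data can come from
# utils.load_portfolio_risk_factors():
#
#   fig, ax = plt.subplots()
#   plot_rolling_fama_french(returns, factor_returns=factors, ax=ax)
#   fig.savefig('rolling_ff.png')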
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):
"""
Plots a heatmap of returns by month.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
monthly_ret_table = timeseries.aggregate_returns(returns,
'monthly')
monthly_ret_table = monthly_ret_table.unstack()
monthly_ret_table = np.round(monthly_ret_table, 3)
import torch
from torch import nn, optim
from torch import Tensor
import numpy as np
import numpy
import scipy.misc
import matplotlib.pyplot
import scipy.io as sio
import nufft
dtype = numpy.complex64
def np_to_torch_(img_np):
'''Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
'''
return torch.from_numpy(img_np).cuda()
def torch_to_np_(img_var):
'''Converts an image in torch.Tensor format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
'''
return img_var.detach().cpu().numpy().astype(np.float32)
class Mypnufft_cardiac_func(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
NuFFT from https://github.com/jyhmiinlin/pynufft
"""
@staticmethod
def forward(ctx,input_r,angle,Nspoke,Nvec,Nc,C,w):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
N=np.shape(input_r)[-2]
if np.mod(N,2)==0:
x=[np.linspace(-N//2,N//2-1,N),
np.linspace(-N//2,N//2-1,N)
]
else:
x=[np.linspace(-N//2,N//2,N),
np.linspace(-N//2,N//2,N)
]
X = np.meshgrid(x[0], x[1], indexing='ij')
x1=X[0].reshape(-1)
x2=X[1].reshape(-1)
## ctx define
ctx.x1=x1
ctx.x2=x2
ctx.N=N
ctx.Nc=Nc
ctx.Nspoke=Nspoke
ctx.Nvec=Nvec
ctx.angle=angle
ctx.wr=w
ctx.C=C
###########
input=torch_to_np_(input_r)
input_c=input[...,0]+1j*input[...,1]
input_c=np.tile(input_c[np.newaxis],(Nc,1,1))
input_c*=C
y=np.zeros((Nc,Nvec*Nspoke),dtype=np.complex64)
for it in range(Nc):
y[it,:] = nufft.nufft2d3(-x1,-x2,input_c[it,:,:].reshape(-1),angle[:,0],angle[:,1],iflag=0)
# density correction
#y=y*self.wr
y = y[...,np.newaxis]
y_c = np.concatenate((np.real(y),np.imag(y)),axis=-1)
y_t = np_to_torch_(y_c.astype(np.float32))
return y_t
@staticmethod
def backward(ctx,grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
angle=ctx.angle
grad_output_n=torch_to_np_(grad_output)
grad_output=grad_output_n[...,0]+1j*grad_output_n[...,1]
yr=np.reshape(grad_output,(ctx.Nc,ctx.Nspoke,ctx.Nvec))
yc=yr*ctx.wr
out=np.zeros((ctx.Nc,ctx.N,ctx.N),dtype=np.complex64)
for it in range(ctx.Nc):
x_re = nufft.nufft2d3(angle[:,0],angle[:,1],yc[it,:,:].reshape(-1),ctx.x1,ctx.x2,iflag=1)
tmp = x_re.reshape(ctx.N,ctx.N)
out[it,:,:]=tmp
out=np.sum(out*np.conj(ctx.C),0)/sum( np.abs(ctx.C)**2,0 )
out = out[...,np.newaxis]
out_c = np.concatenate((np.real(out),np.imag(out)),axis=-1)
grad_output = np_to_torch_(out_c.astype(np.float32))
return grad_output, None, None, None, None, None, None
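# Minimal sketch of the torch.autograd.Function mechanics used above: a custom
# op is invoked through .apply (never by calling forward() directly), and
# backward() returns one gradient per forward() input. This toy square function
# is illustrative only and is not part of the NUFFT pipeline.
class _SquareFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        # stash the input for use in the backward pass
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        # d(x^2)/dx = 2x, chained with the incoming gradient
        return 2 * x * grad_output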
class Mypnufft_cardiac_test(nn.Module):
def __init__(self,ImageSize, angle, Nspoke,Nvec,Nc,C,w):
super(Mypnufft_cardiac_test,self).__init__()
N=ImageSize
if np.mod(N,2)==0:
x=[np.linspace(-N//2,N//2-1,N),
np.linspace(-N//2,N//2-1,N)
]
else:
x=[np.linspace(-N//2,N//2,N),
np.linspace(-N//2,N//2,N)
]
X = np.meshgrid(x[0], x[1], indexing='ij')
x1=X[0].reshape(-1)
x2=X[1].reshape(-1)
self.x1=x1
self.x2=x2
self.N=N
self.Nc=Nc
self.Nspoke=Nspoke
self.Nvec=Nvec
self.angle=angle
self.C=C
self.wr=w#np.sqrt(wr)
def forward(self,input):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
angle=self.angle
input_c=input[...,0]+1j*input[...,1]
# input_c*=self.C
y=np.zeros((self.Nc,self.Nvec*self.Nspoke),dtype=np.complex64)
for it in range(self.Nc):
y[it,:] = nufft.nufft2d3(-self.x1,-self.x2,input_c[it,:,:].reshape(-1),angle[:,0],angle[:,1],iflag=0)
# density correction
#y=y*self.wr
y = y[...,np.newaxis]
y_c = np.concatenate((np.real(y),np.imag(y)),axis=-1)
y_t = y_c.astype(np.float32)
return y_t
def backward(self,grad_output_n):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
angle=self.angle
grad_output=grad_output_n[...,0]+1j*grad_output_n[...,1]
yr=np.reshape(grad_output,(self.Nc,self.Nspoke,self.Nvec))
yc=yr*self.wr
out=np.zeros((self.Nc,self.N,self.N),dtype=np.complex64)
for it in range(self.Nc):
x_re = nufft.nufft2d3(angle[:,0],angle[:,1],yc[it,:,:].reshape(-1),self.x1,self.x2,iflag=1)
tmp = x_re.reshape(self.N,self.N)
out[it,:,:]=tmp
#print(out.shape)
#out*=(np.pi/2/self.Nspoke)
# coil combination
        out=np.sum(out*np.conj(self.C),0)/np.sum(np.abs(self.C)**2,0)
        # Completed to mirror Mypnufft_cardiac_func.backward above (the
        # original is truncated here).
        out = out[...,np.newaxis]
        out_c = np.concatenate((np.real(out),np.imag(out)),axis=-1)
        return out_c.astype(np.float32)
"""Functions for using Gaussian Processes."""
import logging
from typing import Callable, Tuple
import numpy as np
def zero_mean_initialise(x: np.ndarray, kernel_fun: Callable, noise=0.0) -> Tuple[np.ndarray, np.ndarray]:
"""Initialise a zero mean GP using the provided kernel function.
Parameters
----------
x: ndarray
List of x points
kernel_fun: function
        Kernel function, like those provided by the kernel_functions module.
    noise: float, optional
        Observation-noise variance added to the diagonal of the covariance.
Returns
-------
tuple of ndarray
The mean vector and the covariance matrix.
"""
logging.debug("x shape: {}".format(x.shape))
    mean_vector = np.zeros(x.shape[0])
    # Plausible completion (the original is truncated here): build the
    # covariance from the kernel and add the noise variance on the diagonal.
    covariance_matrix = kernel_fun(x, x) + noise * np.eye(x.shape[0])
    return mean_vector, covariance_matrix
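def _demo_zero_mean_gp():
    """Usage sketch with a hand-rolled RBF kernel (an assumption for
    illustration; the project supplies kernels via its kernel_functions
    module)."""
    def rbf(a, b, length_scale=0.2):
        # squared-exponential kernel on 1-D inputs
        d = a[:, None] - b[None, :]
        return np.exp(-0.5 * (d / length_scale) ** 2)

    x = np.linspace(0.0, 1.0, 10)
    return zero_mean_initialise(x, rbf, noise=1e-6)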
#!/usr/bin/python
##
# This module defines the ProMP class, which is the user-facing class for deploying Probabilistic Movement Primitives.
# The code for conditioning the ProMP is taken from the code by <NAME> at https://github.com/sebasutp/promp
# TODO: Implement the EM based learning with NIW prior
#
# @author <NAME> <<EMAIL>>, TU Darmstadt
import intprim.constants
import scipy.linalg
import numpy as np
import pickle
import sklearn.preprocessing
##
# The ProMP class is responsible for training an Probabilistic Movement Primitive model from demonstrations as well as performing run-time inference.
# Support for importing and exporting a trained model as well
#
class ProMP(object):
##
# The initialization method for ProMP.
#
# @param basis_model The basis model corresponding to this state space.
# @param scaling_groups If provided, used to indicate which degrees of freedom should be scaled as a group.
#
def __init__(self, basis_model, scaling_groups = None):
self.basis_model = basis_model
self.scaling_groups = scaling_groups
self.basis_weights = np.array([], dtype = intprim.constants.DTYPE)
self.prior_fitted = False
self.scalers = []
# self.init_scalers()
##
# Exports the internal state information from this model.
# Allows one to export a trained model and import it again without requiring training.
#
# @param file_name The name of the export file.
#
def export_data(self, file_name):
print("Exporting data to: " + str(file_name))
data_struct = {
"basis_weights" : self.basis_weights,
"scaling_groups" : self.scaling_groups,
"scalers" : self.scalers
}
with open(file_name, 'wb') as out_file:
pickle.dump(data_struct, out_file, pickle.HIGHEST_PROTOCOL)
##
# Imports the internal state information from an export file.
# Allows one to import a trained model without requiring training.
#
# @param file_name The name of the import file.
#
def import_data(self, file_name):
print("Importing data from: " + str(file_name))
with open(file_name, 'rb') as in_file:
data_struct = pickle.load(in_file)
        self.basis_weights = np.array(data_struct["basis_weights"])
        # Plausible completion (the file is truncated here): restore the
        # remaining fields written by export_data() above.
        self.scaling_groups = data_struct["scaling_groups"]
        self.scalers = data_struct["scalers"]
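# Usage sketch (hypothetical basis model and file path): export a trained
# ProMP's state and restore it into a fresh instance.
def _demo_promp_roundtrip(basis_model, file_name='promp_state.pkl'):
    promp = ProMP(basis_model)
    promp.export_data(file_name)
    restored = ProMP(basis_model)
    restored.import_data(file_name)
    return restored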
import cv2
import numpy as np
import math
from PIL import Image
import random
class DIP:
def __init__(self):
pass
def read(self, file):
return np.array(Image.open(file))
def save(self, file, image):
return cv2.imwrite(file, image )
def resize(self, image, size):
return cv2.resize(image, (size[0], size[1]))
def cvtGreyscale(self, image):
grey = np.dot(image[...,:3], [0.2989, 0.5870, 0.114])
grey /= np.max(grey)
return grey
def gaussianKernel(self, kernelSize, sigma, flag=True, BilSpatial=None):
normal = 1 / (2.0 * np.pi * sigma * sigma)
if flag:
center = kernelSize // 2
x, y = np.mgrid[-center:center + 1, -center:center + 1]
kernel = np.exp(-((x * x + y * y) / (2.0 * sigma * sigma))) * normal
else:
kernel = np.exp(-(kernelSize*kernelSize / (2.0 * sigma * sigma)))
kernel = np.multiply(kernel, BilSpatial)
return kernel
def gaussianFilter(self, image, kernelSize, sigma):
gKernel = self.gaussianKernel(kernelSize, sigma)
        output = np.zeros(image.shape, float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
output[col, row] = np.sum(gKernel * padded_image[col:col + kernelSize, row:row + kernelSize])
output /= np.max(output)
return output
def gabf(self, image, kernelSize, sigmaS, sigmaR):
spatialKernel = self.gaussianKernel(kernelSize, sigmaS)
        LP_guide = np.zeros(image.shape, float)
import pytest
# import unittest
import numpy as np
import femnurbs.SplineUsefulFunctions as SUF
def test_isValidU():
with pytest.raises(TypeError):
SUF.isValidU()
assert SUF.isValidU(0) is False
assert SUF.isValidU(1.2) is False
assert SUF.isValidU({}) is False
assert SUF.isValidU(-1) is False
assert SUF.isValidU({1: 1}) is False
assert SUF.isValidU([0, 0, 0, 1, 1]) is False
assert SUF.isValidU([0, 0, 1, 1, 1, ]) is False
assert SUF.isValidU([0, 0, 0, 0, 1, 1, 1]) is False
assert SUF.isValidU([0, 0, 0, 1, 1, 1, 1]) is False
assert SUF.isValidU([-1, -1, 1, 1]) is False
assert SUF.isValidU([0, 0, 2, 2]) is False
assert SUF.isValidU([0, 0, 0.8, 0.2, 1, 1]) is False
assert SUF.isValidU([0, 0, 0, 1, 0.5, 1, 1]) is False
assert SUF.isValidU([0, 0, 1, 1]) is True
assert SUF.isValidU([0, 0, 0, 1, 1, 1]) is True
assert SUF.isValidU([0, 0, 0, 0, 1, 1, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.2, 0.8, 1, 1]) is True
assert SUF.isValidU([0, 0, 0, 0.5, 1, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.1, 0.5, 0.9, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.5, 0.5, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.5, 0.5, 0.5, 1, 1]) is False
def test_UBezier():
for p in range(1, 10):
assert SUF.isValidU(SUF.UBezier(p=p)) is True
Ugood = np.array([0, 0, 1, 1])
Utest = SUF.UBezier(p=1)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 1, 1, 1])
Utest = SUF.UBezier(p=2)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 1, 1, 1, 1])
Utest = SUF.UBezier(p=3)
np.testing.assert_almost_equal(Ugood, Utest)
def test_UUniform():
for p in range(1, 10):
for n in range(p + 1, 11):
assert SUF.isValidU(SUF.UUniform(p=p, n=n)) is True
Ugood = np.array([0, 0, 1, 1])
Utest = SUF.UUniform(p=1, n=2)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0.5, 1, 1])
Utest = SUF.UUniform(p=1, n=3)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0.25, 0.5, 0.75, 1, 1])
Utest = SUF.UUniform(p=1, n=5)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1])
Utest = SUF.UUniform(p=1, n=6)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=3)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0.5, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=4)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=6)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=7)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=4)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 0.5, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=5)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=7)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=8)
np.testing.assert_almost_equal(Ugood, Utest)
def test_URandom():
Ntest = 100
for p in (1, 2, 3):
for n in range(p + 1, 30):
for zz in range(Ntest):
U = SUF.URandom(p=p, n=n)
assert SUF.isValidU(U) is True
assert SUF.getPfromU(U) == p
assert SUF.getNfromU(U) == n
def test_transpose():
II = np.eye(3)
IItest = SUF.transpose(II)
np.testing.assert_almost_equal(IItest, II)
II = np.eye(4)
IItest = SUF.transpose(II)
np.testing.assert_almost_equal(IItest, II)
II = np.eye(3)
IItest = SUF.transpose(II, diagonal=2)
np.testing.assert_almost_equal(IItest, II)
II = np.eye(4)
IItest = SUF.transpose(II, diagonal=2)
np.testing.assert_almost_equal(IItest, II)
def test_isSymetric():
II = np.eye(3)
assert SUF.isSymetric(II) is True
II = np.eye(4)
assert SUF.isSymetric(II) is True
II = np.eye(3)
assert SUF.isSymetric(II, diagonal=2) is True
II = np.eye(4)
assert SUF.isSymetric(II, diagonal=2) is True
II = np.array([[1, 2, 3, 4],
[4, 3, 2, 1]])
assert SUF.isSymetric(II, diagonal=2) is True
II = np.array([[1, 2, 4, 4],
[4, 4, 2, 1]])
assert SUF.isSymetric(II, diagonal=2) is True
II = np.array([[7, 2, 4, 3],
[4, 4, 2, 7]])
assert SUF.isSymetric(II, diagonal=2) is False
II = np.array([[7, 2, 4, 7],
[7, 4, 2, 3]])
assert SUF.isSymetric(II, diagonal=2) is False
def test_getPfromU():
U = SUF.UBezier(p=1)
ptest = SUF.getPfromU(U)
assert ptest == 1
U = SUF.UBezier(p=2)
ptest = SUF.getPfromU(U)
assert ptest == 2
U = SUF.UBezier(p=3)
ptest = SUF.getPfromU(U)
assert ptest == 3
U = SUF.UBezier(p=4)
ptest = SUF.getPfromU(U)
assert ptest == 4
U = SUF.UUniform(p=1, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 1
U = SUF.UUniform(p=2, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 2
U = SUF.UUniform(p=3, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 3
U = SUF.UUniform(p=4, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 4
U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
ptest = SUF.getPfromU(U)
assert ptest == 2
def test_getNfromU():
U = SUF.UBezier(p=1)
ptest = SUF.getNfromU(U)
assert ptest == 2
U = SUF.UBezier(p=2)
ptest = SUF.getNfromU(U)
assert ptest == 3
U = SUF.UBezier(p=3)
ptest = SUF.getNfromU(U)
assert ptest == 4
U = SUF.UBezier(p=4)
ptest = SUF.getNfromU(U)
assert ptest == 5
U = SUF.UUniform(p=1, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = SUF.UUniform(p=2, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = SUF.UUniform(p=3, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = SUF.UUniform(p=4, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
ptest = SUF.getNfromU(U)
assert ptest == 5
def test_transformUtoH():
U = SUF.UBezier(p=1)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=2)
Hgood = np.array([0, 1, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([0, 0, 1, 0, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([0, 0, 0, 1, 0, 0, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=1, n=6)
Hgood = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=2, n=6)
Hgood = np.array([0, 0.25, 0.25, 0.25, 0.25, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([0, 0, 1, 1, 1, 0, 0]) / 3
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([0, 0, 0, 1, 1, 0, 0, 0]) / 2
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1]) # p = 2 and n = 5
Hgood = np.array([0, 0.2, 0.6, 0.2, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]) # p = 3 and n = 6
Hgood = np.array([0, 0, 0.2, 0.6, 0.2, 0, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=2)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=2)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([0, 1, 0])
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([0, 1, 0])
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([0, 0, 1, 0, 0])
Htest = SUF.transformUtoH(U, j=3)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=1, n=6)
Hgood = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=2, n=6)
Hgood = np.array([0.25, 0.25, 0.25, 0.25])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=2, n=6)
Hgood = np.array([0.25, 0.25, 0.25, 0.25])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([1, 1, 1]) / 3
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([1, 1, 1]) / 3
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([0, 1, 1, 1, 0]) / 3
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([1, 1]) / 2
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([1, 1]) / 2
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([0, 1, 1, 0]) / 2
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([0, 0, 1, 1, 0, 0]) / 2
Htest = SUF.transformUtoH(U, j=3)
np.testing.assert_almost_equal(Hgood, Htest)
U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1]) # p = 2 and n = 5
Hgood = np.array([0.2, 0.6, 0.2])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]) # p = 3 and n = 6
Hgood = np.array([0.2, 0.6, 0.2])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]) # p = 3 and n = 6
Hgood = np.array([0, 0.2, 0.6, 0.2, 0])
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
def test_transformHtoSides():
H = np.array([1, 1, 1])
Sgood = np.array([[1], [1]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([0, 1, 1])
Sgood = np.array([[0], [1]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([1, 1, 0])
Sgood = np.array([[1], [0]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([0, 1, 0])
Sgood = np.array([[0], [0]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([0.6, 1, 0.3])
Sgood = np.array([[0.6], [0.3]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([6, 10, 3])
Sgood = np.array([[0.6], [0.3]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([1, 1, 1, 1, 1])
Sgood = np.array([[1, 1], [1, 1]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([0, 1, 1, 1, 1])
Sgood = np.array([[1, 0], [1, 1]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([1, 1, 1, 1, 0])
Sgood = np.array([[1, 1], [1, 0]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([0, 0, 1, 0, 0])
Sgood = np.array([[0, 0], [0, 0]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([0.2, 0.6, 1, 0.3, 0.4])
Sgood = np.array([[0.6, 0.2], [0.3, 0.4]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
H = np.array([2, 6, 10, 3, 4])
Sgood = np.array([[0.6, 0.2], [0.3, 0.4]])
Stest = SUF.transformHtoSides(H)
np.testing.assert_almost_equal(Sgood, Stest)
def test_cutHtoElementZ():
H = np.array([0.5, 0.5])
    Zgood = np.array([0.5])
import numpy as np
from rubin_sim.utils import calcLmstLast
__all__ = ['_approx_altAz2RaDec', '_approx_RaDec2AltAz', 'approx_altAz2RaDec',
'approx_RaDec2AltAz', '_approx_altaz2pa', 'approx_altaz2pa']
def _approx_altaz2pa(alt_rad, az_rad, latitude_rad):
"""A fast calculation of parallactic angle
Parameters
----------
alt_rad : `float`
Altitude (radians)
az_rad : `float`
Azimuth (radians)
latitude_rad : `float`
The latitude of the observatory (radians)
Returns
-------
pa : `float`
Parallactic angle (radians)
"""
y = np.sin(-az_rad)*np.cos(latitude_rad)
    x = np.cos(alt_rad)*np.sin(latitude_rad) - np.sin(alt_rad)*np.cos(latitude_rad)*np.cos(-az_rad)
    # Plausible completion (the original is truncated here): the standard
    # parallactic-angle relation.
    pa = np.arctan2(y, x)
    return pa
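# Usage sketch (hypothetical pointing; the latitude is roughly Rubin
# Observatory's at Cerro Pachon).
def _demo_altaz2pa():
    alt = np.radians(45.0)
    az = np.radians(120.0)
    lat = np.radians(-30.24)
    return _approx_altaz2pa(alt, az, lat)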
import numpy as np
from scipy import optimize
import sys
from astropy import modeling
from itertools import chain
from nuclear.ejecta import Ejecta
from nuclear.nuclear_data import DecayRadiation
from scipy import stats
from collections import OrderedDict
import pandas as pd
from astropy import units as u
import pymultinest
msun_to_cgs = u.Msun.to(u.g)
mpc_to_cm = u.Mpc.to(u.cm)
class BaseModel(modeling.Model):
def __call__(self, *inputs, **kwargs):
parameters = self._param_sets(raw=True)
return self.evaluate(*chain(inputs, parameters))
class BolometricLightCurveModel(BaseModel):
pass
class BolometricLightCurveModelIa(object):
def __init__(self, epochs, lum_dens, lum_dens_err, ni56, ni57, co55, ti44):
self.epochs = epochs
self.lum_dens = lum_dens
self.lum_dens_err = lum_dens_err
self.ejecta = Ejecta.from_masses(Ni56=ni56 * u.Msun, Ni57=ni57 * u.Msun,
Co55=co55 * u.Msun, Ti44=ti44 * u.Msun)
self.nuclear_data = DecayRadiation(self.ejecta.get_all_children_nuc_name())
self.rad_trans = SimpleLateTime(self.ejecta, self.nuclear_data)
def calculate_light_curve(self, ni56, ni57, co55, ti44, fraction=1.0,
distance=6.4, epochs=None):
if epochs is None:
epochs = self.epochs
total_mass = ni56 + ni57 + co55 + ti44
self.ejecta.mass_g = total_mass * msun_to_cgs
self.ejecta['Ni56'] = ni56 / total_mass
self.ejecta['Ni57'] = ni57 / total_mass
self.ejecta['Co55'] = co55 / total_mass
self.ejecta['Ti44'] = ti44 / total_mass
luminosity_density = self.rad_trans.total_bolometric_light_curve(epochs)
return (luminosity_density * fraction /
(4 * np.pi * (distance * mpc_to_cm)**2))
def calculate_individual_light_curve(self, ni56, ni57, co55, ti44, fraction=1.0,
distance=6.4, epochs=None):
if epochs is None:
epochs = self.epochs
total_mass = ni56 + ni57 + co55 + ti44
self.ejecta.mass_g = total_mass * msun_to_cgs
self.ejecta['Ni56'] = ni56 / total_mass
self.ejecta['Ni57'] = ni57 / total_mass
self.ejecta['Co55'] = co55 / total_mass
self.ejecta['Ti44'] = ti44 / total_mass
luminosity_density = self.rad_trans.bolometric_light_curve(epochs)
return (luminosity_density * fraction /
(4 * np.pi * (distance * mpc_to_cm)**2))
def fitness_function(self, ni56, ni57, co55, ti44, fraction, distance):
model_light_curve = self.calculate_light_curve(ni56, ni57, co55, ti44,
fraction, distance)
return (model_light_curve.value - self.lum_dens)/self.lum_dens_err
def log_likelihood(self, model_param, ndim, nparam):
#return -5
        model_param = [model_param[i] for i in range(6)]
return (-0.5 * self.fitness_function(*model_param)**2).sum()
def simple_fit(self, ni56, ni57, co55, ti44, method='Nelder-Mead'):
def fit_func(isotopes):
ni57, co55, ti44 = np.abs(isotopes)
mdl = self.evaluate(ni56, ni57, co55, ti44)
mdl *= np.mean(self.luminosity / mdl.value)
return ((mdl.value - self.luminosity)**2).sum()
fit = optimize.minimize(fit_func, (ni57, co55, ti44),
method=method)
mdl = self.evaluate(ni56, *fit.x)
        norm_factor = np.mean(self.luminosity / mdl.value)
#!/usr/bin/env python3
# Copyright: <NAME>, 2021
"""
Author: <NAME>
Date: 2021Dec17
Brief: Functions to generate multiband pulses
Requires a base shape generated by any of
the other pulse functions (except SLR)
"""
import os
import json
import numpy as np
import matplotlib.pyplot as plt
from AFP_all import sech, AFP_rf
from AHP_all import AHP_rf
from AM_pulses import AM_rf
from BIR4 import BIR4_rf
from CHIRP import chirp_rf
"""
Available shaped pulse functions:
Amplitude Modulated (AM_rf): 'sinc', 'gaussian', 'hermite'
Adiabatic Full/Half Passage (AFP_rf/AHP_rf): 'HSn', 'tanh/tan'
B1 Insensitive Rotation (BIR4): 'tanh/tan', 'sech/tanh'
CHIRP Linear frequency sweep: chirp_rf
"""
BASE_RF_FILE = "C:/Users/RudrakshaMajumdar/Documents/GitHub/rf-bloch-simulator/saved_rf_pulses/gaussian1_2ms/rf_pulse_file.npz"
BASE_RF_PULSE = np.load(BASE_RF_FILE)
PULSE_LENGTH = 2000 #us
NR_OF_POINTS = len(BASE_RF_PULSE['arr_0'])
TIME_REVERSAL = "on"
BLOCH_SIEGERT_COMP = "off"
FREQ_1 = 1000 # Hz
FREQ_2 = -1000
PHS_1 = 0
PHS_2 = 0
RF_AMPLITUDE = 1000 # Hz
SAVE_PULSE = True
BASE_PATH = "C:/Users/RudrakshaMajumdar/Documents/GitHub/rf-bloch-simulator/saved_rf_pulses"
NAME_PULSE = "multifreq_gauss_2ms"
def multi_freq(
pulse_length = PULSE_LENGTH,
shape_pts = NR_OF_POINTS,
time_reversal = TIME_REVERSAL,
bs_compensation = BLOCH_SIEGERT_COMP,
freq_1 = FREQ_1,
freq_2 = FREQ_2,
phs_1 = PHS_1,
phs_2 = PHS_2,
rf_amplitude = RF_AMPLITUDE
):
# Generate the base RF pulse shape to modulate
# rf_amp, freq_mod, phs_mod, _ = AM_rf(func="gaussian",pulse_length=pulse_length,shape_pts=shape_pts,trunc_lvl=1)
rf_amp = BASE_RF_PULSE['arr_0']
phs_mod = BASE_RF_PULSE['arr_1']
freq_mod = BASE_RF_PULSE['arr_2']
rf_dwelltime = 1e-6 * (pulse_length/shape_pts) # convert to seconds
time = np.arange(0, pulse_length, pulse_length/shape_pts)
# Convert RF phase from deg to rad
phs_mod = phs_mod * (np.pi/180)
# Time reversal of pulse 2 relative to pulse 1
if time_reversal == "on":
rf_amp_1 = rf_amp
phs_mod_1 = phs_mod
rf_amp_2 = np.flip(rf_amp)
phs_mod_2 = np.flip(phs_mod)
elif time_reversal == "off":
rf_amp_1 = rf_amp
phs_mod_1 = phs_mod
rf_amp_2 = rf_amp
phs_mod_2 = phs_mod
else:
rf_amp_1 = rf_amp
phs_mod_1 = phs_mod
rf_amp_2 = rf_amp
phs_mod_2 = phs_mod
print("Warning: No time reversal option specified. Continuing without time reversal")
# Memory allocation
Mx_final_1 = np.zeros(shape_pts)
Mx_final_2 = np.zeros(shape_pts)
My_final_1 = np.zeros(shape_pts)
My_final_2 = np.zeros(shape_pts)
M = np.zeros([3,1])
# Initial Magnetization
Mx_0 = 1.0
My_0 = 0.0
Mz_0 = 0.0
if bs_compensation == "on":
# Calculate the effect of pulse 1 with freq_1 at freq_2
phs_1_freq_1 = phs_1 + 2 * np.pi * freq_1 * (time / 1e6)
# Scale RF pulse over the entire RF range and convert from Hz to rad/s
rf_amp = 2 * np.pi * rf_amplitude * rf_amp_1.reshape(-1,1)
M[0,0] = Mx_0
M[1,0] = My_0
M[2,0] = Mz_0
R = np.identity(3)
# Convert frequency offset from hz to rad/s
rf_offset = 2 * np.pi * freq_1
for rf_pulse_counter in range(shape_pts):
term_0 = rf_amp[rf_pulse_counter] ** 2
term_1 = rf_offset ** 2
#B_effective
Be = np.sqrt(term_0 + term_1) * rf_dwelltime
alpha = np.arctan2(rf_offset, rf_amp[rf_pulse_counter])
cosBe = np.cos(Be)
sinBe = np.sin(Be)
cos1alpha = np.cos(alpha)
cos2alpha = np.cos(alpha) * np.cos(alpha)
sin1alpha = np.sin(alpha)
sin2alpha = np.sin(alpha) * np.sin(alpha)
cos1phi = np.cos(phs_1_freq_1[rf_pulse_counter])
cos2phi = np.cos(phs_1_freq_1[rf_pulse_counter]) * np.cos(phs_1_freq_1[rf_pulse_counter])
sin1phi = np.sin(phs_1_freq_1[rf_pulse_counter])
sin2phi = np.sin(phs_1_freq_1[rf_pulse_counter]) * np.sin(phs_1_freq_1[rf_pulse_counter])
# Construct the total rotation matrix
R[0,0] = cos2phi*(cos2alpha + cosBe*sin2alpha) + cosBe*sin2phi
R[1,0] = -sin1alpha*sinBe + sin1phi*cos1phi*cos2alpha*(1 - cosBe)
R[2,0] = cos1alpha*(cos1phi*sin1alpha*(cosBe - 1) - sinBe*sin1phi)
R[0,1] = sin1alpha*sinBe + sin1phi*cos1phi*cos2alpha*(1 - cosBe)
R[1,1] = cosBe*cos2phi + (cos2alpha + cosBe*sin2alpha)*sin2phi
R[2,1] = sin1alpha*cos1alpha*sin1phi*(cosBe - 1) + cos1alpha*cos1phi*sinBe
R[0,2] = cos1alpha*(cos1phi*sin1alpha*(cosBe - 1) + sinBe*sin1phi)
R[1,2] = sin1alpha*cos1alpha*sin1phi*(cosBe - 1) - cos1alpha*cos1phi*sinBe
R[2,2] = cosBe*cos2alpha + sin2alpha
M = R @ M
Mx_final_1[rf_pulse_counter] = M[0,0]
My_final_1[rf_pulse_counter] = M[1,0]
# Calculate phase evolution during pulse 1
phs_from_pulse1 = np.arctan2(My_final_1,Mx_final_1)
phs_from_pulse1 = np.unwrap(phs_from_pulse1)
phs_from_chem_shft1 = 2 * np.pi * freq_1 * (time / 1e6)
phs_bs_1 = phs_from_pulse1 + phs_from_chem_shft1
# Calculate the effect of pulse 2 with freq_2 at freq_1
phs_2_freq_2 = phs_2 + 2 * np.pi * freq_2 * (time / 1e6)
# Scale RF pulse over the entire RF range and convert from Hz to rad/s
rf_amp = 2 * np.pi * rf_amplitude * rf_amp_2.reshape(-1,1)
M[0,0] = Mx_0
M[1,0] = My_0
M[2,0] = Mz_0
R = np.identity(3)
# Convert frequency offset from hz to rad/s
rf_offset = 2 * np.pi * freq_1
for rf_pulse_counter in range(shape_pts):
term_0 = rf_amp[rf_pulse_counter] ** 2
term_1 = rf_offset ** 2
#B_effective
Be = np.sqrt(term_0 + term_1) * rf_dwelltime
alpha = np.arctan2(rf_offset, rf_amp[rf_pulse_counter])
cosBe = np.cos(Be)
sinBe = np.sin(Be)
cos1alpha = np.cos(alpha)
cos2alpha = np.cos(alpha) * np.cos(alpha)
sin1alpha = np.sin(alpha)
sin2alpha = np.sin(alpha) * np.sin(alpha)
cos1phi = np.cos(phs_2_freq_2[rf_pulse_counter])
cos2phi = np.cos(phs_2_freq_2[rf_pulse_counter]) * np.cos(phs_2_freq_2[rf_pulse_counter])
sin1phi = np.sin(phs_2_freq_2[rf_pulse_counter])
            sin2phi = np.sin(phs_2_freq_2[rf_pulse_counter]) * np.sin(phs_2_freq_2[rf_pulse_counter])
            # Completed to mirror the pulse-1 rotation-matrix block above (the
            # original is truncated here).
            R[0,0] = cos2phi*(cos2alpha + cosBe*sin2alpha) + cosBe*sin2phi
            R[1,0] = -sin1alpha*sinBe + sin1phi*cos1phi*cos2alpha*(1 - cosBe)
            R[2,0] = cos1alpha*(cos1phi*sin1alpha*(cosBe - 1) - sinBe*sin1phi)
            R[0,1] = sin1alpha*sinBe + sin1phi*cos1phi*cos2alpha*(1 - cosBe)
            R[1,1] = cosBe*cos2phi + (cos2alpha + cosBe*sin2alpha)*sin2phi
            R[2,1] = sin1alpha*cos1alpha*sin1phi*(cosBe - 1) + cos1alpha*cos1phi*sinBe
            R[0,2] = cos1alpha*(cos1phi*sin1alpha*(cosBe - 1) + sinBe*sin1phi)
            R[1,2] = sin1alpha*cos1alpha*sin1phi*(cosBe - 1) - cos1alpha*cos1phi*sinBe
            R[2,2] = cosBe*cos2alpha + sin2alpha
            M = R @ M
            Mx_final_2[rf_pulse_counter] = M[0,0]
            My_final_2[rf_pulse_counter] = M[1,0]
#!/usr/bin/env python
## Copyright (c) 2009, <NAME>
## Original Matlab version of GC2D, <NAME>
## GC2D first converted to Python/NumPy in 2009 by <NAME>
################################################################
# NOTE: TopoFlow can provide "mass balance" for GC2D, but
# the timescales are very different. TopoFlow should
# pass some kind of "net" or cumulative "mass balance"
# to GC2D at its large timestep.
#
# NOTE: There is no "load_mask()" function yet, but it is
# called in a "try" block.
#
# NOTE: THERMAL_TOGGLE option does not work yet.
# See notes below regarding undefined vars.
#
# NOTE: Should carefully test update_vars() due to
# a bug fix and other changes to the code.
# Compare to update_vars_OLD().
#
# NOTE: Check that all "numpy" function calls include "numpy.".
# Fixed calls to "mean()", "nonzero()", "ravel()",
# abs() vs. absolute(), max(A,B) vs. maximum(A,B), etc.
#
################################################################
import numpy
import time
import sys
import logging
# import getopt
import scipy # scipy.signal.convolve, scipy.io.loadmat
from scipy import interpolate
from scipy import signal
# SDP. 10/24/11. No longer available. Deprecated?
# from scipy.io.numpyio import fwrite # used by print_watch_point()
#--------------------------------------------------------------------------------------------------
# run_model() # (for testing)
# ------------------------------
# Classes (used as structures)
# ------------------------------
# MassBalance
# BoundaryCond
# Parameters
# InputParams
# OutputParams
# Toggles
#
# -----------
# Functions
# -----------
# compress_grid()
# filter2d()
# add_halo()
# set_bc()
# difference_grid()
# basal_shear_stress()
# iceflow()
# ice_sliding()
# sum_ice_motion()
# avalanche()
# calve()
# mass_balance()
# mass_conservation()
# load_dem()
# load_dem_var()
# load_mask() ###### Not written, but called. #####
# get_timestep()
# update_vars()
# print_watch_point()
# update()
# init_valley_glacier()
# init_ice_sheet()
# resample_dem()
# init_ice_surface()
# load_state()
# #### load_state_old()
# #### run_for()
#--------------------------------------------------------------------------------------------------
def run_model(t_max=10.0, DEM_file='Animas_200.mat', SILENT=False):
Toggles.VARIABLE_DT_TOGGLE = 0 # (or change to 1)
###################################
print('Starting GC2D test run...')
print('Reading input file...')
( H, Zb, Zi, dx, dy ) = load_state(DEM_file=DEM_file,
RESTART_TOGGLE = 0,
INIT_COND_TOGGLE=1 )
ny, nx = Zb.shape
#------------------
# Initialize vars
#------------------
t = numpy.float64(0)
conserveIce = numpy.float64(0) # (total ice mass ??)
meltrate = numpy.zeros( (ny, nx), dtype='float64' )
## fd_watch = {}
## fd_watch['thick'] = open( 'thickness_py.bin' , 'wb' )
## counter = 0
while (t < t_max):
(dt, t, H, Zi, meltrate, conserveIce) = update( t, H, Zb, dx, dy,
meltrate, conserveIce,
SILENT=SILENT)
## COMPRESS_TOGGLE = Toggles.COMPRESS_TOGGLE,
## ICEFLOW_TOGGLE = Toggles.ICEFLOW_TOGGLE,
## ICESLIDE_TOGGLE = Toggles.ICESLIDE_TOGGLE,
## VARIABLE_DT_TOGGLE = Toggles.VARIABLE_DT_TOGGLE,
## dtDefault=Parameters.dtDefault,
## dtMax=Parameters.dtMax)
#-----------------------
# Print a short report
#-----------------------
print(' ')
print('(nx, ny) =', nx, ny)
print('(dx, dy) =', dx, dy)
print('(Hmin, Hmax) =', H.min(), H.max())
print('(Zbmin, Zbmax) =', Zb.min(), Zb.max())
print('(Zimin, Zimax) =', Zi.min(), Zi.max())
print('(MRmin, MRmax) =', meltrate.min(), meltrate.max())
print('conserveIce =', conserveIce)
print('Finished.')
print(' ')
# run_model()
#--------------------------------------------------------------------------------------------------
class MassBalance: # (enumeration)
( BAD_VAL ,
ZERO_BALANCE ,
CONSTANT_ELA ,
ELA_LOWERING ,
ELA_TIME_SERIES ,
EXTERNAL_FUNC ,
ELA_LOWERING2 ,
BALANCE_FILE ,
D180_TIME_SERIES ) = list(range( 9))
# class MassBalance
#--------------------------------------------------------------------------------------------------
class BoundaryCond: # (enumeration)
( BAD_VAL ,
ICE_FREE_BOUND ,
ZERO_FLUX_BOUND ,
CONST_FLUX_BOUND ,
SURF_ELEV_BOUND ,
SURF_SLOPE_BOUND ) = list(range( 6))
# class BoundaryCond
#--------------------------------------------------------------------------------------------------
class Parameters: # (structure)
# Constants
g = numpy.float64(9.81) # gravitional acceleration [m/s**2]
rhoI = numpy.float64(917) # density of ice [kg/m**3]
rhoW = numpy.float64(1000) # density of water [kg/m**3]
day = numpy.float64(0.00274) # length of a day in years [years]
# Time
t = numpy.float64(0) # set time to zero
tMax = numpy.float64(100000) # maximum simulation time in years
dtMax = numpy.float64(0.4 * 365*day) # maximum timestep in years
dtDefault = dtMax # timestep if VARIABLE_DT_TOGGLE==0
sec_per_year = numpy.float64(3600) * 24 * 365 # (SDP, 9/30/09)
# Glacier Properties
MinGlacThick = numpy.float64(1)
# Ice Deformation
glensA = numpy.float64( (6.8e-15)*3.15e7/(1e9) ) # Patterson, 1994; MacGregor, 2000
## glensA = numpy.float64( 6.8 * 3.15 * 1e-17)
# Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used)
UsChar = numpy.float64(10)
taubChar = numpy.float64(100000)
# Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used)
B = numpy.float64(0.0012) # m/(Pa*yr) -- MacGregor, 2000
DepthToWaterTable = numpy.float64(20) # distance from ice surface to water table
MaxFloatFraction = numpy.float64(80) # limits water level in ice
Hpeff = numpy.float64(20) # effective pressure (meters of water)
# Mass Balance
initELA = numpy.float64(3350) # (valley glaciers, try 3500 ice sheets)
ELAStepSize = numpy.float64(-50)
ELAStepInterval = numpy.float64(500)
gradBz = numpy.float64(0.01)
maxBz = numpy.float64(2)
tmin = numpy.float64(200) # Years, spin-up time
# Avalanching
angleOfRepose = numpy.float64(30)
avalanchFreq = numpy.float64(3) # average number per year
# Calving
seaLevel = numpy.float64(-100) # meters
calvingCoef = numpy.float64(2) # year^-1
# Thermal
c = numpy.float64(2060) # specific heat capacity (J/(kg*K))
Qg = numpy.float64(0.05 * 3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2)
gradTz = numpy.float64(-0.0255) # Geothermal Gradient
# Only for Ice Sheets ???
Hbound = numpy.float64(2000)
Elev0 = numpy.float64(0) # reference elevation
To = numpy.float64(2.6) # temperature at Elev0
lapseRate = numpy.float64(-0.0065) # degrees per meter
# class Parameters
#--------------------------------------------------------------------------------------------------
class InputParams: # (structure)
CLEAR_FIGURE = 1
CONTOUR_INTERVAL = 50.
DEBUG_TOGGLE = 0
DT_LIMIT = 0
ELA_CONTOUR = 1.
ICE_CONTOUR = 1.
NEW_FIGURE = 0
QUIVER_VECS = 0
RECONSTRUCT = 0
SUBFIGURE = 0
THERMAL_CONTOUR = 0
# class InputParams
#--------------------------------------------------------------------------------------------------
class OutputParams: # (structure)
plotInterval = 60 * 120 # seconds
saveInterval = 100 # whole years
reportInterval = 30 # seconds
nextPlot = 0 # initialize to plot on first timestep
nextSave = 0 # initialize to save on first timestep
nextReport = 0 # initialize to report on first timestep
outputFile = 'savetmp'
# class OutputParams
#--------------------------------------------------------------------------------------------------
class Toggles: # (structure)
#------------------------
# Code behavior toggles
#-----------------------------------------------------------
# Toggle or turn on/off segments of the code or select
# between multiple possibilities for a given process.
# Values can be reset in INIT_COND segment.
# Note that many of these are unused in current version.
#-----------------------------------------------------------
GUISTART_TOGGLE = 0 # started simulation with the gui (off|on)
SAVE_TOGGLE = 1 # saving (off|on)
PLOT_TOGGLE = 1 # plotting (off|on)
REPORT_TOGGLE = 1 # reporting (off|on)
COMPRESS_TOGGLE = 0 # only simulate area with ice (off|on)
VARIABLE_DT_TOGGLE = 0 # state dependent time step (off|on)
INIT_COND_TOGGLE = 1 # load DEM and climate (synth|valley|sheet)
GENERIC_ICE_TOGGLE = 0 # start with generic ice surface (off|on)
ICEFLOW_TOGGLE = 1 # ice motion by deformation (off|on)
ICESLIDE_TOGGLE = 0 # ice motion by sliding (off|on|select)
THERMAL_TOGGLE = 0 # temp dependance of flow (off|on)
FREEZEON_TOGGLE = 0 # basal ice freeze to bed (off|on)
AVALANCHE_TOGGLE = 0 # avalanche off steep surfaces (off|on)
CALVING_TOGGLE = 0 # calving front (off|on)
ERODE_TOGGLE = 0 # erode the bed (off|on|select)
## CRN_TOGGLE = 0 # CRN accumulation (off|on)
# MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING # select climate scenerio (off|on|select)
MASS_BALANCE_TOGGLE = MassBalance.CONSTANT_ELA # select climate scenerio (off|on|select)
WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
# class Toggles
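# Usage sketch: toggles are plain class attributes, so a run can flip behavior
# before stepping the model (run_model() itself resets VARIABLE_DT_TOGGLE above).
def _demo_configure_toggles():
    Toggles.VARIABLE_DT_TOGGLE = 1
    Toggles.MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING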
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
def compress_grid( H , Zb , COMPRESS_TOGGLE=False , RESTART_TOGGLE=0,
THERMAL_TOGGLE=False ):
# COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE
if (COMPRESS_TOGGLE) and (H.max() > 1) and (RESTART_TOGGLE != 2):
H_FullSpace = H.copy()
Zb_FullSpace = Zb.copy()
if (THERMAL_TOGGLE):
Ts_FullSpace = Ts.copy()
Tb_FullSpace = Tb.copy()
Tm_FullSpace = Tm.copy()
#[indrw,indcl] = find(H ~= 0);
indrw, indcl = numpy.where( H != 0 )
mxrw, mxcl = Zb.shape
mnrw = max( 0 , min(indrw) - 2 )
mxrw = min( mxrw , max(indrw) + 2 )
mncl = max( 0 , min(indcl) - 2 )
mxcl = min( mxcl , max(indcl) + 2 )
H = H [ mnrw:mxrw , mncl:mxcl ]
Zb = Zb[ mnrw:mxrw , mncl:mxcl ]
## Zi = Zb + max( H, 0 )
## Zi = Zb + numpy.choose( H<0 , (H,0) )
Zi = Zb + numpy.maximum(H, 0)
if (THERMAL_TOGGLE):
Ts = Ts[ mnrw:mxrw , mncl:mxcl ]
Tb = Tb[ mnrw:mxrw , mncl:mxcl ]
Tm = Tm[ mnrw:mxrw , mncl:mxcl ]
ny, nx = H.shape
mx_ny, mx_nx = Zb_FullSpace.shape
ny, nx = Zb.shape
compression_ratio = (mx_nx * mx_ny) / (nx * ny)
COMPRESSED_FLAG = 1
else:
## Zi = Zb + max( H, 0 ) # included for restarts
## Zi = Zb + numpy.choose( H<0 , (H,0) )
Zi = Zb + numpy.maximum(H, 0)
compression_ratio = 1.
COMPRESSED_FLAG = 0
return ( Zi , compression_ratio , COMPRESSED_FLAG )
# compress_grid()
#--------------------------------------------------------------------------------------------------
def filter2d( b , x , shape='same' ):
return scipy.signal.convolve( b , x , mode=shape )
# filter2d()
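# Usage sketch: filter2d is a thin wrapper over scipy.signal.convolve; a 3x3
# box kernel smooths a synthetic grid (values illustrative only).
def _demo_filter2d():
    grid = numpy.arange(25, dtype='float64').reshape(5, 5)
    kernel = numpy.ones((3, 3)) / 9.0
    return filter2d(kernel, grid, shape='same')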
#--------------------------------------------------------------------------------------------------
def add_halo( x ):
x_ext = numpy.concatenate( ( x[:,0,numpy.newaxis] , x , x[:,-1,numpy.newaxis] ) , axis=1 )
x_ext = numpy.concatenate( ( [x_ext[0,:]] , x_ext , [x_ext[-1,:]] ) )
return x_ext
# add_halo()
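# Usage sketch: add_halo replicates the edge rows/columns, matching
# numpy.pad(x, 1, mode='edge') on a 2-D array.
def _demo_add_halo():
    x = numpy.arange(9, dtype='float64').reshape(3, 3)
    halo = add_halo(x)
    assert numpy.allclose(halo, numpy.pad(x, 1, mode='edge'))
    return halo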
#--------------------------------------------------------------------------------------------------
def set_bc( H , Zb , Zi ,
THERMAL_TOGGLE = Toggles.THERMAL_TOGGLE,
WEST_BC_TOGGLE = Toggles.WEST_BC_TOGGLE,
EAST_BC_TOGGLE = Toggles.EAST_BC_TOGGLE,
SOUTH_BC_TOGGLE = Toggles.SOUTH_BC_TOGGLE,
NORTH_BC_TOGGLE = Toggles.NORTH_BC_TOGGLE ):
## WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ):
#-------------------------------------------------------
# MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS
#-------------------------------------------------------
# DEFAULT BOUNDARY CONDITION IS ZERO FLUX
#-------------------------------------------------------
H_ext = add_halo( H )
Zb_ext = add_halo( Zb )
Zi_ext = add_halo( Zi )
if (THERMAL_TOGGLE):
Ts_ext = add_halo( Ts )
Tb_ext = add_halo( Tb )
Tm_ext = add_halo( Tm )
# WESTERN BOUNDARY CONDITION
if WEST_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[:,0]) + Parameters.Hbound
H_ext[:,0] = ZiBound - Zb_ext[:,0]
elif WEST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif WEST_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,0] = 2*Zi_ext[:,1] - Zi_ext[:,2]
H_ext [:,0] = Zi_ext[:,0] - Zb_ext[:,0]
H_ext [:,0] = numpy.maximum( H_ext[:,0], 0 )
elif WEST_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,0] = 0
# EASTERN BOUNDARY CONDITION
if EAST_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[:,-1]) + Parameters.Hbound
H_ext[:,-1] = ZiBound - Zb_ext[:,-1]
elif EAST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif EAST_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,-1] = 2*Zi_ext[:,-2] - Zi_ext[:,-3]
H_ext [:,-1] = Zi_ext[:,-1] - Zb_ext[:,-1]
H_ext [:,-1] = numpy.maximum( H_ext[:,-1], 0)
elif EAST_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,-1] = 0
# SOUTHERN BOUNDARY CONDITION
if SOUTH_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[0,:]) + Parameters.Hbound
H_ext[0,:] = ZiBound - Zb_ext[0,:]
elif SOUTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif SOUTH_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[0,:] = 2*Zi_ext[1,:] - Zi_ext[2,:]
H_ext [0,:] = Zi_ext[0,:] - Zb_ext[0,:]
H_ext [0,:] = numpy.maximum( H_ext[0,:], 0 )
elif SOUTH_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[0,:] = 0
# NORTHERN BOUNDARY CONDITION
if NORTH_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[-1,:]) + Parameters.Hbound
H_ext[-1,:] = ZiBound - Zb_ext[-1,:]
elif NORTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif NORTH_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[-1,:] = 2*Zi_ext[-2,:] - Zi_ext[-3,:]
H_ext [-1,:] = Zi_ext[-1,:] - Zb_ext[-1,:]
H_ext [-1,:] = numpy.maximum( H_ext[-1,:], 0 )
elif NORTH_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[-1,:] = 0
Zi_ext = Zb_ext + H_ext
return ( H_ext , Zb_ext , Zi_ext )
# set_bc()
#--------------------------------------------------------------------------------------------------
def difference_grid( A , dx , dy ):
dAdx_ext = ( A[:,1:] - A[:,:-1] ) / dx
dAdy_ext = ( A[1:,:] - A[:-1,:] ) / dy
dAdx = dAdx_ext[1:-1,:]
dAdy = dAdy_ext[:,1:-1]
return ( dAdx , dAdy )
# difference_grid()
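# Usage sketch: forward differences of a tilted plane recover its slopes
# (~1.5 along x and ~0.5 along y here; values illustrative only).
def _demo_difference_grid():
    dx, dy = 2.0, 3.0
    Y, X = numpy.mgrid[0:4, 0:5]
    A = 1.5 * X * dx + 0.5 * Y * dy
    return difference_grid(A, dx, dy)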
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
def basal_shear_stress( H_ext , Zi_ext , dx=1. , dy=1. ,
g=Parameters.g , rhoI=Parameters.rhoI ):
#------------------------------------
# CALCULATE THE BASAL SHEAR STRESS
#------------------------------------
# forward differences (could use difference_grid())
dZidxX_ext = ( Zi_ext[:,1:] - Zi_ext[:,:-1] ) / dx
dZidyY_ext = ( Zi_ext[1:,:] - Zi_ext[:-1,:] ) / dy
dZidxX = dZidxX_ext[1:-1,:]
dZidyY = dZidyY_ext[:,1:-1]
HX_ext = ( H_ext[:,1:] + H_ext[:,:-1] ) / 2.
HY_ext = ( H_ext[1:,:] + H_ext[:-1,:] ) / 2.
HX = HX_ext[1:-1,:]
HY = HY_ext[:,1:-1]
taubxX_ext = -rhoI * g * HX_ext * dZidxX_ext
taubyY_ext = -rhoI * g * HY_ext * dZidyY_ext
taubxX = taubxX_ext[1:-1,:]
taubyY = taubyY_ext[:,1:-1]
taubxY = ( taubxX_ext[:-1,:-1] + taubxX_ext[:-1,1:] +
taubxX_ext[1: ,:-1] + taubxX_ext[1: ,1:] ) / 4.
taubyX = ( taubyY_ext[:-1,:-1] + taubyY_ext[:-1,1:] +
taubyY_ext[1: ,:-1] + taubyY_ext[1: ,1:] ) / 4.
taubX = numpy.sqrt( taubxX**2 + taubyX**2 )
taubY = numpy.sqrt( taubxY**2 + taubyY**2 )
taubX = numpy.choose( HX>0 , (0,taubX) )
    taubY = numpy.choose( HY>0 , (0,taubY) )
import importlib.resources
import numpy as np
from hexrd import constants
from hexrd import symmetry, symbols
from hexrd.spacegroup import Allowed_HKLs
from hexrd.ipfcolor import sphere_sector, colorspace
from hexrd.valunits import valWUnit
import hexrd.resources
import warnings
import h5py
from pathlib import Path
from scipy.interpolate import interp1d
import time
eps = constants.sqrt_epsf
class unitcell:
'''
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 10/09/2018 SS 1.0 original
@DATE: 10/15/2018 SS 1.1 added space group handling
>> @DETAILS: this is the unitcell class
'''
# initialize the unitcell class
# need lattice parameters and space group data from HDF5 file
def __init__(self, lp, sgnum,
atomtypes, charge,
atominfo,
U, dmin, beamenergy,
sgsetting=0):
self._tstart = time.time()
self.pref = 0.4178214
self.atom_type = atomtypes
self.chargestates = charge
self.atom_pos = atominfo
self._dmin = dmin
self.lparms = lp
self.U = U
'''
initialize interpolation from table for anomalous scattering
'''
self.InitializeInterpTable()
'''
sets x-ray energy
calculate wavelength
also calculates anomalous form factors for xray scattering
'''
self.voltage = beamenergy * 1000.0
'''
calculate symmetry
'''
self.sgsetting = sgsetting
self.sgnum = sgnum
self._tstop = time.time()
self.tinit = self._tstop - self._tstart
def GetPgLg(self):
'''
simple subroutine to get point and laue groups
to maintain consistency for planedata initialization
in the materials class
'''
for k in list(_pgDict.keys()):
if self.sgnum in k:
pglg = _pgDict[k]
self._pointGroup = pglg[0]
self._laueGroup = pglg[1]
self._supergroup = pglg[2]
self._supergroup_laue = pglg[3]
def CalcWavelength(self):
# wavelength in nm
self.wavelength = constants.cPlanck * \
constants.cLight / \
constants.cCharge / \
self.voltage
self.wavelength *= 1e9
self.CalcAnomalous()
def calcBetaij(self):
self.betaij = np.zeros([self.atom_ntype, 3, 3])
for i in range(self.U.shape[0]):
U = self.U[i, :]
self.betaij[i, :, :] = np.array([[U[0], U[3], U[4]],
[U[3], U[1], U[5]],
[U[4], U[5], U[2]]])
self.betaij[i, :, :] *= 2. * np.pi**2 * self._aij
def calcmatrices(self):
a = self.a
b = self.b
c = self.c
alpha = np.radians(self.alpha)
beta = np.radians(self.beta)
gamma = np.radians(self.gamma)
ca = np.cos(alpha)
cb = np.cos(beta)
cg = np.cos(gamma)
sa = np.sin(alpha)
sb = np.sin(beta)
sg = np.sin(gamma)
tg = np.tan(gamma)
'''
direct metric tensor
'''
self._dmt = np.array([[a**2, a*b*cg, a*c*cb],
[a*b*cg, b**2, b*c*ca],
[a*c*cb, b*c*ca, c**2]])
self._vol = np.sqrt(np.linalg.det(self.dmt))
if(self.vol < 1e-5):
warnings.warn('unitcell volume is suspiciously small')
'''
reciprocal metric tensor
'''
self._rmt = np.linalg.inv(self.dmt)
'''
direct structure matrix
'''
self._dsm = np.array([[a, b*cg, c*cb],
[0., b*sg, -c*(cb*cg - ca)/sg],
[0., 0., self.vol/(a*b*sg)]])
self._dsm[np.abs(self._dsm) < eps] = 0.
'''
reciprocal structure matrix
'''
self._rsm = np.array([[1./a, 0., 0.],
[-1./(a*tg), 1./(b*sg), 0.],
[b*c*(cg*ca - cb)/(self.vol*sg),
a*c*(cb*cg - ca)/(self.vol*sg),
a*b*sg/self.vol]])
self._rsm[np.abs(self._rsm) < eps] = 0.
ast = self.CalcLength([1, 0, 0], 'r')
bst = self.CalcLength([0, 1, 0], 'r')
cst = self.CalcLength([0, 0, 1], 'r')
self._aij = np.array([[ast**2, ast*bst, ast*cst],
[bst*ast, bst**2, bst*cst],
[cst*ast, cst*bst, cst**2]])
''' transform between any crystal space to any other space.
choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)'''
def TransSpace(self, v_in, inspace, outspace):
if(inspace == 'd'):
if(outspace == 'r'):
v_out = np.dot(v_in, self.dmt)
elif(outspace == 'c'):
v_out = np.dot(self.dsm, v_in)
else:
raise ValueError(
'inspace in ''d'' but outspace can''t be identified')
elif(inspace == 'r'):
if(outspace == 'd'):
v_out = np.dot(v_in, self.rmt)
elif(outspace == 'c'):
v_out = np.dot(self.rsm, v_in)
else:
raise ValueError(
'inspace in ''r'' but outspace can''t be identified')
elif(inspace == 'c'):
if(outspace == 'r'):
v_out = np.dot(v_in, self.rsm)
elif(outspace == 'd'):
v_out = np.dot(v_in, self.dsm)
else:
raise ValueError(
'inspace in ''c'' but outspace can''t be identified')
else:
raise ValueError('incorrect inspace argument')
return v_out
''' calculate dot product of two vectors in any space 'd' 'r' or 'c' '''
def CalcDot(self, u, v, space):
if(space == 'd'):
dot = np.dot(u, np.dot(self.dmt, v))
elif(space == 'r'):
dot = np.dot(u, np.dot(self.rmt, v))
elif(space == 'c'):
dot = np.dot(u, v)
else:
raise ValueError('space is unidentified')
return dot
''' calculate dot product of two vectors in any space 'd' 'r' or 'c' '''
def CalcLength(self, u, space):
if(space == 'd'):
vlen = np.sqrt(np.dot(u, np.dot(self.dmt, u)))
elif(space == 'r'):
vlen = np.sqrt(np.dot(u, np.dot(self.rmt, u)))
elif(space == 'c'):
vlen = np.linalg.norm(u)
else:
raise ValueError('incorrect space argument')
return vlen
''' normalize vector in any space 'd' 'r' or 'c' '''
def NormVec(self, u, space):
ulen = self.CalcLength(u, space)
return u/ulen
''' calculate angle between two vectors in any space'''
def CalcAngle(self, u, v, space):
ulen = self.CalcLength(u, space)
vlen = self.CalcLength(v, space)
dot = self.CalcDot(u, v, space)/ulen/vlen
angle = np.arccos(dot)
return angle
''' calculate cross product between two vectors in any space.
cross product of two vectors in direct space is a vector in
reciprocal space
cross product of two vectors in reciprocal space is a vector in
direct space
the outspace specifies if a conversion needs to be made
@NOTE: iv is the switch (0/1) which will either turn division
by volume of the unit cell on or off.'''
def CalcCross(self, p, q, inspace, outspace, vol_divide=False):
iv = 0
if(vol_divide):
vol = self.vol
else:
vol = 1.0
pxq = np.array([p[1]*q[2]-p[2]*q[1],
p[2]*q[0]-p[0]*q[2],
p[0]*q[1]-p[1]*q[0]])
if(inspace == 'd'):
'''
cross product vector is in reciprocal space
and can be converted to direct or cartesian space
'''
pxq *= vol
if(outspace == 'r'):
pass
elif(outspace == 'd'):
pxq = self.TransSpace(pxq, 'r', 'd')
elif(outspace == 'c'):
pxq = self.TransSpace(pxq, 'r', 'c')
else:
raise ValueError(
'inspace is ''d'' but outspace is unidentified')
elif(inspace == 'r'):
'''
cross product vector is in direct space and
can be converted to any other space
'''
pxq /= vol
if(outspace == 'r'):
pxq = self.TransSpace(pxq, 'd', 'r')
elif(outspace == 'd'):
pass
elif(outspace == 'c'):
pxq = self.TransSpace(pxq, 'd', 'c')
else:
raise ValueError(
'inspace is ''r'' but outspace is unidentified')
elif(inspace == 'c'):
'''
cross product is already in cartesian space so no
volume factor is involved. can be converted to any
other space too
'''
if(outspace == 'r'):
pxq = self.TransSpace(pxq, 'c', 'r')
elif(outspace == 'd'):
pxq = self.TransSpace(pxq, 'c', 'd')
elif(outspace == 'c'):
pass
else:
raise ValueError(
'inspace is ''c'' but outspace is unidentified')
else:
raise ValueError('inspace is unidentified')
return pxq
def GenerateRecipPGSym(self):
self.SYM_PG_r = self.SYM_PG_d[0, :, :]
self.SYM_PG_r = np.broadcast_to(self.SYM_PG_r, [1, 3, 3])
self.SYM_PG_r_laue = self.SYM_PG_d[0, :, :]
self.SYM_PG_r_laue = np.broadcast_to(self.SYM_PG_r_laue, [1, 3, 3])
for i in range(1, self.npgsym):
g = self.SYM_PG_d[i, :, :]
g = np.dot(self.dmt, np.dot(g, self.rmt))
g = np.round(np.broadcast_to(g, [1, 3, 3]))
self.SYM_PG_r = np.concatenate((self.SYM_PG_r, g))
for i in range(1, self.SYM_PG_d_laue.shape[0]):
g = self.SYM_PG_d_laue[i, :, :]
g = np.dot(self.dmt, np.dot(g, self.rmt))
g = np.round(np.broadcast_to(g, [1, 3, 3]))
self.SYM_PG_r_laue = np.concatenate((self.SYM_PG_r_laue, g))
self.SYM_PG_r = self.SYM_PG_r.astype(np.int32)
self.SYM_PG_r_laue = self.SYM_PG_r_laue.astype(np.int32)
def GenerateCartesianPGSym(self):
'''
use the direct point group symmetries to generate the
symmetry operations in the cartesian frame. this is used
        to reduce directions to the standard stereographic triangle
'''
self.SYM_PG_c = []
self.SYM_PG_c_laue = []
for sop in self.SYM_PG_d:
self.SYM_PG_c.append(np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_c = np.array(self.SYM_PG_c)
self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0.
if(self._pointGroup == self._laueGroup):
self.SYM_PG_c_laue = self.SYM_PG_c
else:
for sop in self.SYM_PG_d_laue:
self.SYM_PG_c_laue.append(
np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_c_laue = np.array(self.SYM_PG_c_laue)
self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0.
'''
use the point group symmetry of the supergroup
to generate the equivalent operations in the
cartesian reference frame
SS 11/23/2020 added supergroup symmetry operations
SS 11/24/2020 fix monoclinic groups separately since
the supergroup for monoclinic is orthorhombic
'''
supergroup = self._supergroup
sym_supergroup = symmetry.GeneratePGSYM(supergroup)
supergroup_laue = self._supergroup_laue
sym_supergroup_laue = symmetry.GeneratePGSYM(supergroup_laue)
if((self.latticeType == 'monoclinic' or
self.latticeType == 'triclinic')):
'''
for monoclinic groups c2 and c2h, the supergroups are
orthorhombic, so no need to convert from direct to
cartesian as they are identical
'''
self.SYM_PG_supergroup = sym_supergroup
self.SYM_PG_supergroup_laue = sym_supergroup_laue
else:
self.SYM_PG_supergroup = []
self.SYM_PG_supergroup_laue = []
for sop in sym_supergroup:
self.SYM_PG_supergroup.append(
np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_supergroup = np.array(self.SYM_PG_supergroup)
self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0.
for sop in sym_supergroup_laue:
self.SYM_PG_supergroup_laue.append(
np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_supergroup_laue = np.array(self.SYM_PG_supergroup_laue)
self.SYM_PG_supergroup_laue[np.abs(
self.SYM_PG_supergroup_laue) < eps] = 0.
'''
the standard setting for the monoclinic system has the b-axis aligned
with the 2-fold axis. this needs to be accounted for when reduction to
        the standard stereographic triangle is performed. the simplest way is to
        rotate all symmetry elements by 90 degrees about the x-axis
the supergroups for the monoclinic groups are orthorhombic so they need
not be rotated as they have the c* axis already aligned with the z-axis
SS 12/10/2020
'''
if(self.latticeType == 'monoclinic'):
om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]])
for i, s in enumerate(self.SYM_PG_c):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_c[i, :, :] = ss
for i, s in enumerate(self.SYM_PG_c_laue):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_c_laue[i, :, :] = ss
'''
for the triclinic group c1, the supergroups are the monoclinic group m
therefore we need to rotate the mirror to be perpendicular to the z-axis
same shouldn't be done for the group ci, since the supergroup is just the
triclinic group c1!!
SS 12/10/2020
'''
if(self._pointGroup == 'c1'):
om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]])
for i, s in enumerate(self.SYM_PG_supergroup):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_supergroup[i, :, :] = ss
for i, s in enumerate(self.SYM_PG_supergroup_laue):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_supergroup_laue[i, :, :] = ss
def CalcOrbit(self, v, reduceToUC=True):
"""
@date 03/04/2021 SS 1.0 original
@details calculate the equivalent position for the
space group symmetry. this function will replace the
code in the CalcPositions subroutine.
        @params v is the fractional coordinate in direct space
reduceToUC reduces the position to the
fundamental fractional unit cell (0-1)
"""
asym_pos = []
n = 1
if v.shape[0] != 3:
raise RuntimeError("fractional coordinate in not 3-d")
r = v
        # using the Seitz (augmented 4-vector) notation
r = np.hstack((r, 1.))
asym_pos = np.broadcast_to(r[0:3], [1, 3])
for symmat in self.SYM_SG:
# get new position
rnew = np.dot(symmat, r)
rr = rnew[0:3]
if reduceToUC:
# reduce to fundamental unitcell with fractional
# coordinates between 0-1
rr = np.modf(rr)[0]
rr[rr < 0.] += 1.
rr[np.abs(rr) < 1.0E-6] = 0.
# check if this is new
isnew = True
for j in range(n):
v = rr - asym_pos[j]
dist = self.CalcLength(v, 'd')
if dist < 1E-3:
isnew = False
break
# if its new add this to the list
if(isnew):
asym_pos = np.vstack((asym_pos, rr))
n += 1
numat = n
return asym_pos, numat
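    # Illustrative use of CalcOrbit (not part of the original module; `cell`
    # is assumed to be an initialized unitcell instance):
    #
    #   v = np.array([0.25, 0.25, 0.25])
    #   orbit, numat = cell.CalcOrbit(v)
    #   # orbit has shape (numat, 3): one row per symmetry-equivalent site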
def CalcStar(self, v, space, applyLaue=False):
'''
this function calculates the symmetrically equivalent hkls (or uvws)
for the reciprocal (or direct) point group symmetry.
'''
if(space == 'd'):
if(applyLaue):
sym = self.SYM_PG_d_laue
else:
sym = self.SYM_PG_d
elif(space == 'r'):
if(applyLaue):
sym = self.SYM_PG_r_laue
else:
sym = self.SYM_PG_r
else:
raise ValueError('CalcStar: unrecognized space.')
vsym = np.atleast_2d(v)
for s in sym:
vp = np.dot(s, v)
# check if this is new
isnew = True
for vec in vsym:
vv = vp - vec
dist = self.CalcLength(vv, space)
if dist < 1E-3:
isnew = False
break
if(isnew):
vsym = np.vstack((vsym, vp))
return vsym
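    # Illustrative use of CalcStar (assumes an initialized unitcell `cell`):
    #
    #   g = np.array([1., 1., 0.])
    #   star = cell.CalcStar(g, 'r', applyLaue=True)
    #   # star stacks the symmetrically distinct copies of g, one per row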
def CalcPositions(self):
'''
calculate the asymmetric positions in the fundamental unitcell
used for structure factor calculations
'''
numat = []
asym_pos = []
for i in range(self.atom_ntype):
v = self.atom_pos[i, 0:3]
apos, n = self.CalcOrbit(v)
asym_pos.append(apos)
numat.append(n)
self.numat = np.array(numat)
self.asym_pos = asym_pos
def remove_duplicate_atoms(self,
atom_pos=None,
tol=1e-3):
"""
@date 03/04/2021 SS 1.0 original
        @details it was requested that a functionality be
        added which can remove duplicate atoms from the
        atom_pos field such that no two atoms are closer than
        the distance specified by "tol" (assumed to be in angstrom).
steps involved are as follows:
        1. get the star (or orbit) of each point in atom_pos
2. if any points in the orbits are within tol, then
remove the second point (the first point will be
preserved by convention)
3. update the densities, interptables for structure factors
etc.
        @params tol tolerance of distance between points, specified
        in A (angstrom)
"""
if atom_pos is None:
atom_pos = self.atom_pos
atom_pos_fixed = []
"""
go through the atom_pos and remove the atoms that are duplicate
"""
        for i in range(atom_pos.shape[0]):
            pos = atom_pos[i, 0:3]
            occ = atom_pos[i, 3]
            v1, n1 = self.CalcOrbit(pos)
            isclose = False
            # compare against the atoms already accepted, so that the first
            # occurrence is the one preserved by convention
            for p in atom_pos_fixed:
                v2, n2 = self.CalcOrbit(p[0:3])
                for v in v2:
                    vv = np.tile(v, [v1.shape[0], 1])
                    vv = vv - v1
                    for vvv in vv:
                        # check if distance less than tol
                        # the factor of 10 is for A --> nm
                        if self.CalcLength(vvv, 'd') < tol/10.:
                            # if true then its a repeated atom
                            isclose = True
                            break
                    if isclose:
                        break
                if isclose:
                    break
            if not isclose:
                atom_pos_fixed.append(np.hstack([pos, occ]))
        return np.array(atom_pos_fixed)
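    # Illustrative use of remove_duplicate_atoms (hypothetical positions);
    # note that tol is given in angstrom while lengths are computed in nm:
    #
    #   pos = np.array([[0.0, 0.0, 0.0, 1.0],
    #                   [0.5, 0.5, 0.5, 1.0]])  # columns: x, y, z, occupancy
    #   cell.atom_pos = cell.remove_duplicate_atoms(pos, tol=1e-3)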
def CalcDensity(self):
'''
calculate density, average atomic weight (avA)
and average atomic number(avZ)
'''
self.avA = 0.0
self.avZ = 0.0
for i in range(self.atom_ntype):
'''
atype is atom type i.e. atomic number
numat is the number of atoms of atype
atom_pos(i,3) has the occupation factor
'''
atype = self.atom_type[i]
numat = self.numat[i]
occ = self.atom_pos[i, 3]
# -1 due to 0 indexing in python
self.avA += numat * constants.atom_weights[atype-1] * occ
self.avZ += numat * atype
self.density = self.avA / (self.vol * 1.0E-21 * constants.cAvogadro)
av_natom = np.dot(self.numat, self.atom_pos[:, 3])
self.avA /= av_natom
self.avZ /= np.sum(self.numat)
''' calculate the maximum index of diffraction vector along
each of the three reciprocal
basis vectors '''
def init_max_g_index(self):
"""
added 03/17/2021 SS
"""
self.ih = 1
self.ik = 1
self.il = 1
def CalcMaxGIndex(self):
self.init_max_g_index()
while (1.0 / self.CalcLength(
np.array([self.ih, 0, 0],
dtype=np.float64), 'r') > self.dmin):
self.ih = self.ih + 1
while (1.0 / self.CalcLength(
np.array([0, self.ik, 0],
dtype=np.float64), 'r') > self.dmin):
self.ik = self.ik + 1
while (1.0 / self.CalcLength(
np.array([0, 0, self.il],
dtype=np.float64), 'r') > self.dmin):
self.il = self.il + 1
def InitializeInterpTable(self):
self.f1 = {}
self.f2 = {}
self.f_anam = {}
data = importlib.resources.open_binary(hexrd.resources, 'Anomalous.h5')
with h5py.File(data, 'r') as fid:
for i in range(0, self.atom_ntype):
Z = self.atom_type[i]
elem = constants.ptableinverse[Z]
gid = fid.get('/'+elem)
data = gid.get('data')
self.f1[elem] = interp1d(data[:, 7], data[:, 1])
self.f2[elem] = interp1d(data[:, 7], data[:, 2])
def CalcAnomalous(self):
for i in range(self.atom_ntype):
Z = self.atom_type[i]
elem = constants.ptableinverse[Z]
f1 = self.f1[elem](self.wavelength)
f2 = self.f2[elem](self.wavelength)
frel = constants.frel[elem]
Z = constants.ptable[elem]
            self.f_anam[elem] = complex(f1+frel-Z, f2)
def CalcXRFormFactor(self, Z, charge, s):
'''
        we are using the following form factors for x-ray scattering:
        1. coherent x-ray scattering, f0 tabulated in Acta Cryst. (1995). A51, 416-431
        2. anomalous x-ray scattering (complex (f'+if")) tabulated in J. Phys. Chem. Ref. Data, 24, 71 (1995)
        and J. Phys. Chem. Ref. Data, 29, 597 (2000).
        3. Thomson nuclear scattering, fNT tabulated in Phys. Lett. B, 69, 281 (1977).
the anomalous scattering is a complex number (f' + if"), where the two terms are given by
f' = f1 + frel - Z
f" = f2
f1 and f2 have been tabulated as a function of energy in Anomalous.h5 in hexrd folder
overall f = (f0 + f' + if" +fNT)
'''
elem = constants.ptableinverse[Z]
if charge == '0':
sfact = constants.scatfac[elem]
else:
sfact = constants.scatfac[f"{elem}{charge}"]
fe = sfact[5]
fNT = constants.fNT[elem]
frel = constants.frel[elem]
f_anomalous = self.f_anam[elem]
for i in range(5):
fe += sfact[i] * np.exp(-sfact[i+6]*s)
return (fe+fNT+f_anomalous)
def CalcXRSF(self, hkl):
'''
the 1E-2 is to convert to A^-2
since the fitting is done in those units
'''
s = 0.25 * self.CalcLength(hkl, 'r')**2 * 1E-2
        sf = complex(0., 0.)
for i in range(0, self.atom_ntype):
Z = self.atom_type[i]
charge = self.chargestates[i]
ff = self.CalcXRFormFactor(Z, charge, s)
if(self.aniU):
T = np.exp(-np.dot(hkl, np.dot(self.betaij[i, :, :], hkl)))
else:
T = np.exp(-8.0*np.pi**2 * self.U[i]*s)
ff *= self.atom_pos[i, 3] * T
for j in range(self.asym_pos[i].shape[0]):
arg = 2.0 * np.pi * np.sum(hkl * self.asym_pos[i][j, :])
                sf = sf + ff * complex(np.cos(arg), -np.sin(arg))
return np.abs(sf)**2
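    # Illustrative use of the structure-factor routines (assumes the
    # wavelength, atom types and positions are set, and that CalcPositions,
    # InitializeInterpTable and CalcAnomalous have been called):
    #
    #   F2 = cell.CalcXRSF(np.array([1., 1., 1.]))  # |F(111)|^2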
    ''' calculate bragg angle for a reflection. returns NaN if
    the reflection is not possible for the voltage/wavelength
    '''
def CalcBraggAngle(self, hkl):
glen = self.CalcLength(hkl, 'r')
sth = self.wavelength * glen * 0.5
return np.arcsin(sth)
def ChooseSymmetric(self, hkllist, InversionSymmetry=True):
'''
this function takes a list of hkl vectors and
picks out a subset of the list picking only one
of the symmetrically equivalent one. The convention
is to choose the hkl with the most positive components.
'''
        mask = np.ones(hkllist.shape[0], dtype=bool)
laue = InversionSymmetry
for i, g in enumerate(hkllist):
if(mask[i]):
geqv = self.CalcStar(g, 'r', applyLaue=laue)
for r in geqv[1:, ]:
rid = np.where(np.all(r == hkllist, axis=1))
mask[rid] = False
hkl = hkllist[mask, :].astype(np.int32)
hkl_max = []
for g in hkl:
geqv = self.CalcStar(g, 'r', applyLaue=laue)
loc = np.argmax(np.sum(geqv, axis=1))
gmax = geqv[loc, :]
hkl_max.append(gmax)
return np.array(hkl_max).astype(np.int32)
def SortHKL(self, hkllist):
'''
this function sorts the hkllist by increasing |g|
i.e. decreasing d-spacing. If two vectors are same
length, then they are ordered with increasing
priority to l, k and h
'''
glen = []
for g in hkllist:
glen.append(np.round(self.CalcLength(g, 'r'), 8))
# glen = np.atleast_2d(np.array(glen,dtype=np.float)).T
dtype = [('glen', float), ('max', int), ('sum', int),
('h', int), ('k', int), ('l', int)]
a = []
for i, gl in enumerate(glen):
g = hkllist[i, :]
a.append((gl, np.max(g), np.sum(g), g[0], g[1], g[2]))
a = np.array(a, dtype=dtype)
isort = np.argsort(a, order=['glen', 'max', 'sum', 'l', 'k', 'h'])
return hkllist[isort, :]
def getHKLs(self, dmin):
'''
        this function generates the symmetrically unique set of
hkls up to a given dmin.
dmin is in nm
'''
'''
always have the centrosymmetric condition because of
Friedels law for xrays so only 4 of the 8 octants
are sampled for unique hkls. By convention we will
ignore all l < 0
'''
hmin = -self.ih-1
hmax = self.ih
kmin = -self.ik-1
kmax = self.ik
lmin = -1
lmax = self.il
hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1)
for ik in np.arange(kmax, kmin, -1)
for il in np.arange(lmax, lmin, -1)])
hkl_allowed = Allowed_HKLs(self.sgnum, hkllist)
hkl = []
dsp = []
hkl_dsp = []
for g in hkl_allowed:
# ignore [0 0 0] as it is the direct beam
if(np.sum(np.abs(g)) != 0):
dspace = 1./self.CalcLength(g, 'r')
if(dspace >= dmin):
hkl_dsp.append(g)
'''
        we now have a list of g vectors which are all within dmin range
        plus the systematic absences due to lattice centering and glide
        planes/screw axes have been taken care of
        the next order of business is to go through the list and only pick
        out one of the symmetrically equivalent hkls from the list.
'''
hkl_dsp = np.array(hkl_dsp).astype(np.int32)
'''
the inversionsymmetry switch enforces the application of the inversion
        symmetry regardless of whether the crystal has the symmetry or not
        this is necessary in the case of x-rays due to Friedel's law
'''
hkl = self.ChooseSymmetric(hkl_dsp, InversionSymmetry=True)
'''
finally sort in order of decreasing dspacing
'''
self.hkls = self.SortHKL(hkl)
return self.hkls
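    # Illustrative end-to-end call (assumes CalcMaxGIndex has been run so
    # that the ih/ik/il bounds are set); dmin is in nm:
    #
    #   hkls = cell.getHKLs(dmin=0.1)
    #   # hkls holds one unique reflection per row, sorted by decreasing
    #   # d-spacing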
'''
set some properties for the unitcell class. only the lattice
parameters, space group and asymmetric positions can change,
but all the dependent parameters will be automatically updated
'''
def Required_lp(self, p):
return _rqpDict[self.latticeType][1](p)
def Required_C(self, C):
return np.array([C[x] for x in _StiffnessDict[self._laueGroup][0]])
def MakeStiffnessMatrix(self, inp_Cvals):
if(len(inp_Cvals) != len(_StiffnessDict[self._laueGroup][0])):
x = len(_StiffnessDict[self._laueGroup][0])
msg = (f"number of constants entered is not correct."
f" need a total of {x} independent constants.")
raise IOError(msg)
# initialize all zeros and fill the supplied values
C = np.zeros([6, 6])
for i, x in enumerate(_StiffnessDict[self._laueGroup][0]):
C[x] = inp_Cvals[i]
# enforce the equality constraints
C = _StiffnessDict[self._laueGroup][1](C)
# finally fill the lower triangular matrix
for i in range(6):
for j in range(i):
C[i, j] = C[j, i]
        self.stiffness = C
self.compliance = np.linalg.inv(C)
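    # Illustrative use of MakeStiffnessMatrix for a cubic crystal, where the
    # three independent constants are (C11, C12, C44); the values below are
    # hypothetical and in the caller's units:
    #
    #   cell.MakeStiffnessMatrix([250., 150., 120.])
    #   S = cell.compliance  # inverse of the 6x6 stiffness matrix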
def inside_spheretriangle(self, conn, dir3, hemisphere, switch):
'''
check if direction is inside a spherical triangle
the logic used as follows:
        if the determinants of [A B x], [A x C] and [x B C] are
        all of the same sign, then the point x is inside the triangle
        formed by A, B and C
returns a mask with inside as True and outside as False
11/23/2020 SS switch is now a string specifying which
symmetry group to use for reducing directions
11/23/2020 SS catching cases when vertices are empty
'''
'''
        first get the vertices of the spherical triangles
'''
vertex = self.sphere_sector.vertices[switch]
# if(switch == 'pg'):
# vertex = self.sphere_sector.vertices
# elif(switch == 'laue'):
# vertex = self.sphere_sector.vertices_laue
# elif(switch == 'super'):
# vertex = self.sphere_sector.vertices_supergroup
# elif(switch == 'superlaue'):
# vertex = self.sphere_sector.vertices_supergroup_laue
A = np.atleast_2d(vertex[:, conn[0]]).T
B = np.atleast_2d(vertex[:, conn[1]]).T
C = np.atleast_2d(vertex[:, conn[2]]).T
mask = []
for x in dir3:
x2 = np.atleast_2d(x).T
d1 = np.linalg.det(np.hstack((A, B, x2)))
d2 = np.linalg.det(np.hstack((A, x2, C)))
d3 = np.linalg.det(np.hstack((x2, B, C)))
'''
catching cases very close to FZ boundary when the
determinant can be very small positive or negative
number
'''
if(np.abs(d1) < eps):
d1 = 0.
if(np.abs(d2) < eps):
d2 = 0.
if(np.abs(d3) < eps):
d3 = 0.
ss = np.unique(np.sign([d1, d2, d3]))
if(hemisphere == 'upper'):
if(np.all(ss >= 0.)):
mask.append(True)
else:
mask.append(False)
elif(hemisphere == 'both'):
if(len(ss) == 1):
mask.append(True)
elif(len(ss) == 2):
if(0 in ss):
mask.append(True)
else:
mask.append(False)
elif(len(ss) == 3):
mask.append(False)
mask = np.array(mask)
return mask
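    # The determinant test above, in isolation: for unit vectors A, B, C and
    # a query direction x, x lies inside the spherical triangle ABC when
    # det([A B x]), det([A x C]) and det([x B C]) share the same sign.
    # A minimal standalone sketch:
    #
    #   A, B, C = np.eye(3)                        # octant triangle
    #   x = np.array([1., 1., 1.]) / np.sqrt(3.)
    #   ds = [np.linalg.det(np.stack(m, axis=1))
    #         for m in ((A, B, x), (A, x, C), (x, B, C))]
    #   inside = len(np.unique(np.sign(ds))) == 1  # -> True for this x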
'''
@AUTHOR <NAME>, Lawrence Livermore National Lab, <EMAIL>
@date 10/28/2020 SS 1.0 original
11/23/2020 SS 1.1 the laueswitch has been changed from a boolean
    variable to a string input with three possible values
@params dir3 : n x 3 array of directions to reduce
switch switch to decide which symmetry group to use. one of four:
(a) 'pg' use the cartesian point group symmetry
(b) 'laue' use the laue symmetry
(c) 'super' use the supergroup symmetry used in coloring
(d) 'superlaue' use the supergroup of the laue group
@detail this subroutine takes a direction vector and uses the point group
symmetry of the unitcell to reduce it to the fundamental stereographic
triangle for that point group. this function is used in generating the IPF
color legend for orientations. for now we are assuming dir3 is a nx3 array
of directions.
'''
def reduce_dirvector(self, dir3, switch='pg'):
'''
check if the dimensions of the dir3 array is to spec
'''
        dir3 = np.ascontiguousarray(np.atleast_2d(dir3))
        if(dir3.ndim != 2):
            raise RuntimeError("reduce_dirvector: invalid shape of dir3 array")
        idx = np.arange(dir3.shape[0], dtype=np.int32)
'''
check if the direction vector is a unit vector or not.
if it is not normalize it to get a unit vector. the dir vector
is in the sample frame, so by default it is assumed to be in a
orthonormal cartesian frame. this defines the normalization as
just division by the L2 norm
'''
eps = constants.sqrt_epsf
if(np.all(np.abs(np.linalg.norm(dir3, axis=1) - 1.0) < eps)):
dir3n = dir3
else:
            if(np.all(np.linalg.norm(dir3, axis=1) > eps)):
                dir3n = dir3/np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T
            else:
                raise RuntimeError(
                    "at least one of the input directions seems \
                    to be a null vector")
'''
we need both the symmetry reductions for the point group and laue group
this will be used later on in the coloring routines to determine if the
points needs to be moved to the southern hemisphere or not
'''
dir3_copy = np.copy(dir3n)
dir3_reduced = np.array([])
idx_copy = np.copy(idx)
idx_red = np.array([], dtype=np.int32)
'''
laue switch is used to determine which set of symmetry operations to
loop over
'''
hemisphere = self.sphere_sector.hemisphere[switch]
ntriangle = self.sphere_sector.ntriangle[switch]
connectivity = self.sphere_sector.connectivity[switch]
if(switch == 'pg'):
sym = self.SYM_PG_c
elif(switch == 'super'):
sym = self.SYM_PG_supergroup
elif(switch == 'laue'):
sym = self.SYM_PG_c_laue
elif(switch == 'superlaue'):
sym = self.SYM_PG_supergroup_laue
for sop in sym:
if(dir3_copy.size != 0):
dir3_sym = np.dot(sop, dir3_copy.T).T
                mask = np.zeros(dir3_sym.shape[0]).astype(bool)
if(ntriangle == 0):
if(hemisphere == 'both'):
                        mask = np.ones(dir3_sym.shape[0], dtype=bool)
elif(hemisphere == 'upper'):
mask = dir3_sym[:, 2] >= 0.
else:
for ii in range(ntriangle):
tmpmask = self.inside_spheretriangle(
connectivity[:, ii], dir3_sym,
hemisphere, switch)
mask = np.logical_or(mask, tmpmask)
if(np.sum(mask) > 0):
if(dir3_reduced.size != 0):
dir3_reduced = np.vstack(
(dir3_reduced, dir3_sym[mask, :]))
                        idx_red = np.hstack((idx_red, idx[mask]))
#!/usr/bin/env python
"""
oocgcm.plot.plot1d
Define nice plotting function for bidimensional data series using matplotlib
"""
import numpy as np
import pylab as plt
import matplotlib.mlab as mlab
from matplotlib.ticker import MultipleLocator
import matplotlib
def spectrum2d_plot(ax, x, y, z, xlog=False, ylog=False, zlog=False, **kwargs):
"""
Define a nice spectrum with twin x-axis and twin y-axis, one with
frequencies, the other one with periods, on a predefined axis
object.
Parameters
----------
x,y : array_like
1D array defining the coordinates
z : array_like
2D array
xlog, ylog, zlog : bool, optional
Define if the x-axis, y-axis and z-axis are plotted with a
log scale
** kwargs : optional keyword arguments
See matplotlib.axes.Axes.contourf method in matplotlib
documentation
"""
    xlim = kwargs.pop('xlim', None)
    ylim = kwargs.pop('ylim', None)
    zlim = kwargs.pop('zlim', None)
n_lev = 40
# if symmetric:
# lim = max(np.max(z), abs(np.min(z)))
# lev = np.hstack((np.linspace(- lim, 0, n_lev / 2 + 1),
# np.linspace(0, lim, n_lev / 2)[1:]))
#
# else:
# lev = np.linspace(np.min(z), np.max(z), n_lev / 2 + 1)
    if zlog:
        plot = ax.pcolormesh(x, y, np.log10(z), **kwargs)
    else:
        plot = ax.pcolormesh(x, y, z, **kwargs)
# X limits
if xlog:
ax.set_xscale('symlog', nonposx='clip')
xmin = np.ceil(np.log10(x[1,])) - 1
xmax = np.ceil(np.log10(x[-1,]))
ax.set_xlim((10 ** xmin, 10 ** xmax))
    else:
        if xlim is not None:
            ax.set_xlim(xlim)
        else:
            ax.set_xlim(np.min(x), np.max(x))
    # Y limits
    if ylog:
        ax.set_yscale('symlog', nonposy='clip')
        ymin = np.ceil(np.log10(y[1,])) - 1
        ymax = np.ceil(np.log10(y[-1,]))
        ax.set_ylim((10 ** ymin, 10 ** ymax))
    else:
        if ylim is not None:
            ax.set_ylim(ylim)
        else:
            ax.set_ylim(np.min(y), np.max(y))
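if __name__ == '__main__':
    # Minimal smoke test (illustrative only, synthetic power-law spectrum;
    # not part of the original module):
    fx = np.linspace(1e-3, 0.5, 128)
    fy = np.linspace(1e-3, 0.5, 96)
    spec = 1. / np.outer(fy, fx)
    fig, ax = plt.subplots()
    spectrum2d_plot(ax, fx, fy, spec, zlog=True, shading='auto')
    plt.show()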
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from collections import OrderedDict
import numpy as np
import os
import pandas
import posixpath
import warnings
from ast import literal_eval
from pyiron.base.settings.generic import Settings
from pyiron.base.generic.template import PyironObject
"""
GenericParameters class defines the typical input file with a key value structure plus an additional column for comments.
"""
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
class GenericParameters(PyironObject):
"""
GenericParameters class defines the typical input file with a key value structure plus an additional column for comments.
Convenience class to easily create, read, and modify input files
Args:
table_name (str): name of the input file inside the HDF5 file - optional
input_file_name (str): name of the input file (if None default parameters are used)
val_only (bool): input format consists of value (comments) only
comment_char (str): separator that characterizes comment (e.g. "#" for python)
separator_char (str): separator that characterizes the split between key and value - default=' '
end_value_char (str): special character at the end of every line - default=''
Attributes:
.. attribute:: file_name
file name of the input file
.. attribute:: table_name
name of the input table inside the HDF5 file
.. attribute:: val_only
            boolean option to switch from a key value list to a value-only input file
.. attribute:: comment_char
separator that characterizes comment
.. attribute:: separator_char
separator that characterizes the split between key and value
.. attribute:: multi_word_separator
multi word separator to have multi word keys
.. attribute:: end_value_char
special character at the end of every line
.. attribute:: replace_char_dict
dictionary to replace certain character combinations
"""
def __init__(
self,
table_name=None,
input_file_name=None,
val_only=False,
comment_char="#",
separator_char=" ",
end_value_char="",
):
self.__name__ = "GenericParameters"
self.__version__ = "0.1"
self._file_name = None
self._table_name = None
self._comment_char = None
self._val_only = None
self._separator_char = None
self._multi_word_separator = None
self._end_value_char = None
self._replace_char_dict = None
self._block_dict = None
self._bool_dict = {True: "True", False: "False"}
self._dataset = OrderedDict()
self._block_line_dict = {}
self.end_value_char = end_value_char
self.file_name = input_file_name
self.table_name = table_name
self.val_only = val_only
self.comment_char = comment_char
self.separator_char = separator_char
self.multi_word_separator = "___"
self.read_only = False
if input_file_name is None:
self.load_default()
else:
self.read_input(self.file_name)
@property
def file_name(self):
"""
Get the file name of the input file
Returns:
str: file name
"""
return self._file_name
@file_name.setter
def file_name(self, new_file_name):
"""
Set the file name of the input file
Args:
new_file_name (str): file name
"""
self._file_name = new_file_name
@property
def table_name(self):
"""
Get the name of the input table inside the HDF5 file
Returns:
str: table name
"""
return self._table_name
@table_name.setter
def table_name(self, new_table_name):
"""
Set the name of the input table inside the HDF5 file
Args:
new_table_name (str): table name
"""
self._table_name = new_table_name
@property
def val_only(self):
"""
        Get the boolean option to switch from a key value list to a value-only input file
Returns:
bool: [True/False]
"""
return self._val_only
@val_only.setter
def val_only(self, val_only):
"""
        Set the boolean option to switch from a key value list to a value-only input file
Args:
val_only (bool): [True/False]
"""
self._val_only = val_only
@property
def comment_char(self):
"""
Get the separator that characterizes comment
Returns:
str: comment character
"""
return self._comment_char
@comment_char.setter
def comment_char(self, new_comment_char):
"""
Set the separator that characterizes comment
Args:
new_comment_char (str): comment character
"""
self._comment_char = new_comment_char
@property
def separator_char(self):
"""
Get the separator that characterizes the split between key and value
Returns:
str: separator character
"""
return self._separator_char
@separator_char.setter
def separator_char(self, new_separator_char):
"""
Set the separator that characterizes the split between key and value
Args:
new_separator_char (str): separator character
"""
self._separator_char = new_separator_char
@property
def multi_word_separator(self):
"""
Get the multi word separator to have multi word keys
Returns:
str: multi word separator
"""
return self._multi_word_separator
@multi_word_separator.setter
def multi_word_separator(self, new_multi_word_separator):
"""
Set the multi word separator to have multi word keys
Args:
new_multi_word_separator (str): multi word separator
"""
self._multi_word_separator = new_multi_word_separator
@property
def end_value_char(self):
"""
Get the special character at the end of every line
Returns:
str: end of line character
"""
return self._end_value_char
@end_value_char.setter
def end_value_char(self, new_end_value_char):
"""
Set the special character at the end of every line
Args:
new_end_value_char (str): end of line character
"""
self._end_value_char = new_end_value_char
@property
def replace_char_dict(self):
"""
Get the dictionary to replace certain character combinations
Returns:
dict: character replace dictionary
"""
return self._replace_char_dict
@replace_char_dict.setter
def replace_char_dict(self, new_replace_char_dict):
"""
Set the dictionary to replace certain character combinations
Args:
new_replace_char_dict (dict): character replace dictionary
"""
self._replace_char_dict = new_replace_char_dict
def _read_only_check_dict(self, new_dict):
if self.read_only and new_dict != self._dataset:
self._read_only_error()
@staticmethod
def _read_only_error():
warnings.warn(
"The input in GenericParameters changed, while the state of the job was already finished."
)
def load_string(self, input_str):
"""
Load a multi line string to overwrite the current parameter settings
Args:
input_str (str): multi line string
"""
new_dict = self._lines_to_dict(input_str.splitlines())
self._read_only_check_dict(new_dict=new_dict)
self._dataset = new_dict
def load_default(self):
"""
Load defaults resets the dataset in the background to be empty
"""
new_dict = OrderedDict()
new_dict["Parameter"] = []
new_dict["Value"] = []
new_dict["Comment"] = []
self._read_only_check_dict(new_dict=new_dict)
self._dataset = new_dict
def keys(self):
"""
Return keys of GenericParameters object
"""
if self.val_only:
return []
else:
return self._dataset["Parameter"]
def read_input(self, file_name, ignore_trigger=None):
"""
Read input file and store the data in GenericParameters - this overwrites the current parameter settings
Args:
file_name (str): absolute path to the input file
ignore_trigger (str): trigger for lines to be ignored
"""
Settings().logger.debug("file: %s %s", file_name, os.path.isfile(file_name))
if not os.path.isfile(file_name):
raise ValueError("file does not exist: " + file_name)
with open(file_name, "r") as f:
lines = f.readlines()
new_lines = np.array(lines).tolist()
if ignore_trigger is not None:
del_ind = list()
for i, line in enumerate(lines):
line = line.strip()
if len(line.split()) > 0:
                    if ignore_trigger == line.strip()[0]:
                        del_ind.append(i)
                    elif ignore_trigger in line:
                        lines[i] = line[: line.find(ignore_trigger)]
lines = np.array(lines)
new_lines = lines[np.setdiff1d(np.arange(len(lines)), del_ind)]
new_dict = self._lines_to_dict(new_lines)
self._read_only_check_dict(new_dict=new_dict)
self._dataset = new_dict
def get_pandas(self):
"""
Output the GenericParameters object as Pandas Dataframe for human readability.
Returns:
pandas.DataFrame: Pandas Dataframe of the GenericParameters object
"""
return pandas.DataFrame(self._dataset)
def get(self, parameter_name, default_value=None):
"""
Get the value of a specific parameter from GenericParameters - if the parameter is not available return
default_value if that is set.
Args:
parameter_name (str): parameter key
            default_value (str): default value to return if the parameter is not set
Returns:
str: value of the parameter
"""
i_line = self._find_line(parameter_name)
if i_line > -1:
val = self._dataset["Value"][i_line]
try:
val_v = literal_eval(val)
except (ValueError, SyntaxError):
val_v = val
if callable(val_v):
val_v = val
return val_v
elif default_value is not None:
return default_value
else:
raise NameError("parameter not found: " + parameter_name)
def get_attribute(self, attribute_name):
"""
Get the value of a specific parameter from GenericParameters
Args:
attribute_name (str): parameter key
Returns:
str: value of the parameter
"""
if "_attributes" not in dir(self):
return None
        i_line = np.where(np.array(self._attributes["Parameter"]) == attribute_name)[0]
        if len(i_line) > 0:
            return self._attributes["Value"][i_line[0]]
        else:
            return None
def modify(self, separator=None, append_if_not_present=False, **modify_dict):
"""
Modify values for existing parameters. The command is called as modify(param1=val1, param2=val2, ...)
Args:
separator (str): needed if the parameter name contains special characters such as par:
use then as input: modify(separator=":", par=val) - optional
            append_if_not_present (bool): do not raise an exception but append the parameter instead; in practice
                use set(par=val) - default=False
**modify_dict (dict): dictionary of parameter names and values
"""
# print ("modify: ", modify_dict)
if separator is not None:
modify_dict = {k + separator: v for k, v in modify_dict.items()}
for key, val in modify_dict.items():
i_key = self._find_line(key)
if i_key == -1:
if append_if_not_present:
self._append(**{key: val})
continue
else:
raise ValueError("key for modify not found " + key)
if isinstance(val, tuple):
val, comment = val
if self.read_only and self._dataset["Comment"][i_key] != comment:
self._read_only_error()
self._dataset["Comment"][i_key] = comment
if self.read_only and str(self._dataset["Value"][i_key]) != str(val):
self._read_only_error()
self._dataset["Value"][i_key] = str(val)
def set(self, separator=None, **set_dict):
"""
Set the value of multiple parameters or create new parameter key, if they do not exist already.
Args:
separator (float/int/str): separator string - optional
**set_dict (dict): dictionary containing the parameter keys and their corresponding values to be set
"""
self.modify(separator=separator, append_if_not_present=True, **set_dict)
def set_value(self, line, val):
"""
Set the value of a parameter in a specific line
Args:
line (float/int/str): line number - starting with 0
val (str/bytes): value to be set
"""
if line < len(self._dataset["Value"]):
if self.read_only and self._dataset["Value"][line] != val:
self._read_only_error()
self._dataset["Value"][line] = val
elif line >= len(self._dataset["Value"]):
new_array = []
new_comments = []
new_params = []
for el in self._dataset["Value"]:
new_array.append(el)
new_comments.append("")
new_params.append("")
new_array.append(val)
new_comments.append("")
new_params.append("")
new_dict = OrderedDict()
new_dict["Value"] = new_array
new_dict["Comment"] = new_comments
new_dict["Parameter"] = new_params
self._read_only_check_dict(new_dict=new_dict)
self._dataset = new_dict
else:
raise ValueError("Wrong indexing")
def remove_keys(self, key_list):
"""
Remove a list of keys from the GenericParameters
Args:
key_list (list): list of keys to be removed
"""
if self.read_only and any([k in self._dataset["Parameter"] for k in key_list]):
self._read_only_error()
for key in key_list:
params = np.array(self._dataset["Parameter"])
i_keys = np.where(params == key)[0]
            if len(i_keys) == 0:
                continue
for i_key in i_keys[::-1]:
self._delete_line(i_key)
def define_blocks(self, block_dict):
"""
Define a block section within the GenericParameters
Args:
block_dict (dict): dictionary to define the block
"""
if not isinstance(block_dict, OrderedDict):
raise AssertionError()
self._block_dict = block_dict
def to_hdf(self, hdf, group_name=None):
"""
Store the GenericParameters in an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object
group_name (str): HDF5 subgroup name - optional
"""
if group_name:
with hdf.open(group_name) as hdf_group:
hdf_child = hdf_group.create_group(self.table_name)
else:
hdf_child = hdf.create_group(self.table_name)
self._type_to_hdf(hdf_child)
hdf_child["data_dict"] = self._dataset
def from_hdf(self, hdf, group_name=None):
"""
Restore the GenericParameters from an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object
group_name (str): HDF5 subgroup name - optional
"""
if group_name:
with hdf.open(group_name) as hdf_group:
data = hdf_group[self.table_name]
else:
data = hdf[self.table_name]
if isinstance(data, dict):
self._dataset = data
else:
self._dataset = data._read("data_dict")
def get_string_lst(self):
"""
Get list of strings from GenericParameters to write to input file
"""
tab_dict = self._dataset
# assert(len(tab_dict['Value']) == len(tab_dict['Parameter']))
if "Parameter" not in tab_dict:
tab_dict["Parameter"] = ["" for _ in tab_dict["Value"]]
string_lst = []
if self.val_only:
value_lst = tab_dict["Value"]
else:
try:
value_lst = [self[p] for p in tab_dict["Parameter"]]
except ValueError:
value_lst = tab_dict["Value"]
for par, v, c in zip(tab_dict["Parameter"], value_lst, tab_dict["Comment"]):
# special treatment for values that are bool or str
if isinstance(v, bool):
v_str = self._bool_dict[v]
elif isinstance(
v, str
): # TODO: introduce variable for string symbol (" or ')
v_str = v
else:
v_str = str(v)
par = " ".join(par.split(self.multi_word_separator))
if par == "Comment":
string_lst.append(str(v) + self.end_value_char + "\n")
elif c.strip() == "":
if self.val_only:
string_lst.append(v_str + self.end_value_char + "\n")
else:
string_lst.append(
par + self.separator_char + v_str + self.end_value_char + "\n"
)
else:
if self.val_only:
string_lst.append(
v_str + self.end_value_char + " " + self.comment_char + c + "\n"
)
else:
string_lst.append(
par
+ self.separator_char
+ v_str
+ " "
+ self.end_value_char
+ self.comment_char
+ c
+ "\n"
)
return string_lst
def write_file(self, file_name, cwd=None):
"""
Write GenericParameters to input file
Args:
file_name (str): name of the file, either absolute (then cwd must be None) or relative
cwd (str): path name (default: None)
"""
if cwd is not None:
file_name = posixpath.join(cwd, file_name)
with open(file_name, "w") as f:
for line in self.get_string_lst():
f.write(line)
def __repr__(self):
"""
Human readable string representation
Returns:
str: pandas Dataframe structure as string
"""
return str(self.get_pandas())
def __setitem__(self, key, value):
"""
Set a value for the corresponding key
Args:
            key (str): key to be set or modified
value (float/int/str): value to be set
"""
if isinstance(key, int):
if self.read_only and self._dataset["Value"][key] != value:
self._read_only_error()
self._dataset["Value"][key] = value
else:
self.set(**{key: value})
def set_dict(self, dictionary):
"""
Set a dictionary of key value pairs
Args:
dictionary (dict): dictionary of key value pairs
"""
self.set(**dictionary)
def __getitem__(self, item):
"""
Get a value for the corresponding key
Args:
item (int, str): key
Returns:
str: value
"""
if isinstance(item, int):
return self._dataset["Value"][item]
elif item in self._dataset["Parameter"]:
return self.get(item)
def __delitem__(self, key):
"""
Delete a key from GenericParameters
Args:
key (str): single key
"""
self.remove_keys([key])
def _get_block(self, block_name):
"""
Internal helper function to get a block by name
Args:
block_name (str): block name
Returns:
dict: dictionary of the specific block
"""
if block_name not in self._block_dict:
raise ValueError("unknown block: " + block_name)
keys = self._dataset["Parameter"]
block_dict = OrderedDict()
for key in self._dataset:
block_dict[key] = []
for i, tag in enumerate(keys):
if tag in self._block_dict[block_name]:
for key in self._dataset:
block_dict[key].append(self._dataset[key][i])
return block_dict
def _get_attributes(self):
"""
Internal helper function to extract pyiron specific commands (start in comments with " @my_command")
Returns:
(dict): {"Parameter": list of tags, "Value": list of values}
"""
tags = self._dataset["Parameter"]
lst_tag, lst_val = [], []
for i, tag in enumerate(tags):
if tag not in ["Comment"]:
continue
c = self._dataset["Value"][i]
s_index = c.find(" @")
if s_index > -1:
tag, val = c[s_index:].split()[:2]
lst_tag.append(tag[1:])
lst_val.append(val)
self._attributes = {"Parameter": lst_tag, "Value": lst_val}
return self._attributes
def _remove_block(self, block_name):
"""
Internal helper function to remove a block by name
Args:
block_name (str): block name
"""
if block_name not in self._block_dict:
raise ValueError("unknown block to be removed")
self.remove_keys(self._block_dict[block_name])
def _insert_block(self, block_dict, next_block=None):
"""
Internal helper function to insert a block by name
Args:
block_dict (dict): block dictionary
next_block (str): name of the following block - optional
"""
if next_block is None: # append
for key in block_dict:
self._dataset[key] += block_dict[key]
else:
for i, tag in enumerate(self._dataset["Parameter"]):
if tag in self._block_dict[next_block]:
self._insert(line_number=i, data_dict=block_dict) # , shift=1)
break
def _update_block(self, block_dict):
"""
Internal helper function to update a block by name
Args:
block_dict (dict): block dictionary
"""
tag_lst = block_dict["Parameter"]
val_lst = block_dict["Value"]
par_dict = {}
for t, v in zip(tag_lst, val_lst):
par_dict[t] = v
self.modify(**par_dict)
def _delete_line(self, line_number):
"""
Internal helper function to delete a single line
Args:
line_number (int): line number
"""
if self.read_only:
self._read_only_error()
for key, val in self._dataset.items():
if "numpy" in str(type(val)):
val = val.tolist()
del val[line_number]
self._dataset[key] = val
def _insert(self, line_number, data_dict, shift=0):
"""
Internal helper function to insert a single line by line number
Args:
line_number (int): line number
data_dict (dict): data dictionary
shift (int): shift line number - default=0
"""
if self.read_only:
self._read_only_error()
for key, val in data_dict.items():
lst = self._dataset[key]
val = np.array(val).tolist()
            lst = np.array(lst).tolist()
            self._dataset[key] = lst[:line_number] + val + lst[line_number - shift:]
import os
import sys
import copy
import collections
import numpy as np
import networkx as nx
import pygraphviz as gv
import colour
import pandas as pd
from matplotlib import pyplot as plt
def color_range(one, two, n):
""" color_range("red", "blue", 4) gives four colors in six digit hex
Used because pygraphviz definitely accepts 6-digit hex codes.
Should be noted, the two colors in the middle will be shades of lime ...
Not much to do with red, or blue. Unfortunately, colorbrewer has NO
documentation (!) and other things don't seem to have ranges!
'seaborn' looks like possibly a good option (documentation!) but
"""
col1 = colour.Color(one)
col2 = colour.Color(two)
r = [c.get_hex_l() for c in col1.range_to(col2, n)]
return r
def one_round_swaps(b):
"""Finds the best group change for each node, once.
    Described in (Karrer and Newman 2011) as adapted from (Kernighan, Lin 1970).
    The implementation holds a list of likelihoods that were achieved after
each node was optimally placed, as well as the group into which it was
placed. When all nodes have been examined, the system goes back to the
state that achieved the highest likelihood, which means keeping the nodes
that were switched before the maximum was hit, and discarding the changes
after.
"""
# Initialized so that there's a 'previous' value for the initial one
new_max = [b.calculate_likelihood()]
new_groups = copy.copy(b.groups)
# Iterate through the nodes
for i, group in enumerate(b.groups):
changes = []
# Iterate through the possible clusters
for j in range(b.k):
# Not worth evaluating your current position.
if j != group:
# Place node i in group j
changes.append(b.change_in_likelihood(i, j))
# GOTTA CHANGE IT ***BACK***
else:
changes.append(0)
# After trying all groups, set the new group to the best option
best = changes.index(max(changes))
new_groups[i] = best
# Gotta change the ACTUAL assignment -- this is cumulative
b.groups[i] = best
# Switch the matrix -- things have changed.
b.calculate_m()
# Update the likelihood by the local change
# Remember, the list was initialized to have a 'previous' elem
# even for the FIRST element.
new_max.append(new_max[-1] + max(changes))
mx = new_max.index(max(new_max))
# If the index isn't 0, which is the 'no changes' state,
if mx:
# Remove the first element
del new_max[0]
# and pretend it wasn't there
mx -= 1
# so that you can easily index into the best place.
# With n nodes, you index easily into 1:n changes -- not 0:n changes.
best_groups = np.concatenate((new_groups[:mx+1], b.groups[mx+1:]))
b.groups = best_groups
# Since you got new groups, rewire.
b.calculate_m()
# Return the likelihood corresponding to the groups
return new_max[mx] #b.calculate_likelihood()
else:
# The groups don't change.
return new_max[0]
def blockmodel(g, k, iterations=1):
""" Takes a graph and a number of clusters, returns group assignments.
g is the graph, a 2- or 3-d binary (NOT boolean) numpy array
Right now, treats the network as 1-mode, so there's only 1 k.
"""
likelihoods = []
models = []
# Randomly initialized, so try a few times.
for itn in range(iterations):
sys.stdout.write('Iteration #{}\r'.format(itn+1))
sys.stdout.flush()
b = BlockModel(g, k)
models.append(b)
lkhds = []
old_likelihood = -np.inf
new_likelihood = 0
iterations = 0
while True:
iterations += 1
new_likelihood = one_round_swaps(b)
lkhds.append(new_likelihood)
if new_likelihood == old_likelihood or iterations > 100:
likelihoods.append(new_likelihood)
plt.plot(lkhds)
break
else:
old_likelihood = new_likelihood
# This are comparable, no?
return models[likelihoods.index(max(likelihoods))]
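# Illustrative call (hypothetical data): fit a 2-block model to a random
# binary graph and read off the node-to-cluster assignment.
#
#   g = (np.random.rand(30, 30) < 0.2).astype(int)
#   best = blockmodel(g, k=2, iterations=5)
#   best.groups  # array of length 30 with entries in {0, 1}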
class BlockModel:
def __init__(self, g, k, symmetric=False):
"""Randomly assigns nodes to clusters, and counts inter-cluster ties.
"""
# The graph
self.g = g
# The number of clusters (assuming 1-mode)
self.k = k
# An array of cluster assignments for each node
self.groups = np.random.randint(k, size=g.shape[0])
# Is the graph symmetric? (Relevance for 3d?)
self.symmetric = symmetric
# Convenient. Will the dimensions ever DIFFER though?
self.n = g.shape[0]
# The counts of between-cluster ties
self.m = np.zeros(tuple(self.k for dim in range(self.g.ndim)))
# The set of node<->group edge counts needed for diff(likelihood)
# Useful to have on hand, if it COULD be (I think)
# also defined as-needed in the diff-step -- but that's also
# a class function.
self.ki = np.zeros(tuple(self.k for dim in range(self.g.ndim)))
# Initialize this matrix
self.calculate_m()
def count_between_edges(self, x, node=None):#, group=None):
""" Populates tables of group<->group ties, for one or all nodes.
The engine under calculate_m and calculate_k.
"""
if self.g.ndim == 2:
# Am I actually assigning to the OBJECT?
#x = np.zeros(tuple(self.k for i in range(self.g.ndim)))
for i in range(self.k):
for j in range(self.k):
x[i,j] = 0 # But THIS should work ... no?
for i in range(self.n):
for j in range(self.n):
# The groups the nodes belong to
r, s = self.groups[[i, j]]
# If we're only looking for ONE node's numbers ...
                    if node is not None and node not in [i, j]:
break
else:
# Don't TOTALLY understand the double-diagonal,
# but it's in K+N
# PAY CLOSE ATTENTION -- we're adding to 'x',
# which could be m, OR k.
x[r,s] += self.g[i,j]*(2 if r==s else 1)
elif self.g.ndim == 3:
# Am I actually assigning to the OBJECT?
#x = np.zeros(tuple(self.k for i in range(self.g.ndim)))
for i in range(self.k):
for j in range(self.k):
for k in range(self.k):
x[i,j,k] = 0 # But THIS should work ... no?
for i in range(self.n):
for j in range(self.n):
for k in range(self.n):# if self.g.ndim==3 else [None]:
# The groups the nodes belong to
#ix_tuple = tuple(x for x in (i,j,k) if x)
r, s, t = self.groups[[i, j, k]] #group_tuple = self.groups[list(ix_tuple)]
# If we're only looking for ONE node's numbers ...
                        if node is not None and node not in [i, j, k]: #if node and node not in group_tuple:
break
else:
#x[group_tuple] += self.g[ix_tuple]*(2 if all([gt==group_tuple[0] for gt in group_tuple]) else 1))
x[r,s,t] += self.g[i,j,k]*(2 if r==s==t else 1)
else:
# Wrong number of dimensions. ?
raise Exception("The dimension of the matrix was not 2 or 3")
def calculate_m(self):
#self.m = self.count_between_edges(self.)
self.count_between_edges(self.m)
def calculate_k(self, node):
self.count_between_edges(self.ki, node=node)#, group=group)
#self.kib, self.kbi = self.count_between_edges(self.)
def change_in_likelihood(self, i, k):
"""Return the change in likelihood for moving node i to cluster k."""
# I want to create a matrix of -- i's degrees into and out of all
# the other clusters. Right? Anything else?
# First, and trivially --
old_group = self.groups[i]
#old_m = copy.copy(self.m)
before = self.calculate_likelihood()
self.groups[i] = k
# Redo the group matrix,
self.calculate_m()
# And redo the likelihood, based on it.
after = self.calculate_likelihood()
# BUT NOW YOU'VE GOT TO PUT EVERYTHING BACK.
self.groups[i] = old_group
#self.m = old_m
self.calculate_m()
return after - before
# Next, see what the difference is ... which is easier than
# calculating the WHOLE likelihood.
    def calculate_likelihood(self, corrected=False):
        """Returns the likelihood of the current model."""
        # Degree corrected, or not?
        # L(G|g) = SUM_rs m_rs log( m_rs / (n_r*n_s) )
        if corrected:
            raise Exception("The degree corrected version isn't implemented")
        n = collections.Counter(self.groups)
        total = 0
        if self.g.ndim == 2:
            for i in range(self.k):
                for j in range(self.k):
                    # This is avoiding a Divide By Zero Warning
                    if self.m[i, j]:
                        total += self.m[i, j] * np.log(self.m[i, j])
                        if n[i] and n[j]:
                            total -= self.m[i, j] * np.log(n[i]*n[j])
        if self.g.ndim == 3:
            for i in range(self.k):
                for j in range(self.k):
                    for k in range(self.k):
                        # This is avoiding a Divide By Zero Warning
                        if self.m[i, j, k]:
                            total += self.m[i, j, k] * np.log(self.m[i, j, k])
                            if n[i] and n[j] and n[k]:
                                total -= self.m[i, j, k] * np.log(n[i]*n[j]*n[k])
        return total
import numpy as np
import numpy.linalg as linalg
def state_space(raw_data, q):
"""
Performs the state-space projection of the original data using principal
component analysis (eigen-decomposition).
Parameters
----------
raw_data : array, shape (N, M)
Row-vector data points with M features.
q : integer
Number of principal components to keep.
Returns
-------
X : array, shape (q, M)
State-space projection of the original data.
C : array, shape (N, q) the PCA matrix (useful for returning to the data space)
Projection matrix.
"""
if q <= 0:
raise Exception('Parameter "q" restricted to positive integer values.')
# Perform the SVD on the data.
# For full documentation on this aspect, see page 15 of <NAME>'s
# master's thesis on Autoregressive modeling.
#
# Y = U * S * Vt,
#
# Y = C * X,
#
# So:
# C = first q columns of U
# S_hat = first q singular values of S
# Vt_hat = first q rows of Vt
#
# X = S_hat * Vt_hat
#
# For the full documentation of SVD, see:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html#numpy.linalg.svd
U, S, Vt = linalg.svd(raw_data, full_matrices = False)
C = U[:, :q]
Sh = np.diag(S)[:q, :q]
Vth = Vt[:q, :]
X = np.dot(Sh, Vth)
return [X, C]
def appearance_space(state_data, C):
"""
Converts data projected into the state space back into the appearance space
according to the projection matrix C. Effectively, this undoes the operations
of "state_space()":
X, C = state_space(original_data)
original_data = appearance_space(X, C)
Parameters
----------
state_data : array, shape (q, M)
The projected data, or the output "X" from "state_space".
C : array, shape (N, q)
The projection matrix, or the output "C" from "state_space".
Returns
-------
X : array, shape (N, M)
The original form of the data, or the input to "state_space".
"""
return np.dot(C, state_data)
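# Round-trip check (illustrative): with q equal to the full rank, the
# state-space projection reconstructs the data exactly.
#
#   Y = np.random.randn(10, 50)
#   X, C = state_space(Y, q=10)
#   assert np.allclose(Y, appearance_space(X, C))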
def train(X, order = 2):
"""
Estimates the transition matrices A (and eventually the error parameters as
well) for this AR model, given the order of the markov process.
(in this notation, the parameter to this method "order" has the same value as "q")
Parameters
----------
X : array, shape (q, M) or (M,)
Matrix of column vectors of the data (either original or state-space).
order : integer
Positive, non-zero integer order value for the order of the Markov process.
Returns
-------
A : array, shape (q, q)
Transition coefficients for the system
"""
if order <= 0:
raise Exception('Parameter "order" restricted to positive integer values')
W = None
# A particular special case first.
if len(X.shape) == 1:
        Xtemp = np.zeros(shape=(1, np.size(X)))
        Xtemp[0, :] = X
        X = Xtemp
""" This module defines the chi-squared and related functions
Module author: <NAME>
Year: 2020
Email: <EMAIL>
"""
import numpy as np
import model
def chi2_no_soliton(c, Rs, ups_disk, ups_bulg, gal, DM_profile="NFW"):
"""chi2 for an NFW fit (c, Rs; ups_disk, ups_bulg). Runs over a single galaxy
:param c: concentration param of the NFW profile, and delc for Burkert
:param Rs: critical radius of the NFW profile
:param ups_disk: disk surface brightness
:param ups_bulg: bulge surface brightness
:param gal: a Galaxy instance
"""
chi2 = 0
Vth2_arr = model.v2_rot(gal, c, Rs, ups_bulg, ups_disk, DM_profile)
for i, r in enumerate(gal.R):
# treat the i-th bin of the rot curve
#
# # TODO: move this part out to a dedicated function
# # V thory due to DM
# if DM_profile == "NFW":
# M_enclosed = model.M_NFW(r, Rs, c)
# elif DM_profile == "Burkert":
# M_enclosed = model.M_Burkert(
# r, delc=c, Rs=Rs)
# else:
# raise Exception(
# "Only NFW and Burkert are implemented at the moment.")
# VDM2 = model._G_Msun_over_kpc * model._c**2 * (M_enclosed/1.) * (1./r)
# # combine DM with the baryon mass model (from SB data)
# Vb2 = (ups_bulg*np.abs(gal.Vbul[i])*gal.Vbul[i]
# + ups_disk*np.abs(gal.Vdisk[i])*gal.Vdisk[i]
# + np.abs(gal.Vgas[i])*gal.Vgas[i]
# )
# Vth2 = VDM2 + Vb2
# ODOT
Vth2 = Vth2_arr[i]
if Vth2 > 0:
Vth = np.sqrt(Vth2)
else:
Vth = 0.
Vobs = gal.Vobs[i]
dVobs = gal.dVobs[i]
# compute chi2 for this bin
chi2 += (Vth - Vobs)**2/dVobs**2
# construct Vtot for visual/sanity checks
# construct Vtot
return chi2
def fit_rot_curve(gal, DM_profile='NFW', gridsize=50):
"""A quick fit with NFW/Burkert only
"""
rs_arr = np.linspace(1, 80, gridsize)
c_arr = np.logspace(0, 1.5, gridsize)
rs_mesh, c_mesh = np.meshgrid(rs_arr, c_arr, indexing='ij')
rs_flat, c_flat = rs_mesh.reshape(-1), c_mesh.reshape(-1)
chi2_flat = []
chi2_val_rec = 1e9
rec_idx = None
for i in range(len(rs_flat)):
rs = rs_flat[i]
c = c_flat[i]
chi2_val = chi2_no_soliton(c=c, Rs=rs, ups_disk=0.5, ups_bulg=0.5,
gal=gal, DM_profile=DM_profile)
chi2_flat.append(chi2_val)
if chi2_val < chi2_val_rec:
rec_idx = i
chi2_val_rec = chi2_val
# print(chi2_val)
chi2_flat = np.array(chi2_flat)
# best fit
rs = rs_flat[rec_idx]
c = c_flat[rec_idx]
# output
gal.rs = rs
gal.c = c
return (rs_mesh, c_mesh, chi2_flat)
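# Illustrative call (requires a Galaxy instance `gal` from the data loader):
#
#   rs_mesh, c_mesh, chi2_flat = fit_rot_curve(gal, DM_profile='Burkert')
#   chi2_grid = chi2_flat.reshape(rs_mesh.shape)
#   # gal.rs and gal.c now hold the best-fit radius and concentration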
def chi2_single_gal(m, M, c, Rs, ups_disk, ups_bulg, gal, flg_Vtot=False, DM_profile="NFW", flg_overshoot=False, combine_mode=None):
"""chi2 for a fixed theory point (m, M, c, Rs; ups_disk, ups_bulg). Runs over a single galaxy, gal
up
:param m: scalar mass [eV]
:param M: soliton mass [Msun]
:param c: concentration param of the NFW profile, and delc for Burkert
:param Rs: critical radius of the NFW profile
:param ups_disk: disk surface brightness
:param ups_bulg: bulge surface brightness
:param gal: a Galaxy instance
:param flg_Vtot: flg to signal returning of the total rot velocity
defined as Vtot**2 = Vb**2 + VNFW**2 + Vsol**2 [km/s]
:param DM_profile: flag to choose between NFW and Burkert. (Default: NFW)
:returns: chi2 value
"""
chi2 = 0
if flg_Vtot:
Vtot = np.array([])
Vth2_arr = model.v2_rot(gal, c, Rs, ups_bulg,
ups_disk, DM_profile,
m=m, M=M, combine_mode=combine_mode)
for i, r in enumerate(gal.R):
# treat the i-th bin of the rot curve
#
# # TODO: move this part out to a dedicated function
# # V thory due to DM
# if DM_profile == "NFW":
# M_enclosed = model.M_NFW(r, Rs, c) + model.M_sol(r, m, M)
# elif DM_profile == "Burkert":
# M_enclosed = model.M_Burkert(
# r, delc=c, Rs=Rs) + model.M_sol(r, m, M)
# else:
# raise Exception(
# "Only NFW and Burkert are implemented at the moment.")
# VDM2 = model._G_Msun_over_kpc * model._c**2 * (M_enclosed/1.) * (1./r)
# # combine DM with the baryon mass model (from SB data)
# Vb2 = (ups_bulg*np.abs(gal.Vbul[i])*gal.Vbul[i]
# + ups_disk*np.abs(gal.Vdisk[i])*gal.Vdisk[i]
# + np.abs(gal.Vgas[i])*gal.Vgas[i]
# )
# Vth2 = VDM2 + Vb2
# ODOT
Vth2 = Vth2_arr[i]
if Vth2 > 0:
            Vth = np.sqrt(Vth2)
"""
Dynamic endpoints on eICU
"""
import numpy as np
import pandas as pd
import functions.util_array as mlhc_array
class DynamicEndpointExtractor():
def __init__(self):
self.create_pid_col = True
# The horizons at the end which are marked as patient severity
self.back_horizons = [1, 6, 12, 24]
self.unit_discharge_categories = {"home": ["Home"],
"telemetry": ["Telemetry"],
"floor": ["Floor"],
"step_down_unit": ["Step-Down Unit (SDU)"],
"acute_care_floor": ["Acute Care/Floor"],
"other_icu": ["Other ICU", "ICU", "Other ICU (CABG)"],
"expired": ["Death"],
"skilled_nursing_facility": ["Skilled Nursing Facility"],
"other_hospital": ["Other Hospital"]}
self.hospital_discharge_categories = {"home": ["Home"],
"skilled_nursing_facility": ["Skilled Nursing Facility"],
"expired": ["Death"],
"rehabilitation": ["Rehabilitation"],
"other_hospital": ["Other Hospital"],
"nursing_home": ["Nursing Home"]}
# The variables that are to be used as critical thresholds
self.relevant_variables_vitals = ["temperature", "systemicmean", "respiration"]
self.relevant_variables_lab = ["HCO3", "sodium", "potassium", "creatinine"]
def transform(self, df_imputed, df_pat, pid=None):
df_out_dict = {}
if self.create_pid_col:
df_out_dict["patientunitstayid"] = mlhc_array.value_empty(df_imputed.shape[0], pid, dtype=np.int64)
df_out_dict["ts"] = df_imputed["ts"]
rel_row = df_pat.iloc[0]
hospital_discharge_location = str(rel_row["hospitaldischargelocation"]).strip()
unit_discharge_location = str(rel_row["unitdischargelocation"]).strip()
for var, vnames in self.unit_discharge_categories.items():
if unit_discharge_location in vnames:
for hor in self.back_horizons:
arr = np.zeros(df_imputed.shape[0], dtype=np.float64)
arr[-hor:] = 1.0
df_out_dict["unit_discharge_{}_{}".format(var, hor)] = arr
else:
for hor in self.back_horizons:
arr = np.zeros(df_imputed.shape[0], dtype=np.float64)
df_out_dict["unit_discharge_{}_{}".format(var, hor)] = arr
for var, vnames in self.hospital_discharge_categories.items():
if hospital_discharge_location in vnames:
for hor in self.back_horizons:
arr = np.zeros(df_imputed.shape[0], dtype=np.float64)
arr[-hor:] = 1.0
df_out_dict["hospital_discharge_{}_{}".format(var, hor)] = arr
else:
for hor in self.back_horizons:
arr = np.zeros(df_imputed.shape[0], dtype=np.float64)
df_out_dict["hospital_discharge_{}_{}".format(var, hor)] = arr
# Process the vital sign variables of interest
temperature = np.array(df_imputed["vs_temperature"])
abpm = np.array(df_imputed["vs_systemicmean"])
rrate = np.array(df_imputed["vs_respiration"])
hco3 = np.array(df_imputed["lab_HCO3"])
sodium = np.array(df_imputed["lab_sodium"])
potassium = np.array(df_imputed["lab_potassium"])
creatinine = np.array(df_imputed["lab_creatinine"])*100 # Wrong unit in the input data
for hor in self.back_horizons:
full_score_out = np.zeros(df_imputed.shape[0])
# TEMPERATURE
set_indices = {}
for config, thresholds in [("high4", [41, np.inf]), ("low4", [-np.inf, 30]), ("high3", [39, 41]), ("low3", [30, 32]),
("low2", [32, 34]), ("high1", [38.5, 39]), ("low1", [34, 36])]:
temp_out = np.zeros(df_imputed.shape[0])
for idx in np.arange(temperature.size):
forward_window = temperature[idx:min(temperature.size, idx+hor)]
                    assert np.isfinite(forward_window).all()
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from oneflow.python.ops.constant_op import zeros_like
from test_util import GenArgList
def np_margin_ranking_loss(margin, input1, input2, targets, reduction="none"):
out = np.clip(margin + (-targets) * (input1 - input2), a_min=0, a_max=None)
if reduction == "sum":
return np.sum(out)
elif reduction == "mean":
return out.mean()
elif reduction == "none":
return out
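# Reference formula implemented above (PyTorch convention):
#   loss(x1, x2, y) = max(0, -y * (x1 - x2) + margin)
# with y in {+1, -1} indicating whether input1 should rank higher or lower.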
def np_margin_ranking_loss_grad(margin, input1, input2, targets):
out = np.clip(margin + (-targets) * (input1 - input2), a_min=0, a_max=None)
    out_grad1 = np.zeros_like(targets)
out_grad2 = np.zeros_like(targets)
out_grad1[np.nonzero(out)] = -targets[np.nonzero(out)]
out_grad2[np.nonzero(out)] = targets[np.nonzero(out)]
return out_grad1, out_grad2
def _test_marginrankingloss_none(test_case, shape, margin, device):
input1 = flow.Tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
input2 = flow.Tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
target_pos = flow.Tensor(
np.ones(shape), dtype=flow.float32, device=flow.device(device)
)
target_neg = flow.Tensor(
-1 * np.ones(shape), dtype=flow.float32, device=flow.device(device)
)
margin_ranking_loss = flow.nn.MarginRankingLoss(margin=margin, reduction="none")
margin_ranking_loss = margin_ranking_loss.to(device)
of_out_pos = margin_ranking_loss(input1, input2, target_pos)
np_out_pos = np_margin_ranking_loss(
margin, input1.numpy(), input2.numpy(), target_pos.numpy(), reduction="none"
)
test_case.assertTrue(np.allclose(of_out_pos.numpy(), np_out_pos, 1e-5, 1e-5))
of_out_neg = margin_ranking_loss(input1, input2, target_neg)
np_out_neg = np_margin_ranking_loss(
margin, input1.numpy(), input2.numpy(), target_neg.numpy(), reduction="none"
)
test_case.assertTrue(np.allclose(of_out_neg.numpy(), np_out_neg, 1e-5, 1e-5))
def _test_marginrankingloss_mean(test_case, shape, margin, device):
input1 = flow.Tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
input2 = flow.Tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
target_pos = flow.Tensor(
np.ones(shape), dtype=flow.float32, device=flow.device(device)
)
target_neg = flow.Tensor(
-1 * np.ones(shape), dtype=flow.float32, device=flow.device(device)
)
margin_ranking_loss = flow.nn.MarginRankingLoss(margin=margin, reduction="mean")
margin_ranking_loss = margin_ranking_loss.to(device)
of_out_pos = margin_ranking_loss(input1, input2, target_pos)
np_out_pos = np_margin_ranking_loss(
margin, input1.numpy(), input2.numpy(), target_pos.numpy(), reduction="mean"
)
test_case.assertTrue(np.allclose(of_out_pos.numpy(), np_out_pos, 1e-5, 1e-5))
of_out_neg = margin_ranking_loss(input1, input2, target_neg)
np_out_neg = np_margin_ranking_loss(
margin, input1.numpy(), input2.numpy(), target_neg.numpy(), reduction="mean"
)
test_case.assertTrue(np.allclose(of_out_neg.numpy(), np_out_neg, 1e-5, 1e-5))
def _test_marginrankingloss_sum(test_case, shape, margin, device):
input1 = flow.Tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
input2 = flow.Tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
target_pos = flow.Tensor(
np.ones(shape), dtype=flow.float32, device=flow.device(device)
)
target_neg = flow.Tensor(
-1 * np.ones(shape), dtype=flow.float32, device=flow.device(device)
)
margin_ranking_loss = flow.nn.MarginRankingLoss(margin=margin, reduction="sum")
margin_ranking_loss = margin_ranking_loss.to(device)
of_out_pos = margin_ranking_loss(input1, input2, target_pos)
np_out_pos = np_margin_ranking_loss(
margin, input1.numpy(), input2.numpy(), target_pos.numpy(), reduction="sum"
)
test_case.assertTrue(np.allclose(of_out_pos.numpy(), np_out_pos, 1e-5, 1e-5))
of_out_neg = margin_ranking_loss(input1, input2, target_neg)
np_out_neg = np_margin_ranking_loss(
margin, input1.numpy(), input2.numpy(), target_neg.numpy(), reduction="sum"
)
test_case.assertTrue(np.allclose(of_out_neg.numpy(), np_out_neg, 1e-5, 1e-5))
def _test_marginrankingloss_grad(test_case, shape, margin, device):
input1 = flow.Tensor(
np.random.randn(*shape),
dtype=flow.float32,
device=flow.device(device),
requires_grad=True,
)
input2 = flow.Tensor(
        np.random.randn(*shape),
        dtype=flow.float32,
        device=flow.device(device),
        requires_grad=True,
    )
import cv2
import os
import shutil
import quaternion
import torch
import numpy as np
from typing import Optional
import imageio
from tqdm import tqdm
from pytorch3d.structures import Meshes
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import utils as struct_utils
from sparseplane.utils.camera import (
create_cylinder_mesh,
create_color_palette,
get_cone_edges,
)
def transform_meshes(meshes, camera_info):
"""
input:
@meshes: mesh in local frame
    @camera_info: camera pose, type = dict, must contain 'position' and 'rotation' as keys
output:
mesh in global frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
verts_packed = meshes.verts_packed()
verts_packed = verts_packed * torch.tensor(
[1.0, -1.0, -1.0], dtype=torch.float32
) # suncg2habitat
faces_list = meshes.faces_list()
tex = meshes.textures
rot_matrix = torch.tensor(quaternion.as_rotation_matrix(rot), dtype=torch.float32)
verts_packed = torch.mm(rot_matrix, verts_packed.T).T + torch.tensor(
tran, dtype=torch.float32
)
verts_list = list(verts_packed.split(meshes.num_verts_per_mesh().tolist(), dim=0))
return Meshes(verts=verts_list, faces=faces_list, textures=tex)
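# A hedged usage sketch, not part of the original module (the helper name and
# all sample values are ours): build a one-triangle mesh and move it from the
# local camera frame into the global frame using an identity rotation.
def _example_transform_meshes():
    verts = torch.tensor([[[0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]])
    faces = torch.tensor([[[0, 1, 2]]])
    tex = TexturesVertex(verts_features=torch.ones_like(verts))
    mesh = Meshes(verts=verts, faces=faces, textures=tex)
    camera_info = {
        "position": np.array([1.0, 2.0, 3.0]),
        "rotation": quaternion.from_rotation_matrix(np.eye(3)),
    }
    return transform_meshes(mesh, camera_info)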
def rotate_mesh_for_webview(meshes):
"""
input:
@meshes: mesh in global (habitat) frame
output:
mesh is rotated around x axis by -11 degrees such that floor is horizontal
"""
verts_packed = meshes.verts_packed()
faces_list = meshes.faces_list()
tex = meshes.textures
rot_matrix = torch.FloatTensor(
np.linalg.inv(
np.array([[1, 0, 0], [0, 0.9816272, -0.1908090], [0, 0.1908090, 0.9816272]])
)
)
verts_packed = torch.mm(rot_matrix, verts_packed.T).T
verts_list = list(verts_packed.split(meshes.num_verts_per_mesh().tolist(), dim=0))
return Meshes(verts=verts_list, faces=faces_list, textures=tex)
def transform_verts_list(verts_list, camera_info):
"""
input:
@meshes: verts_list in local frame
    @camera_info: camera pose, type = dict, must contain 'position' and 'rotation' as keys
output:
verts_list in global frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
verts_list_to_packed = struct_utils.list_to_packed(verts_list)
verts_packed = verts_list_to_packed[0]
num_verts_per_mesh = verts_list_to_packed[1]
verts_packed = verts_packed * torch.tensor(
[1.0, -1.0, -1.0], dtype=torch.float32
) # suncg2habitat
rot_matrix = torch.tensor(quaternion.as_rotation_matrix(rot), dtype=torch.float32)
verts_packed = torch.mm(rot_matrix, verts_packed.T).T + torch.tensor(
tran, dtype=torch.float32
)
verts_list = list(verts_packed.split(num_verts_per_mesh.tolist(), dim=0))
return verts_list
def get_plane_params_in_global(planes, camera_info):
"""
input:
@planes: plane params
    @camera_info: camera pose, type = dict, must contain 'position' and 'rotation' as keys
output:
plane parameters in global frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
start = np.ones((len(planes), 3)) * tran
end = planes * np.array([1, -1, -1]) # suncg2habitat
end = (quaternion.as_rotation_matrix(rot) @ (end).T).T + tran # cam2world
a = end
b = end - start
planes_world = ((a * b).sum(axis=1) / np.linalg.norm(b, axis=1) ** 2).reshape(-1, 1) * b
return planes_world
def get_plane_params_in_local(planes, camera_info):
"""
input:
@planes: plane params
    @camera_info: camera pose, type = dict, must contain 'position' and 'rotation' as keys
output:
    plane parameters in local (camera) frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
b = planes
a = np.ones((len(planes), 3)) * tran
planes_world = (
a
+ b
- ((a * b).sum(axis=1) / np.linalg.norm(b, axis=1) ** 2).reshape(-1, 1) * b
)
end = (
quaternion.as_rotation_matrix(rot.inverse()) @ (planes_world - tran).T
).T # world2cam
    planes_local = end * np.array([1, -1, -1])  # habitat2suncg
    return planes_local
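# A hedged round-trip sanity check, not part of the original module (the
# helper name and sample values are ours): mapping plane offsets local ->
# global -> local should be the identity for a generic camera pose.
def _example_plane_params_roundtrip():
    camera_info = {
        "position": np.array([0.1, 0.2, 0.3]),
        "rotation": quaternion.from_rotation_matrix(np.eye(3)),
    }
    planes = np.array([[0.0, 0.0, 2.0], [1.0, 1.0, 1.0]])
    recovered = get_plane_params_in_local(
        get_plane_params_in_global(planes, camera_info), camera_info
    )
    assert np.allclose(planes, recovered)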
"""TNQMetro: Tensor-network based package for efficient quantum metrology computations."""
# Table of Contents
#
# 1 Functions for finite size systems......................................29
# 1.1 High level functions...............................................37
# 1.2 Low level functions...............................................257
# 1.2.1 Problems with exact derivative.............................1207
# 1.2.2 Problems with discrete approximation of the derivative.....2411
# 2 Functions for infinite size systems..................................3808
# 2.1 High level functions.............................................3816
# 2.2 Low level functions..............................................4075
# 3 Auxiliary functions..................................................5048
import itertools
import math
import warnings
import numpy as np
from ncon import ncon
########################################
# #
# #
# 1 Functions for finite size systems. #
# #
# #
########################################
#############################
# #
# 1.1 High level functions. #
# #
#############################
def fin(N, so_before_list, h, so_after_list, BC='O', L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the QFI over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence in their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying the quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is a stand out quantum operation. It is assumed that the parameter encoding acts only once and is unitary so the user has to provide only its generator h.
Generator h has to be diagonal in computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
result, result_m, L, psi0 = fin_gen(N, d, BC, ch, ch2, None, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
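# A hedged usage sketch, not part of the original package (the helper name,
# the noise model, and all numeric values are ours): QFI optimization for N
# qubits with local dephasing applied before a phase encoding generated by
# h = sigma_z/2. Both h and the dephasing superoperator are diagonal in the
# computational basis, as fin() requires.
def _example_fin_dephasing():
    N, p = 4, 0.1
    sz = np.diag([1.0, -1.0])
    h = sz / 2
    # Column-major vectorization: rho -> (1-p) rho + p Z rho Z corresponds to
    # the single-site superoperator (1-p) I_4 + p Z (x) Z.
    dephasing = (1 - p) * np.eye(4, dtype=complex) + p * np.kron(sz, sz)
    result, result_m, L, psi0 = fin(N, [dephasing], h, [], BC='O')
    return result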
def fin_gen(N, d, BC, ch, ch2, epsilon=None, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence when increasing their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying a quantum channel ch and its derivative ch2 (or two channels separated by small parameter epsilon) as superoperators in MPO representation.
There are no constraints on the structure of the channel but the complexity of calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
ch: list of length N of ndarrays of a shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of a shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in MPO representation.
ch2: list of length N of ndarrays of a shape (Dl_ch2,Dr_ch2,d**2,d**2) for OBC (Dl_ch2, Dr_ch2 can vary between sites) or ndarray of a shape (D_ch2,D_ch2,d**2,d**2,N) for PBC
        Interpretation depends on whether epsilon is specified (2) or not (1, default approach):
         1) derivative of the quantum channel as a superoperator in the MPO representation,
         2) the quantum channel as a superoperator in the MPO representation for the value of the estimated parameter shifted by epsilon in relation to ch.
epsilon: float, optional
        If specified then it is interpreted as the value of the separation between the estimated parameters encoded in ch and ch2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if the Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of the figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if epsilon is None:
result, result_m, L, psi0 = fin_FoM_FoMD_optbd(N, d, BC, ch, ch2, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
else:
result, result_m, L, psi0 = fin2_FoM_FoMD_optbd(N, d, BC, ch, ch2, epsilon, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
def fin_state(N, so_before_list, h, so_after_list, rho0, BC='O', L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the QFI over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and fixed state of the system.
User has to provide information about the dynamics by specifying a quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
    User has to provide one defining operation for each layer as a local superoperator. Those local superoperators have to be input in order of their action on the system.
Parameter encoding is a stand out quantum operation. It is assumed that parameter encoding acts only once and is unitary so the user has to provide only its generator h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
        Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
rho0: list of length N of ndarrays of a shape (Dl_rho0,Dr_rho0,d,d) for OBC (Dl_rho0, Dr_rho0 can vary between sites) or ndarray of a shape (D_rho0,D_rho0,d,d,N) for PBC
Density matrix describing initial state of the system in MPO representation.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of shape (Dl_L,Dr_L,d,d) for OBC, (Dl_L, Dr_L can vary between sites) or ndarray of shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
        Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in the MPO representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
rho = channel_acting_on_operator(ch, rho0)
rho2 = channel_acting_on_operator(ch2, rho0)
result, result_v, L = fin_state_gen(N, d, BC, rho, rho2, None, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
def fin_state_gen(N, d, BC, rho, rho2, epsilon=None, L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
    Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and fixed state of the system.
    User has to provide information about the dynamics by specifying the output density matrix rho and its derivative rho2 (or two density matrices separated by small parameter epsilon) in the MPO representation.
    There are no constraints on their structure but the complexity of calculations highly depends on their bond dimensions.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
rho: list of length N of ndarrays of a shape (Dl_rho,Dr_rho,d,d) for OBC (Dl_rho, Dr_rho can vary between sites) or ndarray of a shape (D_rho,D_rho,d,d,N) for PBC
Density matrix at the output of the quantum channel in the MPO representation.
rho2: list of length N of ndarrays of a shape (Dl_rho2,Dr_rho2,d,d) for OBC (Dl_rho2, Dr_rho2 can vary between sites) or ndarray of a shape (D_rho2,D_rho2,d,d,N) for PBC
        Interpretation depends on whether epsilon is specified (2) or not (1, default approach):
         1) derivative of the density matrix at the output of the quantum channel in MPO representation,
         2) density matrix at the output of the quantum channel in MPO representation for the value of the estimated parameter shifted by epsilon in relation to rho.
epsilon: float, optional
        If specified then it is interpreted as the value of the separation between the estimated parameters encoded in rho and rho2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
        Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
"""
if epsilon is None:
result, result_v, L = fin_FoM_optbd(N, d, BC, rho, rho2, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
else:
result, result_v, L = fin2_FoM_optbd(N, d, BC, rho, rho2, epsilon, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
############################
# #
# 1.2 Low level functions. #
# #
############################
def fin_create_channel(N, d, BC, so_list, tol=10**-10):
"""
    Creates an MPO for a superoperator describing a translationally invariant quantum channel from a list of local superoperators. Function for finite size systems.
    For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).
    Local superoperators acting on more than 4 neighbouring sites are not currently supported.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
        For OBC the tensor-network length N has to be at least 2k-1 where k is the correlation length (the number of sites on which the largest local superoperator acts).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators in order of their action on the system.
        Local superoperators acting on more than 4 neighbouring sites are not currently supported.
tol: float, optional
Factor which after multiplication by the highest singular value gives a cutoff on singular values that are treated as nonzero.
Returns:
ch: list of length N of ndarrays of shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in the MPO representation.
"""
if so_list == []:
if BC == 'O':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:]
ch = [ch]*N
elif BC == 'P':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
if BC == 'O':
ch = [0]*N
kmax = max([int(math.log(np.shape(so_list[i])[0],d**2)) for i in range(len(so_list))])
if N < 2*kmax-1:
            warnings.warn('For OBC the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).')
for x in range(N):
if x >= kmax and N-x >= kmax:
ch[x] = ch[x-1]
continue
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchil = 1
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
if x == 0:
bdchil = 1
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 0 and x < N-1:
bdchil = bdchi
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx],us[nx,:]]
legs = [[-1],[-2]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 1 and x < N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi2
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 2:
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1,-3],[-2,-4],[-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 2 and x < N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:]]
legs = [[-1],[-2,-4],[-3,-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi3*bdchi2
bdchir = bdchi3
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi3
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
else:
                        warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchil = 1
bdchir = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
if x == 0:
tensors = [us]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi
elif x > 0 and x < N-1:
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = bdchi
elif x == N-1:
tensors = [sv]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-5,1,-2],[1,-6,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x > 1 and x < N-2:
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
elif x == N-2:
tensors = [sv2,us2]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2
elif x == N-1:
tensors = [sv2]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-4,1,-2],[1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x == 2:
tensors = [us3,us2,us1]
legs = [[-1,-6,1,-3],[-2,1,2,-4],[2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x > 2 and x < N-3:
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x == N-3:
tensors = [sv3,us3,us2]
legs = [[-1,-6,1],[-2,1,2,-4],[-3,2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
elif x == N-2:
tensors = [sv3,us3]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2
bdchir = bdchi3
elif x == N-1:
tensors = [sv3]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3
bdchir = 1
                        chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
else:
                        warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdchl = bdchil
bdchr = bdchir
ch[x] = chi
else:
bdchl = bdchil*bdchl
bdchr = bdchir*bdchr
tensors = [chi,ch[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch[x] = ncon(tensors,legs)
ch[x] = np.reshape(ch[x],(bdchl,bdchr,d**2,d**2),order='F')
elif BC == 'P':
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchi = 1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = np.outer(sv[:,nx],us[nx,:])
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
bdchi = bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
bdchi = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
else:
                    warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchi = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchi = bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchi = bdchi3*bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
else:
                    warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdch = bdchi
ch = chi
else:
bdch = bdchi*bdch
tensors = [chi,ch]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch = ncon(tensors,legs)
ch = np.reshape(ch,(bdch,bdch,d**2,d**2),order='F')
ch = ch[:,:,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
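# A hedged shape check, not part of the original package (the helper name
# and the depolarizing example are ours): a product channel built from one
# single-site superoperator yields OBC tensors of bond dimension one.
def _example_fin_create_channel():
    N, d, p = 3, 2, 0.2
    # Column-major vectorized depolarizing map: rho -> (1-p) rho + p tr(rho) I/d.
    vec_id = np.eye(d).reshape(-1, order='F')
    dep = (1 - p) * np.eye(d**2, dtype=complex) + (p / d) * np.outer(vec_id, vec_id)
    ch = fin_create_channel(N, d, 'O', [dep])
    assert all(np.shape(t) == (1, 1, d**2, d**2) for t in ch)
    return ch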
def fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list):
"""
    Creates an MPO for the derivative (over the estimated parameter) of the superoperator describing the quantum channel. Function for finite size systems.
Function for translationally invariant channels with unitary parameter encoding generated by h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
        Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
Returns:
chd: list of length N of ndarrays of a shape (Dl_chd,Dr_chd,d**2,d**2) for OBC (Dl_chd, Dr_chd can vary between sites) or ndarray of a shape (D_chd,D_chd,d**2,d**2,N) for PBC
Derivative of superoperator describing quantum channel in MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
if len(so_before_list) == 0:
if BC == 'O':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:]
ch1 = [ch1]*N
elif BC == 'P':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:,np.newaxis]
ch1 = np.tile(ch1,(1,1,1,1,N))
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
elif len(so_after_list) == 0:
ch1 = fin_create_channel(N,d,BC,so_before_list)
chd = fin_commutator(N,d,BC,ch1,h,1j)
else:
ch1 = fin_create_channel(N,d,BC,so_before_list)
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
return chd
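# A hedged shape check, not part of the original package (the helper name and
# values are ours): the derivative MPO of a pure phase encoding (no noise
# layers) on N qubits has bond dimension two in the bulk.
def _example_fin_create_channel_derivative():
    N, d = 3, 2
    h = np.diag([0.5, -0.5])
    chd = fin_create_channel_derivative(N, d, 'O', [], h, [])
    assert np.shape(chd[0]) == (1, 2, d**2, d**2)
    assert np.shape(chd[N-1]) == (2, 1, d**2, d**2)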
def fin_commutator(N, d, BC, a, h, c):
"""
    Calculates the MPO for the commutator b = [a, c*sum{h}] of MPO a with the sum of local generators h, with an arbitrary multiplicative scalar factor c.
    Generator h has to be diagonal in the computational basis; in other words, it is assumed that a is expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
a: list of length N of ndarrays of a shape (Dl_a,Dr_a,d,d) for OBC (Dl_a, Dr_a can vary between sites) or ndarray of a shape (D_a,D_a,d,d,N) for PBC
MPO.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
        Generator h has to be diagonal in the computational basis; in other words, it is assumed that a is expressed in the eigenbasis of h.
c: complex
Scalar factor which multiplies sum of local generators.
Returns:
b: list of length N of ndarrays of a shape (Dl_b,Dr_b,d,d) for OBC (Dl_b, Dr_b can vary between sites) or ndarray of a shape (D_b,D_b,d,d,N) for PBC
Commutator [a, c*sum{h}] in MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that a is expressed in the eigenbasis of h.')
if BC == 'O':
bh = [0]*N
b = [0]*N
for x in range(N):
da = np.shape(a[x])[2]
bda1 = np.shape(a[x])[0]
bda2 = np.shape(a[x])[1]
if x == 0:
bdbh1 = 1
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x > 0 and x < N-1:
bdbh1 = 2
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x == N-1:
bdbh1 = 2
bdbh2 = 1
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1],[c*(h[nxp,nxp]-h[nx,nx])]])
if da == d:
# a is operator
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx,nxp],a[x][:,:,nx,nxp])
elif da == d**2:
# a is superoperator (vectorized channel)
bh[x] = np.reshape(bh[x],(bdbh1,bdbh2,d**2),order='F')
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d**2,d**2),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx],a[x][:,:,nx,nxp])
elif BC == 'P':
da = np.shape(a)[2]
bda = np.shape(a)[0]
if N == 1:
bdbh = 1
else:
bdbh = 2
bh = np.zeros((bdbh,bdbh,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
if N == 1:
bh[:,:,nx,nxp,0] = c*(h[nxp,nxp]-h[nx,nx])
else:
bh[:,:,nx,nxp,0] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1],[0,0]])
for x in range(1,N-1):
bh[:,:,nx,nxp,x] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
bh[:,:,nx,nxp,N-1] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),0]])
if da == d:
# a is operator
b = np.zeros((bdbh*bda,bdbh*bda,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,nxp,x],a[:,:,nx,nxp,x])
elif da == d**2:
# a is superoperator (vectorized channel)
bh = np.reshape(bh,(bdbh,bdbh,d**2,N),order='F')
b = np.zeros((bdbh*bda,bdbh*bda,d**2,d**2,N),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,x],a[:,:,nx,nxp,x])
return b
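# A hedged sanity check, not part of the original package (the helper name is
# ours): the identity operator commutes with c*sum{h}, so its commutator MPO
# must contract to the zero matrix.
def _example_fin_commutator():
    N, d = 2, 2
    h = np.diag([0.5, -0.5])
    ident = np.eye(d, dtype=complex)[np.newaxis, np.newaxis, :, :]
    b = fin_commutator(N, d, 'O', [ident] * N, h, 1j)
    full = np.einsum('abij,bakl->ikjl', b[0], b[1])  # contract the two-site MPO
    assert np.linalg.norm(full) < 1e-12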
def fin_enlarge_bdl(cold,factor):
"""
Enlarge bond dimension of SLD MPO. Function for finite size systems.
Parameters:
cold: SLD MPO, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
        factor: factor which determines on average the relation between the old and newly added values of the SLD MPO
Returns:
c: SLD MPO with bd += 1
"""
rng = np.random.default_rng()
if type(cold) is list:
n = len(cold)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
c = [0]*n
x = 0
d = np.shape(cold[x])[2]
bdl1 = 1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl2-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl2-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2-1,:,:] = cold[x]
for x in range(1,n-1):
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2-1,:,:] = cold[x]
x = n-1
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = 1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl1-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl1-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2-1,:,:] = cold[x]
elif type(cold) is np.ndarray:
n = np.shape(cold)[4]
d = np.shape(cold)[2]
bdl = np.shape(cold)[0]+1
c = np.zeros((bdl,bdl,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(n):
meanrecold = np.sum(np.abs(np.real(cold[:,:,nx,nxp,x])))/(bdl-1)**2
meanimcold = np.sum(np.abs(np.imag(cold[:,:,nx,nxp,x])))/(bdl-1)**2
c[:,:,nx,nxp,x] = (meanrecold*rng.random((bdl,bdl))+1j*meanimcold*rng.random((bdl,bdl)))*factor
c = (c + np.conj(np.moveaxis(c,2,3)))/2
c[0:bdl-1,0:bdl-1,:,:,:] = cold
return c
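# A hedged shape check, not part of the original package (the helper name is
# ours): enlarging a random Hermitian-gauged PBC SLD MPO embeds the old
# tensor intact inside a bond dimension increased by one.
def _example_fin_enlarge_bdl():
    rng = np.random.default_rng(0)
    c = rng.random((2, 2, 2, 2, 3)) + 1j * rng.random((2, 2, 2, 2, 3))
    c = (c + np.conj(np.moveaxis(c, 2, 3))) / 2
    cbig = fin_enlarge_bdl(c, 0.5)
    assert np.shape(cbig) == (3, 3, 2, 2, 3)
    assert np.allclose(cbig[0:2, 0:2], c)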
def fin_enlarge_bdpsi(a0old,factor):
"""
Enlarge bond dimension of wave function MPS. Function for finite size systems.
Parameters:
a0old: wave function MPS, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
        factor: factor which determines on average the relation between the last and next-to-last values of the diagonals of the wave function MPS
Returns:
a0: wave function MPS with bd += 1
"""
rng = np.random.default_rng()
if type(a0old) is list:
n = len(a0old)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
a0 = [0]*n
x = 0
d = np.shape(a0old[x])[2]
bdpsi1 = 1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi2-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi2-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2-1,:] = a0old[x]
for x in range(1,n-1):
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2-1,:] = a0old[x]
x = n-1
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = 1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi1-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi1-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2-1,:] = a0old[x]
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[n-1] = a0[n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[x] = a0[x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[0]),a0[0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[0] = a0[0]/np.sqrt(np.abs(r1))
elif type(a0old) is np.ndarray:
n = np.shape(a0old)[3]
d = np.shape(a0old)[2]
bdpsi = np.shape(a0old)[0]+1
a0 = np.zeros((bdpsi,bdpsi,d,n),dtype=complex)
for nx in range(d):
for x in range(n):
meanrea0old = np.sum(np.abs(np.real(a0old[:,:,nx,x])))/(bdpsi-1)**2
meanima0old = np.sum(np.abs(np.imag(a0old[:,:,nx,x])))/(bdpsi-1)**2
a0[:,:,nx,x] = (meanrea0old*rng.random((bdpsi,bdpsi))+1j*meanima0old*rng.random((bdpsi,bdpsi)))*factor
a0[0:bdpsi-1,0:bdpsi-1,:,:] = a0old
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[2,2,1],[3,3,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
else:
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,n-1] = a0[:,:,:,n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[:,:,:,x] = a0[:,:,:,x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
return a0
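# A hedged shape check, not part of the original package (the helper name is
# ours): enlarging a random PBC wave-function MPS increases its bond
# dimension by one (the result is also renormalized in place).
def _example_fin_enlarge_bdpsi():
    rng = np.random.default_rng(0)
    a0 = rng.random((2, 2, 2, 3)) + 1j * rng.random((2, 2, 2, 3))
    a0big = fin_enlarge_bdpsi(a0, 0.5)
    assert np.shape(a0big) == (3, 3, 2, 3)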
#########################################
# 1.2.1 Problems with exact derivative. #
#########################################
def fin_FoM_FoMD_optbd(n,d,bc,ch,chp,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also check of convergence in bond dimensions. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
        alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
        alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
        resultm: matrix describing FoM/FoMD as a function of bd of respectively the SLD MPO [rows] and the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
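# Usage sketch for the full two-variable optimization above (illustrative only;
# `ch` and `chp` must be prepared elsewhere as channel MPOs of the documented
# shapes):
#   result, resultm, c, a0 = fin_FoM_FoMD_optbd(4, 2, 'O', ch, chp,
#                                               imprecision=1e-2,
#                                               bdlmax=20, bdpsimax=10)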
def fin_FoM_optbd(n,d,bc,a,b,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
    Optimization of FoM over the SLD MPO, together with a check of convergence in the bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
    alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
    resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
def fin_FoMD_optbd(n,d,bc,c2d,cpd,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
    Optimization of FoMD over the initial wave function MPS, together with a check of convergence in the bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
    alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
    resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
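# Usage sketch (illustrative; `c2d` and `cpd` are dual-SLD MPOs of the
# documented shapes):
#   result, resultv, a0 = fin_FoMD_optbd(4, 2, 'O', c2d, cpd, imprecision=1e-2)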
def fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
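# Note on the stopping rule of the see-saw above: one full sweep appends two
# values to `f` (a FoM step over the SLD and a FoMD step over the input state),
# so the test
#   np.std(f[-4:])/np.mean(f[-4:]) <= 0.1*imprecision
# declares convergence once the last two sweeps are relatively flat.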
def fin_FoM_OBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
tensors = [l1c,b[x],l1f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[x],np.eye(d),l2f[x]]
legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
cv = np.reshape(c[x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[x],b[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[x],a[x],c[x]]
legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
l2c = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[n-1])
tensors = [l1c,b[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[n-1],np.eye(d)]
legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
cv = np.reshape(c[n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
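# Minimal self-contained sketch (illustrative only, not part of the library
# API) of the single-site update used above: each site maximizes the quadratic
# form f(c) = 2*c@l1 - c@l2@c, whose stationary point is
# c = pinv(l2 + l2.T) @ (2*l1). `toy_l1` and `toy_l2` are hypothetical
# stand-ins for the contracted environment tensors l1 and l2.
def _demo_local_sld_update(seed=0):
    rng = np.random.default_rng(seed)
    m = rng.random((4,4))
    toy_l2 = m @ m.T + np.eye(4)        # symmetric and well conditioned
    toy_l1 = rng.random(4)
    dl2 = toy_l2 + toy_l2.T
    dl2pinv = np.linalg.pinv(dl2)
    dl2pinv = (dl2pinv + dl2pinv.T)/2   # resymmetrize, as in the routine above
    cv = dl2pinv @ (2*toy_l1)
    return 2*cv @ toy_l1 - cv @ toy_l2 @ cv  # maximized value of f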
def fin_FoM_PBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with PBC.
Parameters:
a: MPO for density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for SLD, expected ndarray of a shape (bd,bd,d,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = np.shape(a)[4]
d = np.shape(a)[2]
bdr = np.shape(a)[0]
bdrp = np.shape(b)[0]
bdl = np.shape(c)[0]
tol_fom = 0.1*imprecision/n**2
if n == 1:
tensors = [b[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
relunc_fom = 0.1*imprecision
l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2c = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
cv = np.reshape(c[:,:,:,:,x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
l2c = ncon(tensors,legs)
tensors = [l1c,b[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
def fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
d = np.shape(a0[0])[2]
tensors = [c2d[0][0,0,:,:]]
legs = [[-1,-2]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(d,d),order='F')
tensors = [cpd[0][0,0,:,:]]
legs = [[-1,-2]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(d,d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
fomdval = np.real(fomdval[position])
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fomd = 0.1*imprecision
l2df = [0]*n
lpdf = [0]*n
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[n-2] = ncon(tensors,legs)
l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[n-2] = ncon(tensors,legs)
lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2df[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpdf[x-1] = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[0])
tensors = [c2d[0],l2df[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cpd[0],lpdf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[0] = np.moveaxis(a0[0],2,0)
a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[0] = np.moveaxis(a0[0],0,2)
tensors = [np.diag(s) @ vh,a0[1]]
legs = [[-1,1],[1,-2,-3]]
a0[1] = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
l2dc = ncon(tensors,legs)
l2dc = l2dc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cpd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
lpdc = ncon(tensors,legs)
lpdc = lpdc[:,:,:,0,0,0]
for x in range(1,n-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
tensors = [l2dc,c2d[x],l2df[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[x],lpdf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[x] = np.moveaxis(a0[x],2,0)
a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[x] = np.moveaxis(a0[x],0,2)
tensors = [np.diag(s) @ vh,a0[x+1]]
legs = [[-1,1],[1,-2,-3]]
a0[x+1] = ncon(tensors,legs)
tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
lpdc = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[n-1])
tensors = [l2dc,c2d[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[n-1] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
for x in range(n-1,0,-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
a0[x] = np.moveaxis(a0[x],2,1)
a0[x] = np.reshape(a0[x],(bdpsi1,d*bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(vh,(np.shape(s)[0],d,bdpsi2),order='F')
a0[x] = np.moveaxis(a0[x],1,2)
tensors = [a0[x-1],u @ np.diag(s)]
legs = [[-1,1,-3],[1,-2]]
a0[x-1] = ncon(tensors,legs)
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
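# Minimal self-contained sketch (illustrative only) of the single-site FoMD
# update used above: the optimal local tensor is the eigenvector of the
# Hermitized matrix 2*lpd - l2d with the largest real eigenvalue. `toy_lpd`
# and `toy_l2d` are hypothetical stand-ins for the contracted environments.
def _demo_local_fomd_update(seed=0):
    rng = np.random.default_rng(seed)
    toy_lpd = rng.random((6,6)) + 1j*rng.random((6,6))
    toy_l2d = rng.random((6,6)) + 1j*rng.random((6,6))
    eiginput = 2*toy_lpd - toy_l2d
    eiginput = (eiginput + np.conj(eiginput).T)/2  # enforce Hermiticity
    fomdval, a0v = np.linalg.eig(eiginput)
    position = np.argmax(np.real(fomdval))
    vec = a0v[:,position]
    vec = vec/np.sqrt(np.abs(np.conj(vec) @ vec))  # unit norm, as above
    return np.real(fomdval[position]), vec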
def fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for initial wave function, expected ndarray of a shape (bd,bd,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
"""
n = np.shape(c2d)[4]
d = np.shape(c2d)[2]
bdl2d = np.shape(c2d)[0]
bdlpd = np.shape(cpd)[0]
bdpsi = np.shape(a0)[0]
tol_fomd = 0.1*imprecision/n**2
if n == 1:
tensors = [c2d[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [np.eye(bdpsi),np.eye(bdpsi)]
legs = [[-2,-1],[-4,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomdval = np.real(fomdval[position])
else:
relunc_fomd = 0.1*imprecision
l2df = np.zeros((bdpsi,bdl2d,bdpsi,bdpsi,bdl2d,bdpsi,n-1),dtype=complex)
lpdf = np.zeros((bdpsi,bdlpd,bdpsi,bdpsi,bdlpd,bdpsi,n-1),dtype=complex)
psinormf = np.zeros((bdpsi,bdpsi,bdpsi,bdpsi,n-1),dtype=complex)
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormf[:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2df[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpdf[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],psinormf[:,:,:,:,x]]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
psinormf[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c2d[:,:,:,:,0],l2df[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],lpdf[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormf[:,:,:,:,0]]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormc = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l2dc,c2d[:,:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc,psinormf[:,:,:,:,x]]
legs = [[1,2,-1,-3],[-2,-4,1,2]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,x] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [l2dc,np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [psinormc,np.conj(a0[:,:,:,x]),a0[:,:,:,x]]
legs = [[-1,-2,2,3],[2,-3,1],[3,-4,1]]
psinormc = ncon(tensors,legs)
tensors = [l2dc,c2d[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,n-1] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
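# Note on the PBC variant above: without a canonical gauge the local problem is
# a generalized eigenproblem A v = lambda N v with N = kron(eye(d), psinorm).
# It is reduced to an ordinary eigenproblem by multiplying with pinv(N)
# (eiginput = psinormpinv @ eiginput), and the chosen eigenvector is then
# normalized in the N-metric, v -> v/sqrt(|v^dag N v|), as done above.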
def fin_FoM_OBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
Returns:
fomval: value of FoM
"""
n = len(c)
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
            tensors = [c[0][0,0,:,:],b[0][0,0,:,:]]
legs = [[1,2],[2,1]]
l1 = ncon(tensors,legs)
            tensors = [c[0][0,0,:,:],a[0][0,0,:,:],c[0][0,0,:,:]]
legs = [[1,2],[2,3],[3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
l1 = l1[:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
l2 = l2[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
tensors = [c[0],b[0],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
        l1 = np.real(l1[0,0])  # (1,1) complex array; the FoM contribution is real up to numerical noise
tensors = [c[0],a[0],c[0],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
        l2 = np.real(l2[0,0,0])  # (1,1,1) complex array; extract the real scalar
fomval = 2*l1-l2
return fomval
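# Usage sketch (illustrative): evaluating the FoM of a given SLD candidate,
# with `a`, `b`, `c` MPOs of the documented shapes and no optimization:
#   fomval = fin_FoM_OBC_val(a, b, c)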
def fin_FoM_PBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with PBC.
Parameters:
a: MPO for a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
Returns:
fomval: value of FoM
"""
n = np.shape(a)[4]
if n == 1:
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[3,3,1,2],[4,4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[4,4,1,2],[5,5,2,3],[6,6,3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],b[:,:,:,:,0],l1]
legs = [[5,3,1,2],[6,4,2,1],[3,4,5,6]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0],l2]
legs = [[7,4,1,2],[8,5,2,3],[9,6,3,1],[4,5,6,7,8,9]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
return fomval
def fin_FoMD_OBC_val(c2d,cpd,a0):
"""
    Calculate the value of FoMD. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
Returns:
fomdval: value of FoMD
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
tensors = [np.conj(a0[0][0,0,:]),c2d[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[0][0,0,:]),cpd[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
l2d = l2d[:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
lpd = lpd[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
        l2d = np.real(l2d[0,0,0])  # (1,1,1) complex array; extract the real scalar
tensors = [np.conj(a0[0]),cpd[0],a0[0],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
        lpd = np.real(lpd[0,0,0])  # (1,1,1) complex array; extract the real scalar
fomdval = 2*lpd-l2d
return fomdval
def fin_FoMD_PBC_val(c2d,cpd,a0):
"""
Calculate the value of FoMD. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for the initial wave function, expected ndarray of a shape (bd,bd,d,n)
Returns:
fomdval: value of FoMD
"""
n = np.shape(c2d)[4]
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0],l2d]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0],lpd]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
return fomdval
#################################################################
# 1.2.2 Problems with discrete approximation of the derivative. #
#################################################################
def fin2_FoM_FoMD_optbd(n,d,bc,ch,chp,epsilon,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also a check of convergence with increasing bond dimensions. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of the local Hilbert space (dimension of the physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for a quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for a quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
cini: initial MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for the initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
resultm: matrix describing FoM/FoMD as a function of bd of respectively SLD MPO [rows] and the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
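# Minimal dense-matrix sketch (illustrative only) of the discrete-derivative
# figure of merit that the fin2_* routines optimize in MPO form: the exact
# derivative of rho is replaced by (rho(phi0+epsilon) - rho(phi0))/epsilon,
# giving F(L) = 2*Tr(L*(rho_eps - rho_0))/epsilon - Tr(L*rho_0*L).
def _demo_discrete_fom(rho0, rho_eps, sld, epsilon):
    drho = (rho_eps - rho0)/epsilon
    return np.real(2*np.trace(sld @ drho) - np.trace(sld @ rho0 @ sld))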
def fin2_FoM_optbd(n,d,bc,a,b,epsilon,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
    Optimization of FoM over the SLD MPO, together with a check of convergence in the bond dimension. Function for finite size systems. Version with two states separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in a and b, float
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
    alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
def fin2_FoMD_optbd(n,d,bc,c2d,cd,cpd,epsilon,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
    Optimization of FoMD over the initial wave function MPS, together with a check of convergence when increasing the bond dimension. Function for finite size systems. Version with two dual SLDs separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
    alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
    resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
def fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cd = channel_acting_on_operator(chd,c)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
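# Compared with fin_FoM_FoMD_optm, the loop above additionally propagates
# cd = channel_acting_on_operator(chd, c), so the FoMD step receives both dual
# SLDs (cd at phi_0, cpd at phi_0+epsilon) and can form their discrete
# difference instead of an exact-derivative MPO.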
def fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC. Version with two states separated by epsilon.
Parameters:
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:]]
legs = [[-2,-1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
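            # Maximizing FoM(L) = 2*Tr[L(rho_b - rho_a)]/epsilon - Tr[L rho_a L] over
            # the local tensor is a quadratic problem; its stationarity condition is
            # the linear system (l2 + l2^T) cv = 2*(l1 - l1_0)/epsilon, solved below
            # through a symmetrized pseudoinverse.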
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l1_0f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
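        # Each iteration sweeps the chain: right environments (l1f, l1_0f, l2f) are
        # precomputed from site n-1 down to 1, then every site tensor is updated
        # left-to-right while the left environments (l1c, l1_0c, l2c) are grown.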
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0f[n-2] = ncon(tensors,legs)
l1_0f[n-2] = l1_0f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],l1_0f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1_0f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],l1_0f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1_0c = ncon(tensors,legs)
l1_0c = l1_0c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
tensors = [l1c,b[x],l1f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[x],l1_0f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[x],np.eye(d),l2f[x]]
legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
cv = np.reshape(c[x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [l1c,c[x],b[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1c = ncon(tensors,legs)
tensors = [l1_0c,c[x],a[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [l2c,c[x],a[x],c[x]]
legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
l2c = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[n-1])
tensors = [l1c,b[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[n-1],np.eye(d)]
legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
cv = np.reshape(c[n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
def fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with PBC. Version with two states separated by epsilon.
Parameters:
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected ndarray of a shape (bd,bd,d,d,n)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = np.shape(a)[4]
d = np.shape(a)[2]
bdr = np.shape(a)[0]
bdrp = np.shape(b)[0]
bdl = np.shape(c)[0]
tol_fom = 0.1*imprecision/n**2
if n == 1:
tensors = [b[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fomval = np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv)
else:
relunc_fom = 0.1*imprecision
l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l1_0f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],l1_0f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1_0f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],l1_0f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2c = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[:,:,:,:,x],l1_0f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
cv = np.reshape(c[:,:,:,:,x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [l1_0c,c[:,:,:,:,x],a[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
l2c = ncon(tensors,legs)
tensors = [l1c,b[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
def fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC. Version with two dual SLDs separated by epsilon.
Parameters:
c2d: MPO for the square of the dual of the SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cd: MPO for the dual of the SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for the dual of the SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
    a0: MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for the initial wave function
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
d = np.shape(a0[0])[2]
tensors = [c2d[0][0,0,:,:]]
legs = [[-1,-2]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(d,d),order='F')
tensors = [cpd[0][0,0,:,:]]
legs = [[-1,-2]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(d,d),order='F')
tensors = [cd[0][0,0,:,:]]
legs = [[-1,-2]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(d,d),order='F')
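            # The optimal local state tensor is the leading eigenvector of the
            # effective operator 2*(L'_dual - L_dual)/epsilon - (L^2)_dual, i.e. the
            # maximizer of the FoMD Rayleigh quotient.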
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
fomdval = np.real(fomdval[position])
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fomd = 0.1*imprecision
l2df = [0]*n
lpdf = [0]*n
ldf = [0]*n
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[n-2] = ncon(tensors,legs)
l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[n-2] = ncon(tensors,legs)
lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
ldf[n-2] = ncon(tensors,legs)
ldf[n-2] = ldf[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2df[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpdf[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cd[x],a0[x],ldf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
ldf[x-1] = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[0])
tensors = [c2d[0],l2df[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cpd[0],lpdf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cd[0],ldf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[0] = np.moveaxis(a0[0],2,0)
a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[0] = np.moveaxis(a0[0],0,2)
tensors = [np.diag(s) @ vh,a0[1]]
legs = [[-1,1],[1,-2,-3]]
a0[1] = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
l2dc = ncon(tensors,legs)
l2dc = l2dc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cpd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
lpdc = ncon(tensors,legs)
lpdc = lpdc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
ldc = ncon(tensors,legs)
ldc = ldc[:,:,:,0,0,0]
for x in range(1,n-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
tensors = [l2dc,c2d[x],l2df[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[x],lpdf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [ldc,cd[x],ldf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[x] = np.moveaxis(a0[x],2,0)
a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[x] = np.moveaxis(a0[x],0,2)
tensors = [np.diag(s) @ vh,a0[x+1]]
legs = [[-1,1],[1,-2,-3]]
a0[x+1] = ncon(tensors,legs)
tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
lpdc = ncon(tensors,legs)
tensors = [ldc,np.conj(a0[x]),cd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
ldc = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[n-1])
tensors = [l2dc,c2d[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [ldc,cd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[n-1] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
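            # Restore canonical form before the next pass: sweep right-to-left,
            # SVD each site tensor and absorb u @ diag(s) into its left neighbor.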
for x in range(n-1,0,-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
a0[x] = np.moveaxis(a0[x],2,1)
a0[x] = np.reshape(a0[x],(bdpsi1,d*bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(vh,(np.shape(s)[0],d,bdpsi2),order='F')
a0[x] = np.moveaxis(a0[x],1,2)
tensors = [a0[x-1],u @ np.diag(s)]
legs = [[-1,1,-3],[1,-2]]
a0[x-1] = ncon(tensors,legs)
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
def fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with PBC. Version with two dual SLDs separated by epsilon.
Parameters:
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected ndarray of a shape (bd,bd,d,d,n)
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected ndarray of a shape (bd,bd,d,d,n)
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0: MPS for initial wave function, expected ndarray of a shape (bd,bd,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
"""
n = np.shape(c2d)[4]
d = np.shape(c2d)[2]
bdl2d = np.shape(c2d)[0]
bdlpd = np.shape(cpd)[0]
bdld = np.shape(cd)[0]
bdpsi = np.shape(a0)[0]
tol_fomd = 0.1*imprecision/n**2
if n == 1:
tensors = [c2d[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cd[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [np.eye(bdpsi),np.eye(bdpsi)]
legs = [[-2,-1],[-4,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
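        # With PBC the MPS is not in canonical form, so maximizing the Rayleigh
        # quotient is a generalized eigenproblem A v = lambda B v with B = psinorm;
        # multiplying by pinv(B) below reduces it to an ordinary eigenproblem.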
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomdval = np.real(fomdval[position])
else:
relunc_fomd = 0.1*imprecision
l2df = np.zeros((bdpsi,bdl2d,bdpsi,bdpsi,bdl2d,bdpsi,n-1),dtype=complex)
lpdf = np.zeros((bdpsi,bdlpd,bdpsi,bdpsi,bdlpd,bdpsi,n-1),dtype=complex)
ldf = np.zeros((bdpsi,bdld,bdpsi,bdpsi,bdld,bdpsi,n-1),dtype=complex)
psinormf = np.zeros((bdpsi,bdpsi,bdpsi,bdpsi,n-1),dtype=complex)
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
ldf[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormf[:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2df[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpdf[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cd[:,:,:,:,x],a0[:,:,:,x],ldf[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
ldf[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],psinormf[:,:,:,:,x]]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
psinormf[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c2d[:,:,:,:,0],l2df[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],lpdf[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cd[:,:,:,:,0],ldf[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormf[:,:,:,:,0]]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
ldc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormc = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l2dc,c2d[:,:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [ldc,cd[:,:,:,:,x],ldf[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc,psinormf[:,:,:,:,x]]
legs = [[1,2,-1,-3],[-2,-4,1,2]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,x] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [l2dc,np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [ldc,np.conj(a0[:,:,:,x]),cd[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
ldc = ncon(tensors,legs)
tensors = [psinormc,np.conj(a0[:,:,:,x]),a0[:,:,:,x]]
legs = [[-1,-2,2,3],[2,-3,1],[3,-4,1]]
psinormc = ncon(tensors,legs)
tensors = [l2dc,c2d[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [ldc,cd[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,n-1] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
def fin2_FoM_OBC_val(a,b,epsilon,c):
"""
Calculate value of FoM. Function for finite size systems with OBC. Version with two states separated by epsilon.
Parameters:
a: MPO for density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
Returns:
fomval: value of FoM
"""
n = len(c)
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
tensors = [c[0][0,0,:,:],b[0][0,0,:,:]]
legs = [[1,2],[2,1]]
l1 = ncon(tensors,legs)
tensors = [c[0][0,0,:,:],a[0][0,0,:,:]]
legs = [[1,2],[2,1]]
l1_0 = ncon(tensors,legs)
tensors = [c[0][0,0,:,:],a[0][0,0,:,:],c[0][0,0,:,:]]
legs = [[1,2],[2,3],[3,1]]
l2 = ncon(tensors,legs)
fomval = 2*(l1-l1_0)/epsilon-l2
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
l1 = l1[:,:,0,0]
tensors = [c[n-1],a[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0 = ncon(tensors,legs)
l1_0 = l1_0[:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
l2 = l2[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
tensors = [c[x],a[x],l1_0]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1_0 = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
tensors = [c[0],b[0],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
l1 = float(l1)
tensors = [c[0],a[0],l1_0]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1_0 = ncon(tensors,legs)
l1_0 = float(l1_0)
tensors = [c[0],a[0],c[0],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
l2 = float(l2)
fomval = 2*(l1-l1_0)/epsilon-l2
return fomval
def fin2_FoM_PBC_val(a,b,epsilon,c):
"""
Calculate value of FoM. Function for finite size systems with PBC. Version with two states separated by epsilon.
Parameters:
a: MPO for density matrix at the value of estimated parameter phi=phi_0, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for density matrix at the value of estimated parameter phi=phi_0+epsilon, expected ndarray of a shape (bd,bd,d,d,n)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for SLD, expected ndarray of a shape (bd,bd,d,d,n)
Returns:
fomval: value of FoM
"""
n = np.shape(a)[4]
if n == 1:
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[3,3,1,2],[4,4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0]]
legs = [[3,3,1,2],[4,4,2,1]]
l1_0 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[4,4,1,2],[5,5,2,3],[6,6,3,1]]
l2 = ncon(tensors,legs)
fomval = 2*(l1-l1_0)/epsilon-l2
else:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0 = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],l1_0]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1_0 = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],b[:,:,:,:,0],l1]
legs = [[5,3,1,2],[6,4,2,1],[3,4,5,6]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],l1_0]
legs = [[5,3,1,2],[6,4,2,1],[3,4,5,6]]
l1_0 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0],l2]
legs = [[7,4,1,2],[8,5,2,3],[9,6,3,1],[4,5,6,7,8,9]]
l2 = ncon(tensors,legs)
fomval = 2*(l1-l1_0)/epsilon-l2
return fomval
def fin2_FoMD_OBC_val(c2d,cd,cpd,epsilon,a0):
"""
Calculate value of FoMD. Function for finite size systems with OBC. Version with two dual SLDs separated by epsilon.
Parameters:
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
    a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
Returns:
fomdval: value of FoMD
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
tensors = [np.conj(a0[0][0,0,:]),c2d[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[0][0,0,:]),cpd[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[0][0,0,:]),cd[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
ld = ncon(tensors,legs)
fomdval = 2*(lpd-ld)/epsilon-l2d
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
l2d = l2d[:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
lpd = lpd[:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
ld = ncon(tensors,legs)
ld = ld[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cd[x],a0[x],ld]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
ld = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
l2d = float(l2d)
tensors = [np.conj(a0[0]),cpd[0],a0[0],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
lpd = float(lpd)
tensors = [np.conj(a0[0]),cd[0],a0[0],ld]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
ld = ncon(tensors,legs)
ld = float(ld)
fomdval = 2*(lpd-ld)/epsilon-l2d
return fomdval
def fin2_FoMD_PBC_val(c2d,cd,cpd,epsilon,a0):
"""
Calculate value of FoMD. Function for finite size systems with PBC. Version with two dual SLDs separated by epsilon.
Parameters:
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected ndarray of a shape (bd,bd,d,d,n)
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected ndarray of a shape (bd,bd,d,d,n)
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0: MPS for initial wave function, expected ndarray of a shape (bd,bd,d,n)
Returns:
fomdval: value of FoMD
"""
n = np.shape(c2d)[4]
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
ld = ncon(tensors,legs)
fomdval = 2*(lpd-ld)/epsilon-l2d
else:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
ld = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cd[:,:,:,:,x],a0[:,:,:,x],ld]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
ld = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0],l2d]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0],lpd]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cd[:,:,:,:,0],a0[:,:,:,0],ld]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
ld = ncon(tensors,legs)
fomdval = 2*(lpd-ld)/epsilon-l2d
return fomdval
##########################################
# #
# #
# 2 Functions for infinite size systems. #
# #
# #
##########################################
#############################
# #
# 2.1 High level functions. #
# #
#############################
def inf(so_before_list, h, so_after_list, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the lim_{N --> infinity} QFI/N over operator tilde{L} (in iMPO representation) and wave function psi0 (in iMPS representation) and check of convergence in their bond dimensions. Function for infinite size systems.
    User has to provide information about the dynamics by specifying the quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
    User has to provide one defining operation for each layer as a local superoperator. Those local superoperators have to be input in the order of their action on the system.
    Parameter encoding is a stand-out quantum operation. It is assumed that parameter encoding acts only once and is unitary, so the user has to provide only its generator h.
    Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
        Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
L_ini: ndarray of a shape (D_L,D_L,d,d), optional
Initial iMPO for tilde{L}.
psi0_ini: ndarray of a shape (D_psi0,D_psi0,d), optional
Initial iMPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for iMPO representing tilde{L}).
D_L_max_forced: bool, optional
        True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
        True if the Hermitian gauge has to be imposed on the iMPO representing tilde{L}, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for iMPS representing psi0).
D_psi0_max_forced: bool, optional
        True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_m: ndarray
Matrix describing figure of merit in function of bond dimensions of respectively tilde{L} [rows] and psi0 [columns].
L: ndarray of a shape (D_L,D_L,d,d)
Optimal tilde{L} in iMPO representation.
psi0: ndarray of a shape (D_psi0,D_psi0,d)
Optimal psi0 in iMPS representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
epsilon = 10**-4
aux = np.kron(h, np.eye(d)) - np.kron(np.eye(d), h)
z = np.diag(np.exp(-1j * np.diag(aux) * epsilon))
ch = inf_create_channel(d, so_before_list + so_after_list)
ch2 = inf_create_channel(d, so_before_list + [z] + so_after_list)
result, result_m, L, psi0 = inf_gen(d, ch, ch2, epsilon, inf_L_symfun, inf_psi0_symfun, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
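# A minimal sketch of a call (hypothetical model: single-qubit phase estimation with
# local dephasing; the superoperator below is diagonal in the eigenbasis of h, as
# required):
#
#   d = 2
#   h = np.diag([0.5, -0.5])
#   gamma = 0.1
#   aux = np.kron(h, np.eye(d)) - np.kron(np.eye(d), h)
#   deph = np.diag(np.exp(-gamma * np.abs(np.diag(aux))))   # shape (d**2, d**2)
#   result, result_m, L, psi0 = inf([], h, [deph])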
def inf_gen(d, ch, ch2, epsilon, symfun_L, symfun_psi0, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the figure of merit (usually interpreted as lim_{N --> infinity} QFI/N) over operator tilde{L} (in iMPO representation) and wave function psi0 (in iMPS representation) and check of convergence in their bond dimensions. Function for infinite size systems.
    User has to provide information about the dynamics by specifying two channels, separated by a small parameter epsilon, as superoperators in iMPO representation.
    By definition this infinite approach assumes translational invariance of the problem. Other than that there are no constraints on the structure of the channel, but the complexity of the calculations depends strongly on the channel's bond dimension.
Parameters:
d: integer
Dimension of local Hilbert space (dimension of physical index).
ch: ndarray of a shape (D_ch,D_ch,d**2,d**2)
Quantum channel as superoperator in iMPO representation.
ch2: ndarray of a shape (D_ch2,D_ch2,d**2,d**2)
Quantum channel as superoperator in iMPO representation for the value of estimated parameter shifted by epsilon in relation to ch.
epsilon: float
Value of a separation between estimated parameters encoded in ch and ch2.
symfun_L: function
        Function which symmetrizes the iMPO for tilde{L} after each step of optimization (the simplest one would be lambda x: x).
        Choosing a good function is a key factor for successful optimization in the infinite approach.
        The TNQMetro package features the inf_L_symfun function, which performs well in dephasing-type problems.
    symfun_psi0: function
        Function which symmetrizes the iMPS for psi0 after each step of optimization (the simplest one would be lambda x: x).
        Choosing a good function is a key factor for successful optimization in the infinite approach.
        The TNQMetro package features the inf_psi0_symfun function, which performs well in dephasing-type problems.
L_ini: ndarray of a shape (D_L,D_L,d,d), optional
Initial iMPO for tilde{L}.
psi0_ini: ndarray of a shape (D_psi0,D_psi0,d), optional
Initial iMPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for iMPO representing tilde{L}).
D_L_max_forced: bool, optional
        True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
        True if the Hermitian gauge has to be imposed on the iMPO representing tilde{L}, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for iMPS representing psi0).
D_psi0_max_forced: bool, optional
        True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_m: ndarray
Matrix describing figure of merit in function of bond dimensions of respectively tilde{L} [rows] and psi0 [columns].
L: ndarray of a shape (D_L,D_L,d,d)
Optimal tilde{L} in iMPO representation.
psi0: ndarray of a shape (D_psi0,D_psi0,d)
Optimal psi0 in iMPS representation.
"""
result, result_m, L, psi0 = inf_FoM_FoMD_optbd(d, ch, ch2, epsilon, symfun_L, symfun_psi0, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
def inf_state(so_before_list, h, so_after_list, rho0, L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the lim_{N --> infinity} QFI/N over operator tilde{L} (in iMPO representation) and check of convergence in its bond dimension. Function for infinite size systems and fixed state of the system.
    User has to provide information about the dynamics by specifying the quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
    User has to provide one defining operation for each layer as a local superoperator. Those local superoperators have to be input in the order of their action on the system.
    Parameter encoding is a stand-out quantum operation. It is assumed that parameter encoding acts only once and is unitary, so the user has to provide only its generator h.
    Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
        Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
rho0: ndarray of a shape (D_rho0,D_rho0,d,d)
Density matrix describing initial state of the system in iMPO representation.
L_ini: ndarray of a shape (D_L,D_L,d,d), optional
Initial iMPO for tilde{L}.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for iMPO representing tilde{L}).
D_L_max_forced: bool, optional
        True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
        True if the Hermitian gauge has to be imposed on the iMPO representing tilde{L}, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
Vector describing figure of merit in function of bond dimensions of tilde{L}.
L: ndarray of a shape (D_L,D_L,d,d)
Optimal tilde{L} in iMPO representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
epsilon = 10**-4
    aux = np.kron(h, np.eye(d)) - np.kron(np.eye(d), h)
"""Calculations on shapes and vectors."""
import logging
import dataclasses
import typing
import enum
import numpy as np
import sympy as sy
import tqdm
import scipy.optimize as opt
import symfit as sf
LOGGER = logging.getLogger('ectopylasm.geometry')
LOGGER.setLevel(logging.INFO)
def normalize_vector(vector):
"""Input `vector` divided by its absolute size yields a vector of size 1."""
norm = np.linalg.norm(vector)
if norm == 0.:
raise ZeroDivisionError("A zero vector cannot be normalized")
return vector / norm
def angle_between_two_vectors(a, b):
"""
Calculate the angle in radians between two vectors `a` and `b`.
Implementation credits to https://stackoverflow.com/a/13849249/1199693.
"""
a_n = normalize_vector(a)
b_n = normalize_vector(b)
return np.arccos(np.clip(np.dot(a_n, b_n), -1.0, 1.0))
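# Example: perpendicular unit vectors enclose a right angle:
# angle_between_two_vectors((1, 0, 0), (0, 1, 0))  # -> pi/2 (~1.5708)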
@dataclasses.dataclass(frozen=True)
class Point:
"""A three dimensional point with x, y and z components."""
x: float
y: float
z: float
def to_array(self):
"""Convert to a NumPy array `np.array((x, y, z))`."""
return np.array((self.x, self.y, self.z))
@dataclasses.dataclass(frozen=True)
class Plane:
"""
A plane.
The plane is defined by four parameters, a, b, c and d, which form the
plane equation a*x + b*y + c*z + d = 0. Points (x, y, z) for which this
equation applies are points on the plane.
On creation, the input parameters a, b and c are normalized. When seen
as a vector n = (a, b, c), n is the normal vector to the plane,
indicating its direction. This vector is normalized to have length one.
The fourth parameter d relates to the position of the plane in space. It
can be calculated from a known point in the plane (X, Y, Z) as
d = -a*X - b*Y - c*Z, but can also be given directly.
"""
a: float
b: float
c: float
d: float
def __init__(self, a, b, c, d):
"""
Construct the plane, taking the four plane parameters as input.
Normalizes a, b and c so that the vector n = (a, b, c) has length 1.
"""
a, b, c = normalize_vector((a, b, c))
object.__setattr__(self, 'a', a)
object.__setattr__(self, 'b', b)
object.__setattr__(self, 'c', c)
object.__setattr__(self, 'd', d)
@staticmethod
def d_from_point(point, normal):
"""Calculate d factor in plane equation ax + by + cz + d = 0."""
return -(point[0] * normal[0] + point[1] * normal[1] + point[2] * normal[2])
@classmethod
def from_point(cls, a, b, c, point):
"""Plane constructor that uses a point on the plane as input instead of d."""
a, b, c = normalize_vector((a, b, c))
return cls(a, b, c, cls.d_from_point(point, (a, b, c)))
@classmethod
def from_fit_result(cls, fit_result):
"""Generate a Plane from `fit_result`, the output of `fit.fit_plane`."""
return cls(fit_result.params['a'], fit_result.params['b'],
fit_result.params['c'], fit_result.params['d'])
@classmethod
def from_points(cls, points):
"""
Generate a Plane by fitting to a set of points.
The set of N point coordinates with shape (3, N) is given by `points`.
"""
return cls.from_fit_result(fit_plane(points))
def generate_point(self):
"""
Generate a point in the plane.
Calculate a point in the plane based on d at x,y=0,0 (could be
anywhere); -cz = ax + by + d. If c happens to be zero, try x,z=0,0, and
if b is zero as well, do y,z=0,0.
"""
if self.c != 0:
return (0, 0, -self.d / self.c)
elif self.b != 0:
return (0, -self.d / self.b, 0)
else:
return (-self.d / self.a, 0, 0)
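# Example: the plane z = 2 has normal (0, 0, 1) and contains the point (0, 0, 2):
# Plane.from_point(0, 0, 1, (0, 0, 2))   # equivalent to Plane(0, 0, 1, -2)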
class PlaneSurfaceLimitException(RuntimeError):
"""Raised by plane_surface when the given limits are invalid."""
pass
class PlaneSurfaceNormalException(RuntimeError):
"""Raised by plane_surface when the normal is not compatible with the limits."""
pass
def plane_surface(plane: Plane, x_lim=None, y_lim=None, z_lim=None):
"""
Get plane surface coordinates.
Calculate coordinates of the part of a plane inside a cubical box. Two of
the limited parameters are used to calculate the coordinates in the third
direction.
Note that the first number in the pairs must be smaller than the second!
You only need to provide two pairs of coordinates, so only two of x_lim,
y_lim and z_lim need to be defined. When all three are defined, the
default is to use the x and y pairs. This option to choose is useful when
you have a plane that has a zero normal component in one of the directions.
In that case, you cannot use the limits in that direction, because the
plane coordinates will involve a division by that normal component (which
would give a division by zero error).
plane: a Plane object
x_lim: iterable of the two extrema in the x direction. Default: None.
y_lim: same as x, but for y
    z_lim: same as x, but for z
"""
if (x_lim is not None) + (y_lim is not None) + (z_lim is not None) < 2:
raise PlaneSurfaceLimitException("Two or three `_lim` kwargs must not be `None`.")
if plane.c != 0 and x_lim is not None and y_lim is not None:
# get box limits in two dimensions
x, y = np.meshgrid(x_lim, y_lim)
# find corresponding z coordinates
z = -(plane.a * x + plane.b * y + plane.d) / plane.c
elif plane.b != 0 and x_lim is not None and z_lim is not None:
# get box limits in two dimensions
x, z = np.meshgrid(x_lim, z_lim)
# find corresponding y coordinates
y = -(plane.a * x + plane.c * z + plane.d) / plane.b
elif plane.a != 0 and y_lim is not None and z_lim is not None:
y, z = np.meshgrid(y_lim, z_lim)
# find corresponding x coordinates
x = -(plane.b * y + plane.c * z + plane.d) / plane.a
else:
raise PlaneSurfaceNormalException("Invalid combination of `_lim` kwargs and plane parameters; normal components must not be zero in the direction in which the limits are not provided.")
return x, y, z
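# Example: a patch of the plane z = 2 above the unit square in x and y:
# x, y, z = plane_surface(Plane(0, 0, 1, -2), x_lim=(-1, 1), y_lim=(-1, 1))
# # z is a 2x2 array filled with 2.0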
def thick_plane_points(plane: Plane, thickness, plane_point=None):
"""
Convert plane point to two thick plane points.
Given a Plane and a thickness, return two points along the normal that
are `thickness` apart. Optionally specify a specific point in the plane.
"""
if plane_point is None:
plane_point = plane.generate_point()
point_1 = (plane_point[0] + 0.5 * thickness * plane.a,
plane_point[1] + 0.5 * thickness * plane.b,
plane_point[2] + 0.5 * thickness * plane.c)
point_2 = (plane_point[0] - 0.5 * thickness * plane.a,
plane_point[1] - 0.5 * thickness * plane.b,
plane_point[2] - 0.5 * thickness * plane.c)
return point_1, point_2
def thick_plane_planes(plane: Plane, thickness):
"""Convert plane to two planes separated by thickness."""
plane_point_1, plane_point_2 = thick_plane_points(plane, thickness)
plane_1 = Plane.from_point(plane.a, plane.b, plane.c, plane_point_1)
plane_2 = Plane.from_point(plane.a, plane.b, plane.c, plane_point_2)
return plane_1, plane_2
def point_distance_to_plane(point, plane: Plane):
"""
Get signed distance of point to plane.
The sign of the resulting distance tells you whether the point is in
the same or the opposite direction of the plane normal vector.
point: an iterable of length 3 representing a point in 3D space
plane: a Plane object
"""
# from http://mathworld.wolfram.com/Point-PlaneDistance.html
# N.B.: no need to divide by ||(a,b,c)||, since that is always 1
return plane.a * point[0] + plane.b * point[1] + plane.c * point[2] + plane.d
def filter_points_plane(points_xyz, plane: Plane, plane_thickness):
"""
Select the points that are within the thick plane.
points_xyz: a vector of shape (3, N) representing N points in 3D space
plane: a Plane object
plane_thickness: the thickness of the plane (the distance between the two
composing planes)
"""
plane_1, plane_2 = thick_plane_planes(plane, plane_thickness)
p_filtered = []
for p_i in points_xyz.T:
distance_1 = point_distance_to_plane(p_i, plane_1)
distance_2 = point_distance_to_plane(p_i, plane_2)
if abs(distance_1) <= plane_thickness and abs(distance_2) <= plane_thickness:
p_filtered.append(p_i)
return p_filtered
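# Example (hypothetical point cloud): keep points within 0.05 of the z = 0 plane:
# points = np.random.uniform(-1, 1, (3, 1000))
# kept = filter_points_plane(points, Plane(0, 0, 1, 0), 0.1)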
@dataclasses.dataclass(frozen=True)
class Cone:
"""
A cone.
The cone is defined mainly by its `height` and `radius`. When the other
parameters are left at their default values, this will produce a cone with
its axis along the z-axis and the center of its circular base at position
(x, y, z) = (0, 0, 0).
Three optional parameters define its location and orientation: two rotation
parameters `rot_x` and `rot_y`, giving respectively rotations around the x
and y axes (the x rotation is performed first, then the y rotation) and one
translation parameter called `base_pos`, which itself is a `Point` object,
and which moves the position of the circular base of the cone.
"""
height: float
radius: float
rot_x: float = dataclasses.field(default=2 * np.pi, metadata={'unit': 'radians'})
rot_y: float = dataclasses.field(default=2 * np.pi, metadata={'unit': 'radians'})
base_pos: Point = Point(0, 0, 0)
def axis(self):
"""Get the cone's axis unit vector from its rotation angles (radians)."""
# z-unit vector (0, 0, 1) rotated twice
cone_axis = (0, -np.sin(self.rot_x), np.cos(self.rot_x)) # rotation around x-axis
cone_axis = np.array((np.sin(self.rot_y) * cone_axis[2],
cone_axis[1],
np.cos(self.rot_y) * cone_axis[2])) # around y
return cone_axis
def apex_position(self):
"""Get cone apex position from cone parameters."""
return self.base_pos.to_array() + self.axis() * self.height
def opening_angle(self):
"""Twice the opening angle is the maximum angle between directrices."""
return np.arctan(self.radius / self.height)
@classmethod
def from_fit_result(cls, fit_result):
"""Generate a Cone from `fit_result`, the output of `fit.fit_cone`."""
return cls(*fit_result['x'][:4], base_pos=Point(*fit_result['x'][4:]))
@classmethod
def from_points(cls, points, **kwargs):
"""
Generate a Cone by fitting to a set of points.
The set of N point coordinates with shape (3, N) is given by `points`.
"""
return cls.from_fit_result(fit_cone(points, **kwargs))
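# Sanity sketch: with the default rotations of 2*pi (i.e. no net rotation), the
# axis is the +z unit vector, the apex sits at z = height, and the opening
# angle is arctan(radius / height).
def _demo_default_cone():
    cone = Cone(height=2.0, radius=1.0)
    assert np.allclose(cone.axis(), (0.0, 0.0, 1.0))
    assert np.allclose(cone.apex_position(), (0.0, 0.0, 2.0))
    assert np.isclose(cone.opening_angle(), np.arctan(0.5))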
def cone_sympy_model(cone: Cone):
"""
Convert `cone` to a sympy based cone model.
Returns the model (first return value) and a dictionary with constituent
symbols.
"""
height, radius, u_param, theta_param = sy.symbols('height, radius, u_param, theta_param')
# column vector for the non-rotated, non-translated parameterized cone surface equation
cone_eqn = sy.Matrix([(height - u_param) / height * radius * sy.cos(theta_param),
(height - u_param) / height * radius * sy.sin(theta_param),
u_param])
base_pos_vec = sy.Matrix([cone.base_pos.x, cone.base_pos.y, cone.base_pos.z])
# rotation matrices R_x and R_y
r_x = sy.Matrix([[1, 0, 0],
[0, sy.cos(cone.rot_x), -sy.sin(cone.rot_x)],
[0, sy.sin(cone.rot_x), sy.cos(cone.rot_x)]])
r_y = sy.Matrix([[sy.cos(cone.rot_y), 0, sy.sin(cone.rot_y)],
[0, 1, 0],
[-sy.sin(cone.rot_y), 0, sy.cos(cone.rot_y)]])
cone_rot_trans = r_y @ (r_x @ cone_eqn) + base_pos_vec
return cone_rot_trans, {'height': height, 'radius': radius, 'u_param': u_param,
'theta_param': theta_param}
def cone_surface(cone: Cone, n_steps=20):
"""
Calculate coordinates of the surface of a cone.
cone: a Cone object
n_steps: number of steps in the parametric range used for drawing (more gives a
smoother surface, but may render more slowly)
"""
cone_model, cone_symbols = cone_sympy_model(cone)
    # parametric steps along the cone axis (u) and around it (theta)
u_steps = np.linspace(0, cone.height, n_steps)
theta_steps = np.linspace(0, 2 * np.pi, n_steps)
u_array, theta_array = np.meshgrid(u_steps, theta_steps)
    # evaluate the surface coordinates at each (u, theta) pair
x, y, z = [], [], []
for u_i, theta_i in zip(u_array.flatten(), theta_array.flatten()):
subs = {cone_symbols['height']: cone.height, cone_symbols['radius']: cone.radius,
cone_symbols['u_param']: u_i, cone_symbols['theta_param']: theta_i}
xyz = np.array(cone_model.evalf(subs=subs).tolist()).astype(np.float64)
x.append(xyz[0])
y.append(xyz[1])
z.append(xyz[2])
return (np.array(x).reshape(u_array.shape),
np.array(y).reshape(u_array.shape),
np.array(z).reshape(u_array.shape))
def thick_cone_base_positions(cone: Cone, thickness):
"""
Convert cone base position to two thick cone base positions.
Given the cone parameters, return two base positions along the cone axis
that are a certain distance apart, such that the distance between the
cone surfaces (the directrices) is `thickness` apart.
cone: a Cone object
thickness: distance between the two cone surfaces (i.e. their directrices)
"""
thickness = abs(thickness)
# trigonometry:
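    # shifting the base along the axis by a distance d moves the lateral
    # surface outward by d * sin(opening_angle), with
    # sin(opening_angle) = radius / sqrt(height**2 + radius**2), so separating
    # the surfaces by `thickness` needs d = thickness / sin(opening_angle),
    # which expands to the expression below: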
base_distance = (thickness / cone.radius * cone.height
* np.sqrt(1 + cone.radius**2 / cone.height**2))
cone_axis = cone.axis()
base_pos_bottom = cone.base_pos.to_array() - cone_axis * 0.5 * base_distance
base_pos_top = cone.base_pos.to_array() + cone_axis * 0.5 * base_distance
return base_pos_bottom, base_pos_top
def thick_cone_cones(cone: Cone, thickness) -> typing.Tuple[Cone, Cone]:
"""
Convert one Cone to two cones separated by `thickness`.
Given the cone parameters, return two cones, such that the distance between
the cone surfaces (the directrices) is `thickness` apart.
cone: a Cone object
thickness: distance between the two cone surfaces (i.e. their directrices)
"""
base_pos_bottom, base_pos_top = thick_cone_base_positions(cone, thickness)
cone_bottom = Cone(cone.height, cone.radius, rot_x=cone.rot_x,
rot_y=cone.rot_y, base_pos=Point(*base_pos_bottom))
cone_top = Cone(cone.height, cone.radius, rot_x=cone.rot_x,
rot_y=cone.rot_y, base_pos=Point(*base_pos_top))
return cone_bottom, cone_top
class ConeRegion(enum.Enum):
"""
Class defining three regions in and around cones.
These regions are used in point_distance_to_cone to pass on information
about which kind of region the point is in. This can be used in other
functions (like filter_points_cone).
The three options are:
- perpendicular: the point is at a location where its shortest distance to
the cone surface is perpendicular to that surface
- above_apex: the point is somewhere above the apex of the cone, but not
perpendicular to the surface
- below_directrix: the point is not perpendicular to the surface and it is
below the directrix
"""
    perpendicular = enum.auto()
above_apex = enum.auto()
below_directrix = enum.auto()
def point_distance_to_cone(point, cone: Cone, return_extra=False):
"""
Get distance of point to cone.
Check whether for a point `point`, the shortest path to the cone is
perpendicular to the cone surface (and if so, return it). If
not, it is either "above" the apex and the shortest path is simply
the line straight to the apex, or it is "below" the base, and the
shortest path is the shortest path to the directrix (the base
circle).
    This function returns a second value indicating which of the three
    cases above holds for `point`, as a ConeRegion member:
    ConeRegion.perpendicular, ConeRegion.above_apex or
    ConeRegion.below_directrix.
Extra values can be returned to be reused outside the function by
setting return_extra to True.
"""
cone_axis = cone.axis()
apex_pos = cone.apex_position()
point_apex_vec = np.array(point) - apex_pos
point_apex_angle = np.pi - angle_between_two_vectors(cone_axis, point_apex_vec)
opening_angle = cone.opening_angle()
# for the second conditional, we need the length of the component of the
# difference vector between P and apex along the closest generatrix
point_apex_generatrix_angle = point_apex_angle - opening_angle
point_apex_distance = np.sqrt(np.sum(point_apex_vec**2))
point_apex_generatrix_component = point_apex_distance * np.cos(point_apex_generatrix_angle)
generatrix_length = np.sqrt(cone.radius**2 + cone.height**2)
returnees = {}
if return_extra:
returnees['opening_angle'] = opening_angle
returnees['point_apex_angle'] = point_apex_angle
if point_apex_angle > opening_angle + np.pi / 2:
# "above" the apex
return point_apex_distance, ConeRegion.above_apex, returnees
elif point_apex_generatrix_component > generatrix_length:
# "below" the directrix
# use cosine rule to find length of third side
        return (np.sqrt(point_apex_distance**2 + generatrix_length**2
                        - 2 * point_apex_distance * generatrix_length
                        * np.cos(point_apex_generatrix_angle)),
                ConeRegion.below_directrix, returnees)
    else:
        # perpendicular case (reconstructed; assumes the usual point-to-line
        # distance): the shortest path is the normal distance to the nearest
        # generatrix, |point - apex| * sin(point_apex_generatrix_angle)
        return (point_apex_distance * np.sin(point_apex_generatrix_angle),
                ConeRegion.perpendicular, returnees)
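# Quick illustrative check: for the default cone the apex is on the z-axis at
# z = height, so a point directly above it falls in the above_apex region and
# its distance is simply the point-to-apex distance.
def _demo_point_distance_to_cone():
    cone = Cone(height=2.0, radius=1.0)
    distance, region, _ = point_distance_to_cone((0, 0, 3), cone)
    assert region is ConeRegion.above_apex
    assert np.isclose(distance, 1.0)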
import numpy as np
import numba
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
z_points = np.array(
[near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
b = bbox_image
box_corners = np.array(
[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]],
dtype=C.dtype)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype)
ret_xy = np.concatenate(
[near_box_corners, far_box_corners], axis=0) # [8, 2]
ret_xyz = np.concatenate([ret_xy, z_points], axis=1)
return ret_xyz
def projection_matrix_to_CRT_kitti(proj):
    # P = C @ [R|T]
    # C is an upper-triangular matrix, so we invert CR and recover C and R
    # from a QR decomposition of the inverse; this is numerically stable for
    # all KITTI camera projection matrices
CR = proj[0:3, 0:3]
CT = proj[0:3, 3]
RinvCinv = np.linalg.inv(CR)
Rinv, Cinv = np.linalg.qr(RinvCinv)
C = np.linalg.inv(Cinv)
R = np.linalg.inv(Rinv)
T = Cinv @ CT
return C, R, T
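def _demo_projection_matrix_to_CRT():
    # Illustrative self-check with a synthetic KITTI-style projection matrix.
    # QR only fixes C and R up to a sign convention, so compare the products.
    C_true = np.array([[700., 0., 600.], [0., 700., 180.], [0., 0., 1.]])
    T_true = np.array([0.06, 0., 0.])
    P = np.hstack([C_true, (C_true @ T_true)[:, np.newaxis]])
    C, R, T = projection_matrix_to_CRT_kitti(P)
    assert np.allclose(C @ R, P[:3, :3])
    assert np.allclose(C @ T, P[:3, 3])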
def box_camera_to_lidar(data, r_rect, velo2cam):
xyz = data[:, 0:3]
l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
# print('1:',xyz.shape)
# xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
# print('2:', xyz_lidar.shape)
xyz_lidar = xyz
return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
def camera_to_lidar(points, r_rect, velo2cam):
points_shape = list(points.shape[0:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)
return lidar_points[..., :3]
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
# 5x faster than remove_outside_points_v1(2ms vs 10ms)
C, R, T = projection_matrix_to_CRT_kitti(P2)
image_bbox = [0, 0, image_shape[1], image_shape[0]]
frustum = get_frustum(image_bbox, C)
frustum -= T
    frustum = np.linalg.inv(R) @ frustum.T
import numpy as np
"""
This module provides functions to calculate Boo_n - the expected performance of the best-validation model chosen
from a pool of n candidate models. The measure is fully described in the paper
<NAME>, <NAME>, and <NAME>. A Boo(n) for Evaluating Architecture Performance. ICML 2018.
So far, only the non-parametric estimator is implemented. Other estimators may be added later.
Git repository: https://gitlab.com/obajgar/boon/
"""
def boo(valid, test=None, n=5, best='max'):
"""
Non-parametric empirical estimate of Boo_n based on Equation 7 in the paper and the following note on handling
ties in validation results.
:param valid: (array-like) Validation results.
:param test: (array-like) Test results aligned with the validation data. Default: valid
:param n: (int) the n parameter of Boo_n. Estimate the expected test performance of the best-validation model out of n.
:param best: ('max' or 'min') Indicates whether 'best' corresponds to the maximum or minimum for the validation results.
:return: (float) Boo_n
"""
valid_array = np.array(valid)
m = len(valid_array)
if test is None:
test = valid
elif len(test) != m:
raise ValueError("The number of validation and test results must be the same.")
order_coeff = {'max': 1, 'min': -1}[best]
# A bit of numpy magic to handle ties in validation results:
# np.unique returns sorted unique validation results with their counts and an inverse array,
# which maps the indices of original valid results to indices in this uq array and hence also in the counts array
uq, inverse, counts = np.unique(order_coeff * valid_array, return_inverse=True, return_counts=True)
# Ranks of unique elements starting from 1 and taking duplication into account
uq_upper_ranks = np.cumsum(counts)
uq_lower_ranks = uq_upper_ranks - counts
valid_upper_ranks = uq_upper_ranks[inverse]
valid_lower_ranks = uq_lower_ranks[inverse]
valid_counts = counts[inverse]
    boon = np.sum((np.power(valid_upper_ranks, n) - np.power(valid_lower_ranks, n))
                  / valid_counts * np.array(test)) / m ** n
    return boon
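# Illustrative check: with no ties and n = 1, every rank weight
# (r**1 - (r-1)**1) / m equals 1/m, so Boo_1 reduces to the plain mean.
def _demo_boo():
    results = [1.0, 2.0, 3.0, 4.0, 5.0]
    assert np.isclose(boo(results, n=1), np.mean(results))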
# racetrack route planner
# based on apex cone locations and track widths, get a
import numpy as np
maxv = 10
laterala = 8
maxk = 1.5
bw_v = np.pi*2*0.7
bw_w = np.pi*2*1.5
# track is a set of points and radii (positive or negative if track goes CCW/CW
# around each point)
# so first we determine the nearest point
# T is [5, NUM_PTS]; [(x, y, r), i]
# TODO: precompute handy normals
def gettargetv(k):
kmin = laterala / (maxv**2)
targetv = maxv
if np.abs(k) > kmin:
targetv = np.sqrt(laterala / np.abs(k))
return targetv
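# Quick sketch: below the curvature threshold kmin the straightaway maximum
# applies; above it, speed is limited by lateral acceleration, v = sqrt(a/|k|).
def _demo_gettargetv():
    assert gettargetv(0.0) == maxv
    assert np.isclose(gettargetv(1.0), np.sqrt(laterala))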
# get nearest point on track, its direction normal and curvature
def gettrack(xy, T):
Tn = np.hstack([T[:, 1:], T[:, :1]])
Nn = Tn[:2] - T[:2]
    L = np.linalg.norm(Nn, axis=0)
# -*- coding: utf-8 -*-
# from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
import numpy as np
import scipy as sp
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
__all__ = ['Eos','Calculator','CONSTS','fill_array']
# xmeos.models.Calculator
# xmeos.models.Eos
#====================================================================
# EOS Model Classes
#====================================================================
class Eos(with_metaclass(ABCMeta)):
"""
Abstract Equation of State Parent Base class
"""
def __init__(self, natom=1):
self._pre_init(natom=natom)
##########################
# Model-specific initialization
##########################
self._post_init()
pass
def _pre_init(self, natom=1, molar_mass=20):
# self._init_all_calculators()
self._calculators = {}
self._natom=natom
self._molar_mass=molar_mass
pass
def _post_init(self, model_state={}):
param_names, param_units, param_defaults, param_scales = \
self._get_all_calc_params()
param_values = self._overwrite_param_values(param_names, param_defaults,
model_state)
        self._param_names = np.array(param_names)
"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: <NAME>
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(0.5, 0.15), (0.5, 0.6), (0.3, 0.2)]):
for _ in range(30):
phase_noise = 0.01 * np.random.normal()
            amplitude_noise = 0.04 * np.random.normal()
import numpy as np
import random
from scipy.spatial.transform import Rotation as R
def rotation_2_quaternion(rot):
r = R.from_matrix(rot)
q_array = R.as_quat(r)
return q_array # retrun numpy array
def rotateVecRot(vec, rot):
r = R.from_matrix(rot)
return r.apply(vec)
def rotateVecQuat(vec, quat):
r = R.from_quat(quat)
return r.apply(vec)
def quaternion_2_rotation(q):
n = np.dot(q, q)
# epsilon for testing whether a number is close to zero
_EPS = np.finfo(float).eps * 4.0
if n < _EPS:
return np.identity(3)
q *= np.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array([
[1.0-(q[1, 1]+q[2, 2]), -(q[2, 3]-q[1, 0]), (q[1, 3]+q[2, 0])],
[q[2, 3]+q[1, 0], -(1.0-(q[1, 1]+q[3, 3])), (q[1, 2]-q[3, 0])],
[-(q[1, 3]-q[2, 0]), (q[1, 2]+q[3, 0]), -(1.0-(q[2, 2]+q[3, 3]))]])
def quaternion_2_matrix(quat):
q = np.array(quat[3:7],dtype=np.float64, copy=True)
n = np.dot(q, q)
# epsilon for testing whether a number is close to zero
_EPS = np.finfo(float).eps * 4.0
if n < _EPS:
return np.identity(4)
q *= np.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array([
[1.0-(q[1, 1]+q[2, 2]), -(q[2, 3]-q[1, 0]), (q[1, 3]+q[2, 0]), quat[0]],
[q[2, 3]+q[1, 0], -(1.0-(q[1, 1]+q[3, 3])), (q[1, 2]-q[3, 0]), quat[1]],
[-(q[1, 3]-q[2, 0]), (q[1, 2]+q[3, 0]), -(1.0-(q[2, 2]+q[3, 3])), quat[2]],
[0.0, 0.0, 0.0, 1.0]])
def isRotationMatrix(R) :
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-8
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R) :
# assert(isRotationMatrix(R))
sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
singular = sy < 1e-8
if not singular :
x = np.arctan2(R[2,1] , R[2,2])
y = np.arctan2(-R[2,0], sy)
z = np.arctan2(R[1,0], R[0,0])
else :
x = np.arctan2(-R[1,2], R[1,1])
y = np.arctan2(-R[2,0], sy)
z = 0
return np.array([x, y, z])
def eulerAnglesToRotationMatrix(theta):
R_x = np.array([[1, 0, 0],
[0, np.cos(theta[0]), -np.sin(theta[0]) ],
[0, np.sin(theta[0]), np.cos(theta[0]) ]
])
R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1])],
[0, 1, 0],
[-np.sin(theta[1]), 0, np.cos(theta[1])]])
R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],
[np.sin(theta[2]), np.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot( R_y, R_x ))
return R
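# Round-trip sketch: angles -> matrix -> angles recovers the input whenever the
# pitch stays away from the gimbal-lock singularity (|theta_y| < pi/2).
def _demo_euler_round_trip():
    theta = np.array([0.1, -0.4, 0.7])
    rot = eulerAnglesToRotationMatrix(theta)
    assert isRotationMatrix(rot)
    assert np.allclose(rotationMatrixToEulerAngles(rot), theta)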
def randamPose(translation):
randomAngles = [0, 0, 0]
    randomAngles[2] = random.random() * np.pi * 2.0
transformMatrix = np.eye(4)
transformMatrix[:3,:3] = eulerAnglesToRotationMatrix(randomAngles)
transformMatrix[:3,-1] = np.array(translation)
return transformMatrix
def refineRotationTransform(T):
return_T = T
R = T[:3,:3]
angles = rotationMatrixToEulerAngles(R)
angles[0] = 0
angles[1] = 0
R_new = eulerAnglesToRotationMatrix(angles)
return_T[:3,:3] = R_new
return return_T
def lookat2rotation(vec_x, vec_y, vec_z):
vec_x = vec_x.reshape((1, 3))
vec_y = vec_y.reshape((1, 3))
vec_z = vec_z.reshape((1, 3))
    vec_x = vec_x / np.linalg.norm(vec_x)
import numpy as np
from numpy.linalg import inv, det
import matplotlib.pyplot as plt
from scipy.stats.distributions import chi2
def generate_data_2D_fun_fil(Q1, Q2, R1, R2):
nSegments = 5
points = np.array([[200, -100],
[100, 100],
[100, 300],
[-200, 300],
[-200, -200],
[0, 0]], dtype=float)
dp = np.diff(points, axis=0)
dist = dp ** 2
    dist = np.round(np.sqrt(dist[:, 0] + dist[:, 1]))
import numpy as np
from typing import Callable, Tuple, Union
def attributes_check(metric):
# https://towardsdatascience.com/5-ways-to-control-attributes-in-python-an-example-led-guide-2f5c9b8b1fb0
attr = metric.__dict__
if "perturb_func" in attr:
if not callable(attr["perturb_func"]):
raise TypeError("The 'perturb_func' must be a callable.")
if "similarity_func" in attr:
assert callable(
attr["similarity_func"]
), "The 'similarity_func' must be a callable."
if "explain_func" in attr:
assert callable(attr["explain_func"]), "The 'explain_func' must be a callable."
if "normalize_func" in attr:
assert callable(
attr["normalize_func"]
), "The 'normalize_func' must be a callable."
if "text_warning" in attr:
assert isinstance(
attr["text_warning"], str
), "The 'text_warning' function must be a string."
return metric
def assert_model_predictions_deviations(
y_pred: float, y_pred_perturb: float, threshold: float = 0.01
):
"""Check that model predictions does not deviate more than a given threshold."""
if abs(y_pred - y_pred_perturb) > threshold:
return True
else:
return False
def assert_model_predictions_correct(
y_pred: float,
y_pred_perturb: float,
):
"""Assert that model predictions are the same."""
if y_pred == y_pred_perturb:
return True
else:
return False
def set_warn(call):
# TODO. Implement warning logic of decorator if text_warning is an attribute in class.
def call_fn(*args):
return call_fn
return call
# attr = call.__dict__
# print(dir(call))
# attr = {}
# if "text_warning" in attr:
# call.print_warning(text=attr["text_warning"])
# else:
# print("Do nothing.")
# pass
def assert_features_in_step(features_in_step: int, input_shape: Tuple[int, ...]) -> None:
"""Assert that features in step is compatible with the image size."""
assert np.prod(input_shape) % features_in_step == 0, (
"Set 'features_in_step' so that the modulo remainder "
"returns zero given the product of the input shape."
f" ({np.prod(input_shape)} % {features_in_step} != 0)"
)
def assert_max_steps(max_steps_per_input: int, input_shape: Tuple[int, ...]) -> None:
"""Assert that max steps per inputs is compatible with the image size."""
assert np.prod(input_shape) % max_steps_per_input == 0, (
"Set 'max_steps_per_input' so that the modulo remainder "
"returns zero given the product of the input shape."
)
def assert_patch_size(patch_size: int, shape: Tuple[int, ...]) -> None:
"""Assert that patch size is compatible with given shape."""
if isinstance(patch_size, int):
patch_size = (patch_size, )
patch_size = np.array(patch_size)
if len(patch_size) == 1 and len(shape) != 1:
patch_size = tuple(patch_size for _ in shape)
elif patch_size.ndim != 1:
raise ValueError("patch_size has to be either a scalar or a 1d-sequence")
elif len(patch_size) != len(shape):
raise ValueError(
"patch_size sequence length does not match shape length"
f" (len(patch_size) != len(shape))"
)
patch_size = tuple(patch_size)
if np.prod(shape) % np.prod(patch_size) != 0:
raise ValueError(
"Set 'patch_size' so that the input shape modulo remainder returns 0"
f" [np.prod({shape}) % np.prod({patch_size}) != 0"
f" => {np.prod(shape)} % {np.prod(patch_size)} != 0]"
)
def assert_attributions_order(order: str) -> None:
"""Assert that order is in pre-defined list."""
assert order in [
"random",
"morf",
"lorf",
], "The order of sorting the attributions must be either random, morf, or lorf."
def assert_nr_segments(nr_segments: int) -> None:
"""Assert that the number of segments given the segmentation algorithm is more than one."""
assert (
nr_segments > 1
), "The number of segments from the segmentation algorithm must be more than one."
def assert_perturbation_caused_change(x: np.ndarray, x_perturbed: np.ndarray) -> None:
"""Assert that perturbation applied to input caused change so that input and perturbed input is not the smae."""
assert (x.flatten() != x_perturbed.flatten()).any(), (
"The settings for perturbing input e.g., 'perturb_func' "
"didn't cause change in input. "
"Reconsider the parameter settings."
)
def assert_layer_order(layer_order: str) -> None:
"""Assert that layer order is in pre-defined list."""
assert layer_order in ["top_down", "bottom_up", "independent"]
def assert_targets(
x_batch: np.array,
y_batch: np.array,
) -> None:
    if not isinstance(y_batch, int):
        assert np.shape(x_batch)[0] == np.shape(y_batch)[0], (
            "The 'y_batch' should be an integer or a list with "
            "the same number of samples as the 'x_batch' input "
            "{} != {}".format(np.shape(x_batch)[0], np.shape(y_batch)[0])
        )
import sys
import os
from motif import *
from model import *
from easydict import EasyDict as edict
import tensorflow as tf
from util import *
import numpy as np
from config import *
from main import M6ANetShare
import tensorlayer as tl
from keras.utils import to_categorical
import logging
import random
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter  ### the star of today
seed = int(sys.argv[1])
random.seed(seed)
np.random.seed(seed)
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
dirs = '~/Research/m6a_prediction/motif/weblogo/mass/%s/filter%03d_logo/'
motifdir = '/home/xiongyuanpeng/motif_databases/'
sample_names = ['panTro4','rheMac8','rn5','susScr3','danRer10','hg19','mm10',]
meme_dic = {'hg19': f' {motifdir}CISBP-RNA/Homo_sapiens.dna_encoded.meme {motifdir}/JASPAR/JASPAR2018_SPLICE.meme {motifdir}/JASPAR/JASPAR2018_CNE.meme {motifdir}/JASPAR/JASPAR2018_PHYLOFACTS.meme {motifdir}/JASPAR/JASPAR2018_POLII.meme {motifdir}/JASPAR/JASPAR2018_CORE_redundant.meme {motifdir}/HUMAN/HOCOMOCOv11_full_HUMAN_mono_meme_format.meme',\
'panTro4':f' {motifdir}/MOUSE/HOCOMOCOv11_full_MOUSE_mono_meme_format.meme {motifdir}/CISBP-RNA/Pan_troglodytes.dna_encoded.meme {motifdir}/JASPAR/JASPAR2018_PHYLOFACTS.meme {motifdir}/JASPAR/JASPAR2018_POLII.meme {motifdir}/JASPAR/JASPAR2018_CORE_redundant.meme {motifdir}/CISBP-RNA/Macaca_mulatta.dna_encoded.meme',\
'rheMac8':f' {motifdir}/JASPAR/JASPAR2018_CORE_redundant.meme {motifdir}/CISBP-RNA/Macaca_mulatta.dna_encoded.meme {motifdir}/JASPAR/JASPAR2018_PHYLOFACTS.meme {motifdir}/JASPAR/JASPAR2018_POLII.meme {motifdir}/CISBP-RNA/Macaca_mulatta.dna_encoded.meme',\
'mm10':f' {motifdir}/MOUSE/HOCOMOCOv11_full_MOUSE_mono_meme_format.meme {motifdir}/JASPAR/JASPAR2018_PHYLOFACTS.meme {motifdir}/JASPAR/JASPAR2018_CORE_redundant.meme {motifdir}/JASPAR/JASPAR2018_POLII.meme {motifdir}/CISBP-RNA/Mus_musculus.dna_encoded.meme',\
'rn5':f' {motifdir}/JASPAR/JASPAR2018_CORE_redundant.meme {motifdir}/CISBP-RNA/Rattus_norvegicus.dna_encoded.meme {motifdir}/JASPAR/JASPAR2018_POLII.meme {motifdir}/JASPAR/JASPAR2018_PHYLOFACTS.meme',\
'susScr3':f' {motifdir}/JASPAR/JASPAR2018_CORE_redundant.meme {motifdir}/CISBP-RNA/Sus_scrofa.dna_encoded.meme {motifdir}/JASPAR/JASPAR2018_POLII.meme {motifdir}/JASPAR/JASPAR2018_PHYLOFACTS.meme',\
'danRer10':f' {motifdir}/CISBP-RNA/Danio_rerio.dna_encoded.meme {motifdir}/JASPAR/JASPAR2018_CORE_redundant.meme {motifdir}/JASPAR/JASPAR2018_POLII.meme',\
}
def gen():
index = 0
while True:
if index > len(lines):
return
start = index
end = index + batch_size
index = end
yield readList(lines[start:end])
'''
id = sample_names[int(sys.argv[1])]
#motif_dir = '/home/xiongyuanpeng/motif_databases/'
for i in range(300):
dirname = dirs%(id,i)
motif_str = meme_dic[id]
cmd = f'tomtom -oc {dirname}tomtom -dist pearson -thresh 0.05 {dirname}/meme.txt {motif_str}'
print(cmd)
os.system(cmd)
#exit(o)
'''
gpu_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpu_config.gpu_options.allow_growth = True
sess = tf.Session(config=gpu_config)
tl.layers.initialize_global_variables(sess)
#print(filter_weights.shape)
#exit()
#dirs = '~/Research/m6a_prediction/motif/weblogo/mass/%s/filter%03d_logo/'
num_targets = len(sample_names)
global_filter_outs = []
seq_targets = []
for si in range(num_targets):
sample = sample_names[si]
print('processing ', sample)
file_name = '/home/xiongyuanpeng/Research/m6a_prediction/data/sequence_samples/positive_samples/test/%s_pos_seqs_1000_100_self_processed'%(sample)
lines = open(file_name).readlines()
lines = random.sample(lines, 2000)
batch_size = 64
#print(len(lines))
max_iter = int(len(lines)/batch_size) + 1
#inputs = readList(seqs)
#x = f'/home/xiongyuanpeng/Research/m6a_prediction/data/processed_transcriptomes/{species}_{types}.seqs.pros'
print('loading files...')
#lines = [line.split()[-2] for line in open(x).readlines()]
print('file loaded')
#gens = gen(lines, batch_size)
ds = tf.data.Dataset.from_generator(
gen, tf.float32, tf.TensorShape([None, 101,4]))
iters = ds.make_one_shot_iterator()
t_sequences = iters.get_next()
share_features, feature1, _ = sharedFeatureExtractor2(t_sequences, 'extractor', is_train = False, reuse=tf.AUTO_REUSE)
predict_score = classifier(share_features, 'test',reuse = tf.AUTO_REUSE, is_train = False)
out = tf.nn.softmax(predict_score.outputs)
feature1 = feature1.outputs
classes_name = f'../checkpoint/m6aNet_classese_{sample}_rnn64_kerel18_18_18_fc128_lr0.01_classnum_fixed_drop0.5_mass_full_data_multi-task_7.npz'
model_name = f'../checkpoint/m6aNet_feature_rnn64_kerel18_18_18_fc128_lr0.01_classnum_fixed_drop0.5_mass_full_data_multi-task_7.npz'
#model_name = f'../checkpoint/m6aNet_feature_rnn64_kerel18_18_18_fc128_lr0.01_classnum_fixed_drop0.5_mass-single_full_data_{species}.npz'
#classes_name = f'../checkpoint/m6aNet_classese_{species}_rnn64_kerel18_18_18_fc128_lr0.01_classnum_fixed_drop0.5_mass-single_full_data_{species}.npz'
tl.files.load_and_assign_npz(sess = sess, network = share_features, name = model_name)
tl.files.load_and_assign_npz(sess = sess, network = predict_score, name = classes_name)
#if not tl.files.load_and_assign_npz(sess = sess, network = share_features, name = '../checkpoint/m6aNet_feature_rnn64_kerel18_18_18_fc128_lr0.01_classnum_fixed_drop0.5_mass_full_data_multi-task_7.npz'):
# print('no file loaded')
# exit()
extractor = tl.layers.get_variables_with_name('extractor', True, True)
filter_weights = extractor[0].eval(session=sess)
filter_outs = []
#test = np.random.randint(0,2,size= 32)
print('running output')
for idx in range(max_iter+1):
#input_np = inputs_np[i*32:min((i+1)*32,inputs_np.shape[0])]
#feed_dict[t_sequences] = input_np
try:
res,scores = sess.run([feature1, out])
except:
continue
if True:
if idx >= max_iter:
continue
#filter_outs.extend(res)
#res_filt = []
for i in range(len(scores)):
#print(scores[i])
if scores[i][1] > 0.:
#res_filt.append(res)
global_filter_outs.append(res[i])
seq_targets.append(si)
percent = (idx + 1) * 50 / max_iter
num_arrow = int(percent)
num_line = 50 - num_arrow
progress_bar = '[' + '>' * num_arrow + '-' * num_line + ']' + '%.2f%% \r' % (percent*2)
sys.stdout.write(progress_bar)
sys.stdout.flush()
print('\n')
#global_filter_outs.extend(filter_outs)
#seq_targets = seq_targets + [si,]*len(filter_outs)
    filter_outs = np.array(filter_outs)
"""
Author: <NAME>
Creates an icosphere by repeated subdivision of an icosahedron.
I have naively built it from:
1 - defining vertex locations
2 - finding their neighbouring vertices from a nearest-neighbour search
3 - interpolating half way between vertices and their neighbours to identify the new vertices
4 - repeating
It would be more efficient to define the faces (triangles), and subdivide them
Alternatives?
https://en.wikipedia.org/wiki/Goldberg%E2%80%93Coxeter_construction
https://en.wikipedia.org/wiki/List_of_geodesic_polyhedra_and_Goldberg_polyhedra
http://donhavey.com/blog/tutorials/tutorial-3-the-icosahedron-sphere/
https://github.com/mbrubake/cryoem-cvpr2015/blob/master/quadrature/icosphere.py
https://github.com/brsr/antitile
http://docs.sympy.org/latest/modules/combinatorics/polyhedron.html
https://www.mathworks.com/matlabcentral/fileexchange/50105-icosphere
"""
import numpy as np
import pandas as pd
from mpl_toolkits import mplot3d
from scipy.spatial import cKDTree as KDTree
def sph2cart(R, t, p):
# R,t,p are Radius, theta (colatitude), phi (longitude)
# 0<t<180, 0<p<360
# Calculate the sines and cosines
rad = np.pi/180
s_p = np.sin(p*rad)
s_t = np.sin(t*rad)
c_p = np.cos(p*rad)
c_t = np.cos(t*rad)
# Calculate the x,y,z over the whole grid
X = R*c_p*s_t
Y = R*s_p*s_t
Z = R*c_t
return X, Y, Z
def cart2sph(X, Y, Z):
"""Returns r, t, p with t,p in degrees
"""
rad = np.pi/180
theta = 90 - np.arctan2(Z, np.sqrt(X**2 + Y**2))/rad
phi = np.mod(np.arctan2(Y, X)/rad, 360)
R = np.sqrt(X**2 + Y**2 + Z**2)
return R, theta, phi
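def _demo_sph_round_trip():
    # Round-trip sketch: cart2sph(sph2cart(...)) recovers (R, theta, phi) for
    # colatitude in (0, 180) and longitude in [0, 360).
    assert np.allclose(cart2sph(*sph2cart(2.0, 60.0, 45.0)), (2.0, 60.0, 45.0))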
def get_nearest_neighbours(p, N, i):
"""Return the nearest N neighbours to a given point, i
Args:
p (DataFrame): vertices dataframe
N (int): integer for number of nearest neighbours to return
i (int): loc within dataframe p
Returns:
a tuple of locs of the nearest neighbours
"""
# p_new will be the returned dataframe
p_new = p.copy()
# calculate distances to other points
vecs = p_new[["x", "y", "z"]] - p[["x", "y", "z"]].loc[i]
dists = vecs.x**2 + vecs.y**2 + vecs.z**2
# merge distances into the p_new
dists = dists.to_frame(name='dist2')
p_new = p_new.join(dists)
p_new.sort_values(by='dist2', inplace=True)
return p_new.iloc[1:N+1]
def matchxyz(xyz0, xyz1, xyz0arr, xyz1arr):
"""Returns True if vector xyz0->xyz1 occurs in arrays of vectors xyz0arr->xyz1arr
"""
for xyz0_, xyz1_ in zip(xyz0arr, xyz1arr):
if np.array_equal(xyz0, xyz0_) and np.array_equal(xyz1, xyz1_):
return True
return False
def get_edgevecs(vertices, fudge=False):
"""Given a set of vertices, find the neighbouring 5 or 6 vertices to each,
return the set of vectors between vertices (which define the edges)
"""
vertices = vertices.copy()
try:
# Remove the previous neighbours as they will be recalculated
vertices = vertices.drop(['neighbours'], axis=1)
except:
pass
kdt = KDTree(list(zip(vertices.x.values,
vertices.y.values,
vertices.z.values)))
# Get 7 nearest neighbours for every vertex (includes itself, i.e. dist 0)
dists, indices = kdt.query(list(zip(vertices.x.values,
vertices.y.values,
vertices.z.values)), k = 7)
# Add the neighbour vertices to the vertex dataframe
# 5 for the original icosahedron vertices
# 6 for the others
locs_origicos = vertices[vertices.iteration == 0].index.values
locs_others = vertices[vertices.iteration != 0].index.values
neighbs5 = pd.DataFrame({'neighbours':indices[:,1:6].tolist()}).loc[locs_origicos]
neighbs6 = pd.DataFrame({'neighbours':indices[:,1:7].tolist()}).loc[locs_others]
neighbs = pd.concat([neighbs5,neighbs6])
vertices = vertices.join(neighbs)
# # New dataframe with the previous iteration's vertices as centres of faces
# faces = vertices[vertices.iteration < vertices.iteration.max()]
# faces['corners'] = np.empty((faces.shape[0]),dtype=list)
# faces['corners'][:] = []
#faces['corners'] =
# Set up all the edge vectors from each vertex's neighbour sets
# E = 3V-6 number of edges, E, from number of vertices, V
if not fudge:
edgevecs = np.zeros((3*vertices.shape[0]-6, 3, 2))
else:
edgevecs = np.zeros((9*vertices.shape[0], 3, 2))
k = 0 # loop counter through edgevecs
for i in range(vertices.shape[0]):
# i runs from 0 to V
# Coordinates of point i:
x0,y0,z0 = vertices.loc[i].x, vertices.loc[i].y, vertices.loc[i].z
for j in vertices.loc[i].neighbours:
# Coordinates of each neighbour:
x1,y1,z1 = vertices.loc[j].x, vertices.loc[j].y, vertices.loc[j].z
# # Add face corners if we are on a face centre
# if i in faces.index.values:
# faces['corners'].loc[i].append([x1,y1,z1])
# Check if p1->p0 already exists in a previous p0->p1
# https://stackoverflow.com/a/33218744
if not (edgevecs == np.array([[x1,x0],[y1,y0],[z1,z0]])).all((1,2)).any():
# Store the vectors
edgevecs[k] = np.array([[x0,x1],[y0,y1],[z0,z1]])
k+=1
x0 = edgevecs[:,0,0]
x1 = edgevecs[:,0,1]
y0 = edgevecs[:,1,0]
y1 = edgevecs[:,1,1]
z0 = edgevecs[:,2,0]
z1 = edgevecs[:,2,1]
edgevecs = pd.DataFrame({'x0':x0,'x1':x1,'y0':y0,'y1':y1,'z0':z0,'z1':z1})
if fudge:
edgevecs = edgevecs.dropna().reset_index().drop(columns="index")
return edgevecs, vertices
def slerp(p0, p1):
"""Spherical linear interpolation to halfway between between p0 and p1
https://en.wikipedia.org/wiki/Slerp
"""
omega = np.arccos(np.dot(p0/np.linalg.norm(p0), p1/np.linalg.norm(p1)))
# print(np.dot(p0,p1))
slerphalfway = (np.sin(omega/2)/np.sin(omega))*(p0+p1)
return slerphalfway
def vertices2dataframe(vertices):
x = vertices[:, 0]
y = vertices[:, 1]
z = vertices[:, 2]
return pd.DataFrame({'x': x, 'y': y, 'z': z})
class Polyhedron(object):
def __init__(self):
self.vertices = pd.DataFrame({'x': [], 'y': [], 'z': []})
self.edgevecs = pd.DataFrame({'x0': [], 'x1': [], 'y0': [],
'y1': [], 'z0': [], 'z1': []})
def _set_vertices(self, verts):
self.vertices.x = verts[:, 0]
self.vertices.y = verts[:, 1]
self.vertices.z = verts[:, 2]
def _get_edgevecs(self, fudge=False):
self.edgevecs, self.vertices = get_edgevecs(self.vertices, fudge=fudge)
def rotate(self, delta_phi):
"""Rotate by delta_phi degrees.
"""
r, t, p = cart2sph(*[self.vertices[i] for i in "xyz"])
p = (p + delta_phi) % 360
x, y, z = sph2cart(r, t, p)
for i, var in zip("xyz", (x, y, z)):
self.vertices[i] = var
self._get_edgevecs()
return self
def get_faces(self):
"""Construct the triagonal faces.
There are duplicate faces in what gets returned
"""
faces = []
# (p, q, r) are indexes within self.vertices
for p in self.vertices.index:
# define all the faces neighbouring point p
# Loop through the points, q, neighbouring p, and identify
# those neighbours, r, of q, which themselves also neighbour p
for q in self.vertices.loc[p].neighbours:
# build "face", an array containing points (p, q, r)
# to define p->q, q->r, r->p
# [[px, py, pz]
# [qx, qy, qz]
# [rx, ry, rz]]
                face = np.empty((3, 3))
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
def subarray_multislice(array_ndim, fixed_axes, indices):
'''
Return tuple of slices that if indexed into an array with given dimensions
will return subarray with the axes in axes fixed at given indices
'''
indices = np.array(indices)
colon = slice(None, None, None)
multislice = ()
for i in range(array_ndim):
if i in fixed_axes:
multislice = multislice + \
(indices[np.where(fixed_axes == i)[0][0]],)
else:
multislice = multislice + (colon,)
return multislice
def subarray_view(array, fixed_axes, indices, checks=True):
'''
Return view of subarray of input array with fixed_axes at
corresponding indices.'''
if checks:
# Coerce the inputs into flat numpy arrays to allow for easy handling
# of a variety of input types
fixed_axes = np.atleast_1d(np.array(fixed_axes)).flatten()
indices = np.atleast_1d(np.array(indices)).flatten()
check_axes_access(fixed_axes, array.ndim)
convert_axes_to_positive(fixed_axes, array.ndim)
if fixed_axes.shape != indices.shape:
raise ValueError('axes and indices must have matching shapes or'
' both be integers')
return array[subarray_multislice(array.ndim, fixed_axes, indices)]
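# Illustrative sketch: fixing axis 0 at index 1 is equivalent to a[1], and the
# result is a view, so writes propagate back to the original array.
def _demo_subarray_view():
    a = np.arange(24).reshape(2, 3, 4)
    view = subarray_view(a, 0, 1)
    assert np.array_equal(view, a[1])
    view[0, 0] = -1
    assert a[1, 0, 0] == -1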
def subrange_view(array, starts, ends, steps=None, checks=True):
'''
Return view of array with each axes indexed between starts and ends.
'''
if checks:
# Coerce the inputs into flat numpy arrays to allow for easy handling
# of a variety of input types
starts = np.atleast_1d(np.array(starts)).flatten()
ends = np.atleast_1d(np.array(ends)).flatten()
if steps is not None:
steps = np.atleast_1d(np.array(steps)).flatten()
# Check number of array axes matches up with starts and ends
if (array.ndim != starts.size) or (array.ndim != ends.size):
raise ValueError('the size of starts and ends must equal the '
'number of array dimensions')
multislice = ()
# If steps is None, default to step size of 1
if steps is None:
for i in range(array.ndim):
multislice = multislice + (slice(starts[i], ends[i], 1),)
else:
for i in range(array.ndim):
multislice = multislice + (slice(starts[i], ends[i], steps[i]),)
return array[multislice]
def check_axes_access(axes, array_ndim):
if np.max(axes) >= array_ndim or np.min(axes) < -array_ndim:
raise IndexError('too many indices for array')
# regular numpy scheme for which positive index a negative index corresponds to
def convert_axes_to_positive(axes, array_ndim):
for index, element in enumerate(axes):
if element < 0:
axes[index] = element + array_ndim
def correct_stencil_shape(array_ndim, axes, summed_axes_shape):
return np.hstack([np.array(summed_axes_shape),
np.array(array_ndim - len(axes))])
def check_stencil_shape(array_ndim, axes, summed_axes_shape, stencil):
if not np.all(np.array(stencil.shape) ==
correct_stencil_shape(array_ndim, axes, summed_axes_shape)):
raise ValueError('The shape of the stencil must match the big'
' array and axes appropriately')
def stenciled_sum(array, summed_axes, stencil):
summed_axes = np.atleast_1d(np.array(summed_axes))
    summed_axes_shape = np.array(array.shape)[summed_axes]
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
UCCSD with spatial integrals
'''
import time
from functools import reduce
import numpy as np
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import rccsd
from pyscf.ao2mo import _ao2mo
from pyscf.mp import ump2
from pyscf import scf
from pyscf import __config__
MEMORYMIN = getattr(__config__, 'cc_ccsd_memorymin', 2000)
# This is unrestricted (U)CCSD, in spatial-orbital form.
def update_amps(cc, t1, t2, eris):
time0 = time.clock(), time.time()
log = logger.Logger(cc.stdout, cc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
mo_ea_o = eris.mo_energy[0][:nocca]
mo_ea_v = eris.mo_energy[0][nocca:] + cc.level_shift
mo_eb_o = eris.mo_energy[1][:noccb]
mo_eb_v = eris.mo_energy[1][noccb:] + cc.level_shift
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
u1a = np.zeros_like(t1a)
u1b = np.zeros_like(t1b)
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:u2aa += lib.einsum('ijef,aebf->ijab', tauaa, eris_vvvv) * .5
#:u2bb += lib.einsum('ijef,aebf->ijab', taubb, eris_VVVV) * .5
#:u2ab += lib.einsum('iJeF,aeBF->iJaB', tauab, eris_vvVV)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
u2aa, u2ab, u2bb = cc._add_vvvv(None, (tauaa,tauab,taubb), eris)
u2aa *= .5
u2bb *= .5
Fooa = .5 * lib.einsum('me,ie->mi', fova, t1a)
Foob = .5 * lib.einsum('me,ie->mi', fovb, t1b)
Fvva = -.5 * lib.einsum('me,ma->ae', fova, t1a)
Fvvb = -.5 * lib.einsum('me,ma->ae', fovb, t1b)
Fooa += eris.focka[:nocca,:nocca] - np.diag(mo_ea_o)
Foob += eris.fockb[:noccb,:noccb] - np.diag(mo_eb_o)
Fvva += eris.focka[nocca:,nocca:] - np.diag(mo_ea_v)
Fvvb += eris.fockb[noccb:,noccb:] - np.diag(mo_eb_v)
dtype = u2aa.dtype
wovvo = np.zeros((nocca,nvira,nvira,nocca), dtype=dtype)
wOVVO = np.zeros((noccb,nvirb,nvirb,noccb), dtype=dtype)
woVvO = np.zeros((nocca,nvirb,nvira,noccb), dtype=dtype)
woVVo = np.zeros((nocca,nvirb,nvirb,nocca), dtype=dtype)
wOvVo = np.zeros((noccb,nvira,nvirb,nocca), dtype=dtype)
wOvvO = np.zeros((noccb,nvira,nvira,noccb), dtype=dtype)
mem_now = lib.current_memory()[0]
max_memory = max(0, cc.max_memory - mem_now - u2aa.size*8e-6)
if nvira > 0 and nocca > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3+1)))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
wovvo[p0:p1] += lib.einsum('jf,mebf->mbej', t1a, ovvv)
u1a += 0.5*lib.einsum('mief,meaf->ia', t2aa[p0:p1], ovvv)
u2aa[:,p0:p1] += lib.einsum('ie,mbea->imab', t1a, ovvv.conj())
tmp1aa = lib.einsum('ijef,mebf->ijmb', tauaa, ovvv)
u2aa -= lib.einsum('ijmb,ma->ijab', tmp1aa, t1a[p0:p1]*.5)
ovvv = tmp1aa = None
if nvirb > 0 and noccb > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3+1)))
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
wOVVO[p0:p1] = lib.einsum('jf,mebf->mbej', t1b, OVVV)
u1b += 0.5*lib.einsum('MIEF,MEAF->IA', t2bb[p0:p1], OVVV)
u2bb[:,p0:p1] += lib.einsum('ie,mbea->imab', t1b, OVVV.conj())
tmp1bb = lib.einsum('ijef,mebf->ijmb', taubb, OVVV)
u2bb -= lib.einsum('ijmb,ma->ijab', tmp1bb, t1b[p0:p1]*.5)
OVVV = tmp1bb = None
if nvirb > 0 and nocca > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3+1)))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
woVvO[p0:p1] = lib.einsum('JF,meBF->mBeJ', t1b, ovVV)
woVVo[p0:p1] = lib.einsum('jf,mfBE->mBEj',-t1a, ovVV)
u1b += lib.einsum('mIeF,meAF->IA', t2ab[p0:p1], ovVV)
u2ab[p0:p1] += lib.einsum('IE,maEB->mIaB', t1b, ovVV.conj())
tmp1ab = lib.einsum('iJeF,meBF->iJmB', tauab, ovVV)
u2ab -= lib.einsum('iJmB,ma->iJaB', tmp1ab, t1a[p0:p1])
ovVV = tmp1ab = None
if nvira > 0 and noccb > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3+1)))
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
wOvVo[p0:p1] = lib.einsum('jf,MEbf->MbEj', t1a, OVvv)
wOvvO[p0:p1] = lib.einsum('JF,MFbe->MbeJ',-t1b, OVvv)
u1a += lib.einsum('iMfE,MEaf->ia', t2ab[:,p0:p1], OVvv)
u2ab[:,p0:p1] += lib.einsum('ie,MBea->iMaB', t1a, OVvv.conj())
tmp1abba = lib.einsum('iJeF,MFbe->iJbM', tauab, OVvv)
u2ab -= lib.einsum('iJbM,MA->iJbA', tmp1abba, t1b[p0:p1])
OVvv = tmp1abba = None
eris_ovov = np.asarray(eris.ovov)
eris_ovoo = np.asarray(eris.ovoo)
Woooo = lib.einsum('je,nemi->mnij', t1a, eris_ovoo)
Woooo = Woooo - Woooo.transpose(0,1,3,2)
Woooo += np.asarray(eris.oooo).transpose(0,2,1,3)
Woooo += lib.einsum('ijef,menf->mnij', tauaa, eris_ovov) * .5
u2aa += lib.einsum('mnab,mnij->ijab', tauaa, Woooo*.5)
Woooo = tauaa = None
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
Fooa += np.einsum('ne,nemi->mi', t1a, ovoo)
u1a += 0.5*lib.einsum('mnae,meni->ia', t2aa, ovoo)
wovvo += lib.einsum('nb,nemj->mbej', t1a, ovoo)
ovoo = eris_ovoo = None
tilaa = make_tau_aa(t2[0], t1a, t1a, fac=0.5)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
Fvva -= .5 * lib.einsum('mnaf,menf->ae', tilaa, ovov)
Fooa += .5 * lib.einsum('inef,menf->mi', tilaa, ovov)
Fova = np.einsum('nf,menf->me',t1a, ovov)
u2aa += ovov.conj().transpose(0,2,1,3) * .5
wovvo -= 0.5*lib.einsum('jnfb,menf->mbej', t2aa, ovov)
woVvO += 0.5*lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmpaa = lib.einsum('jf,menf->mnej', t1a, ovov)
wovvo -= lib.einsum('nb,mnej->mbej', t1a, tmpaa)
eirs_ovov = ovov = tmpaa = tilaa = None
eris_OVOV = np.asarray(eris.OVOV)
eris_OVOO = np.asarray(eris.OVOO)
WOOOO = lib.einsum('je,nemi->mnij', t1b, eris_OVOO)
WOOOO = WOOOO - WOOOO.transpose(0,1,3,2)
WOOOO += np.asarray(eris.OOOO).transpose(0,2,1,3)
WOOOO += lib.einsum('ijef,menf->mnij', taubb, eris_OVOV) * .5
u2bb += lib.einsum('mnab,mnij->ijab', taubb, WOOOO*.5)
WOOOO = taubb = None
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
Foob += np.einsum('ne,nemi->mi', t1b, OVOO)
u1b += 0.5*lib.einsum('mnae,meni->ia', t2bb, OVOO)
wOVVO += lib.einsum('nb,nemj->mbej', t1b, OVOO)
OVOO = eris_OVOO = None
tilbb = make_tau_aa(t2[2], t1b, t1b, fac=0.5)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Fvvb -= .5 * lib.einsum('MNAF,MENF->AE', tilbb, OVOV)
Foob += .5 * lib.einsum('inef,menf->mi', tilbb, OVOV)
Fovb = np.einsum('nf,menf->me',t1b, OVOV)
u2bb += OVOV.conj().transpose(0,2,1,3) * .5
wOVVO -= 0.5*lib.einsum('jnfb,menf->mbej', t2bb, OVOV)
wOvVo += 0.5*lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmpbb = lib.einsum('jf,menf->mnej', t1b, OVOV)
wOVVO -= lib.einsum('nb,mnej->mbej', t1b, tmpbb)
eris_OVOV = OVOV = tmpbb = tilbb = None
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
Fooa += np.einsum('NE,NEmi->mi', t1b, eris_OVoo)
u1a -= lib.einsum('nMaE,MEni->ia', t2ab, eris_OVoo)
wOvVo -= lib.einsum('nb,MEnj->MbEj', t1a, eris_OVoo)
woVVo += lib.einsum('NB,NEmj->mBEj', t1b, eris_OVoo)
Foob += np.einsum('ne,neMI->MI', t1a, eris_ovOO)
u1b -= lib.einsum('mNeA,meNI->IA', t2ab, eris_ovOO)
woVvO -= lib.einsum('NB,meNJ->mBeJ', t1b, eris_ovOO)
wOvvO += lib.einsum('nb,neMJ->MbeJ', t1a, eris_ovOO)
WoOoO = lib.einsum('JE,NEmi->mNiJ', t1b, eris_OVoo)
WoOoO+= lib.einsum('je,neMI->nMjI', t1a, eris_ovOO)
WoOoO += np.asarray(eris.ooOO).transpose(0,2,1,3)
eris_OVoo = eris_ovOO = None
eris_ovOV = np.asarray(eris.ovOV)
WoOoO += lib.einsum('iJeF,meNF->mNiJ', tauab, eris_ovOV)
u2ab += lib.einsum('mNaB,mNiJ->iJaB', tauab, WoOoO)
WoOoO = None
tilab = make_tau_ab(t2[1], t1 , t1 , fac=0.5)
Fvva -= lib.einsum('mNaF,meNF->ae', tilab, eris_ovOV)
Fvvb -= lib.einsum('nMfA,nfME->AE', tilab, eris_ovOV)
Fooa += lib.einsum('iNeF,meNF->mi', tilab, eris_ovOV)
Foob += lib.einsum('nIfE,nfME->MI', tilab, eris_ovOV)
Fova += np.einsum('NF,meNF->me',t1b, eris_ovOV)
Fovb += np.einsum('nf,nfME->ME',t1a, eris_ovOV)
u2ab += eris_ovOV.conj().transpose(0,2,1,3)
wovvo += 0.5*lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
wOVVO += 0.5*lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
wOvVo -= 0.5*lib.einsum('jnfb,nfME->MbEj', t2aa, eris_ovOV)
woVvO -= 0.5*lib.einsum('JNFB,meNF->mBeJ', t2bb, eris_ovOV)
woVVo += 0.5*lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
wOvvO += 0.5*lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpabab = lib.einsum('JF,meNF->mNeJ', t1b, eris_ovOV)
tmpbaba = lib.einsum('jf,nfME->MnEj', t1a, eris_ovOV)
woVvO -= lib.einsum('NB,mNeJ->mBeJ', t1b, tmpabab)
wOvVo -= lib.einsum('nb,MnEj->MbEj', t1a, tmpbaba)
woVVo += lib.einsum('NB,NmEj->mBEj', t1b, tmpbaba)
wOvvO += lib.einsum('nb,nMeJ->MbeJ', t1a, tmpabab)
tmpabab = tmpbaba = tilab = None
Fova += fova
Fovb += fovb
u1a += fova.conj()
u1a += np.einsum('ie,ae->ia', t1a, Fvva)
u1a -= np.einsum('ma,mi->ia', t1a, Fooa)
u1a -= np.einsum('imea,me->ia', t2aa, Fova)
u1a += np.einsum('iMaE,ME->ia', t2ab, Fovb)
u1b += fovb.conj()
u1b += np.einsum('ie,ae->ia',t1b,Fvvb)
u1b -= np.einsum('ma,mi->ia',t1b,Foob)
u1b -= np.einsum('imea,me->ia', t2bb, Fovb)
u1b += np.einsum('mIeA,me->IA', t2ab, Fova)
eris_oovv = np.asarray(eris.oovv)
eris_ovvo = np.asarray(eris.ovvo)
wovvo -= eris_oovv.transpose(0,2,3,1)
wovvo += eris_ovvo.transpose(0,2,1,3)
oovv = eris_oovv - eris_ovvo.transpose(0,3,2,1)
u1a-= np.einsum('nf,niaf->ia', t1a, oovv)
tmp1aa = lib.einsum('ie,mjbe->mbij', t1a, oovv)
u2aa += 2*lib.einsum('ma,mbij->ijab', t1a, tmp1aa)
eris_ovvo = eris_oovv = oovv = tmp1aa = None
eris_OOVV = np.asarray(eris.OOVV)
eris_OVVO = np.asarray(eris.OVVO)
wOVVO -= eris_OOVV.transpose(0,2,3,1)
wOVVO += eris_OVVO.transpose(0,2,1,3)
OOVV = eris_OOVV - eris_OVVO.transpose(0,3,2,1)
u1b-= np.einsum('nf,niaf->ia', t1b, OOVV)
tmp1bb = lib.einsum('ie,mjbe->mbij', t1b, OOVV)
u2bb += 2*lib.einsum('ma,mbij->ijab', t1b, tmp1bb)
eris_OVVO = eris_OOVV = OOVV = None
eris_ooVV = np.asarray(eris.ooVV)
eris_ovVO = np.asarray(eris.ovVO)
woVVo -= eris_ooVV.transpose(0,2,3,1)
woVvO += eris_ovVO.transpose(0,2,1,3)
u1b+= np.einsum('nf,nfAI->IA', t1a, eris_ovVO)
tmp1ab = lib.einsum('ie,meBJ->mBiJ', t1a, eris_ovVO)
tmp1ab+= lib.einsum('IE,mjBE->mBjI', t1b, eris_ooVV)
u2ab -= lib.einsum('ma,mBiJ->iJaB', t1a, tmp1ab)
eris_ooVV = eris_ovVo = tmp1ab = None
eris_OOvv = np.asarray(eris.OOvv)
eris_OVvo = np.asarray(eris.OVvo)
wOvvO -= eris_OOvv.transpose(0,2,3,1)
wOvVo += eris_OVvo.transpose(0,2,1,3)
u1a+= np.einsum('NF,NFai->ia', t1b, eris_OVvo)
tmp1ba = lib.einsum('IE,MEbj->MbIj', t1b, eris_OVvo)
tmp1ba+= lib.einsum('ie,MJbe->MbJi', t1a, eris_OOvv)
u2ab -= lib.einsum('MA,MbIj->jIbA', t1b, tmp1ba)
eris_OOvv = eris_OVvO = tmp1ba = None
u2aa += 2*lib.einsum('imae,mbej->ijab', t2aa, wovvo)
u2aa += 2*lib.einsum('iMaE,MbEj->ijab', t2ab, wOvVo)
u2bb += 2*lib.einsum('imae,mbej->ijab', t2bb, wOVVO)
u2bb += 2*lib.einsum('mIeA,mBeJ->IJAB', t2ab, woVvO)
u2ab += lib.einsum('imae,mBeJ->iJaB', t2aa, woVvO)
u2ab += lib.einsum('iMaE,MBEJ->iJaB', t2ab, wOVVO)
u2ab += lib.einsum('iMeA,MbeJ->iJbA', t2ab, wOvvO)
u2ab += lib.einsum('IMAE,MbEj->jIbA', t2bb, wOvVo)
u2ab += lib.einsum('mIeA,mbej->jIbA', t2ab, wovvo)
u2ab += lib.einsum('mIaE,mBEj->jIaB', t2ab, woVVo)
wovvo = wOVVO = woVvO = wOvVo = woVVo = wOvvO = None
Ftmpa = Fvva - .5*lib.einsum('mb,me->be', t1a, Fova)
Ftmpb = Fvvb - .5*lib.einsum('mb,me->be', t1b, Fovb)
u2aa += lib.einsum('ijae,be->ijab', t2aa, Ftmpa)
u2bb += lib.einsum('ijae,be->ijab', t2bb, Ftmpb)
u2ab += lib.einsum('iJaE,BE->iJaB', t2ab, Ftmpb)
u2ab += lib.einsum('iJeA,be->iJbA', t2ab, Ftmpa)
Ftmpa = Fooa + 0.5*lib.einsum('je,me->mj', t1a, Fova)
Ftmpb = Foob + 0.5*lib.einsum('je,me->mj', t1b, Fovb)
u2aa -= lib.einsum('imab,mj->ijab', t2aa, Ftmpa)
u2bb -= lib.einsum('imab,mj->ijab', t2bb, Ftmpb)
u2ab -= lib.einsum('iMaB,MJ->iJaB', t2ab, Ftmpb)
u2ab -= lib.einsum('mIaB,mj->jIaB', t2ab, Ftmpa)
eris_ovoo = np.asarray(eris.ovoo).conj()
eris_OVOO = np.asarray(eris.OVOO).conj()
eris_OVoo = np.asarray(eris.OVoo).conj()
eris_ovOO = np.asarray(eris.ovOO).conj()
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
u2aa -= lib.einsum('ma,jbim->ijab', t1a, ovoo)
u2bb -= lib.einsum('ma,jbim->ijab', t1b, OVOO)
u2ab -= lib.einsum('ma,JBim->iJaB', t1a, eris_OVoo)
u2ab -= lib.einsum('MA,ibJM->iJbA', t1b, eris_ovOO)
eris_ovoo = eris_OVoo = eris_OVOO = eris_ovOO = None
u2aa *= .5
u2bb *= .5
u2aa = u2aa - u2aa.transpose(0,1,3,2)
u2aa = u2aa - u2aa.transpose(1,0,2,3)
u2bb = u2bb - u2bb.transpose(0,1,3,2)
u2bb = u2bb - u2bb.transpose(1,0,2,3)
eia_a = lib.direct_sum('i-a->ia', mo_ea_o, mo_ea_v)
eia_b = lib.direct_sum('i-a->ia', mo_eb_o, mo_eb_v)
u1a /= eia_a
u1b /= eia_b
u2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
u2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
u2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
time0 = log.timer_debug1('update t1 t2', *time0)
t1new = u1a, u1b
t2new = u2aa, u2ab, u2bb
return t1new, t2new
def energy(cc, t1=None, t2=None, eris=None):
'''UCCSD correlation energy'''
if t1 is None: t1 = cc.t1
if t2 is None: t2 = cc.t2
if eris is None: eris = cc.ao2mo()
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
e = np.einsum('ia,ia', fova, t1a)
e += np.einsum('ia,ia', fovb, t1b)
e += 0.25*np.einsum('ijab,iajb',t2aa,eris_ovov)
e -= 0.25*np.einsum('ijab,ibja',t2aa,eris_ovov)
e += 0.25*np.einsum('ijab,iajb',t2bb,eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja',t2bb,eris_OVOV)
    e += np.einsum('iJaB,iaJB', t2ab, eris_ovOV)
    return e.real
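# Typical driver-level usage (illustrative sketch of the standard PySCF API;
# the update_amps/energy routines above are iterated inside run()):
#   from pyscf import gto, scf, cc
#   mol = gto.M(atom='O 0 0 0; H 0 0 0.96; H 0.93 0 -0.24', basis='6-31g')
#   mf = scf.UHF(mol).run()
#   mycc = cc.UCCSD(mf).run()
#   print(mycc.e_corr)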
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
import matplotlib.ticker as ticker
'''
B-field calculator using Biot-Savart law
You will need to provide:
- series of points in (x, y, z, I) describing the geometry of a coil in a text file.
- Each section of coil will have some amount of current flowing through it, in the forwards direction of the points in the coil.
- The current in a given segment of coil is defined by the current listed at the starting point of the coil
- Ex. P1------P2 is on our coil. The current P1 --> P2 is given by I1 (assuming P1 = (x1, y1, z1, I1))
- rectangular volume over which the fields need to be calculated
- resolution at which fields should be calculated
- all lengths in cm, B-field in G
version history:
v2: <NAME> May 31, 2020. Accelerated using numpy meshgrids
v2.1: Jun 1, 2020. tkinter dialogs for opening & saving files. Defaults of 1 cm resolution in calculation.
v3: <NAME> Jun 1, 2020. Plotting code integrated.
v3.1: Minor cosmetic improvements to plot.
v3.2: 3D plot of coil geometry.
v3.3: Plotted B-fields together but code is long.
v3.5: all B-field plots together
v3.7: B-fields plotted together with 50 levels (now works on windows) and combined v3.3 and v3.5
v3.8: Changed up all np.aranges to np.linspaces and changed up the plotting code to work with non-integer step sizes and non-integer levels
v4: <NAME> June 9, 2020 Using Richardson Extrapolation for midpoint rule to improve accuracy (5 to 30x better at 1.4x speed penalty), tweaked linspaces to correctly do step size
v4.1: Minor change in function indexing to use more numpy, cleaning up for export
wishlist:
1. improve plot_coil with different colors for different values of current?
2. improve plot_fields to reduce space between Bz plot and colorbar ---- done
'''
def parseCoil(filename):
'''
Parses 4 column CSV into x,y,z,I slices for coil
'''
with open(filename, "r") as f: return np.array([[eval(i) for i in line.split(",")] for line in f.read().splitlines()]).T
'''
FILE FORMAT for coil.txt:
x1,y1,z1,I1
x2,y2,z2,I2
.
.
xn,yn,zn,In
'''
def sliceCoil(coil, steplength):
'''
Slices a coil into smaller steplength-sized pieces based on the coil resolution
'''
def interpolatePoints(p1, p2, parts):
'''
Produces a series of linearly spaced points between two given points in R3+I (retains same current)
'''
return np.column_stack((np.linspace(p1[0], p2[0], parts+1), np.linspace(p1[1], p2[1], parts+1),
np.linspace(p1[2], p2[2], parts+1), p1[3] * np.ones((parts+1))))
    newcoil = np.zeros((1, 4))  # dummy first row, dropped at return
segment_starts = coil[:,:-1]
segment_ends = coil[:,1:]
# determine start and end of each segment
segments = segment_ends-segment_starts
segment_lengths = np.apply_along_axis(np.linalg.norm, 0, segments)
# create segments; determine start and end of each segment, as well as segment lengths
# chop up into smaller bits (elements)
stepnumbers = (segment_lengths/steplength).astype(int)
# determine how many steps we must chop each segment into
for i in range(segments.shape[1]):
newrows = interpolatePoints(segment_starts[:,i], segment_ends[:,i], stepnumbers[i])
# set of new interpolated points to feed in
newcoil = np.vstack((newcoil, newrows))
## Force the coil to have an even number of segments, for Richardson Extrapolation
if newcoil.shape[0] %2 != 0: newcoil = np.vstack((newcoil, newcoil[-1,:]))
return newcoil[1:,:].T # return non-dummy columns
def calculateField(coil, x, y, z):
'''
Calculates magnetic field vector as a result of some position and current x, y, z, I
[In the same coordinate system as the coil]
Coil: Input Coil Positions, already sub-divided into small pieces using sliceCoil
x, y, z: position in cm
Output B-field is a 3-D vector in units of G
'''
FACTOR = 0.1 # = mu_0 / 4pi when lengths are in cm, and B-field is in G
def BSintegrate(start, end):
'''
Produces tiny segment of magnetic field vector (dB) using the midpoint approximation over some interval
for future optimization: Get this to work with meshgrids
'''
dl = (end-start).T
mid = (start+end)/2
position = np.array((x-mid[0], y-mid[1], z-mid[2])).T
mag = np.sqrt((x-mid[0])**2 + (y-mid[1])**2 + (z-mid[2])**2)
return start[3] * np.cross(dl[:3], position) / np.array((mag ** 3, mag ** 3, mag ** 3)).T
# Biot-Savart Law
# current flowing in this segment is represented by start[3]
B = 0
# midpoint integration with 1 layer of Richardson Extrapolation
starts, mids, ends = coil[:,:-1:2], coil[:,1::2], coil[:,2::2]
    for start, mid, end in np.nditer([starts, mids, ends], flags=['external_loop'], order='F'):
        # the loop body below is an assumed reconstruction (the original source
        # is truncated here): one Richardson extrapolation step for the
        # midpoint rule, M = (4*M(h/2) - M(h))/3, which cancels the leading
        # O(h^2) error term
        fullpiece = BSintegrate(start, end)                           # M(h)
        halfpieces = BSintegrate(start, mid) + BSintegrate(mid, end)  # M(h/2)
        B += (4*halfpieces - fullpiece) / 3
    return B * FACTOR  # apply mu_0 / 4pi
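# Illustrative usage sketch (not part of the original file): "coil.txt" is a
# hypothetical input file in the 4-column format documented above, and the
# 1 cm step size and probe point are arbitrary choices.
def _example_field_at_origin(coilfile="coil.txt"):
    coil = parseCoil(coilfile)    # (4, n) array of x, y, z, I rows
    chopped = sliceCoil(coil, 1)  # subdivide into ~1 cm elements
    return calculateField(chopped, 0, 0, 0)  # B vector in G at the origin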
#from frm_generate_data_np import *
import numpy as np
from numpy import pi,sqrt
model_folder="models/freq_2019_07_02_2/"
from frm_modulations import mod_list,cont_phase_mod_list,linear_mod_const
# import tensorflow as tf
import datetime
import pickle
import sys
import copy
from frm_dataset_creator import *
from numba import jit
from frm_modulations_fast import modulate_symbols_fast,modulate_symbols
# In[199]:
def func(my_dict):
# print(my_dict)
return generate_dataset_sig2(**my_dict)
def generate_dataset_sig2_parallel(n_samples, pkt_size,max_sps,mod_list,sps_rng,pulse_ebw_list,timing_offset_rng,fading_spread_rng,freq_err_rng,phase_err_rng,snr_rng, complex_fading = False,freq_in_hz = False,
seed = None, fname = None, version = 1,nthreads = 10 ): #1e4
args_in = locals()
args_in.pop('nthreads',None)
rand_step =374861
args_list = []
for i in range(nthreads):
args_list.append(copy.deepcopy(args_in))
if args_in['seed'] is not None:
for indx,args in enumerate(args_list):
args['seed'] = args_in['seed'] + indx * rand_step
get_tmp_name = lambda base, indx : "{}_{}".format(base, indx)
# if fname is not None:
# base_name = fname
# else:
# base_name = 'tmp/dataset'
# base_name = '/tmp/dataset{}'.format(np.random.randint(0,1000000))
for indx,args in enumerate(args_list):
args['fname'] = None #get_tmp_name(base_name,indx)
args['n_samples'] = args_in['n_samples']//nthreads
p = Pool(nthreads)
datasets = p.map(func, args_list)
# with open(get_tmp_name(base_name,0),'rb') as f:
# dataset = pickle.load(f)
dataset_out = datasets[0]
for i in range(1,nthreads):
dataset_i = datasets[i]
for k1 in dataset_out.keys():
if isinstance(dataset_out[k1],dict):
for k2 in dataset_out[k1].keys():
# print(k1,k2)
if k1!='args' and k1!='time':
dataset_out[k1][k2] = np.append(dataset_out[k1][k2],dataset_i[k1][k2],axis = 0)
dataset_out['args'] = args_in
dataset_out['time'] = str(datetime.datetime.now())
if fname is not None:
with open(fname,'wb') as f:
pickle.dump(dataset_out,f)
return dataset_out
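# Illustrative call (not in the original source): every value below is a
# hypothetical placeholder chosen only to show the expected argument shapes;
# the *_rng arguments are (low, high) tuples and mod_list comes from
# frm_modulations.
def _example_generate_dataset(nthreads=4):
    return generate_dataset_sig2_parallel(
        n_samples=1000, pkt_size=128, max_sps=128, mod_list=mod_list,
        sps_rng=(2, 16), pulse_ebw_list=[0.35], timing_offset_rng=(0, 1),
        fading_spread_rng=(0.1, 1.0), freq_err_rng=(-1e-4, 1e-4),
        phase_err_rng=(-np.pi, np.pi), snr_rng=(0, 20),
        seed=0, fname=None, nthreads=nthreads)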
def generate_dataset_sig2(n_samples, pkt_size,max_sps,mod_list,sps_rng,pulse_ebw_list,timing_offset_rng,fading_spread_rng,freq_err_rng,phase_err_rng,snr_rng,complex_fading = False, freq_in_hz = False,
seed = None, fname = None, version = 1):
args = locals()
if seed is not None:
np.random.seed(seed)
comb_v = np.zeros((n_samples,pkt_size,2))
carrier_v = np.zeros((n_samples,pkt_size,2))
fading_v = np.zeros((n_samples,pkt_size,2))
clean_v = np.zeros((n_samples,pkt_size,2))
timing_v = np.zeros((n_samples,pkt_size,2))
raw_v = np.zeros((n_samples,pkt_size,2))
mod_v = np.zeros((n_samples,len(mod_list)))
if not complex_fading:
coeff = np.zeros((n_samples,6))
else:
coeff = np.zeros((n_samples,6),dtype='complex')
mod_index = np.random.choice(len(mod_list),(n_samples,)).astype(np.int_)
mod_v[range(n_samples),mod_index] = 1
sps = np.random.uniform(sps_rng[0],sps_rng[1],(n_samples,))
pulse_ebw = np.random.choice(pulse_ebw_list,(n_samples,))
timing_offset = np.random.uniform(timing_offset_rng[0],timing_offset_rng[1],(n_samples,))
fading_spread = np.random.uniform(fading_spread_rng[0],fading_spread_rng[1],(n_samples,))
freq_err = np.random.uniform(freq_err_rng[0],freq_err_rng[1],(n_samples,))
phase_err = np.random.uniform(phase_err_rng[0],phase_err_rng[1],(n_samples,))
if np.array(snr_rng).size==2:
snr = np.random.uniform(snr_rng[0],snr_rng[1],(n_samples,))
else:
snr = np.random.choice(snr_rng,(n_samples,))
progress_step = 1000
a = datetime.datetime.now()
strt_time = copy.deepcopy(a)
for samp_indx in range(n_samples):
mod = mod_list[mod_index[samp_indx]]
op = create_sample_fast( mod = mod,pkt_len = pkt_size,sps=sps[samp_indx],pulse_ebw = pulse_ebw[samp_indx],
timing_offset = timing_offset[samp_indx],
fading_spread = fading_spread[samp_indx],
freq_err = freq_err[samp_indx], phase_err =phase_err[samp_indx],
snr = snr[samp_indx], max_sps = max_sps, complex_fading = complex_fading, freq_in_hz = freq_in_hz,
seed = None)
comb_v[samp_indx] ,carrier_v[samp_indx],fading_v[samp_indx],clean_v[samp_indx],timing_v[samp_indx],raw_v[samp_indx],coeff[samp_indx] = op
if samp_indx%progress_step == 0 and samp_indx>0:
b = datetime.datetime.now()
diff_time = b-a
            # overwrite a single console line with the progress report:
sys.stdout.write("\rGenerated {} out of {} ({:.1f}%), Elapsed {} , estimated {}".format(samp_indx,n_samples, float(samp_indx)/n_samples*100, b-strt_time , (n_samples-samp_indx)*diff_time /progress_step ))
sys.stdout.flush()
a = copy.deepcopy(b)
op ={'sig':{},'params':{},'data':{}}
op['sig']['comb'] = comb_v
op['sig']['timing_fading_carrier'] = carrier_v
op['sig']['timing_fading'] = fading_v
op['sig']['timing'] = clean_v
op['params']['mod'] = mod_index
op['params']['fading_spread'] = fading_spread
op['params']['fading_taps'] = coeff
op['params']['freq_off'] = freq_err
op['params']['phase_off'] = phase_err
op['params']['timing_off'] = timing_offset
op['params']['symb_rate'] = sps
op['data']['binary_marking'] = timing_v
op['params']['sps'] = sps
op['params']['pulse_ebw'] = pulse_ebw
op['sig']['timing_raw_unique'] = raw_v
op['params']['snr'] = snr
op['args'] = args
op['time'] = str(datetime.datetime.now())
op['version'] = version
if fname is not None:
with open(fname,'wb') as f:
pickle.dump(op,f)
return op
def create_sample( mod = 'bpsk',pkt_len = 128,sps=8,pulse_ebw = 0.35,
timing_offset = 0.5,
fading_spread = 1,
freq_err = 0.0001, phase_err = np.pi,
snr = 10, max_sps = 128, complex_fading = False, freq_in_hz = False,
seed = None):
samp_rate = 1
if seed is not None:
np.random.seed(seed)
if mod in cont_phase_mod_list:
order = 2
else: # Linear modulation
order = linear_mod_const[mod].size
n_symbols = int( (pkt_len)/(sps*0.5)) + 2
data_symbs=np.random.randint(0,order,n_symbols)
mag = timing_offset
timing_offset = calc_timing_offset(mag, max_sps)
timing_step = int(max_sps/sps)
mod_symbs_max_sps=modulate_symbols(data_symbs,mod,max_sps,ebw = pulse_ebw)
data_symbs_max_sps= np.repeat(data_symbs,max_sps)
t_max_sps= np.arange(0,1.0*max_sps*n_symbols/samp_rate,1.0/samp_rate)
transition_data_ideal = np.array(([1,]*max_sps + [0,]*max_sps) * int(n_symbols/2+1))
mod_symbs_timing_err = simulate_timing_error(mod_symbs_max_sps,timing_offset,timing_step, pkt_len)
data_symbs_timing_err = simulate_timing_error(data_symbs_max_sps,timing_offset,timing_step, pkt_len)
mod_raw_symbs_timing_err = modulate_symbols(data_symbs_timing_err,mod,sps = 1, ebw = None, pulse_shape = None)
t_timing_err = simulate_timing_error(t_max_sps,timing_offset,timing_step, pkt_len)
marking_b_timing = simulate_timing_error(transition_data_ideal,timing_offset,timing_step, pkt_len)
transition_data_timing = simulate_timing_error(transition_data_ideal,timing_offset,timing_step, pkt_len+1)
transition_data_timing = np.abs(np.diff(transition_data_timing)).astype('int')
mod_raw_unique_symbs_timing_err = mod_raw_symbs_timing_err*transition_data_timing
mod_raw_unique_symbs_timing_err[transition_data_timing==0]=np.nan+1j*np.nan
if not complex_fading:
coeff = generate_fading_taps(max_sps / timing_step, fading_spread)
mod_symbs_timing_fading = simulate_fading_channel(mod_symbs_timing_err, coeff)
else:
coeff=generate_complex_fading_taps(max_sps / timing_step, fading_spread)
mod_symbs_timing_fading = simulate_fading_channel_complex(mod_symbs_timing_err, coeff)
if not freq_in_hz:
t_freq = t_timing_err
else:
t_freq = np.arange(t_timing_err.size)
mod_symbs_timing_fading_freq_err = simulate_frequency_error(mod_symbs_timing_fading,t_freq,freq_err,phase_err)
carrier_timing_err = simulate_frequency_error(1.0,t_freq,freq_err,phase_err)
mod_symbs_timing_fading_freq_noise = add_noise(mod_symbs_timing_fading_freq_err,snr)
op = mod_symbs_timing_fading_freq_noise
comb = assign_iq2(mod_symbs_timing_fading_freq_noise)
carrier = assign_iq2(mod_symbs_timing_fading_freq_err)
fading = assign_iq2(mod_symbs_timing_fading)
clean = assign_iq2(mod_symbs_timing_err)#assign_iq2(mod_symbs_max_sps)#
timing = np.zeros((pkt_len,2))
timing[range(pkt_len),marking_b_timing] = 1
raw = assign_iq2(mod_raw_unique_symbs_timing_err)
return (comb ,carrier,fading,clean,timing,raw,coeff)
@jit(nopython=True)
def create_marking(max_sps,timing_step,timing_offset,pkt_len):
x = np.zeros(pkt_len+1,dtype=np.int_)
timing_offset = int(timing_offset)
indx = int(timing_offset)
state = True
prev_max_sps = indx% max_sps
for i in range(0,x.size):
x[i] = state
indx = indx +timing_step
cur_max_sps = indx%max_sps
if cur_max_sps<prev_max_sps:
state = not state
prev_max_sps = cur_max_sps
return x
def create_sample_fast( mod = 'bpsk',pkt_len = 128,sps=8,pulse_ebw = 0.35,
timing_offset = 0.5,
fading_spread = 1,
freq_err = 0.0001, phase_err = np.pi,
snr = 10, max_sps = 128,complex_fading = False, freq_in_hz = False,
seed = None):
samp_rate = 1
if seed is not None:
        np.random.seed(seed)
import glob
import numpy as np
import pandas as pd
from shapely.geometry import LineString,MultiLineString,Point,MultiPoint
from shapely.ops import linemerge
import pyproj
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
import xgboost
from tqdm import tqdm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix,accuracy_score
import pickle
import os
import argparse
np.random.seed(10)
from param import *
#get an ensemble of 5 classifiers from scikit-learn i.e randome_forest, extra_tree,svc,KNeighbours
#and xgboost classifier
#the parameters are tuned for this dataset, set class_weights to balanced as the start to end
#goals have different distribution
def get_ensemble_of_classifiers(vote=True):
clfs={}
clf1=ExtraTreesClassifier(100,class_weight='balanced',n_jobs=-1)
clfs['extra_tree']=clf1
clf2=RandomForestClassifier(50,class_weight='balanced',n_jobs=-1)
clfs['random_forest']=clf2
clf3=KNeighborsClassifier(20,weights='distance',n_jobs=-1)
clfs['knn']=clf3
clf4=xgboost.XGBClassifier(n_estimators=100,subsample=.7)
clfs['xgb']=clf4
if vote:
clf5=SVC(0.1)
cvote=VotingClassifier(estimators=[('et', clf1), ('rf', clf2), ('kn', clf3),('xgb',clf4),('svc',clf5)], voting='hard')
return {'cvote':cvote}
else:
clf5=SVC(0.1,class_weight='balanced',probability=True)
clfs['svc']=clf5
return clfs
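# Usage sketch (illustrative, not in the original file): cross-validate each
# classifier in the ensemble; X and y stand in for the feature matrix and
# labels produced by get_train_feat below.
def _example_evaluate_ensemble(X, y):
    for name, clf in get_ensemble_of_classifiers(vote=False).items():
        scores = cross_val_score(clf, X, y, cv=5)
        print('{}: mean CV accuracy {:.3f}'.format(name, scores.mean()))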
# get the closest and farthest distance for a track to all the goals
def closest_farthest(track):
closest_to_track=[]
farthest_to_track=[]
for i in range(0,goal.shape[0]):
point2=Point(goal[['lon','lat']].values[i])
cd=[]
for item in track:
point1=Point(item)
_,_,distance = geod.inv(point1.x, point1.y, point2.x, point2.y)
cd.append(distance)
closest_to_track.append(np.min(cd))
farthest_to_track.append(np.max(cd))
return closest_to_track,farthest_to_track
# get distance to a goal given a point on the track
def goal_dist(point1):
d={}
for i in range(0,goal.shape[0]):
point2=Point(goal[['lon','lat']].values[i])
angle1,angle2,distance = geod.inv(point1.x, point1.y, point2.x, point2.y)
d[i]=distance
return d.values()
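# For reference: `geod` (imported via `from param import *`) is assumed to be
# a pyproj.Geod instance, whose inv() returns the forward azimuth, back
# azimuth and geodesic distance in metres between two lon/lat points.
def _example_geodesic_distance():
    g = pyproj.Geod(ellps='WGS84')  # WGS84 ellipsoid (assumed configuration)
    _, _, dist_m = g.inv(139.69, 35.69, 135.50, 34.69)  # Tokyo -> Osaka
    return dist_m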
# gets distance features for training and testing
# the feature vector includes closest and nearest distances
# and distance to goal from the start or end points of track
def get_distances(df,goal,trim=None):
start,end=Point(df[['lon','lat']].values[0]),Point(df[['lon','lat']].values[-1])
duration=df.elapsedTime_sec.values[-1]
_,_,total_distance_covered = geod.inv(start.x, start.y, end.x, end.y)
distance_to_goal_from_start=goal_dist(start)
distance_to_goal_from_end=goal_dist(end)
closest,farthest=closest_farthest(df[['lon','lat']].values)
return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# similar to get_distance function above but additionally trims the start and end point randomly
def get_distances_multi(df,goal):
# how much to trim from start
trim_start=np.random.randint(TRIM_START,TRIM_END)
idx_s=np.where(df.elapsedTime_sec>trim_start)[0][0]
start=Point(df[['lon','lat']].values[idx_s])
# how much to trim from end
trim_end=np.random.randint(TRIM_START,TRIM_END)
idx_e=np.where(df.elapsedTime_sec>df.elapsedTime_sec.values[-1]-trim_end)[0][0]
end=Point(df[['lon','lat']].values[idx_e])
_,_,total_distance_covered = geod.inv(start.x, start.y, end.x, end.y)
distance_to_goal_from_start=goal_dist(start)
distance_to_goal_from_end=goal_dist(end)
duration=df.elapsedTime_sec.values[idx_e]
closest,farthest=closest_farthest(df[['lon','lat']].values[idx_s:idx_e])
return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# get the train feature vector. The feature vector are aggressively augmented
# i.e for each feature vector 20 tracks with random trims are created from start and end
# also include other feature such as age, gender,duration,velocity and total distance covered
def get_train_feat(datafiles):
    print('Multi-trim features: 20 samples per track')
xfeat={}
for f in tqdm(datafiles):
for i in range(0,20):
df = pd.read_csv(f)
if i==0:
duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances(df,goal,trim=None)
else:
duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances_multi(df,goal)
feat=[duration,total_distance_covered]
feat.extend(distance_to_goal_from_start)
feat.extend(distance_to_goal_from_end)
feat.extend(cd)
feat.extend(fd)
if df.tripID.values[0] not in xfeat.keys():
xfeat[df.tripID.values[0]]=[feat]
else:
xfeat[df.tripID.values[0]].append(feat)
train_info['gender']=pd.factorize(train_info['gender'])[0]
train_info['age']=train_info['age'].fillna(train_info['age'].mean())
features=[]
labels_start=[]
labels_end=[]
for i,k in enumerate(train_info.tripID.values):
for item in xfeat[k]:
feat=train_info.loc[k][['age','gender']].values.tolist()
duration=item[0]
velocity=item[1]/duration
feat.extend([duration,velocity])
feat.extend(item)
features.append(feat)
labels_start.append(train_info.iloc[i]['startLocID'])
labels_end.append(train_info.iloc[i]['destLocID'])
    features = np.asarray(features)
    # assumed tail (the source is truncated here): hand back the assembled
    # feature matrix together with the start/end goal labels
    return features, np.asarray(labels_start), np.asarray(labels_end)
import pandas as pd
import numpy as np
import torch
from scipy.io import arff
from abc import ABC, abstractmethod
from torch.utils.data import DataLoader, TensorDataset
class BaseADDataset(ABC):
"""Anomaly detection dataset base class."""
def __init__(self, root: str):
super().__init__()
self.root = root # root path to data
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = None # tuple with original class labels that define the normal class
self.outlier_classes = None # tuple with original class labels that define the outlier class
self.train_set = None # must be of type torch.utils.data.Dataset
self.test_set = None # must be of type torch.utils.data.Dataset
@abstractmethod
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (
DataLoader, DataLoader):
"""Implement data loaders of type torch.utils.data.DataLoader for train_set and test_set."""
pass
def __repr__(self):
return self.__class__.__name__
class TorchvisionDataset(BaseADDataset):
"""TorchvisionDataset class for datasets already implemented in torchvision.datasets."""
def __init__(self, root: str):
super().__init__(root)
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (
DataLoader, DataLoader):
train_loader = DataLoader(dataset=self.train_set, batch_size=batch_size, shuffle=shuffle_train,
num_workers=num_workers)
test_loader = DataLoader(dataset=self.test_set, batch_size=batch_size, shuffle=shuffle_test,
num_workers=num_workers)
return train_loader, test_loader
class SAD_Dataset(TorchvisionDataset):
def __init__(self, root: str, normal_class):
super().__init__(root)
self.n_classes = 2
self.normal_class = normal_class
# train set
#load data file path
url1_train = 'data/sad/SpokenArabicDigitsDimension1_TRAIN.arff'
url2_train = 'data/sad/SpokenArabicDigitsDimension2_TRAIN.arff'
url3_train = 'data/sad/SpokenArabicDigitsDimension3_TRAIN.arff'
url4_train = 'data/sad/SpokenArabicDigitsDimension4_TRAIN.arff'
url5_train = 'data/sad/SpokenArabicDigitsDimension5_TRAIN.arff'
url6_train = 'data/sad/SpokenArabicDigitsDimension6_TRAIN.arff'
url7_train = 'data/sad/SpokenArabicDigitsDimension7_TRAIN.arff'
url8_train = 'data/sad/SpokenArabicDigitsDimension8_TRAIN.arff'
url9_train = 'data/sad/SpokenArabicDigitsDimension9_TRAIN.arff'
url10_train = 'data/sad/SpokenArabicDigitsDimension10_TRAIN.arff'
url11_train = 'data/sad/SpokenArabicDigitsDimension11_TRAIN.arff'
url12_train = 'data/sad/SpokenArabicDigitsDimension12_TRAIN.arff'
url13_train = 'data/sad/SpokenArabicDigitsDimension13_TRAIN.arff'
# get x and y as dataframe
x_dim1_train, target_train = get_data(url1_train)
x_dim2_train, __ = get_data(url2_train)
x_dim3_train, __ = get_data(url3_train)
x_dim4_train, __ = get_data(url4_train)
x_dim5_train, __ = get_data(url5_train)
x_dim6_train, __ = get_data(url6_train)
x_dim7_train, __ = get_data(url7_train)
x_dim8_train, __ = get_data(url8_train)
x_dim9_train, __ = get_data(url9_train)
x_dim10_train, __ = get_data(url10_train)
x_dim11_train, __ = get_data(url11_train)
x_dim12_train, __ = get_data(url12_train)
x_dim13_train, __ = get_data(url13_train)
x_dim1_train = get_features(x_dim1_train)
x_dim2_train = get_features(x_dim2_train)
x_dim3_train = get_features(x_dim3_train)
x_dim4_train = get_features(x_dim4_train)
x_dim5_train = get_features(x_dim5_train)
x_dim6_train = get_features(x_dim6_train)
x_dim7_train = get_features(x_dim7_train)
x_dim8_train = get_features(x_dim8_train)
x_dim9_train = get_features(x_dim9_train)
x_dim10_train = get_features(x_dim10_train)
x_dim11_train = get_features(x_dim11_train)
x_dim12_train = get_features(x_dim12_train)
x_dim13_train = get_features(x_dim13_train)
# combine 13 dimensions of x
x_train = np.dstack([x_dim1_train, x_dim2_train, x_dim3_train, x_dim4_train, x_dim5_train, x_dim6_train, x_dim7_train, x_dim8_train, x_dim9_train, x_dim10_train, x_dim11_train, x_dim12_train, x_dim13_train])
# process output y and produce index
y_train, index_train = get_target(target_train, normal_class)
# train only on normal data, extracting normal data
x_final_train, y_final_train, index_final_train = get_training_set(x_train, y_train, index_train)
# print("size: ", x_final_train.shape)
train_set = TensorDataset(torch.Tensor(x_final_train), torch.Tensor(y_final_train), torch.Tensor(index_final_train))
self.train_set = train_set
# set up testing set
url1_test = 'data/sad/SpokenArabicDigitsDimension1_TEST.arff'
url2_test = 'data/sad/SpokenArabicDigitsDimension2_TEST.arff'
url3_test = 'data/sad/SpokenArabicDigitsDimension3_TEST.arff'
url4_test = 'data/sad/SpokenArabicDigitsDimension4_TEST.arff'
url5_test = 'data/sad/SpokenArabicDigitsDimension5_TEST.arff'
url6_test = 'data/sad/SpokenArabicDigitsDimension6_TEST.arff'
url7_test = 'data/sad/SpokenArabicDigitsDimension7_TEST.arff'
url8_test = 'data/sad/SpokenArabicDigitsDimension8_TEST.arff'
url9_test = 'data/sad/SpokenArabicDigitsDimension9_TEST.arff'
url10_test = 'data/sad/SpokenArabicDigitsDimension10_TEST.arff'
url11_test = 'data/sad/SpokenArabicDigitsDimension11_TEST.arff'
url12_test = 'data/sad/SpokenArabicDigitsDimension12_TEST.arff'
url13_test = 'data/sad/SpokenArabicDigitsDimension13_TEST.arff'
x_dim1_test, target_test = get_data(url1_test)
x_dim2_test, __ = get_data(url2_test)
x_dim3_test, __ = get_data(url3_test)
x_dim4_test, __ = get_data(url4_test)
x_dim5_test, __ = get_data(url5_test)
x_dim6_test, __ = get_data(url6_test)
x_dim7_test, __ = get_data(url7_test)
x_dim8_test, __ = get_data(url8_test)
x_dim9_test, __ = get_data(url9_test)
x_dim10_test, __ = get_data(url10_test)
x_dim11_test, __ = get_data(url11_test)
x_dim12_test, __ = get_data(url12_test)
x_dim13_test, __ = get_data(url13_test)
x_dim1_test = get_features(x_dim1_test)
x_dim2_test = get_features(x_dim2_test)
x_dim3_test = get_features(x_dim3_test)
x_dim4_test = get_features(x_dim4_test)
x_dim5_test = get_features(x_dim5_test)
x_dim6_test = get_features(x_dim6_test)
x_dim7_test = get_features(x_dim7_test)
x_dim8_test = get_features(x_dim8_test)
x_dim9_test = get_features(x_dim9_test)
x_dim10_test = get_features(x_dim10_test)
x_dim11_test = get_features(x_dim11_test)
x_dim12_test = get_features(x_dim12_test)
x_dim13_test = get_features(x_dim13_test)
x_final_test = np.dstack([x_dim1_test, x_dim2_test, x_dim3_test, x_dim4_test, x_dim5_test, x_dim6_test, x_dim7_test, x_dim8_test, x_dim9_test, x_dim10_test, x_dim11_test, x_dim12_test, x_dim13_test])
y_final_test, index_test = get_target(target_test, normal_class)
test_set = TensorDataset(torch.Tensor(x_final_test), torch.Tensor(y_final_test), torch.Tensor(index_test))
self.test_set = test_set
def get_data(url):
"""
input: path to arff data file
This function loads the arff file, then converts into dataframe.
The dataframe is then split into x and y.
output: x is dataframe object without the last column. y is series.
"""
loaded = arff.loadarff(url)
df = pd.DataFrame(loaded[0])
    # drop the last column and convert the remaining feature columns
    # to a numpy array
    x = df.iloc[:, :-1].to_numpy()
    # keep the last column as a pandas Series
    # (iloc[:, -1:] would return a DataFrame instead)
    y = df.iloc[:, -1]
return x, y
def get_features(x):
"""
input: unprocessed features data
This function replaces missing values with zeroes.
output: processed features data
"""
for i in range(0, len(x)):
for j in range(0, 93):
if pd.isna(x[i][j]):
x[i][j] = 0
return x
def get_target(y, normal_class):
"""
input: pandas series. last column of dataframe.
This function converts the byte string of series and compare to each classification group
Each class is represented as a number.
output: returns numpy array of numbers and index array
"""
y_new = []
y_temp = []
idx = []
length = len(y)
for i in range(0, length):
if y[i].decode('UTF-8') == '1':
y_temp.append(0)
elif y[i].decode('UTF-8') == '2':
y_temp.append(1)
elif y[i].decode('UTF-8') == '3':
y_temp.append(2)
elif y[i].decode('UTF-8') == '4':
y_temp.append(3)
elif y[i].decode('UTF-8') == '5':
y_temp.append(4)
elif y[i].decode('UTF-8') == '6':
y_temp.append(5)
elif y[i].decode('UTF-8') == '7':
y_temp.append(6)
elif y[i].decode('UTF-8') == '8':
y_temp.append(7)
elif y[i].decode('UTF-8') == '9':
y_temp.append(8)
elif y[i].decode('UTF-8') == '10':
y_temp.append(9)
idx.append(i)
for i in range(0, length):
if y_temp[i] == normal_class:
y_new.append(1) # normal
else:
y_new.append(0) # anomaly
    return np.array(y_new), np.array(idx)
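# Tiny illustration (not in the original file) of get_target's one-vs-rest
# relabelling: the classes are the byte strings b'1'..b'10', and only the
# chosen normal_class is mapped to 1 (normal).
def _example_get_target():
    y = pd.Series([b'1', b'3', b'1'])
    y_new, idx = get_target(y, normal_class=0)  # b'1' -> class 0 -> normal
    return y_new, idx  # (array([1, 0, 1]), array([0, 1, 2]))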
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.functional as F
from mindspore import Tensor
from mindspore.common.initializer import TruncatedNormal
from mindspore.communication.management import init
from mindspore.nn.loss.loss import _Loss
from mindspore.nn.optim.momentum import Momentum
from mindspore.ops import operations as P
from mindspore.parallel import set_algo_parameters
from mindspore.train.callback import Callback
from mindspore.train.model import Model, ParallelMode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(device_id=int(os.getenv('DEVICE_ID')))
init()
context.set_auto_parallel_context(mirror_mean=True, parallel_mode=ParallelMode.AUTO_PARALLEL)
np.random.seed(10)
import os, time, sys, io
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tight_layout as tlt
from iminuit import Minuit, describe
from datetime import datetime
from obspy.signal.detrend import polynomial
import bead_util as bu
import peakdetect as pdet
import dill as pickle
import scipy.optimize as opti
import scipy.signal as signal
import scipy.constants as constants
from tqdm import tqdm
from joblib import Parallel, delayed
n_core = 20
plt.rcParams.update({'font.size': 14})
date = '20200322'
date = '20200924'
# fig_base = '/home/cblakemore/plots/20190626/'
savefig = True
fig_base = '/home/cblakemore/plots/{:s}/spinning/'.format(date)
#fig_base = '/home/cblakemore/plots/spinsim/'
suffix = ''
# suffix = '_less-yrange'
#suffix = '_3_5e-6mbar_110kHz_real-noise'
#dirname = '/data/old_trap_processed/spinning/ringdown/20190626/'
dirname = '/data/old_trap_processed/spinning/ringdown/{:s}/'.format(date)
#dirname = '/data/old_trap_processed/spinning/ringdown_manual/{:s}/'.format(date)
paths, lengths = bu.find_all_fnames(dirname, ext='.p')
newpaths = paths
# # for 20190626:
# newpaths = [paths[1], paths[2]]
# labels = ['Initial', 'Later']
# mbead = 85.0e-15 # convert picograms to kg
# mbead_err = 1.6e-15
priors = False
manual_priors = False
fix_fterm = False
fit_end_time = 3000.0
exp_fit_end_time = 3000.0
two_point_end_time = 3000.0
tau_ylim = (1100, 1400)
# tau_ylim = (1850,2050)
both_two_point = False
err_adjust = 5.0
# newpaths = [#dirname + '100kHz_start_4_all.p', \
# #dirname + '100kHz_start_5_all.p', \
# #dirname + '100kHz_start_6_all.p', \
# #dirname + '100kHz_start_7_all.p', \
# #dirname + '100kHz_start_8_all.p', \
# #dirname + '100kHz_start_9_all.p', \
# #dirname + '100kHz_start_10_all.p', \
# #dirname + '100kHz_start_11_all.p', \
# #dirname + '100kHz_start_12_all.p', \
# #dirname + '100kHz_start_13_all.p', \
# #dirname + '100kHz_start_14_all.p', \
# #dirname + '50kHz_start_1_all.p', \
# #dirname + '50kHz_start_2_all.p', \
# #dirname + '50kHz_start_3_all.p', \
# #dirname + '110kHz_start_1_all.p', \
# #dirname + '110kHz_start_2_all.p', \
# #dirname + '110kHz_start_3_all.p', \
# #dirname + '110kHz_start_4_all.p', \
# #dirname + '110kHz_start_5_all.p', \
# #dirname + '110kHz_start_6_all.p', \
# dirname + '110kHz_start_2_coarse_all.p', \
# dirname + '110kHz_start_3_coarse_all.p', \
# dirname + '110kHz_start_5_coarse_all.p', \
# dirname + '110kHz_start_6_coarse_all.p', \
# ]
newpaths = [\
# os.path.join(dirname, '110kHz_start_1_all.p'), \
os.path.join(dirname, '110kHz_start_2_all.p'), \
os.path.join(dirname, '110kHz_start_3_all.p'), \
]
sim_data = False
sim_path = '/data/old_trap_processed/spinsim_data/spindowns_processed/sim_110kHz_real-noise/'
sim_fig_base = '/home/cblakemore/plots/spinsim/'
sim_suffix = '_3_5e-6mbar_110kHz_real-noise'
paths, lengths = bu.find_all_fnames(sim_path, ext='.p')
sim_prior_data = [0.0, 1]
if sim_data:
newpaths = paths[:50]
labels = []
for pathind, path in enumerate(newpaths):
labels.append('Meas. {:d}'.format(pathind))
def gauss(x, A, mu, sigma, c):
    return A * np.exp( -1.0 * (x - mu)**2 / (2.0 * sigma**2)) + c
def ngauss(x, A, mu, sigma, c, n=2):
return A * np.exp(-1.0*np.abs(x-mu)**n / (2.0*sigma**n)) + c
def fit_fun(x, A, mu, sigma):
return ngauss(x, A, mu, sigma, 0, n=5)
#if manual_priors:
# fterm_dirname = '/data/old_trap_processed/spinning/ringdown/20191017/'
fterm_dirname = '/data/old_trap_processed/spinning/ringdown/20200322/'
fterm_paths = [fterm_dirname + 'term_velocity_check_1.npy', \
fterm_dirname + 'term_velocity_check_2.npy', \
#fterm_dirname + 'term_velocity_check_3.npy', \
# fterm_dirname + 'term_velocity_check_4.npy', \
# fterm_dirname + 'term_velocity_check_5.npy', \
# fterm_dirname + 'term_velocity_check_6.npy', \
# fterm_dirname + 'term_velocity_check_7.npy', \
]
all_fterm = []
for pathind, path in enumerate(fterm_paths):
data = np.load(open(path, 'rb'))
#plt.plot(data[1])
#plt.show()
all_fterm += list(data[1])
all_fterm = np.array(all_fterm)
fig_term, ax_term = plt.subplots(1,1,dpi=200)
vals, bin_edge, _ = ax_term.hist(all_fterm, density=True)
bins = bin_edge[:-1] + 0.5*(bin_edge[1] - bin_edge[0])
prior_popt, prior_pcov = opti.curve_fit(fit_fun, bins, vals, maxfev=10000,\
p0=[1, np.mean(all_fterm), np.std(all_fterm)])
# NOTE: the arguments of this linspace call are truncated in the source; the
# range below (mean +/- 5 sigma of the terminal-velocity samples) is an
# assumed reconstruction chosen to cover the fitted distribution.
plot_x = np.linspace(np.mean(all_fterm) - 5.0*np.std(all_fterm), \
                     np.mean(all_fterm) + 5.0*np.std(all_fterm), 1000)
import os
import pickle
from collections import defaultdict
import numpy as np
from utils import experiments
from grid_envs import GridCore
def make_epsilon_greedy_policy(Q: defaultdict, epsilon: float, nA: int) -> callable:
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
I.e. create weight vector from which actions get sampled.
:param Q: tabular state-action lookup function
:param epsilon: exploration factor
:param nA: size of action space to consider for this policy
"""
def policy_fn(observation):
policy = np.ones(nA) * epsilon / nA
best_action = np.random.choice(np.flatnonzero( # random choice for tie-breaking only
Q[observation] == Q[observation].max()
))
policy[best_action] += (1 - epsilon)
return policy
return policy_fn
def get_decay_schedule(start_val: float, decay_start: int, num_steps: int, type_: str):
"""
Create epsilon decay schedule
:param start_val: Start decay from this value (i.e. 1)
:param decay_start: number of iterations to start epsilon decay after
:param num_steps: Total number of steps to decay over
:param type_: Which strategy to use. Implemented choices: 'const', 'log', 'linear'
:return:
"""
if type_ == 'const':
return np.array([start_val for _ in range(num_steps)])
elif type_ == 'log':
return np.hstack([[start_val for _ in range(decay_start)],
np.logspace(np.log10(start_val), np.log10(0.000001), (num_steps - decay_start))])
elif type_ == 'linear':
return np.hstack([[start_val for _ in range(decay_start)],
np.linspace(start_val, 0, (num_steps - decay_start), endpoint=True)])
else:
raise NotImplementedError
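# Short illustration (added): a linear schedule that holds epsilon at 1.0 for
# the first 2 steps and then decays to 0 over the remaining 3.
def _example_schedule():
    return get_decay_schedule(1.0, decay_start=2, num_steps=5, type_='linear')
    # -> array([1., 1., 1., 0.5, 0.])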
def td_update(q: defaultdict, state: int, action: int, reward: float, next_state: int, gamma: float, alpha: float):
""" Simple TD update rule """
# TD update
best_next_action = np.random.choice(np.flatnonzero(q[next_state] == q[next_state].max())) # greedy best next
td_target = reward + gamma * q[next_state][best_next_action]
td_delta = td_target - q[state][action]
return q[state][action] + alpha * td_delta
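# Worked example (illustrative): with Q[s][a] = 0, Q[s'] = [1, 0], reward 1,
# gamma 0.9 and alpha 0.5, the TD target is 1 + 0.9*1 = 1.9 and the updated
# value is 0 + 0.5*(1.9 - 0) = 0.95.
def _example_td_update():
    q = defaultdict(lambda: np.zeros(2))
    q[1][0] = 1.0  # value of the best action in the next state
    return td_update(q, state=0, action=0, reward=1.0, next_state=1,
                     gamma=0.9, alpha=0.5)  # -> 0.95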
def q_learning(
environment: GridCore,
num_episodes: int,
discount_factor: float = 1.0,
alpha: float = 0.5,
epsilon: float = 0.1,
epsilon_decay: str = 'const',
decay_starts: int = 0,
eval_every: int = 10,
render_eval: bool = True):
"""
Vanilla tabular Q-learning algorithm
:param environment: which environment to use
:param num_episodes: number of episodes to train
:param discount_factor: discount factor used in TD updates
:param alpha: learning rate used in TD updates
:param epsilon: exploration fraction (either constant or starting value for schedule)
:param epsilon_decay: determine type of exploration (constant, linear/exponential decay schedule)
:param decay_starts: After how many episodes epsilon decay starts
:param eval_every: Number of episodes between evaluations
:param render_eval: Flag to activate/deactivate rendering of evaluation runs
:return: training and evaluation statistics (i.e. rewards and episode lengths)
"""
    assert 0 <= discount_factor <= 1, 'Discount factor should be in [0, 1]'
assert 0 <= epsilon <= 1, 'epsilon has to be in [0, 1]'
assert alpha > 0, 'Learning rate has to be positive'
# The action-value function.
# Nested dict that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(environment.action_space.n))
# Keeps track of episode lengths and rewards
rewards = []
lens = []
test_rewards = []
test_lens = []
train_steps_list = []
test_steps_list = []
epsilon_schedule = get_decay_schedule(epsilon, decay_starts, num_episodes, epsilon_decay)
for i_episode in range(num_episodes + 1):
# print('#' * 100)
epsilon = epsilon_schedule[min(i_episode, num_episodes - 1)]
# The policy we're following
policy = make_epsilon_greedy_policy(Q, epsilon, environment.action_space.n)
policy_state = environment.reset()
episode_length, cummulative_reward = 0, 0
while True: # roll out episode
policy_action = np.random.choice(list(range(environment.action_space.n)), p=policy(policy_state))
s_, policy_reward, policy_done, _ = environment.step(policy_action)
cummulative_reward += policy_reward
episode_length += 1
Q[policy_state][policy_action] = td_update(Q, policy_state, policy_action,
policy_reward, s_, discount_factor, alpha)
if policy_done:
break
policy_state = s_
rewards.append(cummulative_reward)
lens.append(episode_length)
train_steps_list.append(environment.total_steps)
# evaluation with greedy policy
test_steps = 0
if i_episode % eval_every == 0:
policy_state = environment.reset()
episode_length, cummulative_reward = 0, 0
if render_eval:
environment.render()
while True: # roll out episode
policy_action = np.random.choice(np.flatnonzero(Q[policy_state] == Q[policy_state].max()))
environment.total_steps -= 1 # don't count evaluation steps
s_, policy_reward, policy_done, _ = environment.step(policy_action)
test_steps += 1
if render_eval:
environment.render()
cummulative_reward += policy_reward
episode_length += 1
if policy_done:
break
policy_state = s_
test_rewards.append(cummulative_reward)
test_lens.append(episode_length)
test_steps_list.append(test_steps)
print('Done %4d/%4d episodes' % (i_episode, num_episodes))
return (rewards, lens), (test_rewards, test_lens), (train_steps_list, test_steps_list)
class SkipTransition:
"""
Simple helper class to keep track of all transitions observed when skipping through an MDP
"""
def __init__(self, skips, df):
self.state_mat = np.full((skips, skips), -1, dtype=int) # might need to change type for other envs
self.reward_mat = np.full((skips, skips), np.nan, dtype=float)
self.idx = 0
self.df = df
def add(self, reward, next_state):
"""
Add reward and next_state to triangular matrix
:param reward: received reward
:param next_state: state reached
"""
self.idx += 1
for i in range(self.idx):
self.state_mat[self.idx - i - 1, i] = next_state
# Automatically discount rewards when adding to corresponding skip
            self.reward_mat[self.idx - i - 1, i] = reward * self.df ** i + np.nansum(self.reward_mat[self.idx - i - 1])
# -*- coding: utf-8 -*-
"""
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import warnings
from itertools import combinations
import numpy as np
from scipy import linalg
from scipy.special import binom
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import check_random_state
from ..utils import check_X_y
from ..utils._joblib import Parallel, delayed, effective_n_jobs
from ..exceptions import ConvergenceWarning
_EPSILON = np.finfo(np.double).eps
def _modified_weiszfeld_step(X, x_old):
"""Modified Weiszfeld step.
This function defines one iteration step in order to approximate the
spatial median (L1 median). It is a form of an iteratively re-weighted
least squares method.
Parameters
----------
X : array, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
x_old : array, shape = [n_features]
Current start vector.
Returns
-------
x_new : array, shape = [n_features]
New iteration step.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
<NAME> and <NAME>
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
diff = X - x_old
diff_norm = np.sqrt(np.sum(diff ** 2, axis=1))
mask = diff_norm >= _EPSILON
# x_old equals one of our samples
is_x_old_in_X = int(mask.sum() < X.shape[0])
diff = diff[mask]
diff_norm = diff_norm[mask][:, np.newaxis]
    quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0))
import cv2
import numpy as np
import IPython
def _cvt_2uint8(array, clip=True):
output = np.clip(array, 0, 255) if clip else array
output = output.astype('uint8')
return output
def jupyter_img_show(img):
_, i_img = cv2.imencode('.png', img)
IPython.display.display(IPython.display.Image(data=i_img))
"""
********************************
Intensity transformation
********************************
"""
def gamma_correction(img, gamma, c=1):
"""
Apply gamma correction on input image.
s = 255 * c * (r / 255) ^ gamma
Args:
img: input image array
        gamma: the gamma exponent of the correction, between 0 and 1.
c: the constant of gamma_correction, which is between 0 and 1.
Returns:
a transformed image array.
"""
trans_img = 255 * (c * (img / 255) ** gamma)
# clip
trans_img = _cvt_2uint8(trans_img)
return trans_img
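# Usage sketch (illustrative; "input.png" is a placeholder path):
def _example_gamma(path='input.png'):
    img = cv2.imread(path)
    bright = gamma_correction(img, gamma=0.5)  # gamma < 1 brightens mid-tones
    jupyter_img_show(bright)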
def histogram_equalization(img) :
"""
Apply histogram equalization on input image.
    s_k = (L - 1) \sum_{j=0}^{k} p_r(r_j)
Args:
img: input image array
Returns:
a transformed image array.
"""
# tuning dimension
is_grayscale = len(img.shape) == 2
img_3dim = np.expand_dims(img, axis=2) if is_grayscale else img
# count
count = np.zeros((256, img_3dim.shape[2]), dtype='int32')
for c in range(img_3dim.shape[2]):
for x in range(img_3dim.shape[0]):
for y in range(img_3dim.shape[1]):
count[img_3dim[x][y][c]][c] += 1
# Build lookup table
lookup_table = _cvt_2uint8(255 * np.cumsum(count, axis=0) / (img_3dim.shape[0] * img_3dim.shape[1]), False)
# apply transform
trans_img_3dim = np.zeros(img_3dim.shape, dtype='float32')
for x in range(img_3dim.shape[0]):
for y in range(img_3dim.shape[1]):
for c in range(img_3dim.shape[2]):
trans_img_3dim[x][y][c] = lookup_table[img_3dim[x][y][c]][c]
# tuning dimension
trans_img = np.squeeze(trans_img_3dim, axis=2) if is_grayscale else trans_img_3dim
# clip
trans_img = _cvt_2uint8(trans_img)
return trans_img
def piecewise_linear_transformation(img, funcs, break_points):
"""
Apply piecewise linear transformation on input image.
The following conditions should be satisfied
1. each function is an increasing linear function
2. len(funcs) - len(break_points) = 1
3. for each element b in break_points, 0 < b < 255
4. 2 neighbor function must have same value at their common break point.
Args:
img: input image array
funcs: a list of functions those are used on transformation
break_points: a list of break point.
Returns:
a transformed image array.
"""
def binary_search(array, target):
start = 0
end = len(array)
while end - start > 1:
mid = (start + end) // 2
if array[mid] == target:
return mid
elif array[mid] > target:
end = mid
else:
start = mid
return start
# tuning dimension
is_grayscale = len(img.shape) == 2
img_3dim = np.expand_dims(img, axis=2) if is_grayscale else img
# apply transformation
trans_img_3dim = np.zeros(img_3dim.shape, dtype='float32')
for x in range(trans_img_3dim.shape[0]):
for y in range(trans_img_3dim.shape[1]):
for c in range(trans_img_3dim.shape[2]):
func = funcs[binary_search([0] + break_points, img_3dim[x][y][c])]
trans_img_3dim[x][y][c] = func(img_3dim[x][y][c])
# tuning dimension
    trans_img = np.squeeze(trans_img_3dim, axis=2) if is_grayscale else trans_img_3dim
    # clip (assumed tail, truncated in the source; it follows the same pattern
    # as the other transformations above)
    trans_img = _cvt_2uint8(trans_img)
    return trans_img
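# Illustrative construction (not in the original file) of a valid 2-piece
# transform: both pieces are increasing linear functions, they agree at the
# break point r = 128, and the break point lies strictly inside (0, 255).
def _example_piecewise(img):
    funcs = [lambda r: 0.5 * r,          # compress the shadows
             lambda r: 1.5 * r - 128.0]  # stretch the highlights
    return piecewise_linear_transformation(img, funcs, [128])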
"""
This module contains routines for modeling cluster and source signals.
"""
import os
import sys
from pixell import enmap
import astropy
import astropy.wcs as enwcs
import astropy.io.fits as pyfits
import astropy.constants as constants
#from astropy.cosmology import FlatLambdaCDM
from astLib import *
from scipy import ndimage
from scipy import interpolate
from scipy import stats
import time
import astropy.table as atpy
import nemo
from . import maps
from . import catalogs
from . import photometry
from . import filters
from . import gnfw
from . import plotSettings
import numpy as np
import numpy.fft as fft
import math
import pylab as plt
import pickle
import operator
import pyximport; pyximport.install()
import nemoCython
import nemo
import glob
import shutil
import yaml
import warnings
#import IPython
np.random.seed()
#------------------------------------------------------------------------------------------------------------
# Global constants (we could move others here but then need to give chunky obvious names, not just e.g. h)
TCMB=2.72548
Mpc_in_cm=constants.pc.value*100*1e6
MSun_in_g=constants.M_sun.value*1000
# Default cosmology (e.g., for fitQ)
#fiducialCosmoModel=FlatLambdaCDM(H0 = 70.0, Om0 = 0.3, Ob0 = 0.05, Tcmb0 = TCMB)
# Default cosmology (e.g., for fitQ) - now based on CCL rather than astropy
Om0=0.3
Ob0=0.05
H0=70
sigma8=0.8
ns=0.95
transferFunction="boltzmann_camb"
on_rtd=os.environ.get('READTHEDOCS', None)
if on_rtd is None:
import pyccl as ccl
fiducialCosmoModel=ccl.Cosmology(Omega_c=Om0-Ob0, Omega_b=Ob0, h=0.01*H0, sigma8=sigma8, n_s=ns,
transfer_function=transferFunction)
# For CCL-based mass conversions
M200mDef=ccl.halos.MassDef(200, "matter", c_m_relation = 'Bhattacharya13')
M200cDef=ccl.halos.MassDef(200, "critical", c_m_relation = 'Bhattacharya13')
M500cDef=ccl.halos.MassDef(500, "critical")
else:
fiducialCosmoModel=None
M200mDef=None
M200cDef=None
M500cDef=None
#------------------------------------------------------------------------------------------------------------
class BeamProfile(object):
"""Describes the beam profile (i.e., the point spread function for some instrument in real space). This
can be either read from a white-space delimited text file (with the angle in degrees in the first column
and the response in the second column), or can be set directly using arrays.
Args:
beamFileName(:obj:`str`, optional): Path to text file containing a beam profile in the ACT format.
profile1d (:obj:`np.ndarray`, optional): One dimensional beam profile, with index 0 at the centre.
rDeg (:obj:`np.ndarray`, optional): Corresponding angular distance in degrees from the centre for
the beam profile.
Attributes:
profile1d (:obj:`np.ndarray`): One dimensional beam profile, with index 0 at the centre.
rDeg (:obj:`np.ndarray`): Corresponding angular distance in degrees from the centre for the
beam profile.
tck (:obj:`tuple`): Spline knots for interpolating the beam onto different angular binning
(in degrees), for use with :meth:`scipy.interpolate.splev`.
FWHMArcmin (float): Estimate of the beam FWHM in arcmin.
"""
def __init__(self, beamFileName = None, profile1d = None, rDeg = None):
if beamFileName is not None:
beamData=np.loadtxt(beamFileName).transpose()
self.profile1d=beamData[1]
self.rDeg=beamData[0]
else:
self.profile1d=profile1d
self.rDeg=rDeg
if self.profile1d is not None and self.rDeg is not None:
self.tck=interpolate.splrep(self.rDeg, self.profile1d)
# This is really just for sorting a list of beams by resolution
self.FWHMArcmin=self.rDeg[np.argmin(abs(self.profile1d-0.5))]*60*2
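# Usage sketch (illustrative): a BeamProfile can also be built directly from
# arrays; here a Gaussian beam whose 1.4 arcmin FWHM is a made-up value.
def _exampleGaussianBeam(FWHMArcmin = 1.4):
    rDeg=np.linspace(0.0, 0.5, 1000)
    sigmaDeg=(FWHMArcmin/60.0)/(2*np.sqrt(2*np.log(2)))
    profile1d=np.exp(-0.5*(rDeg/sigmaDeg)**2)
    return BeamProfile(profile1d = profile1d, rDeg = rDeg)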
#------------------------------------------------------------------------------------------------------------
class QFit(object):
"""A class for managing the filter mismatch function, referred to as `Q` in the ACT papers from
`Hasselfield et al. (2013) <http://adsabs.harvard.edu/abs/2013JCAP...07..008H>`_ onwards.
Args:
QFitFileName (:obj:`str`): Path to a FITS-table format file as made by :meth:`fitQ`.
tileNames (:obj:`list`): If given, the Q-function will be defined only for these tiles (their names
must appear in the file specified by `QFitFileName`).
Attributes:
fitDict (:obj:`dict`): Dictionary of interpolation objects, indexed by `tileName`. You should not
need to access this directly - use :meth:`getQ` instead.
"""
def __init__(self, QFitFileName = None, tileNames = None):
self._zGrid=np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0, 1.2, 1.6, 2.0])
self._theta500ArcminGrid=np.logspace(np.log10(0.1), np.log10(55), 10)
self.zMin=(self._zGrid).min()
self.zMax=(self._zGrid).max()
self.zDependent=None
self.zDepThetaMax=None
self.fitDict={}
if QFitFileName is not None:
self.loadQ(QFitFileName, tileNames = tileNames)
def loadQ(self, source, tileNames = None):
"""Load the filter mismatch function Q (see `Hasselfield et al. 2013
<https://ui.adsabs.harvard.edu/abs/2013JCAP...07..008H/abstract>`_) as a dictionary of spline fits.
Args:
source (:obj:`nemo.startUp.NemoConfig` or str): Either the path to a .fits table (containing Q fits
for all tiles - this is normally ``selFn/QFit.fits``), or a :obj:`nemo.startUp.NemoConfig` object
(from which the path and tiles to use will be inferred).
tileNames (optional, list): A list of tiles for which the Q function spline fit coefficients
will be extracted. If source is a :obj:`nemo.startUp.NemoConfig` object, this should be set to
``None``.
Returns:
A dictionary (with tilNames as keys), containing spline knots for the Q function for each tile.
Q values can then be obtained by using these with :func:`scipy.interpolate.splev`.
"""
# Bit messy, but two modes here:
# - combined Q fit file for all tiles
# - single Q fit for a single tile (interim stage, when under nemo MPI run)
if type(source) == nemo.startUp.NemoConfig:
tileNames=source.tileNames
combinedQTabFileName=source.selFnDir+os.path.sep+"QFit.fits"
loadMode=None
if os.path.exists(combinedQTabFileName) == True:
tileNamesInFile=[]
with pyfits.open(combinedQTabFileName) as QTabFile:
for ext in QTabFile:
if type(ext) == astropy.io.fits.hdu.table.BinTableHDU:
tileNamesInFile.append(ext.name)
tileNamesInFile.sort()
if tileNames is None:
tileNames=tileNamesInFile
loadMode="combined"
else:
globStr=source.selFnDir+os.path.sep+"QFit#*.fits"
QTabFileNames=glob.glob(globStr)
loadMode="single"
if len(QTabFileNames) == 0:
raise Exception("could not find either '%s' or '%s' - needed to make QFit object" % (combinedQTabFileName, globStr))
zMin=self._zGrid.max()
zMax=self._zGrid.min()
for tileName in tileNames:
if loadMode == "combined":
QTab=atpy.Table().read(combinedQTabFileName, hdu = tileName)
elif loadMode == "single":
QTab=atpy.Table().read(source.selFnDir+os.path.sep+"QFit#%s.fits" % (tileName))
else:
raise Exception("loadMode is not defined")
if QTab['z'].min() < zMin:
self.zMin=QTab['z'].min()
if QTab['z'].max() > zMax:
self.zMax=QTab['z'].max()
self.fitDict[tileName]=self._makeInterpolator(QTab)
elif os.path.exists(source) == True:
# Inspect file and get tile names if MEF
if tileNames is None:
tileNames=[]
with pyfits.open(source) as QTab:
for ext in QTab:
if type(ext) == astropy.io.fits.hdu.table.BinTableHDU:
tileNames.append(ext.name)
zMin=self._zGrid.max()
zMax=self._zGrid.min()
for tileName in tileNames:
if tileName == '': # Individual, interim file name
assert(source.find("QFit#") > 0)
tileName=os.path.split(source)[-1].split("QFit#")[-1].split(".fits")[0]
QTab=atpy.Table().read(source)
else:
QTab=atpy.Table().read(source, hdu = tileName)
if QTab['z'].min() < zMin:
self.zMin=QTab['z'].min()
if QTab['z'].max() > zMax:
self.zMax=QTab['z'].max()
self.fitDict[tileName]=self._makeInterpolator(QTab)
def _makeInterpolator(self, QTab):
"""Inspects QTab, and makes an interpolator object - 2d if there is z-dependence, 1d if not.
"""
if QTab.meta['ZDEPQ'] == 0:
QTab.sort('theta500Arcmin')
spline=interpolate.InterpolatedUnivariateSpline(QTab['theta500Arcmin'], QTab['Q'], ext = 1)
if self.zDependent == True:
raise Exception("QFit contains a mixture of z-dependent and z-independent tables")
self.zDepThetaMax=None
self.zDependent=False
elif QTab.meta['ZDEPQ'] == 1:
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
spline=interpolate.LSQBivariateSpline(QTab['z'], QTab['theta500Arcmin'], QTab['Q'],
self._zGrid, self._theta500ArcminGrid)
zs=np.unique(QTab['z'])
thetaMaxs=[]
for z in zs:
thetaMaxs.append(QTab['theta500Arcmin'][QTab['z'] == z].max())
self.zDepThetaMax=interpolate.InterpolatedUnivariateSpline(zs, thetaMaxs)
if self.zDependent == False:
raise Exception("QFit contains a mixture of z-dependent and z-independent tables")
self.zDependent=True
else:
raise Exception("Valid ZDEPQ values are 0 or 1 only")
return spline
def getQ(self, theta500Arcmin, z = None, tileName = None):
"""Return the value of Q (the filter mismatch function) using interpolation.
Args:
theta500Arcmin (:obj:`float` or :obj:`np.ndarray`): The angular scale at which *Q* will
be calculated. This can be an array or a single value.
z (:obj:`float`, optional): Redshift, only used if *Q* is a function of
redshift, otherwise it is ignored. This must be a single value only,
i.e., not an array.
tileName (:obj:`str`, optional): The name of the tile to use for the *Q* function.
Returns:
The value of *Q* (an array or a single float, depending on the input).
Note:
In the case where *Q* is a function of redshift, values outside of the range for which
*Q* has been calculated will be filled with zeros (i.e., there is no extrapolation in
redshift).
"""
if z is not None:
if type(z) == np.ndarray and z.shape == (1,):
z=float(z)
if type(z) is not float and type(z) is not np.float64:
raise Exception("z must be a float, and not, e.g., an array")
if self.zDependent == True:
Qs=self.fitDict[tileName](z, theta500Arcmin)[0]
thetaMask=theta500Arcmin > self.zDepThetaMax(z)
Qs[thetaMask]=0.0
if z < self.zMin or z > self.zMax:
Qs=0
else:
# Univariate case handles own valid bounds checking
Qs=self.fitDict[tileName](theta500Arcmin)
return Qs
#------------------------------------------------------------------------------------------------------------
def fSZ(obsFrequencyGHz, TCMBAlpha = 0.0, z = None):
"""Returns the frequency dependence of the (non-relativistic) Sunyaev-Zel'dovich effect.
Args:
obsFrequencyGHz (float): Frequency in GHz at which to calculate fSZ.
TCMBAlpha (float, optional): This should always be zero unless you really do want to make a model
where CMB temperature evolves T0*(1+z)^{1-TCMBAlpha}.
z (float, optional): Redshift - needed only if TCMBAlpha is non-zero.
Returns:
Value of SZ spectral shape at given frequency (neglecting relativistic corrections).
"""
h=constants.h.value
kB=constants.k_B.value
sigmaT=constants.sigma_T.value
me=constants.m_e.value
c=constants.c.value
x=(h*obsFrequencyGHz*1e9)/(kB*TCMB)
if TCMBAlpha != 0 and z is not None:
assert(z >= 0)
x=x*np.power(1+z, TCMBAlpha)
fSZ=x*((np.exp(x)+1)/(np.exp(x)-1))-4.0
return fSZ
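# For orientation (added note): at 148 GHz, x = h*nu/(kB*TCMB) ~ 2.61, giving
# fSZ ~ -0.98 (the SZ decrement); the null is near 217 GHz, where fSZ ~ 0.
def _exampleFSZ():
    return fSZ(148.0)   # ~ -0.98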
#------------------------------------------------------------------------------------------------------------
def calcRDeltaMpc(z, MDelta, cosmoModel, delta = 500, wrt = 'critical'):
"""Calculate RDelta (e.g., R500c, R200m etc.) in Mpc, for a halo with the given mass and redshift.
Args:
z (float): Redshift.
MDelta (float): Halo mass in units of solar masses, using the definition set by `delta` and `wrt`.
cosmoModel (:obj:`pyccl.Cosmology`): Cosmology object.
delta (float, optional): Overdensity (e.g., typically 500 or 200).
wrt (str, optional): Use 'critical' or 'mean' to set the definition of density with respect to the
critical density or mean density at the given redshift.
Returns:
RDelta (in Mpc)
"""
if type(MDelta) == str:
raise Exception("MDelta is a string - use, e.g., 1.0e+14 (not 1e14 or 1e+14)")
Ez=ccl.h_over_h0(cosmoModel, 1/(1+z))
if wrt == 'critical':
wrtDensity=ccl.physical_constants.RHO_CRITICAL*(Ez*cosmoModel['h'])**2
elif wrt == 'mean':
wrtDensity=ccl.omega_x(cosmoModel, 1/(1+z), 'matter')
#wrtDensity=cosmoModel.Om(z)*cosmoModel.critical_density(z).value
else:
raise Exception("wrt should be either 'critical' or 'mean'")
#wrtDensity=(wrtDensity*np.power(Mpc_in_cm, 3))/MSun_in_g # NOTE: not needed for CCL units (MSun, Mpc etc.)
RDeltaMpc=np.power((3*MDelta)/(4*np.pi*delta*wrtDensity), 1.0/3.0)
return RDeltaMpc
#------------------------------------------------------------------------------------------------------------
def calcR500Mpc(z, M500c, cosmoModel):
"""Calculate R500 (in Mpc), with respect to critical density.
Args:
z (float): Redshift.
M500c (float): Mass within R500c (i.e., with respect to critical density) in units of solar masses.
cosmoModel (`:obj:`pyccl.Cosmology`): Cosmology object.
Returns:
R500c (in Mpc)
"""
R500Mpc=calcRDeltaMpc(z, M500c, cosmoModel, delta = 500, wrt = 'critical')
return R500Mpc
#------------------------------------------------------------------------------------------------------------
def calcTheta500Arcmin(z, M500, cosmoModel):
"""Given `z`, `M500` (in MSun), returns the angular size equivalent to R:sub:`500c`, with respect to the
critical density.
Args:
z (float): Redshift.
M500 (float): Mass within R500c (i.e., with respect to critical density) in units of solar masses.
cosmoModel (`:obj:`pyccl.Cosmology`): Cosmology object.
Returns:
theta500c (in arcmin)
"""
R500Mpc=calcR500Mpc(z, M500, cosmoModel)
#theta500Arcmin=np.degrees(np.arctan(R500Mpc/cosmoModel.angular_diameter_distance(z).value))*60.0
theta500Arcmin=np.degrees(np.arctan(R500Mpc/ccl.angular_diameter_distance(cosmoModel, 1/(1+z))))*60.0
return theta500Arcmin
#------------------------------------------------------------------------------------------------------------
def makeArnaudModelProfile(z, M500, GNFWParams = 'default', cosmoModel = None):
"""Given z, M500 (in MSun), returns dictionary containing Arnaud model profile (well, knots from spline
fit, 'tckP' - assumes you want to interpolate onto an array with units of degrees) and parameters
(particularly 'y0', 'theta500Arcmin').
Use GNFWParams to specify a different shape. If GNFWParams = 'default', then the default parameters as listed
in gnfw.py are used, i.e.,
GNFWParams = {'P0': 8.403, 'c500': 1.177, 'gamma': 0.3081, 'alpha': 1.0510, 'beta': 5.4905, 'tol': 1e-7,
'npts': 100}
Otherwise, give a dictionary that specifies the wanted values. This would usually be specified as
GNFWParams in the filter params in the nemo .par file (see the example .par files).
If cosmoModel is None, use default (Om0, Ol0, H0) = (0.3, 0.7, 70 km/s/Mpc) cosmology.
Used by ArnaudModelFilter
"""
if cosmoModel is None:
cosmoModel=fiducialCosmoModel
if GNFWParams == 'default':
GNFWParams=gnfw._default_params
# Adjust tol for speed vs. range of b covered
bRange=np.linspace(0, 30, 1000)
cylPProfile=[]
tol=1e-6
for i in range(len(bRange)):
b=bRange[i]
cylPProfile.append(gnfw.integrated(b, params = GNFWParams))
if i > 0 and abs(cylPProfile[i] - cylPProfile[i-1]) < tol:
break
cylPProfile=np.array(cylPProfile)
bRange=bRange[:i+1]
# Normalise to 1 at centre
cylPProfile=cylPProfile/cylPProfile.max()
# Calculate R500Mpc, theta500Arcmin corresponding to given mass and redshift
theta500Arcmin=calcTheta500Arcmin(z, M500, cosmoModel)
# Map between b and angular coordinates
# NOTE: c500 now taken into account in gnfw.py
thetaDegRange=bRange*(theta500Arcmin/60.)
tckP=interpolate.splrep(thetaDegRange, cylPProfile)
return {'tckP': tckP, 'theta500Arcmin': theta500Arcmin, 'rDeg': thetaDegRange}
#------------------------------------------------------------------------------------------------------------
def makeBattagliaModelProfile(z, M500c, GNFWParams = 'default', cosmoModel = None):
"""Given z, M500 (in MSun), returns dictionary containing Battaglia+2012 model profile (well, knots from
spline fit, 'tckP' - assumes you want to interpolate onto an array with units of degrees) and parameters
(particularly 'y0', 'theta500Arcmin').
Use GNFWParams to specify a different shape. If GNFWParams = 'default', then the default parameters as
listed in Battaglia et al. 2012 are used, i.e., GNFWParams = {'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49,
'c500': 1.408, 'tol': 1e-7, 'npts': 100}. Note that the definitions/sign convention is slightly
different in Battaglia+2012 compared to Arnaud+2010 (we follow the latter).
Otherwise, give a dictionary that specifies the wanted values. This would usually be specified as
GNFWParams in the filter params in the nemo .par file (see the example .par files).
If cosmoModel is None, use default (Om0, Ol0, H0) = (0.3, 0.7, 70 km/s/Mpc) cosmology.
    Used by BattagliaModelFilter
"""
if cosmoModel is None:
cosmoModel=fiducialCosmoModel
if GNFWParams == 'default':
# NOTE: These are Table 1 values from Battaglia+2012 for M500c
GNFWParams={'P0': 7.49, 'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49, 'c500': 1.408, 'tol': 1e-7, 'npts': 100}
    # Redshift dependence
    # (P0 is scaled here too, although with the arbitrary normalisation applied below this has no effect)
# These are all defined for M200c in Battaglia+2012
# Parameters for shape are for M500c in Table 1 of Battaglia+2012
# NOTE: Some transforming between A10 <-> B12 conventions here
P0=GNFWParams['P0']
P0_alpha_m=0.226
P0_alpha_z=-0.957
xc=1/GNFWParams['c500']
xc_alpha_m=-0.0833
xc_alpha_z=0.853
beta=GNFWParams['beta']-0.3
beta_alpha_m=0.0480
beta_alpha_z=0.615
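    # Battaglia+2012 eq. 11: each generalised NFW parameter scales with mass and redshift as
    #   A(M200c, z) = A0 * (M200c / 1e14 MSun)^alpha_m * (1 + z)^alpha_z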
M200c=M500cToMdef(M500c, z, M200cDef, cosmoModel)
P0z=P0*np.power(M200c/1e14, P0_alpha_m)*np.power(1+z, P0_alpha_z)
xcz=xc*np.power(M200c/1e14, xc_alpha_m)*np.power(1+z, xc_alpha_z)
betaz=beta*np.power(M200c/1e14, beta_alpha_m)*np.power(1+z, beta_alpha_z)
# Some more B12 -> A10 notation conversion
GNFWParams['P0']=P0z
GNFWParams['beta']=betaz+0.3
GNFWParams['c500']=1/xcz
GNFWParams['gamma']=0.3
GNFWParams['alpha']=1.0
# Adjust tol for speed vs. range of b covered
bRange=np.linspace(0, 30, 1000)
cylPProfile=[]
tol=1e-6
for i in range(len(bRange)):
b=bRange[i]
cylPProfile.append(gnfw.integrated(b, params = GNFWParams))
if i > 0 and abs(cylPProfile[i] - cylPProfile[i-1]) < tol:
break
cylPProfile=np.array(cylPProfile)
bRange=bRange[:i+1]
# Normalise to 1 at centre
cylPProfile=cylPProfile/cylPProfile.max()
# Calculate R500Mpc, theta500Arcmin corresponding to given mass and redshift
theta500Arcmin=calcTheta500Arcmin(z, M500c, cosmoModel)
# Map between b and angular coordinates
# NOTE: c500 now taken into account in gnfw.py
thetaDegRange=bRange*(theta500Arcmin/60.)
tckP=interpolate.splrep(thetaDegRange, cylPProfile)
return {'tckP': tckP, 'theta500Arcmin': theta500Arcmin, 'rDeg': thetaDegRange}
#------------------------------------------------------------------------------------------------------------
def makeBeamModelSignalMap(degreesMap, wcs, beam, amplitude = None):
"""Makes a 2d signal only map containing the given beam.
Args:
degreesMap (:obj:`np.ndarray`): Map of angular distance from the object position.
wcs (:obj:`astWCS.WCS`): WCS corresponding to degreesMap.
beam (:obj:`BeamProfile` or str): Either a BeamProfile object, or a string that gives the path to a
text file that describes the beam profile.
        amplitude (float, optional): Specifies the amplitude of the input signal (in map units,
e.g., uK), before beam convolution. This is only needed if this routine is being used to inject
sources into maps. It is not needed for making filter kernels.
Returns:
signalMap (:obj:`np.ndarray`)
Note:
The pixel window function is not applied here; use pixell.enmap.apply_window to do that (see
nemo.filters.filterMaps).
"""
if amplitude is None:
amplitude=1.0
if type(beam) == str:
beam=BeamProfile(beamFileName = beam)
profile1d=amplitude*beam.profile1d
# Turn 1d profile into 2d
r2p=interpolate.interp1d(beam.rDeg, profile1d, bounds_error=False, fill_value=0.0)
signalMap=r2p(degreesMap)
return signalMap
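# Illustrative usage (the beam file name here is hypothetical; degreesMap and wcs are assumed to exist):
#   signalMap = makeBeamModelSignalMap(degreesMap, wcs, 'beam_f150.txt', amplitude=1000.0)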
#------------------------------------------------------------------------------------------------------------
def makeArnaudModelSignalMap(z, M500, degreesMap, wcs, beam, GNFWParams = 'default', amplitude = None,
maxSizeDeg = 15.0, convolveWithBeam = True):
"""Makes a 2d signal only map containing an Arnaud model cluster.
Args:
z (float): Redshift; used for setting angular size.
M500 (float): Mass within R500, defined with respect to critical density; units are solar masses.
        degreesMap (:obj:`numpy.ndarray`): A 2d array containing radial distance measured in degrees from
            the centre of the model to be inserted. The output map will have the same dimensions and pixel
            scale (see nemoCython.makeDegreesDistanceMap).
        wcs (:obj:`astWCS.WCS`): WCS corresponding to degreesMap.
        beam (:obj:`BeamProfile` or str): Either a BeamProfile object, or a string that gives the path to a
            text file that describes the beam profile.
GNFWParams (dict, optional): Used to specify a different profile shape to the default (which follows
Arnaud et al. 2010). If GNFWParams = 'default', then the default parameters as listed in
gnfw.py are used, i.e., GNFWParams = {'gamma': 0.3081, 'alpha': 1.0510, 'beta': 5.4905,
'tol': 1e-7, 'npts': 100}. Otherwise, give a dictionary that specifies the wanted values. This
would usually be specified using the GNFWParams key in the .yml config used when running nemo
(see the examples/ directory).
amplitude (float, optional): Amplitude of the cluster, i.e., the central decrement (in map units,
e.g., uK), or the central Comptonization parameter (dimensionless), before beam convolution.
Not needed for generating filter kernels.
maxSizeDeg (float, optional): Use to limit the region over which the beam convolution is done,
for optimization purposes.
convolveWithBeam (bool, optional): If False, no beam convolution is done (it can be quicker to apply
beam convolution over a whole source-injected map rather than per object).
Returns:
signalMap (:obj:`np.ndarray`).
Note:
The pixel window function is not applied here; use pixell.enmap.apply_window to do that (see
nemo.filters.filterMaps).
"""
# Making the 1d profile itself is the slowest part (~1 sec)
signalDict=makeArnaudModelProfile(z, M500, GNFWParams = GNFWParams)
tckP=signalDict['tckP']
# Make cluster map (unit-normalised profile)
rDeg=np.linspace(0.0, maxSizeDeg, 5000)
profile1d=interpolate.splev(rDeg, tckP, ext = 1)
if amplitude is not None:
profile1d=profile1d*amplitude
r2p=interpolate.interp1d(rDeg, profile1d, bounds_error=False, fill_value=0.0)
signalMap=r2p(degreesMap)
if convolveWithBeam == True:
signalMap=maps.convolveMapWithBeam(signalMap, wcs, beam, maxDistDegrees = maxSizeDeg)
return signalMap
#------------------------------------------------------------------------------------------------------------
def makeBattagliaModelSignalMap(z, M500, degreesMap, wcs, beam, GNFWParams = 'default', amplitude = None,
maxSizeDeg = 15.0, convolveWithBeam = True):
"""Makes a 2d signal only map containing a Battaglia+2012 model cluster (taking into account the redshift
evolution described in Table 1 and equation 11 there).
Args:
z (float): Redshift; used for setting angular size.
M500 (float): Mass within R500, defined with respect to critical density; units are solar masses.
        degreesMap (:obj:`numpy.ndarray`): A 2d array containing radial distance measured in degrees from
            the centre of the model to be inserted. The output map will have the same dimensions and pixel
            scale (see nemoCython.makeDegreesDistanceMap).
        wcs (:obj:`astWCS.WCS`): WCS corresponding to degreesMap.
        beam (:obj:`BeamProfile` or str): Either a BeamProfile object, or a string that gives the path to a
            text file that describes the beam profile.
GNFWParams (dict, optional): Used to specify a different profile shape to the default (which follows
Battaglia et al. 2012). If GNFWParams = 'default', then the default parameters as listed in
Battaglia et al. 2012 are used, i.e., GNFWParams = {'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49,
'c500': 1.408, 'tol': 1e-7, 'npts': 100}. Note that the definitions/sign convention is slightly
different in Battaglia+2012 compared to Arnaud+2010 (we follow the latter).
Otherwise, give a dictionary that specifies the wanted values. This
would usually be specified using the GNFWParams key in the .yml config used when running nemo
(see the examples/ directory).
amplitude (float, optional): Amplitude of the cluster, i.e., the central decrement (in map units,
e.g., uK), or the central Comptonization parameter (dimensionless), before beam convolution.
Not needed for generating filter kernels.
maxSizeDeg (float, optional): Use to limit the region over which the beam convolution is done,
for optimization purposes.
convolveWithBeam (bool, optional): If False, no beam convolution is done (it can be quicker to apply
beam convolution over a whole source-injected map rather than per object).
Returns:
signalMap (:obj:`np.ndarray`).
Note:
The pixel window function is not applied here; use pixell.enmap.apply_window to do that (see
nemo.filters.filterMaps).
"""
if GNFWParams == 'default':
# NOTE: These are Table 1 values from Battaglia+2012 for M500c
GNFWParams={'P0': 7.49, 'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49, 'c500': 1.408, 'tol': 1e-7, 'npts': 100}
# Making the 1d profile itself is the slowest part (~1 sec)
signalDict=makeBattagliaModelProfile(z, M500, GNFWParams = GNFWParams)
tckP=signalDict['tckP']
# Make cluster map (unit-normalised profile)
rDeg=np.linspace(0.0, maxSizeDeg, 5000)
profile1d=interpolate.splev(rDeg, tckP, ext = 1)
if amplitude is not None:
profile1d=profile1d*amplitude
r2p=interpolate.interp1d(rDeg, profile1d, bounds_error=False, fill_value=0.0)
signalMap=r2p(degreesMap)
if convolveWithBeam == True:
signalMap=maps.convolveMapWithBeam(signalMap, wcs, beam, maxDistDegrees = maxSizeDeg)
return signalMap
#------------------------------------------------------------------------------------------------------------
def getFRelWeights(config):
"""Returns a dictionary of frequency weights used in relativistic corrections for each tile. This is
cached in the selFn/ dir after the first time this routine is called.
"""
if 'photFilter' not in config.parDict.keys() or config.parDict['photFilter'] is None:
return {}
fRelWeightsFileName=config.selFnDir+os.path.sep+"fRelWeights.fits"
if os.path.exists(fRelWeightsFileName) == False:
fRelTab=atpy.Table()
fRelTab.add_column(atpy.Column(config.allTileNames, 'tileName'))
for tileCount in range(len(config.allTileNames)):
tileName=config.allTileNames[tileCount]
filterFileName=config.diagnosticsDir+os.path.sep+tileName+os.path.sep+"filter_%s#%s.fits" % (config.parDict['photFilter'], tileName)
with pyfits.open(filterFileName) as img:
for i in range(1, 10):
if 'RW%d_GHZ' % (i) in img[0].header.keys():
freqGHz=str(img[0].header['RW%d_GHZ' % (i)])
if freqGHz == '':
freqGHz='148.0'
print(">>> WARNING: setting freqGHz = '%s' in getFRelWeights - this is okay if you're running on a TILe-C y-map" % (freqGHz))
if freqGHz not in fRelTab.keys():
fRelTab.add_column(atpy.Column(np.zeros(len(config.allTileNames)), freqGHz))
fRelTab[freqGHz][tileCount]=img[0].header['RW%d' % (i)]
fRelTab.meta['NEMOVER']=nemo.__version__
fRelTab.write(fRelWeightsFileName, overwrite = True)
return loadFRelWeights(fRelWeightsFileName)
#------------------------------------------------------------------------------------------------------------
def loadFRelWeights(fRelWeightsFileName):
"""Returns a dictionary of frequency weights used in relativistic corrections for each tile (stored in
a .fits table, made by getFRelWeights).
"""
fRelTab=atpy.Table().read(fRelWeightsFileName)
fRelWeightsDict={}
for row in fRelTab:
fRelWeightsDict[row['tileName']]={}
for key in fRelTab.keys():
if key != 'tileName':
fRelWeightsDict[row['tileName']][float(key)]=row[key]
return fRelWeightsDict
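# The returned structure is nested as {tileName: {freqGHz (float): weight}}, e.g.,
#   fRelWeightsDict['1_0_0'][148.0] would give the 148 GHz weight for tile '1_0_0'
# (the tile name here is illustrative).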
#------------------------------------------------------------------------------------------------------------
def fitQ(config):
"""Calculates the filter mismatch function *Q* on a grid of scale sizes for each tile in the map. The
results are initially cached (with a separate .fits table for each tile) under the `selFn` directory,
before being combined into a single file at the end of a :ref:`nemoCommand` run.
The `GNFWParams` key in the `config` dictionary can be used to specify a different cluster profile shape.
Args:
config (:obj:`startUp.NemoConfig`): A NemoConfig object.
Note:
See :class:`QFit` for how to read in and use the output of this function.
"""
t0=time.time()
cosmoModel=fiducialCosmoModel
# Spin through the filter kernels
photFilterLabel=config.parDict['photFilter']
filterList=config.parDict['mapFilters']
for f in filterList:
if f['label'] == photFilterLabel:
ref=f
# This could be more general... but A10 model has no z-dependence, B12 model does
# So Q is a function of (theta500, z) for the latter
# We add a header keyword to the QFit.fits table to indicate if z-dependence important or not
# Everything is then handled internally by QFit class
if ref['class'].find("Arnaud") != -1:
makeSignalModelMap=makeArnaudModelSignalMap
zDepQ=0
elif ref['class'].find("Battaglia") != -1:
makeSignalModelMap=makeBattagliaModelSignalMap
zDepQ=1
else:
raise Exception("Signal model for Q calculation should either be 'Arnaud' or 'Battaglia'")
# M, z -> theta ranges for Q calc - what's most efficient depends on whether there is z-dependence, or not
# NOTE: ref filter that sets scale we compare to must ALWAYS come first
if zDepQ == 0:
# To safely (numerically, at least) apply Q at z ~ 0.01, we need to go to theta500 ~ 500 arcmin (< 10 deg)
MRange=[ref['params']['M500MSun']]
zRange=[ref['params']['z']]
minTheta500Arcmin=0.1
maxTheta500Arcmin=500.0
numPoints=50
theta500Arcmin_wanted=np.logspace(np.log10(minTheta500Arcmin), | np.log10(maxTheta500Arcmin) | numpy.log10 |
import matplotlib.pyplot as plt
import numpy as np
'''Setting up the reaction scheme:
initial conditions, rate constants, and stoichiometry matrices
'''
# defining initial conditions for species in concentration units
# TODO: these need to be converted to numbers of molecules
C2 = [0.015*(6*10**3)]
CP = [0.015*(6*10**3)]
pM = [0.001*(6*10**3)]
M = [0.001*(6*10**3)]
Y = [0.005*(6*10**3)]
YP = [0.005*(6*10**3)]
YT = [Y[0] + YP[0] + pM[0] + M[0]]
CT = [C2[0] + CP[0] + pM[0] + M[0]]
# Rate Constants
k1aaCT = 0.015
k2 = 0
k3CT = 200
k4 = 180 # adjustable
k4prime = 0.018
k5tilP = 0
k6 = 1 # adjustable
k7 = 0.6
k8tilP = 50 # >> k9
k9 = 10 # >> k6
# reactant stoichiometry
# Y,pM, M,YP,C2,CP
V_r = np.array([[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
# product stoichiometry
# Y,pM, M,YP,C2,CP
V_p = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0]])
# net stoichiometry of each reaction (products - reactants)
V_tot = V_p - V_r
# initial concentration
X0 = np.array([Y[0], pM[0], M[0], YP[0], C2[0], CP[0]])
def react(propensity):
'''Works out which reaction has occurred
'''
rand2 = np.random.rand()
num =0
for i in propensity:
if rand2 <= i:
num = i
break
return propensity.index(num)
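# react() performs inverse-transform sampling on the cumulative reaction probabilities:
# the first cumulative value >= rand2 selects the reaction. Combined with the exponential
# waiting time tau = ln(1/r1)/Rtot drawn below, this is one step of the standard
# Gillespie direct-method SSA.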
# reaction rates
time = [0]
while time[-1] < 1:
# rate of each reaction
k = np.array([k1aaCT, k2, k3CT/(C2[-1] + CP[-1] +pM[-1] + M[-1]),
(k4prime+k4*((M[-1]/CT[-1])**2)), k5tilP, k6, k7, k8tilP, k9])
    # current amount of each species involved in reactions 1 - 9
species= np.array([1, Y[-1], CP[-1], pM[-1], M[-1], M[-1], YP[-1], C2[-1], CP[-1]])
# Calculate The total reaction rate
rates = k*species
Rtot = rates.sum()
# Calculate probability of each reaction occurring
ProbReact = rates/Rtot
# distributed propensities
tot = 0.0
propensity = []
for prob in ProbReact:
tot = tot + prob
propensity.append(tot)
# Generate random numbers r1,r2 uniformly distributed in (0,1)
rand1 = np.random.rand()
# Compute the time until the next reaction takes place.
tau = (1.0/ Rtot)*np.log(float(1.0/rand1))
print('tau: ', tau)
time.append(time[-1] +tau)
print('time: ', time[-1])
    # working out which reaction has occurred
reaction = react(propensity)
#print('reaction choosen: ', reaction)
# array of current number of molecules
#molc = np.array([Y[-1], pM[-1], M[-1], YP[-1], C2[-1], CP[-1]])
#molc = molc - V_r[reaction] + V_p[reaction]
molc = [Y[-1], pM[-1], M[-1], YP[-1], C2[-1], CP[-1]]
for i in range(0,len(molc),1):
molc[i]= molc[i] + V_tot[reaction][i]
Y.append(molc[0])
pM.append(molc[1])
    M.append(molc[2])
YP.append(molc[3])
C2.append(molc[4])
CP.append(molc[5])
plt.figure(figsize=(18,10))
plt.title("MPF - SSA")
plt.xlabel("Time (min)")
plt.ylabel("Species Population")
Ynp = np.asarray(Y)
YPnp = | np.asarray(YP) | numpy.asarray |
"""
Contains class for Voronoi screens
Copyright (c) 2022, SKAO / Science Data Processor
SPDX-License-Identifier: BSD-3-Clause
"""
import os
import lsmtool
import numpy as np
import scipy.interpolate as si
import shapely.geometry
import shapely.ops
from astropy import wcs
from scipy.spatial import Voronoi # pylint: disable=E0611
from shapely.geometry import Point
import ska_sdp_screen_fitting.utils.processing_utils as misc
from ska_sdp_screen_fitting.screen import Screen
from ska_sdp_screen_fitting.utils.h5parm import H5parm
class VoronoiScreen(Screen):
"""
Class for Voronoi screens.
"""
def __init__(
self,
name,
h5parm_filename,
skymodel_filename,
rad,
dec,
width_ra,
width_dec,
solset_name="sol000",
phase_soltab_name="phase000",
amplitude_soltab_name=None,
):
super(VoronoiScreen, self).__init__(
name,
h5parm_filename,
skymodel_filename,
rad,
dec,
width_ra,
width_dec,
solset_name=solset_name,
phase_soltab_name=phase_soltab_name,
amplitude_soltab_name=amplitude_soltab_name,
)
self.data_rasertize_template = None
self.polygons = None
def fit(self):
"""
Fitting is not needed: the input solutions are used directly, after
referencing the phases to a single station
"""
# Open solution tables
h5_file = H5parm(self.input_h5parm_filename)
solset = h5_file.get_solset(self.input_solset_name)
soltab_ph = solset.get_soltab(self.input_phase_soltab_name)
if not self.phase_only:
soltab_amp = solset.get_soltab(self.input_amplitude_soltab_name)
# Input data are [time, freq, ant, dir, pol] for slow amplitudes
# and [time, freq, ant, dir] for fast phases (scalarphase).
# We reference the phases to the station with the least amount of
# flagged solutions, drawn from the first 10 stations
# (to ensure it is fairly central)
self.vals_ph = soltab_ph.val
ref_ind = misc.get_reference_station(soltab_ph, 10)
vals_ph_ref = self.vals_ph[:, :, ref_ind, :].copy()
for i in range(len(soltab_ph.ant)):
# Subtract phases of reference station
self.vals_ph[:, :, i, :] -= vals_ph_ref
self.times_ph = soltab_ph.time
self.freqs_ph = soltab_ph.freq
if not self.phase_only:
self.log_amps = False
self.vals_amp = soltab_amp.val
self.times_amp = soltab_amp.time
self.freqs_amp = soltab_amp.freq
else:
self.vals_amp = np.ones_like(self.vals_ph)
self.times_amp = self.times_ph
self.freqs_amp = self.freqs_ph
self.source_names = soltab_ph.dir
self.source_dict = solset.get_source()
self.source_positions = [
self.source_dict[source] for source in self.source_names
]
self.station_names = soltab_ph.ant
self.station_dict = solset.get_ant()
self.station_positions = [
self.station_dict[station] for station in self.station_names
]
h5_file.close()
def get_memory_usage(self, cellsize_deg):
"""
Returns memory usage per time slot in GB
Parameters
----------
cellsize_deg : float
Size of one pixel in degrees
"""
# Make a test array and find its memory usage
ximsize = int(self.width_ra / cellsize_deg) # pix
yimsize = int(self.width_dec / cellsize_deg) # pix
test_array = np.zeros(
[
1,
len(self.freqs_ph),
len(self.station_names),
4,
yimsize,
ximsize,
]
)
mem_per_timeslot_gb = (
test_array.nbytes / 1024**3 * 10
) # include factor of 10 overhead
return mem_per_timeslot_gb
def make_matrix(
self,
t_start_index,
t_stop_index,
freq_ind,
stat_ind,
cellsize_deg,
out_dir,
_,
):
"""
Makes the matrix of values for the given time, frequency, and station
indices
Parameters
----------
t_start_index : int
Index of first time
t_stop_index : int
Index of last time
        freq_ind : int
            Index of frequency
        stat_ind : int
            Index of station
cellsize_deg : float
Size of one pixel in degrees
out_dir : str
Full path to the output directory
ncpu : int, optional
Number of CPUs to use (0 means all)
"""
# Make the template that converts polynomials to a rasterized 2-D image
# This only needs to be done once
if self.data_rasertize_template is None:
self.make_rasertize_template(cellsize_deg, out_dir)
# Fill the output data array
data = np.zeros(
(
t_stop_index - t_start_index,
4,
self.data_rasertize_template.shape[0],
self.data_rasertize_template.shape[1],
)
)
for _, poly in enumerate(self.polygons):
ind = np.where(self.data_rasertize_template == poly.index + 1)
if not self.phase_only:
val_amp_xx = self.vals_amp[
t_start_index:t_stop_index,
freq_ind,
stat_ind,
poly.index,
0,
]
val_amp_yy = self.vals_amp[
t_start_index:t_stop_index,
freq_ind,
stat_ind,
poly.index,
1,
]
else:
val_amp_xx = self.vals_amp[
t_start_index:t_stop_index, freq_ind, stat_ind, poly.index
]
val_amp_yy = val_amp_xx
val_phase = self.vals_ph[
t_start_index:t_stop_index, freq_ind, stat_ind, poly.index
]
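            # Polarisation axis ordering below is [Re(XX), Im(XX), Re(YY), Im(YY)]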
for time in range(t_stop_index - t_start_index):
data[time, 0, ind[0], ind[1]] = val_amp_xx[time] * np.cos(
val_phase[time]
)
data[time, 2, ind[0], ind[1]] = val_amp_yy[time] * np.cos(
val_phase[time]
)
data[time, 1, ind[0], ind[1]] = val_amp_xx[time] * np.sin(
val_phase[time]
)
data[time, 3, ind[0], ind[1]] = val_amp_yy[time] * np.sin(
val_phase[time]
)
return data
def make_rasertize_template(self, cellsize_deg, out_dir):
"""
Makes the template that is used to fill the output FITS cube
Parameters
----------
cellsize_deg : float
Size of one pixel in degrees
out_dir : str
Full path to the output directory
"""
temp_image = os.path.join(out_dir, f"{self.name}_template.fits")
hdu = self.make_fits_file(
temp_image, cellsize_deg, 0, 1, aterm_type="gain"
)
data = hdu[0].data
wcs_obj = wcs.WCS(hdu[0].header)
ra_ind = wcs_obj.axis_type_names.index("RA")
dec_ind = wcs_obj.axis_type_names.index("DEC")
# Get x, y coords for directions in pixels. We use the input
# calibration sky model for this, as the patch positions written to the
# H5parm file by DPPP may be different
skymod = lsmtool.load(self.input_skymodel_filename)
source_dict = skymod.getPatchPositions()
source_positions = []
for source in self.source_names:
radecpos = source_dict[source.strip("[]")]
source_positions.append([radecpos[0].value, radecpos[1].value])
source_positions = np.array(source_positions)
ra_deg = source_positions.T[0]
dec_deg = source_positions.T[1]
xy_coord = []
for ra_vert, dec_vert in zip(ra_deg, dec_deg):
ra_dec = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
ra_dec[0][ra_ind] = ra_vert
ra_dec[0][dec_ind] = dec_vert
xy_coord.append(
(
wcs_obj.wcs_world2pix(ra_dec, 0)[0][ra_ind],
wcs_obj.wcs_world2pix(ra_dec, 0)[0][dec_ind],
)
)
# Get boundary of tessellation region in pixels
bounds_deg = [
self.rad + self.width_ra / 2.0,
self.dec - self.width_dec / 2.0,
self.rad - self.width_ra / 2.0,
self.dec + self.width_dec / 2.0,
]
ra_dec = | np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) | numpy.array |
import time
from collections import defaultdict
from functools import partial
import numpy as np
import tensorflow as tf
from utils import get_segment_ids, get_unique, groupby_2cols_nlargest, groupby_1cols_nlargest, groupby_1cols_merge
class Graph(object):
def __init__(self, graph_triples, n_ents, n_rels, reversed_rel_dct):
self.reversed_rel_dct = reversed_rel_dct
full_edges = np.array(graph_triples.tolist(), dtype='int32').view('<i4,<i4,<i4')
full_edges = np.sort(full_edges, axis=0, order=['f0', 'f1', 'f2']).view('<i4')
# `full_edges`: use all train triples
# full_edges[i] = [id, head, tail, rel] sorted by head, tail, rel with ascending and consecutive `id`s
self.full_edges = np.concatenate([np.expand_dims(np.arange(len(full_edges), dtype='int32'), 1),
full_edges], axis=1)
self.n_full_edges = len(self.full_edges)
self.n_entities = n_ents
self.selfloop = n_rels
self.n_relations = n_rels + 1
# `edges`: for current train batch
# edges[i] = [id, head, tail, rel] sorted by head, tail, rel with ascending but not consecutive `id`s
self.edges = None
self.n_edges = 0
# `memorized_nodes`: for current train batch
        self.memorized_nodes = None  # (np.array) (eg_idx, v) sorted by (eg_idx, v)
def make_temp_edges(self, batch, remove_all_head_tail_edges=True):
""" batch: (np.array) (head, tail, rel)
"""
if remove_all_head_tail_edges:
batch_set = set([(h, t) for h, t, r in batch])
edges_idx = [i for i, (eid, h, t, r) in enumerate(self.full_edges)
if (h, t) not in batch_set and (t, h) not in batch_set]
else:
batch_set = set([(h, t, r) for h, t, r in batch])
if self.reversed_rel_dct is None:
edges_idx = [i for i, (eid, h, t, r) in enumerate(self.full_edges)
if (h, t, r) not in batch_set]
else:
edges_idx = [i for i, (eid, h, t, r) in enumerate(self.full_edges)
if (h, t, r) not in batch_set and (t, h, self.reversed_rel_dct.get(r, -1)) not in batch_set]
self.edges = self.full_edges[edges_idx]
self.n_edges = len(self.edges)
def use_full_edges(self):
self.edges = self.full_edges
self.n_edges = len(self.edges)
def get_candidate_edges(self, attended_nodes=None, tc=None):
""" attended_nodes:
(1) None: use all graph edges with batch_size=1
(2) (np.array) n_attended_nodes x 2, (eg_idx, vi) sorted
"""
if tc is not None:
t0 = time.time()
if attended_nodes is None:
candidate_edges = np.concatenate([np.zeros((self.n_edges, 1), dtype='int32'),
self.edges], axis=1) # (0, edge_id, vi, vj, rel) sorted by (0, edge_id)
else:
candidate_idx, new_eg_idx = groupby_1cols_merge(attended_nodes[:, 0], attended_nodes[:, 1],
self.edges[:, 1], self.edges[:, 0])
if len(candidate_idx) == 0:
return np.zeros((0, 5), dtype='int32')
candidate_edges = np.concatenate([np.expand_dims(new_eg_idx, 1),
self.full_edges[candidate_idx]], axis=1) # (eg_idx, edge_id, vi, vj, rel) sorted by (eg_idx, edge_id)
if tc is not None:
tc['candi_e'] += time.time() - t0
# candidate_edges: (np.array) n_candidate_edges x 5, (eg_idx, edge_id, vi, vj, rel)
# sorted by (eg_idx, edge_id) or (eg_idx, vi, vj, rel)
return candidate_edges
def get_sampled_edges(self, candidate_edges, mode=None, max_edges_per_eg=None, max_edges_per_vi=None, tc=None):
""" candidate_edges: (np.array) n_candidate_edges x 5, (eg_idx, edge_id, vi, vj, rel) sorted by (eg_idx, edge_id)
"""
assert mode is not None
if tc is not None:
t0 = time.time()
if len(candidate_edges) == 0:
return np.zeros((0, 6), dtype='int32')
logits = tf.random.uniform((len(candidate_edges),))
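        # Keeping the n largest of i.i.d. uniform logits within each group is equivalent to
        # sampling up to that many edges uniformly without replacement from the group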
if mode == 'by_eg':
assert max_edges_per_eg is not None
sampled_edges = candidate_edges[:, 0] # n_candidate_edges
sampled_idx = groupby_1cols_nlargest(sampled_edges, logits, max_edges_per_eg) # n_sampled_edges
sampled_edges = np.concatenate([candidate_edges[sampled_idx],
np.expand_dims(sampled_idx, 1)], axis=1) # n_sampled_edges x 6
elif mode == 'by_vi':
assert max_edges_per_vi is not None
sampled_edges = candidate_edges[:, [0, 2]] # n_candidate_edges x 2
sampled_idx = groupby_2cols_nlargest(sampled_edges, logits, max_edges_per_vi) # n_sampled_edges
sampled_edges = np.concatenate([candidate_edges[sampled_idx],
np.expand_dims(sampled_idx, 1)], axis=1) # n_sampled_edges x 6
else:
raise ValueError('Invalid `mode`')
if tc is not None:
tc['sampl_e'] += time.time() - t0
# sampled_edges: (np.array) n_sampled_edges x 6, (eg_idx, edge_id, vi, vj, rel, ca_idx)
# sorted by (eg_idx, edge_id)
return sampled_edges
def get_selected_edges(self, sampled_edges, tc=None):
""" sampled_edges: (np.array) n_sampled_edges x 6, (eg_idx, edge_id, vi, vj, rel, ca_idx) sorted by (eg_idx, edge_id)
"""
if tc is not None:
t0 = time.time()
if len(sampled_edges) == 0:
return np.zeros((0, 6), dtype='int32')
idx_vi = get_segment_ids(sampled_edges[:, [0, 2]])
_, idx_vj = np.unique(sampled_edges[:, [0, 3]], axis=0, return_inverse=True)
idx_vi = np.expand_dims(np.array(idx_vi, dtype='int32'), 1)
idx_vj = np.expand_dims(np.array(idx_vj, dtype='int32'), 1)
selected_edges = np.concatenate([sampled_edges[:, [0, 2, 3, 4]], idx_vi, idx_vj], axis=1)
if tc is not None:
tc['sele_e'] += time.time() - t0
# selected_edges: (np.array) n_selected_edges (=n_sampled_edges) x 6, (eg_idx, vi, vj, rel, idx_vi, idx_vj]
# sorted by (eg_idx, vi, vj)
return selected_edges
def set_init_memorized_nodes(self, heads, tc=None):
""" heads: batch_size
"""
if tc is not None:
t0 = time.time()
batch_size = heads.shape[0]
eg_idx = np.array(np.arange(batch_size), dtype='int32')
self.memorized_nodes = np.stack([eg_idx, heads], axis=1)
if tc is not None:
tc['i_memo_v'] += time.time() - t0
        # memorized_nodes: n_memorized_nodes (=batch_size) x 2, (eg_idx, v) sorted by (eg_idx, v)
return self.memorized_nodes
def get_topk_nodes(self, node_attention, max_nodes, tc=None):
""" node_attention: (tf.Tensor) batch_size x n_nodes
"""
if tc is not None:
t0 = time.time()
eps = 1e-20
node_attention = node_attention.numpy()
n_nodes = node_attention.shape[1]
max_nodes = min(n_nodes, max_nodes)
sorted_idx = np.argsort(-node_attention, axis=1)[:, :max_nodes]
sorted_idx = np.sort(sorted_idx, axis=1)
node_attention = np.take_along_axis(node_attention, sorted_idx, axis=1) # sorted node attention
mask = node_attention > eps
eg_idx = np.repeat(np.expand_dims(np.arange(mask.shape[0]), 1), mask.shape[1], axis=1)[mask].astype('int32')
vi = sorted_idx[mask].astype('int32')
topk_nodes = np.stack([eg_idx, vi], axis=1)
if tc is not None:
tc['topk_v'] += time.time() - t0
# topk_nodes: (np.array) n_topk_nodes x 2, (eg_idx, vi) sorted
return topk_nodes
def get_selfloop_edges(self, attended_nodes, tc=None):
""" attended_nodes: (np.array) n_attended_nodes x 2, (eg_idx, vi) sorted
"""
if tc is not None:
t0 = time.time()
eg_idx, vi = attended_nodes[:, 0], attended_nodes[:, 1]
selfloop_edges = np.stack([eg_idx, vi, vi, np.repeat(np.array(self.selfloop, dtype='int32'), eg_idx.shape[0])],
axis=1) # (eg_idx, vi, vi, selfloop)
if tc is not None:
tc['sl_bt'] += time.time() - t0
return selfloop_edges # (eg_idx, vi, vi, selfloop)
def get_union_edges(self, scanned_edges, selfloop_edges, tc=None):
""" scanned_edges: (np.array) n_scanned_edges x 6, (eg_idx, vi, vj, rel, idx_vi, idx_vj) sorted by (eg_idx, vi, vj)
selfloop_edges: (np.array) n_selfloop_edges x 4 (eg_idx, vi, vi, selfloop)
"""
if tc is not None:
t0 = time.time()
scanned_edges = np.zeros((0, 4), dtype='int32') if len(scanned_edges) == 0 else scanned_edges[:, :4] # (eg_idx, vi, vj, rel)
all_edges = np.concatenate([scanned_edges, selfloop_edges], axis=0).copy()
sorted_idx = np.squeeze(np.argsort(all_edges.view('<i4,<i4,<i4,<i4'),
order=['f0', 'f1', 'f2'], axis=0), 1).astype('int32')
aug_scanned_edges = all_edges[sorted_idx] # sorted by (eg_idx, vi, vj)
idx_vi = get_segment_ids(aug_scanned_edges[:, [0, 1]])
_, idx_vj = | np.unique(aug_scanned_edges[:, [0, 2]], axis=0, return_inverse=True) | numpy.unique |
import numpy.random
import pandas
import numpy
import vtreat # https://github.com/WinVector/pyvtreat
import vtreat.util
def test_nan_inf():
numpy.random.seed(235)
d = pandas.DataFrame(
{"x": [1.0, numpy.nan, numpy.inf, -numpy.inf, None, 0], "y": [1, 2, 3, 4, 5, 6]}
)
transform = vtreat.NumericOutcomeTreatment(
outcome_name="y",
params=vtreat.vtreat_parameters({"filter_to_recommended": False}),
)
d_treated = transform.fit_transform(d, d["y"])
for c in d_treated.columns:
assert vtreat.util.can_convert_v_to_numeric(d_treated[c])
assert numpy.sum(vtreat.util.is_bad(d_treated[c])) == 0
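    # bad values (NaN/inf/None) are imputed with the mean of the valid entries,
    # (1.0 + 0.0) / 2 = 0.5, and flagged in the companion x_is_bad indicator column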
expect = pandas.DataFrame(
{
"x": [1.0, 0.5, 0.5, 0.5, 0.5, 0],
"x_is_bad": [0, 1, 1, 1, 1, 0],
"y": [1, 2, 3, 4, 5, 6],
}
)
for c in expect.columns:
ec = numpy.asarray(expect[c])
ed = | numpy.asarray(d_treated[c]) | numpy.asarray |
import pdb
import copy
import numpy as np
import os
import scipy
import math
import itertools
import scipy.misc
import torch
from pytorch3d.transforms import euler_angles_to_matrix, matrix_to_euler_angles
def read_total_poses(cam_file_path):
with open(cam_file_path) as f:
lines = f.readlines()
index_list = np.array(list(range(len(lines))))
index_poses = np.where((index_list % 5) == 0)
index = index_list[index_poses]
total_poses = []
for i in index:
pose = np.empty([4, 4]).astype(np.float32)
pose[0, :] = np.array(lines[i + 1].rstrip().split(' ')[:4], dtype=np.float32)
pose[1, :] = np.array(lines[i + 2].rstrip().split(' ')[:4], dtype=np.float32)
pose[2, :] = np.array(lines[i + 3].rstrip().split(' ')[:4], dtype=np.float32)
pose[3, :] = np.array(lines[i + 4].rstrip().split(' ')[:4], dtype=np.float32)
pose_new = pose[:3, :4]
# pose_new = np.linalg.inv(pose)
# pose_new = np.matmul(trans_mat_inv,pose_new)[:3,:4]
total_poses.append(pose_new)
return total_poses
def readCameraRTK_as_np_tanks(cameraPO_file, datasetName):
with open(cameraPO_file) as f:
lines = f.readlines()
cameraRTO = np.empty((3, 4)).astype(np.float64)
cameraRTO[0, :] = np.array(lines[1].rstrip().split(' ')[:4], dtype=np.float64)
cameraRTO[1, :] = np.array(lines[2].rstrip().split(' ')[:4], dtype=np.float64)
cameraRTO[2, :] = np.array(lines[3].rstrip().split(' ')[:4], dtype=np.float64)
cameraKO = np.empty((3, 3)).astype(np.float64)
cameraKO[0, :] = np.array(lines[7].rstrip().split(' ')[:3], dtype=np.float64)
cameraKO[1, :] = np.array(lines[8].rstrip().split(' ')[:3], dtype=np.float64)
cameraKO[2, :] = np.array(lines[9].rstrip().split(' ')[:3], dtype=np.float64)
    # All supported datasets use the same pinhole decomposition P = K [R|t]
    cameraPO = np.dot(cameraKO, cameraRTO)
return cameraRTO, cameraKO
def readCameraP0_as_np_tanks(cameraPO_file, datasetName, ):
with open(cameraPO_file) as f:
lines = f.readlines()
cameraRTO = np.empty((3, 4)).astype(np.float64)
cameraRTO[0, :] = np.array(lines[1].rstrip().split(' ')[:4], dtype=np.float64)
cameraRTO[1, :] = np.array(lines[2].rstrip().split(' ')[:4], dtype=np.float64)
cameraRTO[2, :] = np.array(lines[3].rstrip().split(' ')[:4], dtype=np.float64)
cameraKO = np.empty((3, 3)).astype(np.float64)
cameraKO[0, :] = np.array(lines[7].rstrip().split(' ')[:3], dtype=np.float64)
cameraKO[1, :] = np.array(lines[8].rstrip().split(' ')[:3], dtype=np.float64)
cameraKO[2, :] = np.array(lines[9].rstrip().split(' ')[:3], dtype=np.float64)
    # All supported datasets use the same pinhole decomposition P = K [R|t]
    cameraPO = np.dot(cameraKO, cameraRTO)
return cameraPO
def __readCameraPO_as_np_DTU__(cameraPO_file):
"""
only load a camera PO in the file
------------
inputs:
cameraPO_file: the camera pose file of a specific view
outputs:
cameraPO: np.float64 (3,4)
------------
usage:
>>> p = __readCameraPO_as_np_DTU__(cameraPO_file = './test/cameraPO/pos_060.txt')
>>> p # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 1.67373847e+03, -2.15171320e+03, 1.26963515e+03,
...
6.58552305e+02]])
"""
cameraPO = np.loadtxt(cameraPO_file, dtype=np.float64, delimiter=' ')
return cameraPO
def __readCameraPOs_as_np_Middlebury__(cameraPO_file, viewList):
"""
load camera POs of multiple views in one file
------------
inputs:
cameraPO_file: the camera pose file of a specific view
viewList: view list
outputs:
cameraPO: np.float64 (N_views,3,4)
------------
usage:
>>> p = __readCameraPOs_as_np_Middlebury__(cameraPO_file = './test/cameraPO/dinoSR_par.txt', viewList=[3,8])
>>> p # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[[ -1.22933223e+03, 3.08329199e+03, 2.02784015e+02,
...
6.41227584e-01]]])
"""
with open(cameraPO_file) as f:
lines = f.readlines()
cameraPOs = np.empty((len(lines), 3, 4)).astype(np.float64)
for _n, _l in enumerate(lines):
if _n == 0:
continue
_params = np.array(_l.strip().split(' ')[1:], dtype=np.float64)
_K = _params[:9].reshape((3, 3))
_R = _params[9:18].reshape((3, 3))
_t = _params[18:].reshape((3, 1))
cameraPOs[_n] = np.dot(_K, np.c_[_R, _t])
return cameraPOs[viewList]
def readCameraPOs_as_np(
datasetFolder,
datasetName,
poseNamePattern,
# model,
viewList,
model=None,
):
"""
inputs:
datasetFolder: 'x/x/x/middlebury'
datasetName: 'DTU' / 'Middlebury'
#model: 1..128 / 'dinoxx'
viewList: [3,8,21,...]
output:
cameraPOs (N_views,3,4) np.flost64
"""
cameraPOs = np.empty((len(viewList), 3, 4), dtype=np.float64)
cameraRTOs = np.empty((len(viewList), 3, 4), dtype=np.float64)
cameraKOs = np.empty((len(viewList), 3, 3), dtype=np.float64)
if 'Middlebury' in datasetName:
        cameraPOs = __readCameraPOs_as_np_Middlebury__(
            cameraPO_file=os.path.join(datasetFolder, poseNamePattern), viewList=viewList)
elif datasetName == 'tanks':
for _i, _view in enumerate(viewList):
_cameraPO = readCameraP0_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view)).replace(
'@', '{}'.format(_view))))
# _cameraPO = readCameraP0_as_np_tanks(cameraPO_file = datasetFolder+poseNamePattern.replace('#', '{:03}'.format(_view)).replace('@', '{}'.format(_view)))
cameraPOs[_i] = _cameraPO
_cameraRT, _cameraK = readCameraRTK_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view)).replace(
'@', '{}'.format(_view))))
cameraRTOs[_i] = _cameraRT
cameraKOs[_i] = _cameraK
elif datasetName == 'tanks_COLMAP': # zhiwei
for _i, _view in enumerate(viewList):
_cameraPO = readCameraP0_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view))),
datasetName=datasetName)
# _cameraPO = readCameraP0_as_np_tanks(cameraPO_file = datasetFolder+poseNamePattern.replace('#', '{:03}'.format(_view)).replace('@', '{}'.format(_view)))
cameraPOs[_i] = _cameraPO
_cameraRT, _cameraK = readCameraRTK_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view))),
datasetName=datasetName)
cameraRTOs[_i] = _cameraRT
cameraKOs[_i] = _cameraK
elif datasetName == 'blendedMVS': # zhiwei
for _i, _view in enumerate(viewList):
_cameraPO = readCameraP0_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view))),
datasetName=datasetName)
# _cameraPO = readCameraP0_as_np_tanks(cameraPO_file = datasetFolder+poseNamePattern.replace('#', '{:03}'.format(_view)).replace('@', '{}'.format(_view)))
cameraPOs[_i] = _cameraPO
_cameraRT, _cameraK = readCameraRTK_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view))),
datasetName=datasetName)
cameraRTOs[_i] = _cameraRT
cameraKOs[_i] = _cameraK
elif datasetName == 'giga_ours': # zhiwei
for _i, _view in enumerate(viewList):
_cameraPO = readCameraP0_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view))),
datasetName=datasetName)
# _cameraPO = readCameraP0_as_np_tanks(cameraPO_file = datasetFolder+poseNamePattern.replace('#', '{:03}'.format(_view)).replace('@', '{}'.format(_view)))
cameraPOs[_i] = _cameraPO
_cameraRT, _cameraK = readCameraRTK_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view))),
datasetName=datasetName)
cameraRTOs[_i] = _cameraRT
cameraKOs[_i] = _cameraK
else: # cameraPOs are stored in different files
# tran_mat_path = os.path.join(datasetFolder,transMatPattern)
for _i, _view in enumerate(viewList):
# if 'DTU' in datasetName:
# _cameraPO = __readCameraPO_as_np_DTU__(cameraPO_file=os.path.join(datasetFolder,
# poseNamePattern.replace('#',
# '{:03}'.format(
# _view)).replace(
# '@', '{}'.format(_view))))
_cameraPO = readCameraP0_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view - 1)).replace(
'@', '{}'.format(_view - 1))),
datasetName=datasetName)
cameraPOs[_i] = _cameraPO
_cameraRT, _cameraK = readCameraRTK_as_np_tanks(cameraPO_file=os.path.join(datasetFolder,
poseNamePattern.replace('#',
'{:03}'.format(
_view - 1)).replace(
'@',
'{}'.format(_view - 1))),
datasetName=datasetName)
cameraRTOs[_i] = _cameraRT
cameraKOs[_i] = _cameraK
# print('cameraPOs', cameraPOs)
return cameraPOs, cameraRTOs, cameraKOs
def readCameraP0s_np_allModel(datasetFolder,
datasetName,
poseNamePatternModels,
modelList,
viewList,
transMatPattern=None
):
cameraPs = []
cameraP4s = []
cameraRTs = []
cameraKs = []
for i in modelList:
        if datasetName == 'tanks':
            ##########TODO###################
            cameraPOs, cameraRTOs, cameraKOs = readCameraPOs_as_np(datasetFolder,
                                                                   datasetName,
                                                                   poseNamePatternModels,
                                                                   viewList, )
            ones = np.repeat(np.array([[[0, 0, 0, 1]]]), repeats=cameraPOs.shape[0], axis=0)
            cameraP0s = np.concatenate((cameraPOs, ones), axis=1)
elif datasetName == 'DTU':
cameraPOs, cameraRTOs, cameraKOs = readCameraPOs_as_np(datasetFolder,
datasetName,
poseNamePatternModels,
viewList,
)
ones = np.repeat(np.array([[[0, 0, 0, 1]]]), repeats=cameraPOs.shape[0], axis=0)
cameraP0s = np.concatenate((cameraPOs, ones), axis=1)
elif datasetName == 'tanks_COLMAP': # zhiwei
cameraPOs, cameraRTOs, cameraKOs = readCameraPOs_as_np(datasetFolder,
datasetName,
poseNamePatternModels.replace('$', str(i)),
viewList,
)
ones = np.repeat(np.array([[[0, 0, 0, 1]]]), repeats=cameraPOs.shape[0], axis=0)
cameraP0s = np.concatenate((cameraPOs, ones), axis=1)
elif datasetName == 'blendedMVS': # zhiwei
cameraPOs, cameraRTOs, cameraKOs = readCameraPOs_as_np(datasetFolder,
datasetName,
poseNamePatternModels.replace('$', str(i)),
viewList,
)
ones = np.repeat(np.array([[[0, 0, 0, 1]]]), repeats=cameraPOs.shape[0], axis=0)
cameraP0s = np.concatenate((cameraPOs, ones), axis=1)
elif datasetName == 'giga_ours': # zhiwei
cameraPOs, cameraRTOs, cameraKOs = readCameraPOs_as_np(datasetFolder,
datasetName,
poseNamePatternModels.replace('$', str(i)),
viewList,
)
ones = np.repeat(np.array([[[0, 0, 0, 1]]]), repeats=cameraPOs.shape[0], axis=0)
cameraP0s = np.concatenate((cameraPOs, ones), axis=1)
cameraPs.append(cameraPOs)
cameraP4s.append(cameraP0s)
cameraRTs.append(cameraRTOs)
cameraKs.append(cameraKOs)
return (cameraPs, np.array(cameraP4s), np.array(cameraRTs), np.array(cameraKs))
def __cameraP2T__(cameraPO):
"""
cameraPO: (3,4)
return camera center in the world coords: cameraT (3,0)
>>> P = np.array([[798.693916, -2438.153488, 1568.674338, -542599.034996], \
[-44.838945, 1433.912029, 2576.399630, -1176685.647358], \
[-0.840873, -0.344537, 0.417405, 382.793511]])
>>> t = np.array([555.64348632032, 191.10837560939, 360.02470478273])
>>> np.allclose(__cameraP2T__(P), t)
True
"""
homo4D = np.array([np.linalg.det(cameraPO[:, [1, 2, 3]]), -1 * np.linalg.det(cameraPO[:, [0, 2, 3]]),
np.linalg.det(cameraPO[:, [0, 1, 3]]), -1 * np.linalg.det(cameraPO[:, [0, 1, 2]])])
# print('homo4D', homo4D)
cameraT = homo4D[:3] / homo4D[3]
return cameraT
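# Note: the camera centre C is the right null vector of P (i.e., P @ [C; 1] = 0); the signed
# 3x3 minors computed above are exactly the components of that homogeneous null vector.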
def cameraPs2Ts_all(cameraPOs_all):
"""
"""
model_num = len(cameraPOs_all)
# pdb.set_trace()
cameraT_all = np.zeros((model_num, cameraPOs_all[0].shape[0], 3))
for i in range(model_num):
cameraT_all[i] = cameraPs2Ts(cameraPOs_all[i])
return cameraT_all
def cameraPs2Ts(cameraPOs):
"""
convert multiple POs to Ts.
----------
input:
cameraPOs: list / numpy
output:
cameraTs: list / numpy
"""
if type(cameraPOs) is list:
N = len(cameraPOs)
else:
N = cameraPOs.shape[0]
cameraT_list = []
for _cameraPO in cameraPOs:
cameraT_list.append(__cameraP2T__(_cameraPO))
return cameraT_list if type(cameraPOs) is list else np.stack(cameraT_list)
def inverse_camera_matrix(cameraP0s):
N_Ms = cameraP0s.shape[0]
projection_new = np.zeros((N_Ms, 4, 4))
projection_new[:, 0:3, :] = cameraP0s
projection_new[:, 3, :] = np.array(([[0, 0, 0, 1]]))
projection_new = np.linalg.inv(projection_new)
return projection_new
def calculate_angle_p1_p2_p3(p1, p2, p3, return_angle=True, return_cosine=True):
"""
calculate angle <p1,p2,p3>, which is the angle between the vectors p2p1 and p2p3
Parameters
----------
p1/p2/p3: numpy with shape (3,)
return_angle: return the radian angle
return_cosine: return the cosine value
Returns
-------
angle, cosine
Examples
--------
"""
unit_vector = lambda v: v / np.linalg.norm(v)
angle = lambda v1, v2: np.arccos(np.clip(np.dot(unit_vector(v1), unit_vector(v2)), -1.0, 1.0))
cos_angle = lambda v1, v2: np.clip(np.dot(unit_vector(v1), unit_vector(v2)), -1.0, 1.0)
vect_p2p1 = p1 - p2
vect_p2p3 = p3 - p2
return angle(vect_p2p1, vect_p2p3) if return_angle else None, \
cos_angle(vect_p2p1, vect_p2p3) if return_cosine else None
def k_combination_np(iterable, k=2):
"""
list all the k-combination along the output rows:
input: [2,5,8], list 2-combination to a numpy array
output: np.array([[2,5],[2,8],[5,8]])
----------
usages:
>>> k_combination_np([2,5,8])
array([[2, 5],
[2, 8],
[5, 8]])
>>> k_combination_np([2,5,8]).dtype
dtype('int64')
>>> k_combination_np([2.2,5.5,8.8,9.9], k=3)
array([[ 2.2, 5.5, 8.8],
[ 2.2, 5.5, 9.9],
[ 2.2, 8.8, 9.9],
[ 5.5, 8.8, 9.9]])
"""
combinations = []
for _combination in itertools.combinations(iterable, k):
combinations.append(_combination)
return np.asarray(combinations)
def viewPairAngles_wrt_pts(cameraTs, pts_xyz):
"""
given a set of camera positions and a set of points coordinates, output the angle between camera pairs w.r.t. each 3D point.
-----------
inputs:
cameraTs: (N_views, 3) camera positions
pts_xyz: (N_pts, 3) 3D points' coordinates
-----------
outputs:
viewPairAngle_wrt_pts: (N_pts, N_viewPairs) angle
-----------
usages:
>>> pts_xyz = np.array([[0,0,0],[1,1,1]], dtype=np.float32) # p1 / p2
>>> cameraTs = np.array([[0,0,1], [0,1,1], [1,0,1]], dtype=np.float32) # c1/2/3
>>> viewPairAngles_wrt_pts(cameraTs, pts_xyz) * 180 / math.pi # output[i]: [<c1,pi,c2>, <c1,pi,c3>, <c2,pi,c3>]
array([[ 45., 45., 60.],
[ 45., 45., 90.]], dtype=float32)
"""
unitize_array = lambda array, axis: array / np.linalg.norm(array, axis=axis, ord=2, keepdims=True)
calc_arccos = lambda cos_values: np.arccos(np.clip(cos_values, -1.0, 1.0)) # TODO does it need clip ?
N_views = cameraTs.shape[0]
vector_pts2cameras = pts_xyz[:, None, :] - cameraTs[
None, ...] # (N_pts, 1, 3) - (1, N_views, 3) ==> (N_pts, N_views, 3)
unit_vector_pts2cameras = unitize_array(vector_pts2cameras,
axis=-1) # (N_pts, N_views, 3) unit vector along axis=-1
# do the matrix multiplication for the (N_pats,) tack of (N_views, 3) matrixs
## (N_pts, N_views, 3) * (N_pts, 3, N_views) ==> (N_pts, N_views, N_views)
# viewPairCosine_wrt_pts = np.matmul(unit_vector_pts2cameras, unit_vector_pts2cameras.transpose((0,2,1)))
    viewPairs = k_combination_np(range(N_views), k=2)  # (N_combinations, 2)
viewPairCosine_wrt_pts = np.sum(
np.multiply(unit_vector_pts2cameras[:, viewPairs[:, 0]], unit_vector_pts2cameras[:, viewPairs[:, 1]]),
axis=-1) # (N_pts, N_combinations, 3) elementwise multiplication --> (N_pts, N_combinations) sum over the last axis
viewPairAngle_wrt_pts = calc_arccos(viewPairCosine_wrt_pts) # (N_pts, N_combinations)
return viewPairAngle_wrt_pts
# def viewPairAngles_p0s_pts(self, projection_M, )
def viewPairAngles_wrt_groupView(cameraTs, group_cameraTs, xyz_3D):
'''
:param cameraTs:
shape: (N_views,3)
:param group_cameraTs:
shape:(N_bool_views,3)
:param xyz_3D:
shape:(3)
:return:
angle_total: the angle of group T and camera T
shape: (N_bool_views, N_views)
'''
cameraTs_array = (cameraTs - xyz_3D)[None, :, :, None] # (N_views,3)->(1,N_views, 3,1)
group_cameraTs_array = (group_cameraTs - xyz_3D)[:, None, None, :] # (N_bool_views,3)->(N_bool_views,1,3,1)
dot_two = np.matmul(group_cameraTs_array, cameraTs_array)[:, :, 0, 0] # (N_bool_views, N_views)
len_cameraTs = np.linalg.norm(cameraTs - xyz_3D, axis=1)[None, :] # (1, N_views)
len_group_cameraTs = np.linalg.norm(group_cameraTs - xyz_3D, axis=1)[:, None] # (N_bool_views, 1)
len_total = len_cameraTs * len_group_cameraTs # (N_bool_views, N_views)
cos_total = dot_two / (len_total + 1e-10) # (N_bool_views, N_views)
angle_total = np.arccos(np.clip(cos_total, -1.0, 1.0))
return (angle_total)
def select_group_pairs(projection_M, cameraTs, group_cameraTs, xyz_3D, cube_length, image_shape, angle_thres,
group_pair_num_max, group_pair_num_min, group_pair_index):
'''
given group view number, select groupviews
:param projection_M: the
shape:(N_views, 3,4)
:param cameraTs:
shape:(N_views, 3)
:param group_cameraTs:
shape:(N_boole_views, 3)
:param xyz_3D:
shape:(3)
:param cube_length:
float: the length of the cube
:param image_shape:
(img_h, img_w)
:param angle_thres:
float ses params.in_group_angle
:param group_pair_num_max/min:
int see params.group_pair_num_max/min
:param group_pair_index
list of int pair: see params.group_pair_index
:return:
view_pair_list: list of view_pair index
element in list:
(group_left, group_right, (group_id_left, group_id_right))
group_left/right:
numpy 1d array of view pair number
e.g. [(array([ 6, 16, 4, 2, 6]), array([33, 24, 16, 14, 24]), (0, 2)), (array([ 3, 15, 20, 4, 33]), array([ 7, 36, 5, 19, 4]), (1, 3)), (array([33, 24, 16, 14, 24]), array([24, 15, 22, 34, 15]), (2, 4)), (array([ 7, 36, 5, 19, 4]), array([24, 43, 34, 42, 14]), (3, 5)), (array([24, 15, 22, 34, 15]), array([42, 34, 38, 18, 37]), (4, 6)), (array([24, 43, 34, 42, 14]), array([43, 42, 33, 15, 35]), (5, 7))]
'''
view_in_flag = judge_cubic_center_in_view(projection_M,
xyz_3D,
cube_length,
image_shape,
)
angle_total = viewPairAngles_wrt_groupView(cameraTs, group_cameraTs, xyz_3D)
group_pair_flag = view_in_flag[None, :] * (angle_total < angle_thres)
# print('group_pair_flag', group_pair_flag.shape)
view_list = np.repeat((np.arange(group_pair_flag.shape[1]))[None, :], axis=0, repeats=group_pair_flag.shape[0])
# print(group_pair_flag)
view_num_list = []
for i in range(group_pair_flag.shape[0]):
view_num_i = view_list[i, group_pair_flag[i, :]]
if (view_num_i.shape[0] >= group_pair_num_max):
view_num_i = np.random.choice(view_num_i, group_pair_num_max, replace=False)
view_num_list.append(view_num_i)
view_pair_list = []
for (i, j) in (group_pair_index):
if ((view_num_list[i].shape[0] >= group_pair_num_min) and (view_num_list[j].shape[0] >= group_pair_num_min)):
view_pair_list.append((view_num_list[i], view_num_list[j], (i, j)))
# print('view_pair_list',view_pair_list)
return view_pair_list
def select_group(projection_M, cameraTs, group_cameraTs, xyz_3D, cube_length, image_shape, angle_thres,
group_pair_num_max, group_pair_num_min):
'''
given group view number, select groupviews
:param projection_M: the
shape:(N_views, 3,4)
:param cameraTs:
shape:(N_views, 3)
:param group_cameraTs:
shape:(N_boole_views, 3)
:param xyz_3D:
shape:(3)
:param cube_length:
float: the length of the cube
:param image_shape:
(img_h, img_w)
:param angle_thres:
float ses params.in_group_angle
:param group_pair_num_max/min:
int see params.group_pair_num_max/min
:return:
view_list: list of view index
element in list:
group:
numpy 1d array of view number
'''
# view_in_flag = judge_cubic_center_in_view(projection_M ,
# xyz_3D ,
# cube_length,
# image_shape,
# )
    view_in_flag = np.ones((projection_M.shape[0]), dtype=bool)
angle_total = viewPairAngles_wrt_groupView(cameraTs, group_cameraTs, xyz_3D)
group_pair_flag = view_in_flag[None, :] * (angle_total < angle_thres)
# print('group_pair_flag', group_pair_flag.shape)
view_list = np.repeat((np.arange(group_pair_flag.shape[1]))[None, :], axis=0, repeats=group_pair_flag.shape[0])
# print(group_pair_flag)
view_num_list = []
for i in range(group_pair_flag.shape[0]):
view_num_i = view_list[i, group_pair_flag[i, :]]
if (view_num_i.shape[0] >= group_pair_num_max):
view_num_i = np.sort(np.random.choice(view_num_i, group_pair_num_max, replace=False), axis=0)
# view_num_i = (np.random.choice(view_num_i, group_pair_num_max, replace = False))
# pdb.set_trace()
if (view_num_i.shape[0] >= group_pair_num_min):
view_num_list.append(view_num_i)
return view_num_list
def perspectiveProj(
projection_M,
xyz_3D,
return_int_hw=True,
return_depth=False):
"""
perform perspective projection from 3D points to 2D points given projection matrix(es)
support multiple projection_matrixes and multiple 3D vectors
notice: [matlabx,matlaby] = [width, height]
----------
inputs:
projection_M: numpy with shape (3,4) / (N_Ms, 3,4), during calculation (3,4) will --> (1,3,4)
xyz_3D: numpy with shape (3,) / (N_pts, 3), during calculation (3,) will --> (1,3)
return_int_hw: bool, round results to integer when True.
----------
outputs:
img_h, img_w: (N_pts,) / (N_Ms, N_pts)
----------
usages:
inputs: (N_Ms, 3,4) & (N_pts, 3), return_int_hw = False/True
>>> np.random.seed(201611)
>>> Ms = np.random.rand(2,3,4)
>>> pts_3D = np.random.rand(2,3)
>>> pts_2Dh, pts_2Dw = perspectiveProj(Ms, pts_3D, return_int_hw = False)
>>> np.allclose(pts_2Dw, np.array([[ 1.35860185, 0.9878389 ],
... [ 0.64522543, 0.76079278 ]]))
True
>>> pts_2Dh_int, pts_2Dw_int = perspectiveProj(Ms, pts_3D, return_int_hw = True)
>>> np.allclose(pts_2Dw_int, np.array([[1, 1], [1, 1]]))
True
inputs: (3,4) & (3,)
>>> np.allclose(
... np.r_[perspectiveProj(Ms[1], pts_3D[0], return_int_hw = False)],
... np.stack((pts_2Dh, pts_2Dw))[:,1,0])
True
"""
if projection_M.shape[-2:] != (3, 4):
raise ValueError(
"perspectiveProj needs projection_M with shape (3,4), however got {}".format(projection_M.shape))
if xyz_3D.ndim == 1:
xyz_3D = xyz_3D[None, :]
if xyz_3D.shape[1] != 3 or xyz_3D.ndim != 2:
raise ValueError(
"perspectiveProj needs xyz_3D with shape (3,) or (N_pts, 3), however got {}".format(xyz_3D.shape))
# perspective projection
N_pts = xyz_3D.shape[0]
xyz1 = np.c_[xyz_3D, np.ones((N_pts, 1))].astype(np.float64) # (N_pts, 3) ==> (N_pts, 4)
pts_3D = np.matmul(projection_M, xyz1.T) # (3, 4)/(N_Ms, 3, 4) * (4, N_pts) ==> (3, N_pts)/(N_Ms,3,N_pts)
# the result is vector: [w,h,1], w is the first dim!!! (matlab's x/y/1')
pts_2D = pts_3D[..., :2, :]
# self.pts_3D = pts_3D
pts_2D /= pts_3D[..., 2:3, :] # (2, N_pts) /= (1, N_pts) | (N_Ms, 2, N_pts) /= (N_Ms, 1, N_pts)
# self.pts_2D = pts_2D
# print(self.pts_2D)
if return_int_hw:
pts_2D = pts_2D.round().astype(np.int64) # (2, N_pts) / (N_Ms, 2, N_pts)
img_w, img_h = pts_2D[..., 0, :], pts_2D[..., 1, :] # (N_pts,) / (N_Ms, N_pts)
if return_depth:
depth = pts_3D[..., 2, :]
return img_h, img_w, depth
return img_h, img_w
def perspectiveProj_cubesCorner(projection_M, cube_xyz_min, cube_D_mm, return_int_hw=True,
return_depth=False):
"""
perform perspective projection from 3D points to 2D points given projection matrix(es)
support multiple projection_matrixes and multiple 3D vectors
notice: [matlabx,matlaby] = [width, height]
----------
inputs:
projection_M: numpy with shape (3,4) / (N_Ms, 3,4), during calculation (3,4) will --> (1,3,4)
cube_xyz_min: numpy with shape (3,) / (N_pts, 3), during calculation (3,) will --> (1,3)
cube_D_mm: cube with shape D^3
return_int_hw: bool, round results to integer when True.
return_depth: bool
----------
outputs:
img_h, img_w: (N_Ms, N_pts, 8)
----------
usages:
inputs: (N_Ms, 3, 4) & (N_pts, 3), return_int_hw = False/True, outputs (N_Ms, N_pts, 8)
>>> np.random.seed(201611)
>>> Ms = np.random.rand(2,3,4)
>>> pts_3D = np.random.rand(2,3)
>>> pts_2Dh, pts_2Dw = perspectiveProj_cubesCorner(Ms, pts_3D, cube_D_mm = 1, return_int_hw = False)
>>> np.allclose(pts_2Dw[:,:,0], np.array([[ 1.35860185, 0.9878389 ],
... [ 0.64522543, 0.76079278 ]]))
True
>>> pts_2Dh_int, pts_2Dw_int = perspectiveProj_cubesCorner(Ms, pts_3D, cube_D_mm = 1, return_int_hw = True)
>>> np.allclose(pts_2Dw_int[:,:,0], np.array([[1, 1], [1, 1]]))
True
inputs: (3,4) & (3,), outputs (1,1,8)
>>> np.allclose(
... perspectiveProj_cubesCorner(Ms[1], pts_3D[0], cube_D_mm = 1, return_int_hw = False)[0],
... pts_2Dh[1,0]) # (1,1,8)
True
"""
if projection_M.shape[-2:] != (3, 4):
raise ValueError(
"perspectiveProj needs projection_M with shape (3,4), however got {}".format(projection_M.shape))
if cube_xyz_min.ndim == 1:
cube_xyz_min = cube_xyz_min[None, :] # (3,) --> (N_pts, 3)
if cube_xyz_min.shape[1] != 3 or cube_xyz_min.ndim != 2:
raise ValueError("perspectiveProj needs cube_xyz_min with shape (3,) or (N_pts, 3), however got {}".format(
cube_xyz_min.shape))
N_pts = cube_xyz_min.shape[0]
cubeCorner_shift = np.indices((2, 2, 2)).reshape((3, -1)).T[None, :, :] * cube_D_mm # (3,2,2,2) --> (1,8,3)
cubeCorner = cube_xyz_min[:, None, :] + cubeCorner_shift # (N_pts, 1, 3) + (1,8,3) --> (N_pts, 8, 3)
img_h, img_w = perspectiveProj(projection_M=projection_M, xyz_3D=cubeCorner.reshape((N_pts * 8, 3)),
return_int_hw=return_int_hw,
return_depth=return_depth) # img_w/h: (N_Ms, N_pts*8)
img_w = img_w.reshape((-1, N_pts, 8))
img_h = img_h.reshape((-1, N_pts, 8))
return img_h, img_w
def image_compress_coef(projection_M,
cube_xyz_min,
cube_D_mm,
_cube_D_,
image_compress_multiple,
compress_ratio=1.0
):
img_h, img_w = perspectiveProj_cubesCorner(projection_M,
cube_xyz_min,
cube_D_mm,
return_int_hw=True,
return_depth=False)
img_h_max = np.max(img_h, axis=2) # (N_Ms, N_pts)
img_w_max = np.max(img_w, axis=2)
img_h_min = np.min(img_h, axis=2)
img_w_min = np.min(img_w, axis=2)
img_h_resol = (img_h_max - img_h_min + 0.0) / _cube_D_
img_w_resol = (img_w_max - img_w_min + 0.0) / _cube_D_
compress_h = compress_ratio * img_h_resol.mean() / image_compress_multiple
compress_w = compress_ratio * img_w_resol.mean() / image_compress_multiple
return ((compress_h), (compress_w))
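# compress_h/w is the mean number of image pixels spanned by one cube voxel along each axis,
# divided by image_compress_multiple: downsampling the images by this factor makes a voxel
# project to roughly image_compress_multiple pixels.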
# def resize_matrix(projection_M, compress_h_new, compress_w_new):
# transform_matrix = np.array([[[1 / compress_w_new, 0, 0], [0, 1 / compress_h_new, 0], [0, 0, 1]]])
# projection_M_new = np.matmul(transform_matrix, projection_M)
#
# cameraTs = cameraPs2Ts(projection_M)
# cameraTs_new = cameraPs2Ts(projection_M_new)
# trans_vector = (cameraTs - cameraTs_new)[:, :, None]
# identical_matrix = np.repeat(np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]), cameraTs.shape[0], axis=0)
# bottom_matrix = np.repeat(np.array([[[0, 0, 0, 1]]]), cameraTs.shape[0], axis=0)
# transform_matrix2 = np.concatenate((identical_matrix, trans_vector), axis=2)
# transform_matrix2 = np.concatenate((transform_matrix2, bottom_matrix), axis=1)
# projection_M_new_f = np.concatenate((projection_M_new, bottom_matrix), axis=1)
#
# projection_M_new = np.matmul(transform_matrix2, projection_M_new_f)
# projection_M_new = projection_M_new[:, :3, :]
# return projection_M_new
def resize_image_and_matrix(images,
projection_M,
cube_xyz_min,
cube_D_mm,
_cube_D_,
image_compress_multiple,
return_list=False,
compress_ratio=1.0):
'''
    compress image and guarantee that the camera position does not change
:param images: all images of one model
type:list or None
if list
list element: image array
shape: (img_h,img_w, 3)
:param projection_M: camera matrix
shape: (N_views, 3, 4)
:param cube_xyz_min: min xyz coordinate
shape: (3,) / (N_pts, 3) usually it is (3,) because we only sample one cubic to judge the resize term
:param cube_D_mm:
cubic length float
:param _cube_D_:
cubic size int
:param image_compress_multiple:
same as param.image_compress_multiple
:param return_list: bool
if False return the numpy array
:param compress_ratio
see self.params.compress_ratio
:return:
if image is not None
images_resized:resized image
            shape:(N_view, img_h_new, img_w_new)
projection_M_new: new cameraP
shape:(N_view,3,4)
(compress_h_new,compress_w_new):(float,float)
elif image is None: only change the matrix
projection_M_new: new cameraP
shape:(N_view,3,4)
(compress_h_new,compress_w_new):(float,float)
'''
(compress_h, compress_w) = image_compress_coef(projection_M,
cube_xyz_min,
cube_D_mm,
_cube_D_,
image_compress_multiple,
compress_ratio=compress_ratio)
    if images is not None:
        resized_h = int(image_compress_multiple * (images[0].shape[0] // (compress_h * image_compress_multiple)))
        resized_w = int(image_compress_multiple * (images[0].shape[1] // (compress_w * image_compress_multiple)))
        compress_h_new = images[0].shape[0] / (resized_h + 0.0)
        compress_w_new = images[0].shape[1] / (resized_w + 0.0)
    else:
        # BUG FIX: the original dereferenced images[0] even when images is None,
        # contradicting the docstring; without images we cannot snap the size to
        # a multiple of image_compress_multiple, so fall back to the raw coefficients.
        compress_h_new, compress_w_new = compress_h, compress_w
transform_matrix = np.array([[[1 / compress_w_new, 0, 0], [0, 1 / compress_h_new, 0], [0, 0, 1]]])
projection_M_new = np.matmul(transform_matrix, projection_M)
cameraTs = cameraPs2Ts(projection_M)
cameraTs_new = cameraPs2Ts(projection_M_new)
trans_vector = (cameraTs - cameraTs_new)[:, :, None]
identical_matrix = np.repeat(np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]), cameraTs.shape[0], axis=0)
bottom_matrix = np.repeat(np.array([[[0, 0, 0, 1]]]), cameraTs.shape[0], axis=0)
transform_matrix2 = np.concatenate((identical_matrix, trans_vector), axis=2)
transform_matrix2 = np.concatenate((transform_matrix2, bottom_matrix), axis=1)
projection_M_new_f = np.concatenate((projection_M_new, bottom_matrix), axis=1)
projection_M_new = np.matmul(transform_matrix2, projection_M_new_f)
projection_M_new = projection_M_new[:, :3, :]
image_resized_list = []
if (images is not None):
for image in images:
            # NOTE: scipy.misc.imresize was removed in SciPy 1.3; this call needs
            # scipy<1.2 (with Pillow). An equivalent would be
            # cv2.resize(image, (resized_w, resized_h), interpolation=cv2.INTER_CUBIC).
            image_resized = scipy.misc.imresize(image, size=(resized_h, resized_w), interp='bicubic')
            image_resized = image_resized / 256.0 - 0.5
image_resized_list.append(image_resized)
images_resized = image_resized_list if return_list else np.stack(image_resized_list)
return (images_resized, projection_M_new, (compress_h_new, compress_w_new))
else:
return (None, projection_M_new, (compress_h_new, compress_w_new))
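# Minimal sketch for resize_image_and_matrix with images=None (hypothetical
# inputs; cameraPs2Ts is assumed to be available in this module). Passing
# images=None only rescales the projection matrices, relying on the fallback
# branch above.
def _demo_resize_matrix_only():
    np.random.seed(0)
    Ms = np.random.rand(3, 3, 4)   # hypothetical (N_views, 3, 4) camera matrices
    _, projection_M_new, compress = resize_image_and_matrix(
        images=None, projection_M=Ms, cube_xyz_min=np.zeros(3), cube_D_mm=64.0,
        _cube_D_=32, image_compress_multiple=2)
    return projection_M_new, compress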
# def resize_multistage_image_and_matrix(images,
# projection_M,
# cube_xyz_min,
# cube_D_mm,
# _cube_D_,
# image_compress_multiple,
# image_compress_stage,
# return_list=False,
# compress_ratio=1.0):
# '''
# compress image and garantee the camera position is not changing
# :param images: all images of one model
# type:list or None
# if list
# list element: image array
# shape: (img_h,img_w, 3)
#
# :param projection_M: camera matrix
# shape: (N_views, 3, 4)
# :param cube_xyz_min: min xyz coordinate
# shape: (3,) / (N_pts, 3) usually it is (3,) because we only sample one cubic to judge the resize term
# :param cube_D_mm:
# cubic length float
# :param _cube_D_:
# cubic size int
# :param image_compress_multiple:
# same as param.image_compress_multiple
# :param image_compress_stage
# same as param.image_compress_stage
# :param return_list: bool
# if False return the numpy array
# :param compress_ratio
# see self.params.compress_ratio
# :return:
# if image is not None
# image_resized_stage_list:multistage of resized image
# length : = image_compress_stage
# ele in each list:
# shape:(N_view, img_h_new//2**iter, img_w_new//2**iter)
# projection_M_new: new cameraP
# shape:(N_view,3,4)
# (compress_h_new,compress_w_new):(float,float)
# elif image is None: only change the matrix
# projection_M_new: new cameraP
# shape:(N_view,3,4)
# (compress_h_new,compress_w_new):(float,float)
# '''
# # (compress_h, compress_w) = image_compress_coef(projection_M,
# # cube_xyz_min,
# # cube_D_mm,
# # _cube_D_,
# # 1,
# # compress_ratio = compress_ratio)
#
# # print('compress_h', compress_h, compress_w)
# compress_h = compress_ratio
# compress_w = compress_ratio
# resized_h = int(image_compress_multiple * (images[0].shape[0] // (compress_h * image_compress_multiple)))
# resized_w = int(image_compress_multiple * (images[0].shape[1] // (compress_w * image_compress_multiple)))
#
# # pdb.set_trace()
# compress_h_new = images[0].shape[0] / (resized_h + 0.0)
# compress_w_new = images[0].shape[1] / (resized_w + 0.0)
# transform_matrix = np.array([[[1 / compress_w_new, 0, 0], [0, 1 / compress_h_new, 0], [0, 0, 1]]])
# projection_M_new = np.matmul(transform_matrix, projection_M)
#
# cameraTs = cameraPs2Ts(projection_M)
# cameraTs_new = cameraPs2Ts(projection_M_new)
# trans_vector = (cameraTs - cameraTs_new)[:, :, None]
# identical_matrix = np.repeat(np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]), cameraTs.shape[0], axis=0)
# bottom_matrix = np.repeat(np.array([[[0, 0, 0, 1]]]), cameraTs.shape[0], axis=0)
# transform_matrix2 = np.concatenate((identical_matrix, trans_vector), axis=2)
# transform_matrix2 = np.concatenate((transform_matrix2, bottom_matrix), axis=1)
# projection_M_new_f = np.concatenate((projection_M_new, bottom_matrix), axis=1)
#
# projection_M_new = np.matmul(transform_matrix2, projection_M_new_f)
# projection_M_new = projection_M_new[:, :3, :]
#
# if (images is not None):
# image_resized_stage_list = []
# for iter in range(image_compress_stage):
# image_resized_list = []
# for image in images:
# # print('resized image shape',resized_h, resized_w)
# image_resized = scipy.misc.imresize(image,
# size=(int(resized_h // (2 ** iter)), int(resized_w // (2 ** iter))),
# interp='bicubic')
# image_resized = image_resized / 256.0 - 0.5
# image_resized_list.append(image_resized)
# images_resized = image_resized_list if return_list else np.stack(image_resized_list)
# image_resized_stage_list.append(images_resized)
# return (image_resized_stage_list, projection_M_new, (compress_h_new, compress_w_new))
# else:
# return (None, projection_M_new, (compress_h_new, compress_w_new))
def judge_cubic_center_in_view(projection_M,
xyz_3D,
cube_length,
image_shape,
):
'''
    Per-view boolean flag: True if all eight corners of the cube project inside the image.
:param projection_M:
shape:(N_views, 3, 4)
:param xyz_3D:
shape:(3)
:param cube_length
float
:param image_shape:
(img_h,img_w)
:return:
view_in_flag: bool array
shape: (N_views)
'''
    # project all eight cube corners (offsets in {0, cube_length}^3, including
    # the cube origin itself) and keep a view only if every corner is in frame
    corner_offsets = np.indices((2, 2, 2)).reshape((3, -1)).T * cube_length  # (8, 3)
    view_in_flag = None
    for offset in corner_offsets:
        img_h_c, img_w_c = perspectiveProj(
            projection_M=projection_M,
            xyz_3D=xyz_3D + offset,
        )
        in_view = ((img_h_c < image_shape[0]) * (img_h_c > 0)
                   * (img_w_c < image_shape[1]) * (img_w_c > 0))
        view_in_flag = in_view if view_in_flag is None else view_in_flag * in_view
    print('number of views that can see the whole cube:', view_in_flag.sum())
return view_in_flag[:, 0]
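# Quick sketch for judge_cubic_center_in_view (hypothetical cameras and image
# size; perspectiveProj is assumed to be defined above): flags which of the
# N_views cameras project all eight cube corners inside a 480x640 image.
def _demo_judge_cubic_center_in_view():
    np.random.seed(1)
    Ms = np.random.rand(4, 3, 4)   # four hypothetical cameras
    flags = judge_cubic_center_in_view(Ms, xyz_3D=np.zeros(3),
                                       cube_length=1.0, image_shape=(480, 640))
    return flags                   # bool array, shape (4,)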
def count_gx_gy(projection_M, h_length=1, w_length=1):
projection_M_inverse = inverse_camera_matrix(projection_M)
N_view = projection_M_inverse.shape[0]
vector_101 = np.array(([w_length, 0, 1, 1]))[None, :, None]
vector_011 = np.array(([0, h_length, 1, 1]))[None, :, None]
vector_001 = np.array(([0, 0, 1, 1]))[None, :, None]
global_101 = np.matmul(projection_M_inverse, vector_101)[:, :3, 0] # shape: (N_view, 4,1)->(N_view, 3)
global_011 = np.matmul(projection_M_inverse, vector_011)[:, :3, 0]
global_001 = np.matmul(projection_M_inverse, vector_001)[:, :3, 0]
gx = np.linalg.norm(global_101 - global_001, axis=1) # shape: (N_views)
gy = np.linalg.norm(global_011 - global_001, axis=1)
return (gx, gy)
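# Worked sketch for count_gx_gy (inverse_camera_matrix is assumed to be defined
# elsewhere in this module): gx/gy are the world-space lengths spanned by a one
# pixel step along the image w/h axes on the z=1 plane; the meta-vector code
# below multiplies them by the depth Z to estimate a voxel's pixel footprint.
def _demo_count_gx_gy():
    # a hypothetical single view with projection [I | 0]
    M = np.concatenate((np.eye(3), np.zeros((3, 1))), axis=1)[None, :, :]
    gx, gy = count_gx_gy(M)        # both are (N_views,) arrays
    return gx, gy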
def generateMetaVector_old(
projection_M,
cube_xyz_min,
cameraTs,
cube_D_resol,
_cube_D_,
):
'''
:param projection_M:
shape:(N_views, 3, 4)
:param cube_xyz_min:
shape:(,3)
:param cameraTs:
shape:(N_views, 3)
:param cube_D_resol: resolution of each voxel
float
:param _cube_D_: length of cube
int
:return:
meta_vector: the array of each vector represent camera position
shape: (N_views, _cube_D_, _cube_D_, _cube_D_, 10)
wrapping_vector: the map from each voxel to image
shape: (N_views, _cube_D_, _cube_D_, _cube_D_, 3)
'''
x = np.arange(0, _cube_D_, 1.0)
y = np.arange(0, _cube_D_, 1.0)
z = np.arange(0, _cube_D_, 1.0)
    if x.shape[0] != _cube_D_:
        raise TypeError('shape of Meta vector went wrong')
xx, yy, zz = np.meshgrid(x, y, z)
XYZ = np.array([yy.flatten(), xx.flatten(), zz.flatten()]).reshape(3, _cube_D_, _cube_D_, _cube_D_)
XYZ = np.moveaxis(XYZ, 0, 3)
    if list(XYZ[0, 1, 3, :]) != [0.0, 1.0, 3.0]:
        raise TypeError('index of Meta vector went wrong')
cube_xyz = cube_xyz_min[None, None, None, :] + XYZ * cube_D_resol # shape:(_cube_D_, _cube_D_, _cube_D_, 3)
ones = np.ones((_cube_D_, _cube_D_, _cube_D_, 1))
cube_xyz_matmul = np.concatenate((cube_xyz, ones), axis=3)[None, :, :, :, :,
None] # shape:(1, _cube_D_, _cube_D_, _cube_D_, 4, 1)
projection_M_matmul = projection_M[:, None, None, None, :, :] # shape:(N_view, 1, 1, 1, 3, 4)
project_cube_xyz = np.matmul(projection_M_matmul,
cube_xyz_matmul) # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 3, 1)
(gx, gy) = count_gx_gy(projection_M)
Z = project_cube_xyz[:, :, :, :, 2,
0] # the depth of each cubic points shape:(N_view, _cube_D_, _cube_D_, _cube_D_)
alpha_x = (Z * gx[:, None, None, None] / cube_D_resol)[:, :, :, :,
None] # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 1)
alpha_y = (Z * gy[:, None, None, None] / cube_D_resol)[:, :, :, :,
None] # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 1)
    print('average number of pixels a voxel covers along the x axis', alpha_x.mean())
    print('average number of pixels a voxel covers along the y axis', alpha_y.mean())
tau = project_cube_xyz[:, :, :, :, :, 0] / np.linalg.norm(project_cube_xyz[:, :, :, :, :, 0], axis=4)[:, :, :, :,
None] # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 3)
vector_xyz = cube_xyz[None, :, :, :, :] - cameraTs[:, None, None, None,
:] # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 3)
theta = vector_xyz / np.linalg.norm(vector_xyz, axis=4)[:, :, :, :,
None] # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 3)
YX = project_cube_xyz[:, :, :, :, :2, 0] / project_cube_xyz[:, :, :, :, 2, 0][:, :, :, :, None]
H = YX[:, :, :, :, 1][:, :, :, :, None] # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
W = YX[:, :, :, :, 0][:, :, :, :, None] # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
D = np.zeros(np.shape(H))
X = H - np.floor(H) # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
Y = W - np.floor(W) # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
meta_vector = np.concatenate((alpha_x, alpha_y, tau, theta, X, Y), axis=4)
wrapping_vector = np.concatenate((D, H, W), axis=4)
return (meta_vector, wrapping_vector)
def generateMetaVector(
projection_M,
compress,
cube_xyz_min,
cameraTs,
cube_D_resol,
_cube_D_,
):
'''
:param projection_M:
shape:(N_views, 3, 4)
:param compress
        tuple: (compress_h, compress_w)
:param cube_xyz_min:
shape:(,3)
:param cameraTs:
shape:(N_views, 3)
:param cube_D_resol: resolution of each voxel
float
:param _cube_D_: length of cube
int
:return:
meta_vector: the array of each vector represent camera position
shape: (N_views, _cube_D_, _cube_D_, _cube_D_, 10)
wrapping_vector: the map from each voxel to image
shape: (N_views, _cube_D_, _cube_D_, _cube_D_, 3)
'''
compress_h, compress_w = compress
x = np.arange(0, _cube_D_, 1.0)
y = np.arange(0, _cube_D_, 1.0)
z = np.arange(0, _cube_D_, 1.0)
    if x.shape[0] != _cube_D_:
        raise TypeError('shape of Meta vector went wrong')
xx, yy, zz = np.meshgrid(x, y, z)
XYZ = np.array([yy.flatten(), xx.flatten(), zz.flatten()]).reshape(3, _cube_D_, _cube_D_, _cube_D_)
XYZ = np.moveaxis(XYZ, 0, 3)
    if list(XYZ[0, 1, 3, :]) != [0.0, 1.0, 3.0]:
        raise TypeError('index of Meta vector went wrong')
cube_xyz = cube_xyz_min[None, None, None, :] + XYZ * cube_D_resol # shape:(_cube_D_, _cube_D_, _cube_D_, 3)
# print('cube_xyz_min[None, None, None, :]', cube_xyz_min[None, None, None, :])
# print('@(*#@!#!@(*$&!@(*')
# print('cube_xyz[2,3,1,:]', cube_xyz[2,3,1,:])
# print('cube_xyz[2,3,2,:]', cube_xyz[2, 3, 2, :])
# print('cube_xyz[2,4,1,:]', cube_xyz[2, 4, 1, :])
ones = np.ones((_cube_D_, _cube_D_, _cube_D_, 1))
cube_xyz_matmul = np.concatenate((cube_xyz, ones), axis=3)[None, :, :, :, :,
None] # shape:(1, _cube_D_, _cube_D_, _cube_D_, 4, 1)
projection_M_matmul = projection_M[:, None, None, None, :, :] # shape:(N_view, 1, 1, 1, 3, 4)
project_cube_xyz = np.matmul(projection_M_matmul,
cube_xyz_matmul) # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 3, 1)
# print('@(*#@!#!@(*$&!@(*')
# print(project_cube_xyz.shape)
# print('project_cube_xyz[2,3,1,:]', project_cube_xyz[44, 2, 3, 1, :])
# print('project_cube_xyz[2,3,2,:]', project_cube_xyz[44, 2, 3, 2, :])
# print('project_cube_xyz[2,4,1,:]', project_cube_xyz[44, 2, 4, 1, :])
(gx, gy) = count_gx_gy(projection_M, h_length=compress_h, w_length=compress_w)
Z = project_cube_xyz[:, :, :, :, 2,
0] # the depth of each cubic points shape:(N_view, _cube_D_, _cube_D_, _cube_D_)
alpha_x = (Z * gx[:, None, None, None] / cube_D_resol)[:, :, :, :,
None] # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 1)
alpha_y = (Z * gy[:, None, None, None] / cube_D_resol)[:, :, :, :,
None] # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 1)
    print('average number of pixels a voxel covers along the x axis', alpha_x.mean())
    print('average number of pixels a voxel covers along the y axis', alpha_y.mean())
tau = project_cube_xyz[:, :, :, :, :, 0] / np.linalg.norm(project_cube_xyz[:, :, :, :, :, 0], axis=4)[:, :, :, :,
None] # shape:(N_view, _cube_D_, _cube_D_, _cube_D_, 3)
vector_xyz = cube_xyz[None, :, :, :, :] - cameraTs[:, None, None, None,
:] # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 3)
theta = vector_xyz / np.linalg.norm(vector_xyz, axis=4)[:, :, :, :,
None] # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 3)
YX = project_cube_xyz[:, :, :, :, :2, 0] / project_cube_xyz[:, :, :, :, 2, 0][:, :, :, :, None]
H = YX[:, :, :, :, 1][:, :, :, :, None] / compress_h # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
W = YX[:, :, :, :, 0][:, :, :, :, None] / compress_w # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
D = np.zeros(np.shape(H))
X = H - np.floor(H) # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
Y = W - np.floor(W) # shape: (N_view, _cube_D_, _cube_D_, _cube_D_, 1)
meta_vector = np.concatenate((alpha_x, alpha_y, tau, theta, X, Y), axis=4)
    # Axis naming: x corresponds to the width dimension IW, y to the height
    # dimension IH, and z to the depth dimension ID.
    wrapping_vector = np.concatenate((W, H, D), axis=4)
return (meta_vector, wrapping_vector)
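# Sketch of generateMetaVector on a tiny cube (hypothetical inputs; cameraPs2Ts
# is assumed to exist in this module). The meta vector stacks
# (alpha_x, alpha_y, tau, theta, X, Y) into 10 channels per voxel and view.
def _demo_generate_meta_vector():
    np.random.seed(2)
    Ms = np.random.rand(2, 3, 4)
    meta, wrap = generateMetaVector(Ms, compress=(2.0, 2.0),
                                    cube_xyz_min=np.zeros(3),
                                    cameraTs=cameraPs2Ts(Ms),
                                    cube_D_resol=1.0, _cube_D_=8)
    return meta.shape, wrap.shape  # (2, 8, 8, 8, 10) and (2, 8, 8, 8, 3)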
def generate_sparseMetaVector(
projection_M,
compress,
cube_xyz_min,
stage_num,
cameraTs,
cube_D_resol,
_cube_D_,
info_list=None
):
'''
:param projection_M:
shape:(N_views, 3, 4)
:param compress
        tuple: (compress_h, compress_w)
:param cube_xyz_min:
shape:(,3)
:param stage_num
int
:param cameraTs:
shape:(N_views, 3)
:param cube_D_resol: resolution of each voxel
float
:param _cube_D_: length of cube
int
:param info_list
:return:
        meta_list: list of meta/wrapping vectors
len: stage_num
ele:
(meta_vector, wrapping_vector)
output_list: list of output vector
len: stage_num
ele:
(q_final, xyz_final, rgb_final, n_final)
'''
meta_list = []
input_list = []
output_list = []
resol_new = cube_D_resol
xyz_3D_new = copy.copy(cube_xyz_min)
cube_D_new = _cube_D_
for i in range(stage_num):
cubes_gt_np = info_list[i]
if (i == (stage_num - 1)):
use_dense = True
else:
use_dense = False
(xyz_global_final, xyz_final, rgb_final, n_final, q_final, sort_index) = generate_sparse(
cube_xyz_min=xyz_3D_new,
cube_D_resol=resol_new,
_cube_D_=cube_D_new,
cubes_gt_np=cubes_gt_np,
use_dense=use_dense
)
(meta_vector, wrapping_vector) = generateMeta_from_xyz(projection_M=projection_M,
compress=compress,
cameraTs=cameraTs,
cube_D_resol=resol_new,
_cube_D_=cube_D_new,
pts_xyz=xyz_global_final
)
meta_list.append((meta_vector, wrapping_vector))
output_list.append((q_final, xyz_final, rgb_final, n_final, xyz_global_final, sort_index))
xyz_3D_new += (resol_new / 2)
resol_new *= 2
        cube_D_new //= 2  # integer halving keeps _cube_D_ usable as an array size
compress = (compress[0] * 2, compress[1] * 2)
return meta_list, output_list
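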
def generate_sparse(
cube_xyz_min,
cube_D_resol,
_cube_D_,
cubes_gt_np=None,
use_dense=False
):
'''
:param cube_xyz_min:
shape:(,3)
:param cube_D_resol: resolution of each voxel
float
:param _cube_D_: length of cube
int
:param cubes_gt_np
:return:
xyz_global_final : the location of input voxel
shape: (N,3)
xyz_final: the relative location of output voxel
shape: (N,3)
rgb_final: output voxel
shape: (N,3)
n_final: output voxel
shape: (N,3)
q_final: output voxel
shape: bool (N,1)
sort_index: the sort index of the ground truth used for point up convolution
shape: int (N_points,)
'''
# cubes_gt_np_sort = np.sort(cubes_gt_np, order = 'ijk_id')
x = np.arange(0, _cube_D_, 1.0)
y = np.arange(0, _cube_D_, 1.0)
z = np.arange(0, _cube_D_, 1.0)
    if x.shape[0] != _cube_D_:
        raise TypeError('shape of Meta vector went wrong')
xx, yy, zz = np.meshgrid(x, y, z)
XYZ = np.array([yy.flatten(), xx.flatten(), zz.flatten()]).T
    XYZ_id = (8 * ((_cube_D_ / 2) * (_cube_D_ / 2) * (XYZ[:, 0] // 2)
                   + (_cube_D_ / 2) * (XYZ[:, 1] // 2)
                   + XYZ[:, 2] // 2)
              + (4 * (XYZ[:, 0] % 2) + 2 * (XYZ[:, 1] % 2) + XYZ[:, 2] % 2))
XYZ_id_s = (_cube_D_ * _cube_D_ * XYZ[:, 0] + _cube_D_ * XYZ[:, 1] + XYZ[:, 2])
XYZ_np = np.empty((XYZ.shape[0],), dtype=[('ijk', np.uint32, (3,)), ('ijk_id', np.uint32), ('ijk_id_s', np.uint32)])
XYZ_np['ijk'] = XYZ
XYZ_np['ijk_id'] = XYZ_id
XYZ_np['ijk_id_s'] = XYZ_id_s
XYZ_sort_np = np.sort(XYZ_np, order='ijk_id')
XYZ_sort = XYZ_sort_np['ijk']
# xyz_global = np.zeros((XYZ.shape[0], 3))
xyz = np.zeros((XYZ.shape[0], 3))
rgb = np.zeros((XYZ.shape[0], 3))
n = np.zeros((XYZ.shape[0], 3))
    q = np.zeros((XYZ.shape[0], 1), dtype=bool)  # np.bool was removed in NumPy 1.24
# xyz_global[cubes_gt_np['ijk_id'], :] = cubes_gt_np['xyz_global']
xyz_global = XYZ_sort * cube_D_resol + cube_xyz_min
xyz[cubes_gt_np['ijk_id'], :] = cubes_gt_np['xyz']
rgb[cubes_gt_np['ijk_id'], :] = cubes_gt_np['rgb']
n[cubes_gt_np['ijk_id'], :] = cubes_gt_np['normals']
q[cubes_gt_np['ijk_id'], :] = True
XYZ_big_num = int(XYZ.shape[0] // 8)
xyz_global_new = xyz_global.reshape((XYZ_big_num, 8, 3))
xyz_new = xyz.reshape((XYZ_big_num, 8, 3))
rgb_new = rgb.reshape((XYZ_big_num, 8, 3))
n_new = n.reshape((XYZ_big_num, 8, 3))
q_new = q.reshape((XYZ_big_num, 8, 1))
ijk_id_s_new = XYZ_sort_np['ijk_id_s'].reshape((XYZ_big_num, 8, 1))
if (use_dense):
xyz_global_final = xyz_global_new.reshape((-1, 3))
xyz_final = xyz_new.reshape((-1, 3))
rgb_final = rgb_new.reshape((-1, 3))
n_final = n_new.reshape((-1, 3))
q_final = q_new.reshape((-1, 1))
ijk_id_s_final = ijk_id_s_new.reshape((-1))
else:
cubes_gt_id_big = np.unique(cubes_gt_np['ijk_id'] // 8)
xyz_global_final = xyz_global_new[cubes_gt_id_big, :, :].reshape((-1, 3))
xyz_final = xyz_new[cubes_gt_id_big, :, :].reshape((-1, 3))
rgb_final = rgb_new[cubes_gt_id_big, :, :].reshape((-1, 3))
n_final = n_new[cubes_gt_id_big, :, :].reshape((-1, 3))
q_final = q_new[cubes_gt_id_big, :, :].reshape((-1, 1))
ijk_id_s_final = ijk_id_s_new[cubes_gt_id_big, :, :].reshape((-1))
sort_index = np.argsort(ijk_id_s_final[q_final[:, 0]])
return (xyz_global_final, xyz_final, rgb_final, n_final, q_final, sort_index)
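# Sketch of the structured ground-truth record generate_sparse expects (field
# names inferred from the accesses above; the values here are hypothetical).
# Each record describes one occupied voxel via its flat ijk_id plus attributes.
def _demo_generate_sparse():
    cubes_gt_np = np.empty((2,), dtype=[('ijk_id', np.uint32),
                                        ('xyz', np.float32, (3,)),
                                        ('rgb', np.float32, (3,)),
                                        ('normals', np.float32, (3,))])
    cubes_gt_np['ijk_id'] = [0, 9]
    cubes_gt_np['xyz'] = 0.5
    cubes_gt_np['rgb'] = 0.5
    cubes_gt_np['normals'] = [0.0, 0.0, 1.0]
    return generate_sparse(cube_xyz_min=np.zeros(3), cube_D_resol=1.0,
                           _cube_D_=4, cubes_gt_np=cubes_gt_np)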
def generateMeta_from_xyz(projection_M,
compress,
cameraTs,
cube_D_resol,
_cube_D_,
pts_xyz
):
'''
:param projection_M:
shape:(N_views, 3, 4)
:param compress
        tuple: (compress_h, compress_w)
:param cameraTs:
shape:(N_views, 3)
:param cube_D_resol: resolution of each voxel
float
:param _cube_D_: length of cube
int
:param pts_xyz: points of voxel
shape: (N_points, 3)
:return:
meta_vector: the array of each vector represent camera position
shape: (N_views, N_points, 10)
wrapping_vector: the map from each voxel to image
shape: (N_views, N_points, 3)
'''
compress_h, compress_w = compress
N_points = pts_xyz.shape[0]
ones = np.ones((N_points, 1))
cube_xyz_matmul = np.concatenate((pts_xyz, ones), axis=1)[None, :, :, None] # shape:(1, N_points, 4, 1)
projection_M_matmul = projection_M[:, None, :, :] # shape:(N_view, 1, 3, 4)
project_cube_xyz = np.matmul(projection_M_matmul,
cube_xyz_matmul) # shape:(N_view, N_points, 3, 1)
(gx, gy) = count_gx_gy(projection_M, h_length=compress_h, w_length=compress_w)
Z = project_cube_xyz[:, :, 2,
0] # the depth of each cubic points shape:(N_view, N_points,)
alpha_x = (Z * gx[:, None] / cube_D_resol)[:, :, None] # shape:(N_view, N_points, 1)
alpha_y = (Z * gy[:, None] / cube_D_resol)[:, :, None] # shape:(N_view, N_points, 1)
    print('average number of pixels a voxel covers along the x axis', alpha_x.mean())
    print('average number of pixels a voxel covers along the y axis', alpha_y.mean())
tau = project_cube_xyz[:, :, :, 0] / np.linalg.norm(project_cube_xyz[:, :, :, 0], axis=2)[:, :,
None] # shape:(N_view, N_points, 3)
vector_xyz = pts_xyz[None, :, :] - cameraTs[:, None, :] # shape: (N_view, N_points, 3)
theta = vector_xyz / np.linalg.norm(vector_xyz, axis=2)[:, :, None] # shape: (N_view, N_points, 3)
YX = project_cube_xyz[:, :, :2, 0] / project_cube_xyz[:, :, 2, 0][:, :, None] # shape: (N_view, N_points, 2)
H = YX[:, :, 1][:, :, None] / compress_h # shape: (N_view, N_points, 1)
W = YX[:, :, 0][:, :, None] / compress_w # shape: (N_view, N_points, 1)
D = np.zeros(np.shape(H))
    X = H - np.floor(H)  # shape: (N_view, N_points, 1)
    Y = W - np.floor(W)  # shape: (N_view, N_points, 1)
meta_vector = np.concatenate((alpha_x, alpha_y, tau, theta, X, Y), axis=2)
    # Axis naming: x corresponds to the width dimension IW, y to the height
    # dimension IH, and z to the depth dimension ID.
    wrapping_vector = np.concatenate((W, H, D), axis=2)
return (meta_vector, wrapping_vector)
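# Point-wise counterpart sketch (hypothetical inputs; cameraPs2Ts assumed):
# same per-view metadata as generateMetaVector, but computed only for an
# explicit (N_points, 3) set of voxel centers instead of a dense cube.
def _demo_generate_meta_from_xyz():
    np.random.seed(3)
    Ms = np.random.rand(2, 3, 4)
    pts = np.random.rand(16, 3)
    meta, wrap = generateMeta_from_xyz(Ms, compress=(2.0, 2.0),
                                       cameraTs=cameraPs2Ts(Ms),
                                       cube_D_resol=1.0, _cube_D_=8,
                                       pts_xyz=pts)
    return meta.shape, wrap.shape  # (2, 16, 10) and (2, 16, 3)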
def generate_multiImageMetaVector(
projection_M,
compress,
xyz_3D,
stage_num,
cameraTs,
images_resized,
angles,
Ts,
):
'''
:param projection_M:
shape:(N_views, 3, 4)
:param compress
        tuple: (compress_h, compress_w)
:param stage_num
int
:param cameraTs:
shape:(N_views, 3)
:param images_resized:resized images
list
:return:
        meta_list: list of meta/wrapping vectors
            len: stage_num
            ele:
                vector_image
'''
meta_list = []
for i in range(stage_num):
(vector_image) = generateImageMetaVector(
projection_M,
compress,
cameraTs,
image_size=images_resized[i].shape[1:3]
)
# direction_transfer = generateDirectionMetaVector(vector_image,
# cameraTs,
# xyz_3D,
# angles,
# Ts,
# )
meta_list.append(vector_image)
# meta_list.append(direction_transfer)
compress = (compress[0] * 2, compress[1] * 2)
return meta_list
def generate_matrix(
angles,
ts
):
(alpha, beta, gamma) = angles
    # convert degrees to radians (np.deg2rad is exact; the original divided by 180/3.14159)
    alpha = np.deg2rad(alpha)
    beta = np.deg2rad(beta)
    gamma = np.deg2rad(gamma)
(t_x, t_y, t_z) = ts
R_z = np.array([[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1]])
R_x = np.array([[1, 0, 0],
[0, np.cos(alpha), -np.sin(alpha)],
[0, | np.sin(alpha) | numpy.sin |
# SIPEC
# MARKUS MARKS
# Dataloader
import keras
import numpy as np
from imblearn.under_sampling import RandomUnderSampler
from sklearn import preprocessing
from tqdm import tqdm
def create_dataset(dataset, look_back=5, oneD=False):
dataX = []
print("creating recurrency")
for i in tqdm(range(look_back, len(dataset) - look_back)):
if oneD:
a = dataset[i - look_back : i + look_back]
else:
a = dataset[i - look_back : i + look_back, :]
dataX.append(a)
return np.array(dataX)
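# Shape sketch for create_dataset (hypothetical array): a window of
# 2 * look_back frames is stacked around every valid center frame, so the
# output drops look_back frames at each end of the sequence.
def _demo_create_dataset():
    frames = np.arange(20 * 4).reshape(20, 4)   # 20 frames, 4 features
    windows = create_dataset(frames, look_back=5)
    return windows.shape                        # (10, 10, 4)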
class Dataloader:
# FIXME: for now just pass
def __init__(
self,
x_train,
y_train,
x_test,
y_test,
look_back=5,
with_dlc=False,
dlc_train=None,
dlc_test=None,
):
self.with_dlc = with_dlc
self.dlc_train = dlc_train
self.dlc_test = dlc_test
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.look_back = look_back
self.label_encoder = None
self.x_train_recurrent = None
self.x_test_recurrent = None
self.y_train_recurrent = None
self.y_test_recurrent = None
def encode_labels(self):
self.label_encoder = preprocessing.LabelEncoder()
self.y_train = self.label_encoder.fit_transform(self.y_train)
self.y_test = self.label_encoder.transform(self.y_test)
# FIXME: nicer all
def encode_label(self, label):
return self.label_encoder.transform(label)
def decode_labels(self, labels):
decoded = self.label_encoder.inverse_transform(labels)
return decoded
def categorize_data(self, num_classes, recurrent=False):
self.y_train = self.y_train.astype(int)
# TODO: parametrize num behaviors
self.y_train = keras.utils.to_categorical(
self.y_train, num_classes=num_classes, dtype="int"
)
self.y_test = self.y_test.astype(int)
self.y_test = keras.utils.to_categorical(
self.y_test, num_classes=num_classes, dtype="int"
)
if recurrent:
self.y_train_recurrent = self.y_train_recurrent.astype(int)
# TODO: parametrize num behaviors
self.y_train_recurrent = keras.utils.to_categorical(
self.y_train_recurrent, num_classes=num_classes, dtype="int"
)
self.y_test_recurrent = self.y_test_recurrent.astype(int)
self.y_test_recurrent = keras.utils.to_categorical(
self.y_test_recurrent, num_classes=num_classes, dtype="int"
)
def normalize_data(self):
# TODO: double check this here
# self.mean = self.x_train[1000:-1000].mean(axis=0)
# self.std = np.std(self.x_train[1000:-1000], axis=0)
self.mean = self.x_train.mean(axis=0)
self.std = np.std(self.x_train, axis=0)
self.x_train = self.x_train - self.mean
self.x_train /= self.std
self.x_test = self.x_test - self.mean
self.x_test /= self.std
        if self.dlc_train is not None:
            self.mean_dlc = self.dlc_train.mean(axis=0)
            self.std_dlc = self.dlc_train.std(axis=0)
            self.dlc_train -= self.mean_dlc
            # BUG FIX: the original subtracted self.mean (the image statistics)
            # here; the DLC test data must be centered with the DLC mean.
            self.dlc_test -= self.mean_dlc
            self.dlc_train /= self.std_dlc
            self.dlc_test /= self.std_dlc
    @staticmethod
    def create_dataset(dataset, oneD, look_back=5):
        # NOTE: the methods below call the module-level create_dataset, not this
        # copy; the original lacked `self`, so it is made a staticmethod here to
        # keep it at least callable.
        dataX = []
        for i in range(look_back, len(dataset) - look_back):
            if oneD:
                a = dataset[i - look_back : i + look_back]
            else:
                a = dataset[i - look_back : i + look_back, :]
            dataX.append(a)
        return np.array(dataX)
def create_recurrent_data(self, oneD=False):
self.x_train_recurrent = create_dataset(self.x_train, self.look_back, oneD=oneD)
self.x_test_recurrent = create_dataset(self.x_test, self.look_back, oneD=oneD)
self.y_train_recurrent = self.y_train[self.look_back : -self.look_back]
self.y_test_recurrent = self.y_test[self.look_back : -self.look_back]
# also shorten normal data so all same length
self.x_train = self.x_train[self.look_back : -self.look_back]
self.x_test = self.x_test[self.look_back : -self.look_back]
self.y_train = self.y_train[self.look_back : -self.look_back]
self.y_test = self.y_test[self.look_back : -self.look_back]
def create_recurrent_data_dlc(self):
self.y_train_recurrent = self.y_train[self.look_back : -self.look_back]
self.y_test_recurrent = self.y_test[self.look_back : -self.look_back]
self.dlc_train_recurrent = create_dataset(self.dlc_train, self.look_back)
self.dlc_test_recurrent = create_dataset(self.dlc_test, self.look_back)
# also shorten normal data so all same length
self.dlc_train = self.dlc_train[self.look_back : -self.look_back]
self.dlc_test = self.dlc_test[self.look_back : -self.look_back]
self.y_train = self.y_train[self.look_back : -self.look_back]
self.y_test = self.y_test[self.look_back : -self.look_back]
    # TODO: redo all like this, i.e. getters instead of changing data
def expand_dims(self):
self.x_train = np.expand_dims(self.x_train, axis=-1)
self.x_test = np.expand_dims(self.x_test, axis=-1)
if self.x_test_recurrent is not None:
self.x_train_recurrent = np.expand_dims(self.x_train_recurrent, axis=-1)
self.x_test_recurrent = np.expand_dims(self.x_test_recurrent, axis=-1)
def create_flattened_data(self):
if self.with_dlc:
_shape = self.dlc_train.shape
self.dlc_train_flat = self.dlc_train.reshape(
(_shape[0], _shape[1] * _shape[2])
)
_shape = self.dlc_test.shape
self.dlc_test_flat = self.dlc_test.reshape(
(_shape[0], _shape[1] * _shape[2])
)
_shape = self.dlc_train_recurrent.shape
self.dlc_train_recurrent_flat = self.dlc_train_recurrent.reshape(
(_shape[0], _shape[1] * _shape[2] * _shape[3])
)
_shape = self.dlc_test_recurrent.shape
self.dlc_test_recurrent_flat = self.dlc_test_recurrent.reshape(
(_shape[0], _shape[1] * _shape[2] * _shape[3])
)
def decimate_labels(self, percentage, balanced=False):
"""
        Decimate the training labels to a given percentage.
        percentage in [0, 1]
:return:
"""
if balanced:
# TODO: do w class weights and probability in choice fcn
raise NotImplementedError
if self.x_train_recurrent is not None:
num_labels = int(len(self.x_train_recurrent) * percentage)
indices = np.arange(0, len(self.x_train_recurrent))
random_idxs = np.random.choice(indices, size=num_labels, replace=False)
self.x_train = self.x_train[random_idxs]
self.y_train = self.y_train[random_idxs]
self.x_train_recurrent = self.x_train_recurrent[random_idxs]
self.y_train_recurrent = self.y_train_recurrent[random_idxs]
else:
num_labels = int(len(self.x_train) * percentage)
indices = np.arange(0, len(self.x_train))
random_idxs = np.random.choice(indices, size=num_labels, replace=False)
self.x_train = self.x_train[random_idxs]
self.y_train = self.y_train[random_idxs]
# old
def reduce_labels(self, behavior, num_labels):
idx_behavior = self.y_train == behavior
idx_behavior = | np.asarray(idx_behavior) | numpy.asarray |
from __future__ import division
import unittest
import shutil
import os
import time
import warnings
import copy
import pytest
import netCDF4
import numpy as np
from numpy.testing import assert_allclose
from salem.tests import (requires_travis, requires_geopandas, requires_dask,
requires_matplotlib, requires_cartopy)
from salem import utils, transform_geopandas, GeoTiff, read_shapefile, sio
from salem import read_shapefile_to_grid
from salem.utils import get_demo_file
current_dir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(current_dir, 'tmp')
def is_cartopy_rotated_working():
from salem.gis import proj_to_cartopy
from cartopy.crs import PlateCarree
import pyproj
cp = pyproj.Proj('+ellps=WGS84 +proj=ob_tran +o_proj=latlon '
'+to_meter=0.0174532925199433 +o_lon_p=0.0 +o_lat_p=80.5 '
'+lon_0=357.5 +no_defs')
cp = proj_to_cartopy(cp)
out = PlateCarree().transform_points(cp, np.array([-20]), np.array([-9]))
if not (np.allclose(out[0, 0], -22.243473889042903, atol=1e-5) and
np.allclose(out[0, 1], -0.06328365194179102, atol=1e-5)):
# Cartopy also had issues
return False
return True
@requires_geopandas
def create_dummy_shp(fname):
import shapely.geometry as shpg
import geopandas as gpd
e_line = shpg.LinearRing([(1.5, 1), (2., 1.5), (1.5, 2.), (1, 1.5)])
i_line = shpg.LinearRing([(1.4, 1.4), (1.6, 1.4), (1.6, 1.6), (1.4, 1.6)])
p1 = shpg.Polygon(e_line, [i_line])
p2 = shpg.Polygon([(2.5, 1.3), (3., 1.8), (2.5, 2.3), (2, 1.8)])
p3 = shpg.Point(0.5, 0.5)
p4 = shpg.Point(1, 1)
df = gpd.GeoDataFrame()
df['name'] = ['Polygon', 'Line']
df['geometry'] = gpd.GeoSeries([p1, p2])
of = os.path.join(testdir, fname)
df.to_file(of)
return of
def delete_test_dir():
if os.path.exists(testdir):
shutil.rmtree(testdir)
class TestUtils(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_travis
def test_empty_cache(self):
utils.empty_cache()
def test_hash_cache_dir(self):
h1 = utils._hash_cache_dir()
h2 = utils._hash_cache_dir()
self.assertEqual(h1, h2)
def test_demofiles(self):
self.assertTrue(os.path.exists(utils.get_demo_file('dem_wgs84.nc')))
self.assertTrue(utils.get_demo_file('dummy') is None)
def test_read_colormap(self):
cl = utils.read_colormap('topo') * 256
assert_allclose(cl[4, :], (177, 242, 196))
assert_allclose(cl[-1, :], (235, 233, 235))
cl = utils.read_colormap('dem') * 256
assert_allclose(cl[4, :], (153,100, 43))
assert_allclose(cl[-1, :], (255,255,255))
def test_reduce(self):
arr = [[1, 1, 2, 2], [1, 1, 2, 2]]
assert_allclose(utils.reduce(arr, 1), arr)
assert_allclose(utils.reduce(arr, 2), [[1, 2]])
assert_allclose(utils.reduce(arr, 2, how=np.sum), [[4, 8]])
arr = np.stack([arr, arr, arr])
assert_allclose(arr.shape, (3, 2, 4))
assert_allclose(utils.reduce(arr, 1), arr)
assert_allclose(utils.reduce(arr, 2), [[[1, 2]], [[1, 2]], [[1, 2]]])
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[4, 8]], [[4, 8]], [[4, 8]]])
arr[0, ...] = 0
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[0, 0]], [[4, 8]], [[4, 8]]])
arr[1, ...] = 1
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[0, 0]], [[4, 4]], [[4, 8]]])
class TestIO(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_geopandas
def test_cache_working(self):
f1 = 'f1.shp'
f1 = create_dummy_shp(f1)
cf1 = utils.cached_shapefile_path(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1, cached=True)
self.assertTrue(os.path.exists(cf1))
# nested calls
self.assertTrue(cf1 == utils.cached_shapefile_path(cf1))
# wait a bit
time.sleep(0.1)
f1 = create_dummy_shp(f1)
cf2 = utils.cached_shapefile_path(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1, cached=True)
self.assertFalse(os.path.exists(cf1))
self.assertTrue(os.path.exists(cf2))
df = read_shapefile(f1, cached=True)
np.testing.assert_allclose(df.min_x, [1., 2.])
| np.testing.assert_allclose(df.max_x, [2., 3.]) | numpy.testing.assert_allclose |
from coopihc.base.StateElement import StateElement
from coopihc.base.utils import (
StateNotContainedError,
StateNotContainedWarning,
)
from coopihc.base.elements import integer_set, box_space
import numpy
import pytest
import json
import copy
from tabulate import tabulate
def test_array_init_integer():
x = StateElement(2, integer_set(3))
assert hasattr(x, "space")
assert x.shape == ()
assert x == 2
def test_array_init_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
assert hasattr(x, "space")
assert x.shape == (2, 2)
assert (x == numpy.zeros((2, 2))).all()
def test_array_init():
test_array_init_integer()
test_array_init_numeric()
def test_array_init_error_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(4, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="error")
def test_array_init_error_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
with pytest.raises(StateNotContainedError):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
numpy.array([[0, 0], [-2, 0]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
def test_array_init_error():
test_array_init_error_integer()
test_array_init_error_numeric()
def test_array_init_warning_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(4, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="warning")
def test_array_init_warning_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="warning"
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
| numpy.array([[0, 0], [-2, 0]]) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 11:33:01 2019
@author: nikos
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import h5py
from keras.preprocessing import image# for RGB images
import os
#import imageio
from sklearn.model_selection import train_test_split
import cv2# cv2.imread() for grayscale images
import matplotlib.pyplot as plt
from mpl_toolkits import axes_grid1
def add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):
"""
Add a vertical color bar to an image plot.
https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
"""
divider = axes_grid1.make_axes_locatable(im.axes)
width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)
pad = axes_grid1.axes_size.Fraction(pad_fraction, width)
current_ax = plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
plt.sca(current_ax)
return im.axes.figure.colorbar(im, cax=cax, **kwargs)
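# Usage sketch for add_colorbar (hypothetical data): the appended axis keeps
# the colorbar the same height as the image, unlike plt.colorbar's default.
def _demo_add_colorbar():
    im = plt.imshow(np.random.rand(8, 8), cmap='gray')
    add_colorbar(im)
    plt.title('hypothetical image')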
#%% load the images
img_folder = './data/BBBC010_v2_images'
msk_folder = './data/BBBC010_v1_foreground'
target_height = 400
target_width = 400
Nimages = 100#100 images, each image has 2 channels
# load the filenames of all images
# Note: delete the __MACOSX folder in the img_folder first
img_filenames = np.array(sorted(os.listdir(img_folder)))#sort to alphabetical order
assert len(img_filenames)==Nimages*2#2 channels
wells = [f.split('_')[6] for f in img_filenames]
wells = np.sort(np.unique(wells))#e.g. A01, A02, ..., E04
channels = [1,2]
#%%load the images
#images, 2 channels
X = np.zeros(shape=(Nimages,target_height,target_width,2),dtype='float32')
Y = np.zeros(shape=(Nimages,target_height,target_width,1),dtype='float32')
i=0
for w in wells:
print('loading image ',i+1)
for c in channels:
key = w+'_w'+str(c)
img_file = None
for f in img_filenames:
if key in f:
img_file=f
                break
print(img_file)
        # cv2 is better for grayscale images; cv2.imread(..., -1) keeps the original bit depth
#load the image
img = cv2.imread(img_folder+'/'+img_file,-1)
#resize
img=cv2.resize(img,(target_width,target_height))
#normalize to 0-1
img=img/img.max()
X[i,:,:,c-1]=img
print('loading mask')
img = cv2.imread(msk_folder+'/'+w+'_binary.png',cv2.IMREAD_GRAYSCALE)
#resize
img=cv2.resize(img,(target_width,target_height))
#normalize to 0-1
img=img/img.max()
#create binary image from [0,1] to {0,1}, using 0.5 as threshold
img[img<0.5]=0
img[img>=0.5]=1
Y[i,:,:,0]=img
i=i+1
print()#add a blank line for readability
#double-check that the masks are binary
assert np.array_equal(np.unique(Y), [0,1])
#%% plot as a sanity check
#plot channel 0
img=0
fig, axes = plt.subplots(10,10)
for i in range(10):
for j in range(10):
axes[i,j].imshow(X[img,:,:,0],cmap='gray')
axes[i,j].set_title(wells[img])
img=img+1
#plot channel 1
img=0
fig, axes = plt.subplots(10,10)
for i in range(10):
for j in range(10):
axes[i,j].imshow(X[img,:,:,1],cmap='gray')
axes[i,j].set_title(wells[img])
img=img+1
#%%
i=4
plt.figure(figsize=(3*6,6))
plt.subplot(1,3,1)
im=plt.imshow(X[i,:,:,0],cmap='gray')
add_colorbar(im)
plt.title(wells[i]+' w1')
plt.subplot(1,3,2)
im=plt.imshow(X[i,:,:,1],cmap='gray')
add_colorbar(im)
plt.title(wells[i]+' w2')
plt.subplot(1,3,3)
im=plt.imshow(Y[i,:,:,0],cmap='gray')
add_colorbar(im)
plt.title(wells[i]+' mask')
plt.savefig('example_image.png',dpi=100,bbox_inches='tight')
#%% split into train, validation and test sets
ix = np.arange(len(wells))
ix_tr, ix_val_ts = train_test_split(ix,train_size=60, random_state=0)
ix_val, ix_ts = train_test_split(ix_val_ts,train_size=20, random_state=0)
#sanity check, no overlap between train, validation and test sets
assert len(np.intersect1d(ix_tr,ix_val))==0
assert len(np.intersect1d(ix_tr,ix_ts))==0
assert len(np.intersect1d(ix_val,ix_ts))==0
X_tr = X[ix_tr,:]
Y_tr = Y[ix_tr,:]
X_val = X[ix_val,:]
Y_val = Y[ix_val,:]
X_ts = X[ix_ts,:]
Y_ts = Y[ix_ts,:]
fnames_tr = wells[ix_tr].tolist()
fnames_val = wells[ix_val].tolist()
fnames_ts = wells[ix_ts].tolist()
fname_split = ['train']*len(fnames_tr)+['validation']*len(fnames_val)+['test']*len(fnames_ts)
df=pd.DataFrame({'well':fnames_tr+fnames_val+fnames_ts,
'split':fname_split})
#save to disk
df.to_csv('./data/training_validation_test_splits.csv',index=False)
| np.save('./Data/X_tr.npy',X_tr) | numpy.save |
# -*- coding: utf-8 -*-
# _mapCtoD.py
# Module providing the mapCtoD function
# Copyright 2013 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the mapCtoD() function
"""
from __future__ import division, print_function
import collections
from warnings import warn
import numpy as np
from numpy.linalg import cond
from numpy.random import rand
from scipy.linalg import expm, inv, norm
from scipy.signal import cont2discrete, lti, ss2zpk
from ._constants import eps
from ._evalMixedTF import evalMixedTF
from ._padb import padb
from ._padr import padr
from ._utils import _getABCD
def mapCtoD(sys_c, t=(0, 1), f0=0.):
"""Map a MIMO continuous-time to an equiv. SIMO discrete-time system.
The criterion for equivalence is that the sampled pulse response
of the CT system must be identical to the impulse response of the DT system.
i.e. If ``yc`` is the output of the CT system with an input ``vc`` taken
from a set of DACs fed with a single DT input ``v``, then ``y``, the output
of the equivalent DT system with input ``v`` satisfies:
``y(n) = yc(n-)`` for integer ``n``. The DACs are characterized by
rectangular impulse responses with edge times specified in the t list.
**Input:**
sys_c : object
the LTI description of the CT system, which can be:
* the ABCD matrix,
* a list-like containing the A, B, C, D matrices,
* a list of zpk tuples (internally converted to SS representation).
* a list of LTI objects
t : array_like
The edge times of the DAC pulse used to make CT waveforms
from DT inputs. Each row corresponds to one of the system
inputs; [-1 -1] denotes a CT input. The default is [0 1],
for all inputs except the first.
f0 : float
The (normalized) frequency at which the Gp filters' gains are
to be set to unity. Default 0 (DC).
**Output:**
sys : tuple
the LTI description for the DT equivalent, in A, B, C, D
representation.
Gp : list of lists
the mixed CT/DT prefilters which form the samples
fed to each state for the CT inputs.
**Example:**
Map the standard second order CT modulator shown below to its CT
equivalent and verify that its NTF is :math:`(1-z^{-1})^2`.
.. image:: ../doc/_static/mapCtoD.png
:align: center
:alt: mapCtoD block diagram
It can be done as follows::
from __future__ import print_function
import numpy as np
from scipy.signal import lti
from deltasigma import *
LFc = lti([[0, 0], [1, 0]], [[1, -1], [0, -1.5]], [[0, 1]], [[0, 0]])
tdac = [0, 1]
LF, Gp = mapCtoD(LFc, tdac)
LF = lti(*LF)
ABCD = np.vstack((
np.hstack((LF.A, LF.B)),
np.hstack((LF.C, LF.D))
))
NTF, STF = calculateTF(ABCD)
print("NTF:") # after rounding to a 1e-6 resolution
print("Zeros:", np.real_if_close(np.round(NTF.zeros, 6)))
print("Poles:", np.real_if_close(np.round(NTF.poles, 6)))
Prints::
Zeros: [ 1. 1.]
Poles: [ 0. 0.]
Equivalent to::
(z -1)^2
NTF = ----------
z^2
.. seealso:: <NAME> and <NAME>, "Delta-sigma modulators employing \
continuous-time circuitry," IEEE Transactions on Circuits and Systems I, \
vol. 43, no. 4, pp. 324-332, April 1996.
"""
# You need to have A, B, C, D specification of the system
Ac, Bc, Cc, Dc = _getABCD(sys_c)
ni = Bc.shape[1]
# Sanitize t
if hasattr(t, 'tolist'):
t = t.tolist()
if (type(t) == tuple or type(t) == list) and np.isscalar(t[0]):
t = [t] # we got a simple list, like the default value
if not (type(t) == tuple or type(t) == list) and \
not (type(t[0]) == tuple or type(t[0]) == list):
raise ValueError("The t argument has an unrecognized shape")
# back to business
t = np.array(t)
if t.shape == (1, 2) and ni > 1:
t = np.vstack((np.array([[-1, -1]]), np.dot(np.ones((ni - 1, 1)), t)))
if t.shape != (ni, 2):
raise ValueError('The t argument has the wrong dimensions.')
di = np.ones(ni).astype(bool)
for i in range(ni):
if t[i, 0] == -1 and t[i, 1] == -1:
di[i] = False
# c2d assumes t1=0, t2=1.
# Also c2d often complains about poor scaling and can even produce
# incorrect results.
A, B, C, D, _ = cont2discrete((Ac, Bc, Cc, Dc), 1, method='zoh')
Bc1 = Bc[:, ~di]
# Examine the discrete-time inputs to see how big the
# augmented matrices need to be.
B1 = B[:, ~di]
D1 = D[:, ~di]
n = A.shape[0]
t2 = np.ceil(t[di, 1]).astype(np.int_)
    esn = np.logical_and(t2 == t[di, 1], (D[0, di] != 0).T)  # extra states needed?
npp = n + np.max(t2 - 1 + 1*esn)
# Augment A to npp x npp, B to np x 1, C to 1 x np.
Ap = padb(padr(A, npp), npp)
for i in range(n + 1, npp):
Ap[i, i - 1] = 1
Bp = np.zeros((npp, 1))
if npp > n:
Bp[n, 0] = 1
Cp = padr(C, npp)
Dp = np.zeros((1, 1))
# Add in the contributions from each DAC
for i in np.flatnonzero(di):
t1 = t[i, 0]
t2 = t[i, 1]
B2 = B[:, i]
D2 = D[:, i]
if t1 == 0 and t2 == 1 and D2 == 0: # No fancy stuff necessary
Bp = Bp + padb(B2, npp)
else:
            n1 = int(np.floor(t1))   # cast to int so the slice bounds below are valid
            n2 = int(np.ceil(t2)) - n1 - 1
t1 = t1 - n1
t2 = t2 - n2 - n1
if t2 == 1 and D2 != 0:
n2 = n2 + 1
extraStateNeeded = 1
else:
extraStateNeeded = 0
nt = n + n1 + n2
if n2 > 0:
if t2 == 1:
Ap[:n, nt - n2:nt] = Ap[:n, nt - n2:nt] + np.tile(B2, (1, n2))
else:
Ap[:n, nt - n2:nt - 1] = Ap[:n, nt - n2:nt - 1] + np.tile(B2, (1, n2 - 1))
Ap[:n, (nt-1)] = Ap[:n, (nt-1)] + _B2formula(Ac, 0, t2, B2)
if n2 > 0: # pulse extends to the next period
Btmp = _B2formula(Ac, t1, 1, B2)
else: # pulse ends in this period
Btmp = _B2formula(Ac, t1, t2, B2)
if n1 > 0:
Ap[:n, n + n1 - 1] = Ap[:n, n + n1 - 1] + Btmp
else:
Bp = Bp + padb(Btmp, npp)
if n2 > 0:
Cp = Cp + padr(np.hstack((np.zeros((D2.shape[0], n + n1)), D2*np.ones((1, n2)))), npp)
sys = (Ap, Bp, Cp, Dp)
if np.any(~di):
# Compute the prefilters and add in the CT feed-ins.
# Gp = inv(sI - Ac)*(zI - A)/z*Bc1
n, m = Bc1.shape
Gp = np.empty_like(np.zeros((n, m)), dtype=object)
# !!Make this like stf: an array of zpk objects
ztf = np.empty_like(Bc1, dtype=object)
# Compute the z-domain portions of the filters
ABc1 = np.dot(A, Bc1)
for h in range(m):
for i in range(n):
if Bc1[i, h] == 0:
ztf[i, h] = (np.array([]),
np.array([0.]),
-ABc1[i, h]) # dt=1
else:
ztf[i, h] = (np.atleast_1d(ABc1[i, h]/Bc1[i, h]),
np.array([0.]),
Bc1[i, h]) # dt = 1
# Compute the s-domain portions of each of the filters
stf = np.empty_like(np.zeros((n, n)), dtype=object) # stf[out, in] = zpk
for oi in range(n):
for ii in range(n):
# Doesn't do pole-zero cancellation
stf[oi, ii] = ss2zpk(Ac, np.eye(n), np.eye(n)[oi, :],
np.zeros((1, n)), input=ii)
# scipy as of v 0.13 has no support for LTI MIMO systems
# only 'MISO', therefore you can't write:
# stf = ss2zpk(Ac, eye(n), eye(n), np.zeros(n, n)))
for h in range(m):
for i in range(n):
# k = 1 unneded, see below
for j in range(n):
# check the k values for a non-zero term
if stf[i, j][2] != 0 and ztf[j, h][2] != 0:
if Gp[i, h] is None:
Gp[i, h] = {}
Gp[i, h].update({'Hs':[list(stf[i, j])]})
Gp[i, h].update({'Hz':[list(ztf[j, h])]})
else:
Gp[i, h].update({'Hs':Gp[i, h]['Hs'] + [list(stf[i, j])]})
Gp[i, h].update({'Hz':Gp[i, h]['Hz'] + [list(ztf[j, h])]})
# the MATLAB-like cell code for the above statements would have
# been:
#Gp[i, h](k).Hs = stf[i, j]
#Gp[i, h](k).Hz = ztf[j, h]
#k = k + 1
if f0 != 0: # Need to correct the gain terms calculated by c2d
# B1 = gains of Gp @f0;
for h in range(m):
for i in range(n):
B1ih = np.real_if_close(evalMixedTF(Gp[i, h], f0))
# abs() used because ss() whines if B has complex entries...
# This is clearly incorrect.
# I've fudged the complex stuff by including a sign....
B1[i, h] = np.abs(B1ih) * np.sign(np.real(B1ih))
if np.abs(B1[i, h]) < 1e-09:
B1[i, h] = 1e-09 # This prevents NaN in "line 174" below
# Adjust the gains of the pre-filters
for h in range(m):
for i in range(n):
for j in range(max(len(Gp[i, h]['Hs']), len(Gp[i, h]['Hz']))):
# The next is "line 174"
Gp[i, h]['Hs'][j][2] = Gp[i, h]['Hs'][j][2]/B1[i, h]
sys = (sys[0], # Ap
np.hstack((padb(B1, npp), sys[1])), # new B
sys[2], # Cp
np.hstack((D1, sys[3]))) # new D
return sys, Gp
def _B2formula(Ac, t1, t2, B2):
if t1 == 0 and t2 == 0:
term = B2
return term
n = Ac.shape[0]
tmp = np.eye(n) - expm(-Ac)
if cond(tmp) < 1000000.0:
term = np.dot(((expm(-Ac*t1) - expm(-Ac*t2))*inv(tmp)), B2)
return term
# Numerical trouble. Perturb slightly and check the result
ntry = 0
k = np.sqrt(eps)
Ac0 = Ac
while ntry < 2:
Ac = Ac0 + k*rand(n,n)
tmp = np.eye(n) - expm(-Ac)
if | cond(tmp) | numpy.linalg.cond |
#
# verr_mc.py
# estimating velocity error using MC sampling
#
# History
# 5 November 2018 - <NAME>
# add option to use new DM from Matsunaga in Nov. 2018
# 15 May 2018 - <NAME>
# combine DR2 and Genovali+Melnik data.
# 22 November 2017 - written <NAME>
# use only Genovali+Melnik data
#
#
import pyfits  # NOTE: pyfits is deprecated; astropy.io.fits is the maintained successor
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
from scipy import stats
from galpy.util import bovy_coords
# flag
# GaiaData = 'DR1'
# GaiaData = 'DR2'
GaiaData = 'M18DsxDR2'
if GaiaData == 'DR1':
# read the data with velocity info.
infile='/Users/dkawata/work/obs/Cepheids/Genovali14/G14T34+TGAS+Melnik15.fits'
star_hdus=pyfits.open(infile)
star=star_hdus[1].data
star_hdus.close()
# select stars with HRV info
sindx= | np.where(star['r_HRV']>0) | numpy.where |
# plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# numpy
import numpy as np
# scipy
import scipy as sp
import scipy.interpolate
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
import scipy.fftpack
import scipy.sparse
# jit
from numba import jit
import ctypes
import astropy
import astropy as ap
from astropy.convolution import convolve_fft, AiryDisk2DKernel
import pickle
# multiprocessing
import multiprocessing as mp
from copy import deepcopy
# utilities
import os, time, sys, glob, fnmatch, inspect, traceback, functools
# HealPix
import healpy as hp
# ignore warnings if not in diagnostic mode
import warnings
#seterr(divide='raise', over='raise', invalid='raise')
#seterr(all='raise')
#seterr(under='ignore')
#warnings.simplefilter('ignore')
#np.set_printoptions(linewidth=180)
#sns.set(context='poster', style='ticks', color_codes=True)
import h5py
# utilities
# secondaries
## Symbolic Jacobian calculation
#import sympy
# tdpy
import tdpy
from tdpy.util import summgene
# photometry related
### find the spectra of sources
def retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):
if gdat.numbener == 1:
spec = flux[None, :]
else:
if plot:
meanener = gdat.meanpara.enerplot
else:
meanener = gdat.meanpara.ener
        if spectype == 'gaus':
            # assumes the dispersion is evaluated at the line energy, as in the 'edis' branch below
            edis = edisintp(elin)[None, :]
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'voig':
            args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]
            spec = 1. / sigm[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.real(scipy.special.wofz(args))
        if spectype == 'edis':
            edis = edisintp(elin)[None, :]
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'pvoi':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'lore':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'powr':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])
        if spectype == 'colr':
if plot:
spec = np.zeros((gdat.numbenerplot, flux.size))
else:
spec = np.empty((gdat.numbener, flux.size))
for i in gdat.indxener:
if i < gdat.indxenerpivt:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])
elif i == gdat.indxenerpivt:
spec[i, :] = flux
else:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])
        if spectype == 'curv':
spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])
        if spectype == 'expc':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])
return spec
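# standalone sketch of the 'powr' branch above; the pivot energy and the
# flux/sind values are made-up illustration numbers, not pipeline defaults
def _demo_spec_powr():
    enerpivt = 1. # pivot energy, e.g. [GeV]
    meanener = np.array([0.3, 1., 3.]) # energy bin centers
    flux = np.array([1e-9, 2e-9]) # fluxes at the pivot energy
    sind = np.array([2.1, 2.4]) # spectral indices
    # spec[i, k] = flux[k] * (E_i / E_pivt)**(-sind[k])
    spec = flux[None, :] * (meanener / enerpivt)[:, None]**(-sind[None, :])
    return spec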
### find the surface brightness due to one point source
def retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):
# calculate the distance to all pixels from each point source
dist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)
# interpolate the PSF onto the pixels
if gdat.kernevaltype == 'ulip':
psfntemp = psfnintp(dist)
if gdat.kernevaltype == 'bspx':
pass
# scale by the PS spectrum
sbrtpnts = spec[:, None, None] * psfntemp
return sbrtpnts
def retr_psfnwdth(gdat, psfn, frac):
'''
Return the PSF width
'''
wdth = np.zeros((gdat.numbener, gdat.numbevtt))
for i in gdat.indxener:
for m in gdat.indxevtt:
psfntemp = psfn[i, :, m]
indxanglgood = np.argsort(psfntemp)
intpwdth = max(frac * np.amax(psfntemp), np.amin(psfntemp))
if intpwdth >= np.amin(psfntemp[indxanglgood]) and intpwdth <= np.amax(psfntemp[indxanglgood]):
wdthtemp = sp.interpolate.interp1d(psfntemp[indxanglgood], gdat.binspara.angl[indxanglgood], fill_value='extrapolate')(intpwdth)
else:
wdthtemp = 0.
wdth[i, m] = wdthtemp
return wdth
# lensing-related
def samp_lgalbgalfromtmpl(gdat, probtmpl):
indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)
    lgal = gdat.lgalgrid[indxpixldraw] + np.random.randn() * gdat.sizepixl # jitter by ~one pixel
    bgal = gdat.bgalgrid[indxpixldraw] + np.random.randn() * gdat.sizepixl
return lgal, bgal
## custom random variables, pdfs, cdfs and icdfs
### probability distribution functions
def retr_lprbpois(data, modl):
lprb = data * np.log(modl) - modl - sp.special.gammaln(data + 1)
return lprb
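# sanity check (a sketch) for retr_lprbpois() against scipy's reference
# implementation; the two should agree to numerical precision
def _chck_lprbpois():
    data = np.array([0., 3., 10.])
    modl = np.array([1.5, 2.5, 9.])
    assert np.allclose(retr_lprbpois(data, modl), pss.logpmf(data, modl))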
### probability density functions
def pdfn_self(xdat, minm, maxm):
pdfn = 1. / (maxm - minm)
return pdfn
def pdfn_expo(xdat, maxm, scal):
if (xdat > maxm).any():
pdfn = 0.
else:
pdfn = 1. / scal / (1. - np.exp(-maxm / scal)) * np.exp(-xdat / scal)
return pdfn
def pdfn_dexp(xdat, maxm, scal):
pdfn = 0.5 * pdfn_expo(np.fabs(xdat), maxm, scal)
return pdfn
def pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):
if np.isscalar(xdat):
xdat = np.array([xdat])
faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \
(1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))
facb = faca * brek**(sloplowr - slopuppr) / (1. - sloplowr)
pdfn = np.empty_like(xdat)
indxlowr = np.where(xdat <= brek)[0]
indxuppr = np.where(xdat > brek)[0]
if indxlowr.size > 0:
pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)
if indxuppr.size > 0:
pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)
return pdfn
def pdfn_powr(xdat, minm, maxm, slop):
norm = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))
pdfn = norm * xdat**(-slop)
return pdfn
def pdfn_logt(xdat, minm, maxm):
pdfn = 1. / (np.log(maxm) - np.log(minm)) / xdat
return pdfn
def pdfn_igam(xdat, slop, cutf):
pdfn = sp.stats.invgamma.pdf(xdat, slop - 1., scale=cutf)
return pdfn
def pdfn_lnor(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_gaus(xdat, mean, stdv):
    pdfn = 1. / np.sqrt(2. * np.pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)
return pdfn
def pdfn_lgau(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_atan(para, minmpara, maxmpara):
pdfn = 1. / (para**2 + 1.) / (np.arctan(maxmpara) - np.arctan(minmpara))
return pdfn
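# numerical normalization check (a sketch with loose tolerances) for the
# densities above: each should integrate to ~1 over its support
def _chck_pdfnnorm():
    xdat = np.linspace(1., 100., 100000)
    for pdfn in [pdfn_powr(xdat, 1., 100., 2.), \
                 pdfn_dpow(xdat, 1., 100., 10., 1.5, 2.5), \
                 pdfn_logt(xdat, 1., 100.)]:
        assert abs(np.trapz(pdfn, xdat) - 1.) < 1e-2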
def cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):
gmod = getattr(gdat, strgmodl)
scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]
if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':
listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]
factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'self':
paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'logt':
paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
        elif scalparagenrbase == 'atan':
            listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]
            paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, listmaxmparagenrscalbase)
    elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':
        # local aliases rather than overwriting the model attributes in place
        listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]
        liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]
        if scalparagenrbase == 'eerr':
            cdfnlistminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]
            listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]
            paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, listmeanparagenrscalbase, liststdvparagenrscalbase, \
                                                            cdfnlistminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
        else:
            paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scalparagenrbase == 'pois':
paragenrscalbaseunit = paragenrscalbase
if gdat.booldiagmode:
if paragenrscalbaseunit == 0:
print('Warning. CDF is zero.')
return paragenrscalbaseunit
def icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):
gmod = getattr(gdat, strgmodl)
# tobechanged
# temp -- change zeros to empty
paragenrscalfull = np.zeros_like(paragenrunitfull)
for scaltype in gdat.listscaltype:
listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]
if len(listindxparagenrbasescal) == 0:
continue
paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
if indxparagenrfullelem is not None:
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]
if indxparagenrfulltemp.size == 0:
continue
paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \
gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)
if gdat.booldiagmode:
if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():
raise Exception('')
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
return paragenrscalfull
def icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):
gmod = getattr(gdat, strgmodl)
if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':
minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]
factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]
if scaltype == 'self':
paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'logt':
paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'atan':
listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)
elif scaltype == 'gaus' or scaltype == 'eerr':
listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]
liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]
if scaltype == 'eerr':
cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]
listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
else:
paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scaltype == 'pois':
paragenrscalbase = paragenrunitbase
if gdat.booldiagmode:
if not np.isfinite(paragenrscalbase).all():
print('scaltype')
print(scaltype)
print('paragenrscalbase')
print(paragenrscalbase)
print('type(paragenrscalbase)')
print(type(paragenrscalbase))
print('paragenrscalbase.dtype')
print(paragenrscalbase.dtype)
raise Exception('')
return paragenrscalbase
def icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):
gmod = getattr(gdat, strgmodl)
if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':
minm = getattr(gmod.minmpara, nameparagenrelem)
if scalcomp != 'self':
maxm = getattr(gmod.maxmpara, nameparagenrelem)
if scalcomp == 'powr':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]
if gdat.booldiagmode:
if not np.isfinite(slop):
raise Exception('')
if maxm < minm:
raise Exception('')
icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)
if scalcomp == 'dpowslopbrek':
distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)
if scalcomp == 'expo':
sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)
icdf = tdpy.icdf_expo(cdfn, maxm, sexp)
if scalcomp == 'self':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_self_fact(cdfn, minm, fact)
    if scalcomp == 'logt':
        fact = getattr(gmod.factpara, nameparagenrelem)
        icdf = tdpy.icdf_logt(cdfn, minm, fact)
if scalcomp == 'dexp':
scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
icdf = tdpy.icdf_dexp(cdfn, maxm, scal)
if scalcomp == 'lnormeanstdv':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)
if scalcomp == 'igam':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
icdf = tdpy.icdf_igam(cdfn, slop, cutf)
if scalcomp == 'gaus':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)
if gdat.booldiagmode:
if not np.isfinite(icdf).all():
print('icdf')
print(icdf)
raise Exception('')
return icdf
def cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
    # use a local alias rather than clobbering the model attribute in place
    listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]
    cdfn = np.empty_like(icdf)
    for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):
        if listscalparagenrelem[k] == 'self' or listscalparagenrelem[k] == 'dexp' or listscalparagenrelem[k] == 'expo' \
                                        or listscalparagenrelem[k] == 'powr' or listscalparagenrelem[k] == 'dpowslopbrek':
            minm = getattr(gdat.fitt.minm, nameparagenrelem)
            if listscalparagenrelem[k] == 'powr':
                maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
                slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
                cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)
            elif listscalparagenrelem[k] == 'dpowslopbrek':
                maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
                brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]
                sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]
                slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]
                cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)
            else:
                fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
                cdfn[k] = cdfn_self(icdf[k], minm, fact)
        if listscalparagenrelem[k] == 'lnormeanstdv':
            distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
            diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
            cdfn[k] = cdfn_lnor(icdf[k], distmean, diststdv)
        if listscalparagenrelem[k] == 'igam':
            slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
            cutf = getattr(gdat, 'cutf' + nameparagenrelem)
            cdfn[k] = cdfn_igam(icdf[k], slop, cutf)
        if listscalparagenrelem[k] == 'gaus':
            distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
            diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
            cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)
    return cdfn
return cdfn
### update sampler state
def updt_stat(gdat, gdatmodi):
if gdat.typeverb > 1:
print('updt_stat()')
# update the sample and the unit sample vectors
gdatmodi.this.lpritotl = gdatmodi.next.lpritotl
gdatmodi.this.lliktotl = gdatmodi.next.lliktotl
gdatmodi.this.lpostotl = gdatmodi.next.lpostotl
gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])
gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])
if gdatmodi.this.indxproptype > 0:
gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)
gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')
def initcompfromstat(gdat, gdatmodi, namerefr):
    # assumed: initialization refers to the fitting model
    gmod = gdat.fitt
    for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
minm = getattr(gdat.fitt.minmpara, nameparagenrelem)
maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
try:
comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]
if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
if gmod.listscalparagenrelem[l][g] == 'self':
compunit = cdfn_self(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'logt':
compunit = cdfn_logt(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'expo':
scal = getattr(gdat.fitt, 'gangdistsexp')
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
                    compunit = cdfn_expo(comp, maxm, scal)
if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':
slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]
if gmod.listscalparagenrelem[l][g] == 'powr':
compunit = cdfn_powr(comp, minm, maxm, slop)
if gmod.listscalparagenrelem[l][g] == 'igam':
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
compunit = cdfn_igam(comp, slop, cutf)
if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':
brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]
sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]
slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]
                    compunit = cdfn_dpow(comp, minm, maxm, brek, sloplowr, slopuppr)
if gmod.listscalparagenrelem[l][g] == 'gaus':
distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]
diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]
compunit = cdfn_gaus(comp, distmean, diststdv)
except:
if gdat.typeverb > 0:
print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)
compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))
gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit
### find the set of pixels in proximity to a position on the map
def retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):
gmod = getattr(gdat, strgmodl)
lgal = dictelem[l]['lgal']
bgal = dictelem[l]['bgal']
varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]
if gmod.typeelemspateval[l] == 'locl':
listindxpixlelem = [[] for k in range(lgal.size)]
for k in range(lgal.size):
indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])
indxfluxproxtemp = np.digitize(varbampl[k], gdat.binspara.prox)
if indxfluxproxtemp > 0:
indxfluxproxtemp -= 1
if indxfluxproxtemp == gdat.binspara.prox.size - 1:
print('Warning! Index of the proximity pixel list overflew. Taking the largest list...')
indxfluxproxtemp -= 1
indxpixlelem = gdat.indxpixlprox[indxfluxproxtemp][indxpixlpnts]
if isinstance(indxpixlelem, int):
indxpixlelem = gdat.indxpixl
listindxpixlelem[k] = indxpixlelem
listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))
else:
listindxpixlelemconc = gdat.indxpixl
listindxpixlelem = gdat.indxpixl
return listindxpixlelem, listindxpixlelemconc
### find the distance between two points on the map
def retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):
if gdat.typepixl == 'heal':
xdat, ydat, zaxi = retr_unit(lgal, bgal)
anglcosi = gdat.xdatgrid[indxpixlelem] * xdat + gdat.ydatgrid[indxpixlelem] * ydat + gdat.zaxigrid[indxpixlelem] * zaxi
if retranglcosi:
return anglcosi
else:
angldist = np.arccos(anglcosi)
return angldist
else:
angldist = np.sqrt((lgal - gdat.lgalgrid[indxpixlelem])**2 + (bgal - gdat.bgalgrid[indxpixlelem])**2)
return angldist
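# consistency sketch: for nearby points the exact ('heal') branch above
# reduces to the flat-sky approximation used in the 'cart' branch
def _chck_angldist():
    lgal = np.array([0., 1e-3])
    bgal = np.array([0., 2e-3])
    # unit vectors on the sphere
    xdat = np.cos(bgal) * np.cos(lgal)
    ydat = np.cos(bgal) * np.sin(lgal)
    zaxi = np.sin(bgal)
    angldist = np.arccos(xdat[0] * xdat[1] + ydat[0] * ydat[1] + zaxi[0] * zaxi[1])
    angldistflat = np.sqrt(np.diff(lgal)**2 + np.diff(bgal)**2)[0]
    assert abs(angldist - angldistflat) < 1e-6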
### find the pixel index of a point on the map
def retr_indxpixl(gdat, bgal, lgal):
if gdat.typepixl == 'heal':
indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]
if gdat.booldiagmode:
if (indxpixl == -1).any():
raise Exception('pixlcnvt went negative!')
if gdat.typepixl == 'cart':
indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)
indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. / gdat.maxmgangdata).astype(int)
if np.isscalar(indxlgcr):
if indxlgcr < 0:
indxlgcr = 0
if indxlgcr >= gdat.numbsidecart:
indxlgcr = gdat.numbsidecart - 1
else:
indxlgcr[np.where(indxlgcr < 0)] = 0
indxlgcr[np.where(indxlgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
if np.isscalar(indxbgcr):
if indxbgcr < 0:
indxbgcr = 0
if indxbgcr >= gdat.numbsidecart:
indxbgcr = gdat.numbsidecart - 1
else:
indxbgcr[np.where(indxbgcr < 0)] = 0
indxbgcr[np.where(indxbgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr
# convert to an index of non-zero exposure pixels
#indxpixl = gdat.indxpixlroficnvt[indxpixl]
return indxpixl
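# worked example for the 'cart' branch above with made-up geometry: a 4x4
# grid spanning [-maxmgangdata, maxmgangdata] along both axes
def _demo_indxpixlcart():
    numbsidecart = 4
    maxmgangdata = 1.
    minmlgaldata = minmbgaldata = -maxmgangdata
    lgal, bgal = 0.3, -0.7
    indxlgcr = int(np.floor(numbsidecart * (lgal - minmlgaldata) / 2. / maxmgangdata)) # -> 2
    indxbgcr = int(np.floor(numbsidecart * (bgal - minmbgaldata) / 2. / maxmgangdata)) # -> 0
    # row-major flattening, as in retr_indxpixl()
    return indxlgcr * numbsidecart + indxbgcr # -> 8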
## obtain count maps
def retr_cntp(gdat, sbrt):
cntp = sbrt * gdat.expo * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
return cntp
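# unit bookkeeping sketch for retr_cntp() (units are illustrative):
#   cntp [counts] = sbrt [1/cm^2/s/sr/GeV] * expo [cm^2 s] * apix [sr]
# with a further factor of deltener [GeV] for differential (enerdiff) maps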
## plotting
### construct path for plots
def retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):
if strgmodl == 'true' or strgstat == '':
path = gdat.pathinit + nameinte + strgplot + '.pdf'
elif strgstat == 'pdfn' or strgstat == 'mlik':
path = gdat.pathplotrtag + strgpdfn + '/finl/' + nameinte + strgstat + strgplot + '.pdf'
elif strgstat == 'this':
path = gdat.pathplotrtag + strgpdfn + '/fram/' + nameinte + strgstat + strgplot + '_swep%09d.pdf' % gdatmodi.cntrswep
return path
### determine the marker size
def retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):
gmod = getattr(gdat, strgmodl)
minm = getattr(gdat.minmpara, nameparagenrelemampl)
maxm = getattr(gdat.maxmpara, nameparagenrelemampl)
mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize
return mrkrsize
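# quick check of the square-root scaling above: the amplitude endpoints map
# exactly onto the marker-size endpoints (all values here are arbitrary)
def _chck_mrkrsize():
    minm, maxm = 1e-9, 1e-6
    minmmrkrsize, maxmmrkrsize = 1., 10.
    compampl = np.array([minm, maxm])
    mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * \
                                                    (maxmmrkrsize - minmmrkrsize) + minmmrkrsize
    assert np.allclose(mrkrsize, [minmmrkrsize, maxmmrkrsize])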
## experiment specific
def retr_psfphubb(gdat, gmod):
# temp
gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact
def retr_psfpchan(gdat, gmod):
# temp
#gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact
if gdat.numbenerfull == 5:
gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])
if gdat.numbenerfull == 2:
gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])
#gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()]
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact,
# 0.30 / gdat.anglfacti\
# 0.40 / gdat.anglfacti\
# 0.60 / gdat.anglfacti\
# 0.70 / gdat.anglfacti
#gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \
# 0.30 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.40 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.60 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.70 / gdat.anglfact, 1.0e-1, 2.0])
def retr_psfpsdyn(gmod):
gmod.psfpexpr = np.array([0.05])
def retr_psfpferm(gdat, gmod):
if gdat.anlytype.startswith('rec8'):
path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'
else:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
irfn = astropy.io.fits.getdata(path, 1)
minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]
maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]
enerirfn = np.sqrt(minmener * maxmener)
numbpsfpscal = 3
numbpsfpform = 5
fermscal = np.zeros((gdat.numbevtt, numbpsfpscal))
fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))
strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']
for m in gdat.indxevtt:
if gdat.anlytype.startswith('rec8'):
irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])
fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']
else:
if m == 1:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'
elif m == 0:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
else:
continue
irfn = astropy.io.fits.getdata(path, 1)
fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']
for k in range(numbpsfpform):
fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)
# convert N_tail to f_core
for m in gdat.indxevtt:
for i in gdat.indxener:
fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)
# calculate the scale factor
gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)
# store the fermi PSF parameters
gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)
for m in gdat.indxevtt:
for k in range(numbpsfpform):
indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k
gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]
def retr_refrchaninit(gdat):
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.dictrefr = []
for q in gdat.indxrefr:
gdat.dictrefr.append(dict())
gdat.refr.namepara.elemsign = ['flux', 'magt']
gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']
gdat.listnamerefr += ['xu11', 'wo08']
    setattr(gdat, 'plotminmotyp', 0.)
    setattr(gdat, 'plotmaxmotyp', 1.)
    # assumed: reference-catalog labels hang off the fitting model
    gmod = gdat.fitt
    setattr(gmod.lablrootpara, 'otyp', 'O')
    setattr(gdat, 'scalotypplot', 'self')
    setattr(gmod.lablrootpara, 'otypxu11', 'O')
for name in gdat.listnamerefr:
setattr(gdat, 'plotminmotyp' + name, 0.)
setattr(gdat, 'plotmaxmotyp' + name, 1.)
if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':
with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:
for k, line in enumerate(thisfile):
if k < 18:
continue
                rasccand = line[2]
                declcand = line[2] # note: reads the same column as rasccand in the original
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']
def retr_refrchanfinl(gdat):
booltemp = False
if gdat.anlytype.startswith('extr'):
if gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] = 1490
gdat.numbpixlbgalshft[0] = 1430
else:
booltemp = True
elif gdat.anlytype.startswith('home'):
gdat.numbpixllgalshft[0] = 0
gdat.numbpixlbgalshft[0] = 0
if gdat.numbsidecart == 600:
pass
elif gdat.numbsidecart == 100:
indxtile = int(gdat.anlytype[-4:])
numbsidecntr = int(gdat.anlytype[8:12])
            numbtileside = numbsidecntr // gdat.numbsidecart # integer division for the tile grid
indxtilexaxi = indxtile // numbtileside
indxtileyaxi = indxtile % numbtileside
gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart
gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart
elif gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] += 150
gdat.numbpixlbgalshft[0] += 150
else:
booltemp = True
else:
booltemp = True
if booltemp:
raise Exception('Reference elements cannot be aligned with the spatial axes!')
## WCS object for rotating reference elements into the ROI
if gdat.numbener == 2:
gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'
else:
gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]
# Xue et al. (2011)
#with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:
pathfile = gdat.pathinpt + 'Xue2011.fits'
    hdun = astropy.io.fits.open(pathfile)
hdun.info()
    lgalchan = hdun[1].data['_Glon'] / 180. * np.pi
    bgalchan = hdun[1].data['_Glat'] / 180. * np.pi
fluxchansoft = hdun[1].data['SFlux']
fluxchanhard = hdun[1].data['HFlux']
objttypechan = hdun[1].data['Otype']
gdat.refrlumi[0][0] = hdun[1].data['Lx']
# position
gdat.refr.dictelem[0]['lgal'] = lgalchan
gdat.refr.dictelem[0]['bgal'] = bgalchan
# spectra
    gdat.refrspec = [np.zeros((3, gdat.numbener, lgalchan.size))]
if gdat.numbener == 2:
gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9
gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.
else:
gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]
# fluxes
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
# spectral indices
if gdat.numbener > 1:
gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))
## object type
objttypechantemp = np.zeros(lgalchan.size) - 1.
indx = np.where(objttypechan == 'AGN')[0]
objttypechantemp[indx] = 0.165
indx = np.where(objttypechan == 'Galaxy')[0]
objttypechantemp[indx] = 0.495
indx = np.where(objttypechan == 'Star')[0]
objttypechantemp[indx] = 0.835
gdat.refrotyp[0][0] = objttypechantemp
    # Wolf et al. (2008)
path = gdat.pathdata + 'inpt/Wolf2008.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['_Glon'])
    gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['_Glat'])
gdat.refrmagt[1][0] = data['Rmag']
gdat.refrreds[1][0] = data['MCz']
#listname = []
#for k in range(data['MCclass'].size):
# if not data['MCclass'][k] in listname:
# listname.append(data['MCclass'][k])
listname = ['Galaxy', 'Galaxy (Uncl!)', 'QSO (Gal?)', 'Galaxy (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']
gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1.
for k, name in enumerate(listname):
indx = np.where(data['MCclass'] == name)[0]
gdat.refrotyp[1][0][indx] = k / 10.
# error budget
for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_refrferminit(gdat):
    # assumed: reference-catalog labels hang off the fitting model
    gmod = gdat.fitt
    gdat.listnamerefr += ['ac15', 'ma05']
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']
gdat.refr.namepara.elemsign = ['flux', 'flux0400']
setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)
setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')
for name in gdat.listnamerefr:
setattr(gdat.minmpara, 'curv' + name, -1.)
setattr(gdat.maxmpara, 'curv' + name, 1.)
setattr(gdat.minmpara, 'expc' + name, 0.1)
setattr(gdat.maxmpara, 'expc' + name, 10.)
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']
def retr_refrfermfinl(gdat):
    # assumed: plotting scales hang off the fitting model
    gmod = gdat.fitt
    gdat.minmstyp = -0.5
gdat.maxmstyp = 3.5
gdat.lablstyp = 'S'
gmod.scalstypplot = 'self'
gdat.minmtvar = 0.
gdat.maxmtvar = 400.
gdat.labltvar = 'T'
gmod.scaltvarplot = 'logt'
# Acero+2015
path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'
fgl3 = astropy.io.fits.getdata(path)
gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])
gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))
gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])
gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size
gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]
gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]
fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \
fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None]
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]
gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)
gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)
fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5
fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]
fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))
fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))
gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)
for k in range(gdat.refr.dictelem[0]['lgal'].size):
gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])
gdat.refrtvar[0] = fgl3['Variability_Index']
gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw ')] = 0
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola ')] = 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff ')] = 2
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3
indx = np.where(gdat.refrstyp[0] == -1)[0]
if indx.size > 0:
raise Exception('')
gdat.refrsind[0] = fgl3['Spectral_Index']
gdat.refrcurv[0] = fgl3['beta']
gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3
gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.
gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.
gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1))
gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1))
gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1))
# Manchester+2005
path = gdat.pathdata + 'inpt/Manchester2005.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['glat'])
gdat.refrper0[1] = data['P0']
gdat.refrper1[1] = data['P1']
gdat.refrflux0400[1] = data['S400']
#gdat.refrdism[1] = data['DM']
#gdat.refrdlos[1] = data['Dist']
# error budget
for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_singgaus(scaldevi, sigc):
psfn = 1. / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
return psfn
def retr_singking(scaldevi, sigc, gamc):
psfn = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
return psfn
def retr_doubgaus(scaldevi, frac, sigc, sigt):
    psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * np.exp(-0.5 * scaldevi**2 / sigt**2)
    return psfn
def retr_gausking(scaldevi, frac, sigc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
def retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc) + \
(1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
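# numerical check (a sketch) that the radially symmetric PSFs above are
# normalized: integrating psfn(r) 2 pi r dr over the plane should give ~1
def _chck_psfnnorm():
    radi = np.linspace(0., 50., 200000)
    jacb = 2. * np.pi * radi
    assert abs(np.trapz(retr_singgaus(radi, 0.5) * jacb, radi) - 1.) < 1e-3
    assert abs(np.trapz(retr_singking(radi, 0.5, 3.) * jacb, radi) - 1.) < 1e-3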
def retr_lgalbgal(gang, aang):
lgal = gang * np.cos(aang)
bgal = gang * np.sin(aang)
return lgal, bgal
def retr_gang(lgal, bgal):
gang = np.arccos(np.cos(lgal) * np.cos(bgal))
return gang
def retr_aang(lgal, bgal):
aang = np.arctan2(bgal, lgal)
return aang
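# roundtrip sketch: (lgal, bgal) -> (gang, aang) -> (lgal, bgal) recovers the
# input to high accuracy in the small-angle regime these helpers assume
def _chck_ganground():
    lgal, bgal = 0.01, -0.02 # [rad]
    gang = retr_gang(lgal, bgal)
    aang = retr_aang(lgal, bgal)
    lgalrtrp, bgalrtrp = retr_lgalbgal(gang, aang)
    assert abs(lgalrtrp - lgal) < 1e-6 and abs(bgalrtrp - bgal) < 1e-6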
def show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
print('strgmodl: ' + strgmodl)
print('strgstat: ' + strgstat)
print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))
for k in gmod.indxparagenrfull:
if indxsampshow is not None and not k in indxsampshow:
continue
if gmod.numbparaelem > 0:
booltemp = False
for l in gmod.indxpopl:
if k == gmod.indxparagenrelemsing[l][0]:
booltemp = True
if booltemp:
print('')
print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))
def prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):
if gdat.typeverb > 1:
print('prop_stat()')
#indxproptype
# within, birth, death, split, merge
# 0, 1, 2, 3, 4
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodthis = getattr(gdatobjt, 'this')
gmodnext = getattr(gdatobjt, 'next')
if gmod.numbparaelem > 0:
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):
raise Exception('Repeating entry in the element index list!')
thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)
setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)
else:
thisindxparagenrfullelem = None
gdatmodi.this.boolpropfilt = True
# index of the population in which a transdimensional proposal will be attempted
if gmod.numbparaelem > 0:
if thisindxpopl is None:
gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)
else:
gdatmodi.indxpopltran = thisindxpopl
numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# forced death or birth does not check for the prior on the dimensionality on purpose!
if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \
not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if brth or deth or np.random.rand() < gdat.probbrde or \
numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:
## births and deaths
if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:
gdatmodi.this.indxproptype = 2
elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:
gdatmodi.this.indxproptype = 1
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 1
else:
gdatmodi.this.indxproptype = 2
else:
## splits and merges
if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:
gdatmodi.this.indxproptype = 3
elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:
gdatmodi.this.indxproptype = 4
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 3
else:
gdatmodi.this.indxproptype = 4
else:
if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():
raise Exception('')
thisindxparagenrfullelemconc = []
for l in gmod.indxpopl:
thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])
# get the indices of the current parameter vector
if gmod.numbparaelem > 0:
thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)
else:
thisindxsampfull = gmod.indxparagenrbasestdv
thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]
if not np.isfinite(thisstdp).all():
raise Exception('')
gdatmodi.this.indxproptype = 0
if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:
raise Exception('')
if gdat.typeverb > 1:
print('gdatmodi.this.indxproptype')
print(gdatmodi.this.indxproptype)
if gdatmodi.this.indxproptype == 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = gmodthis.indxelemfull
if gdatmodi.this.indxproptype > 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)
if gdatmodi.this.indxproptype == 0:
## proposal scale
if False:
# amplitude-dependent proposal scale
for l in gmod.indxpopl:
thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]
compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])
thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
if nameparagenrelem == gmod.nameparagenrelemampl[l]:
# temp -- this only works if compampl is powr distributed
gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.
gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.
gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. / gdatmodi.this.stdv**2))
else:
gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5
## propose a step
diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp
gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]
if indxsamplowr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]
if indxsampuppr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)
if gdat.booldiagmode:
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:
raise Exception('')
if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:
raise Exception('')
if not np.isfinite(gmodnext.paragenrscalfull).all():
raise Exception('')
if gdatmodi.this.indxproptype > 0:
gdatmodi.indxsamptran = []
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
elif gdatmodi.this.indxproptype != 2:
gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
# find an empty slot in the element list
for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:
break
gdatmodi.indxelemmodi = [u]
gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]
# sample indices to add the new element
gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)
gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])
if gdatmodi.this.indxproptype == 1:
# sample auxiliary variables
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara
# death
if gdatmodi.this.indxproptype == 2:
# occupied element index to be killed
if thisindxelem is None:
dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
else:
dethindxindxelem = thisindxelem
# element index to be killed
gdatmodi.indxelemmodi = []
gdatmodi.indxelemfullmodi = []
if gdat.typeverb > 1:
print('dethindxindxelem')
print(dethindxindxelem)
gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])
gdatmodi.indxelemfullmodi.append(dethindxindxelem)
# parameter indices to be killed
indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)
gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]
if gdatmodi.this.indxproptype > 2:
gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
# split
if gdatmodi.this.indxproptype == 3:
# find the probability of splitting elements
gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]
gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)
gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)
# sample indices for the first element
        gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)
# sample indices for the second element
gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd
# take the parent element parameters
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])
# draw the auxiliary parameters
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = np.random.rand()
else:
gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
                                                                gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
# determine the new parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]
else:
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]
gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]
gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]
else:
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]
gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.compfrst[g] = gdatmodi.comppare[g]
gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]
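        # bookkeeping note (sketch): the two amplitudes sum to the parent by
        # construction, auxi * pare + (1. - auxi) * pare = pare, and the two
        # positions straddle the parent, which is what makes the split exactly
        # reversible by the merge proposal handled below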
        # place the new parameters into the sample vector: unit-scale values via
        # the forward transform, physical values directly
        gmodnext.paragenrunitfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)
        gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst
        gmodnext.paragenrunitfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)
        gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco
# check for prior boundaries
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:
gdatmodi.this.boolpropfilt = False
else:
if np.fabs(gdatmodi.compfrst[0]) > maxmlgal or np.fabs(gdatmodi.compseco[0]) > maxmlgal or \
np.fabs(gdatmodi.compfrst[1]) > maxmbgal or np.fabs(gdatmodi.compseco[1]) > maxmbgal:
gdatmodi.this.boolpropfilt = False
if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
if not gdatmodi.this.boolpropfilt:
print('Rejecting the proposal due to a split that falls out of the prior...')
if gdatmodi.this.indxproptype == 4:
# determine the index of the primary element to be merged (in the full element list)
gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))
## first element index to be merged
gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]
# find the probability of merging this element with the others
probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)
indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))
if gdat.booldiagmode:
if indxelemfulltemp.size < 2:
raise Exception('')
gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)
gdatmodi.indxelemfullmodi = np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))
# parameters of the first element to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## first
gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]
# determine indices of the modified elements in the sample vector
## first element
        gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.mergindxelemfrst)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)
## second element index to be merged
gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]
## second element
        gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.mergindxelemseco)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)
# parameters of the elements to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## second
gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]
# indices of the element to be merged
gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]
# auxiliary parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
else:
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]
gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \
(gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]
# merged element
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gdat, 'maxm' + gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
print('Proposal rejected due to falling outside the prior.')
return
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
else:
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
(gdatmodi.compseco[g] - gdatmodi.compfrst[g])
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]
else:
gdatmodi.comppare[g] = gdatmodi.compfrst[g]
        # place the merged parameters into the sample vector: unit-scale values via
        # the forward transform, physical values directly
        gmodnext.paragenrunitfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)
        gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare
# calculate the proposed list of pairs
if gdat.typeverb > 1:
print('mergindxfrst: ', gdatmodi.mergindxelemfrst)
print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)
print('mergindxseco: ', gdatmodi.mergindxelemseco)
print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)
print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)
print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)
if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
print('elinfrst: ', gdatmodi.compfrst[0])
print('amplfrst: ', gdatmodi.compfrst[1])
print('elinseco: ', gdatmodi.compseco[0])
print('amplseco: ', gdatmodi.compseco[1])
print('elinpare: ', gdatmodi.comppare[0])
print('fluxpare: ', gdatmodi.comppare[1])
print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])
else:
print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])
print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])
print('amplfrst: ', gdatmodi.compfrst[2])
print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])
print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])
print('amplseco: ', gdatmodi.compseco[2])
print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])
print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])
print('fluxpare: ', gdatmodi.comppare[2])
print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])
print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])
if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:
# change the number of elements
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1
if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1
gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# remove the element from the occupied element list
if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):
for a, indxelem in enumerate(gdatmodi.indxelemmodi):
if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:
gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)
if gdatmodi.this.indxproptype == 0:
gdatmodi.indxsampmodi = thisindxsampfull
else:
if gdatmodi.this.indxproptype == 1:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gdatmodi.this.indxproptype == 2:
gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
if gdatmodi.this.indxproptype == 3:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \
gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))
if gdatmodi.this.indxproptype == 4:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gmod.numbparaelem > 0:
if gdatmodi.this.indxproptype == 0:
indxparagenrfullelem = thisindxparagenrfullelem
else:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)
if gdat.typeverb > 1:
print('gdatmodi.indxsampmodi')
print(gdatmodi.indxsampmodi)
if gmod.numbparaelem > 0:
print('gmodthis.indxelemfull')
print(gmodthis.indxelemfull)
print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')
print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))
if gdatmodi.this.indxproptype > 0:
print('gdatmodi.indxelemmodi')
print(gdatmodi.indxelemmodi)
print('gdatmodi.indxelemfullmodi')
print(gdatmodi.indxelemfullmodi)
print('gdatmodi.this.boolpropfilt')
print(gdatmodi.this.boolpropfilt)
print('indxparagenrfullelem')
print(indxparagenrfullelem)
if gdatmodi.this.indxproptype == 1:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \
gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
if gdat.booldiagmode:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):
print('l')
print(l)
print('gmod.indxpara.numbelem')
print(gmod.indxpara.numbelem)
print('gmodthis.paragenrunitfull')
print(gmodthis.paragenrunitfull)
                    raise Exception('Unit-scale number of elements in the current state is not an integer.')
                if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):
                    raise Exception('Number of elements in the current state is not an integer.')
                if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):
                    raise Exception('Unit-scale number of elements in the proposed state is not an integer.')
                if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):
                    raise Exception('Number of elements in the proposed state is not an integer.')
if strgmodl == 'fitt':
diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)
#size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) & (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size
size = np.where(diffparagenrscalfull != 0.)[0].size
if gdatmodi.this.indxproptype == 1:
if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:
                    raise Exception('Number of changed parameters is inconsistent with a birth proposal.')
def calc_probprop(gdat, gdatmodi):
gmod = gdat.fitt
# calculate the factor to multiply the acceptance rate, i.e.,
## probability of the auxiliary parameters,
if gdatmodi.this.indxproptype == 0:
gdatmodi.this.lpau = 0.
elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl
lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.lpau += lpautemp
if gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau -= lpautemp
elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau = 0.
dictelemtemp = [dict()]
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
            if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2
elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]
gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gdatmodi.this.paragenrscalfull, dictelemtemp, [1])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau *= -1.
if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:
## the ratio of the probability of the reverse and forward proposals, and
if gdatmodi.this.indxproptype == 3:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)
else:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)
## Jacobian
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])
else:
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.ljcb *= -1.
else:
gdatmodi.this.ljcb = 0.
gdatmodi.this.ltrp = 0.
for l in gmod.indxpopl:
if gdatmodi.this.indxproptype > 0:
setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)
def retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):
gmod = getattr(gdat, strgmodl)
## element parameters
if gmod.numbparaelem > 0:
indxparagenrfullelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]
cntr = tdpy.cntr()
indxparagenrfullelem[l] = dict()
for nameparagenrelem in gmod.namepara.genrelem[l]:
indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()
indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(indxparagenrfullelem[l]['full']) > 0:
if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:
print('strgmodl')
print(strgmodl)
print('gmod.numbparagenrbase')
print(gmod.numbparagenrbase)
print('gmod.numbparagenrelem[l]')
print(gmod.numbparagenrelem[l])
print('indxparagenrfullelem[l][full]')
summgene(indxparagenrfullelem[l]['full'])
print('gdat.fitt.minmpara.numbelempop0')
print(gdat.fitt.minmpara.numbelempop0)
print('gdat.fitt.maxmpara.numbelempop0')
print(gdat.fitt.maxmpara.numbelempop0)
raise Exception('Element parameter indices are bad.')
else:
indxparagenrfullelem = None
return indxparagenrfullelem
def retr_weigmergodim(gdat, elin, elinothr):
weigmerg = np.exp(-0.5 * ((elin - elinothr) / gdat.radispmr)**2)
return weigmerg
def retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):
weigmerg = np.exp(-0.5 * (((lgal - lgalothr) / gdat.radispmr)**2 + ((bgal - bgalothr) / gdat.radispmr)**2))
return weigmerg
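# The two kernels above weight merge candidates by a Gaussian in parameter
# separation, w = exp(-d^2 / (2 * radispmr^2)), where d is the 1D (line energy)
# or 2D (angular) distance and gdat.radispmr sets the proposal scale. Below is
# a minimal, self-contained sketch of the 2D case; the numerical values are
# illustrative assumptions, not defaults used by this module.
def _demo_weigmergtdim():
    import numpy as np
    radispmr = 0.1                # assumed proposal scale
    dlgal, dbgal = 0.05, 0.02     # assumed angular offsets
    weig = np.exp(-0.5 * ((dlgal / radispmr)**2 + (dbgal / radispmr)**2))
    return weig                   # ~0.865: nearby elements get weight close to unity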
def retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):
# calculate the weights
if strgtype == 'seco':
numb = 1
if strgtype == 'pair':
numb = 2
listweigmerg = []
for a in range(numb):
        if typeelem[indxpopltran].startswith('lghtline'):
elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]
elin = elintotl[gdatmodi.indxelemfullmodi[0]]
elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergodim(gdat, elin, elinothr)
else:
lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]
bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]
lgal = lgaltotl[gdatmodi.indxelemfullmodi[0]]
bgal = bgaltotl[gdatmodi.indxelemfullmodi[0]]
lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)
listweigmerg.append(weigmerg)
# determine the probability of merging the second element given the first element
if strgtype == 'seco':
probmerg = listweigmerg[0] / np.sum(listweigmerg[0])
# determine the probability of merging the pair
if strgtype == 'pair':
        if typeelem[indxpopltran].startswith('lghtline'):
            weigpair = retr_weigmergodim(gdat, elin, elintotl[gdatmodi.indxelemfullmodi[1]])
else:
weigpair = retr_weigmergtdim(gdat, lgal, lgaltotl[gdatmodi.indxelemfullmodi[1]], bgal, bgaltotl[gdatmodi.indxelemfullmodi[1]])
probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])
if gdat.booldiagmode:
if not np.isfinite(probmerg).all():
raise Exception('Merge probability is infinite.')
return probmerg
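# Note on the two modes of retr_probmerg: 'seco' normalizes the kernel weights
# of a single element against all others, giving the probability of drawing
# each merge partner given the first element; 'pair' symmetrizes over which of
# the two elements was drawn first, summing the pair weight normalized by
# either element's total weight. This symmetrized probability enters the
# reverse-to-forward proposal ratio (ltrp) in calc_probprop above.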
def retr_indxparaelem(gmod, l, u):
indxsamppnts = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]
return indxsamppnts
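# Element parameters live in a flattened state vector: base (non-element)
# parameters first, then each population in order, with every element
# occupying a contiguous block of numbparagenrelemsing[l] entries. A hedged
# sketch of the index arithmetic with made-up sizes (none of these values are
# defaults of this module):
def _demo_retr_indxparaelem():
    import numpy as np
    indxparagenrfulleleminit = 10     # assumed number of base parameters
    numbparagenrelemcuml = [0, 6]     # assumed cumulative counts per population
    numbparagenrelemsing = [3, 2]     # assumed parameters per element
    l, u = 1, 2                       # population 1, element 2
    indx = indxparagenrfulleleminit + numbparagenrelemcuml[l] + \
                        u * numbparagenrelemsing[l] + np.arange(numbparagenrelemsing[l])
    return indx                       # array([20, 21]) under these assumptions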
def gang_detr():
    # symbolic scratch pad; declare all symbols so the expression is well defined
    gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')
    a1, a2, a3, b1, b2, b3 = sympy.symbols('a1 a2 a3 b1 b2 b3')
    AB = sympy.matrices.Matrix([[a1*b1, a1*b2, a1*b3], [a2*b1, a2*b2, a2*b3], [a3*b1, a3*b2, a3*b3]])
    return AB
def retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):
gmod = getattr(gdat, strgmodl)
indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])
if gdat.typeexpr == 'ferm':
scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
else:
scalangl = thisangl[None, :, None]
if typemodlpsfn == 'singgaus':
sigc = psfp[indxpsfpinit]
sigc = sigc[:, None, :]
psfn = retr_singgaus(scalangl, sigc)
elif typemodlpsfn == 'singking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
psfn = retr_singking(scalangl, sigc, gamc)
elif typemodlpsfn == 'doubking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigt = psfp[indxpsfpinit+2]
gamt = psfp[indxpsfpinit+3]
frac = psfp[indxpsfpinit+4]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
sigt = sigt[:, None, :]
gamt = gamt[:, None, :]
frac = frac[:, None, :]
psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)
if gdat.typeexpr == 'ferm':
psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)
# normalize the PSF
if gdat.typeexpr == 'ferm':
fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]
psfn /= fact
return psfn
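# For gdat.typeexpr == 'ferm', the kernel is divided by
# 2 * pi * int psfn(theta) sin(theta) dtheta (evaluated with np.trapz over the
# angular bins), so the PSF integrates to unity on the sphere and fitted
# amplitudes keep their meaning as total fluxes. As written, psfnnorm is only
# computed in the doubking branch, so this normalization path assumes a
# double-King PSF model.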
def retr_unit(lgal, bgal):
xdat = np.cos(bgal) * np.cos(lgal)
ydat = -np.cos(bgal) * np.sin(lgal)
zaxi = np.sin(bgal)
return xdat, ydat, zaxi
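# retr_unit maps longitude/latitude to a Cartesian unit vector with
# x = cos(b) cos(l), y = -cos(b) sin(l), z = sin(b); the negated y component
# presumably follows the sky-projection handedness used elsewhere in the code,
# so that angular separations can be computed via dot products of these vectors.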
def retr_psec(gdat, conv):
# temp
conv = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))
psec = (abs(scipy.fftpack.fft2(conv))**2)[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf] * 1e-3
psec = psec.flatten()
return psec
def retr_psecodim(gdat, psec):
psec = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))
psecodim = np.zeros(gdat.numbsidecarthalf)
for k in gdat.indxmpolodim:
indxmpol = np.where((gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1]))
psecodim[k] = np.mean(psec[indxmpol])
psecodim *= gdat.meanpara.mpolodim**2
return psecodim
def retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):
cdfnminm = 0.5 * (sp.special.erf((minmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfnmaxm = 0.5 * (sp.special.erf((maxmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfndiff = cdfnmaxm - cdfnminm
return cdfnminm, cdfndiff
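# retr_eerrnorm returns the lower CDF value and the probability mass of a
# Gaussian N(meanvarb, stdvvarb) inside [minmvarb, maxmvarb], using
# Phi(x) = (1 + erf((x - mu) / (sigma * sqrt(2)))) / 2. A quick, self-contained
# sanity check with illustrative values (the 1-sigma interval should carry
# about 68.3% of the mass):
def _demo_eerrnorm():
    import numpy as np
    import scipy.special
    minm, maxm, mean, stdv = -1., 1., 0., 1.
    cdfnminm = 0.5 * (scipy.special.erf((minm - mean) / stdv / np.sqrt(2.)) + 1.)
    cdfnmaxm = 0.5 * (scipy.special.erf((maxm - mean) / stdv / np.sqrt(2.)) + 1.)
    return cdfnminm, cdfnmaxm - cdfnminm  # (~0.159, ~0.683)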
def retr_condcatl(gdat):
    # the condensed catalog is built from the fitting model chain
    gmod = gdat.fitt
    # setup
## number of stacked samples
numbstks = 0
indxtupl = []
indxstks = []
indxstksparagenrscalfull = []
for n in gdat.indxsamptotl:
indxstks.append([])
indxstkssamptemp = []
for l in gmod.indxpopl:
indxstks[n].append([])
for k in range(len(gdat.listpostindxelemfull[n][l])):
indxstks[n][l].append(numbstks)
indxstkssamptemp.append(numbstks)
indxtupl.append([n, l, k])
numbstks += 1
        indxstksparagenrscalfull.append(np.array(indxstkssamptemp))
if gdat.typeverb > 1:
print('indxstks')
print(indxstks)
print('indxtupl')
print(indxtupl)
        print('indxstksparagenrscalfull')
        print(indxstksparagenrscalfull)
print('numbstks')
print(numbstks)
cntr = 0
arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))
for n in gdat.indxsamptotl:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt')
for l in gmod.indxpopl:
for k in np.arange(len(gdat.listpostindxelemfull[n][l])):
for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
                    arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, indxparagenrfullelem[l][nameparagenrelem][k]]
if gdat.typeverb > 0:
print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])
timeinit = gdat.functime()
gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
# temp
l = 0
gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]
# construct lists of samples for each proposal type
listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]
thisperc = 0
cntr = 0
for k in gmod.indxparagenrelemtotl:
for n in range(numbstks):
dist = np.fabs(arrystks[n, k] - arrystks[:, k])
            # use a distinct name so the stacked-sample index list built above is not clobbered
            indxstksclos = np.where(dist < gdat.distthrs[k])[0]
            if indxstksclos.size > 0:
                for j in indxstksclos:
cntr += 1
listdisttemp[k].append(dist[j])
indxstksrows[k].append(n)
indxstkscols[k].append(j)
nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)
if nextperc > thisperc:
thisperc = nextperc
if cntr > 1e6:
break
listdisttemp[k] = np.array(listdisttemp[k])
indxstksrows[k] = np.array(indxstksrows[k])
indxstkscols[k] = np.array(indxstkscols[k])
if cntr > 1e6:
break
listdist = [[] for k in range(gmod.numbparagenrelemtotl)]
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))
listindxstkspair = []
indxstksleft = []
if gdat.typeverb > 0:
timefinl = gdat.functime()
    indxstksleft = list(range(numbstks))
# list of sample lists of the labeled element
indxstksassc = []
cntr = 0
gdat.prvlthrs = 0.05
while len(indxstksleft) > 0:
# count number of associations
numbdist = np.zeros(numbstks, dtype=int) - 1
for p in range(len(indxstksleft)):
            indxindx = np.where((listdist[0][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \
                                (listdist[1][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmbgal < gdat.anglassc))[0]
numbdist[indxstksleft[p]] = indxindx.size
prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)
if prvlmaxmesti < gdat.prvlthrs:
break
# determine the element with the highest number of neighbors
indxstkscntr = np.argmax(numbdist)
indxsamptotlcntr = indxtupl[indxstkscntr][0]
indxpoplcntr = indxtupl[indxstkscntr][1]
indxelemcntr = indxtupl[indxstkscntr][2]
# add the central element sample
indxstksassc.append([])
indxstksassc[cntr].append(indxstkscntr)
indxstksleft.remove(indxstkscntr)
if gdat.typeverb > 1:
print('Match step %d' % cntr)
print('numbdist')
print(numbdist)
print('indxstkscntr')
print(indxstkscntr)
print('indxstksleft')
print(indxstksleft)
# add the associated element samples
if len(indxstksleft) > 0:
for n in gdat.indxsamptotl:
indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])
if n == indxsamptotlcntr:
continue
if indxstkstemp.size > 0:
totl = np.zeros_like(indxstkstemp)
for k in gmod.indxparagenrelemtotl:
                        temp = listdist[k][indxstkscntr, indxstkstemp].toarray()[0]
totl = totl + temp**2
indxleft = np.argsort(totl)[0]
indxstksthis = indxstkstemp[indxleft]
thisbool = True
for k in gmod.indxparagenrelemtotl:
if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:
thisbool = False
if thisbool:
indxstksassc[cntr].append(indxstksthis)
indxstksleft.remove(indxstksthis)
# temp
#if gdat.makeplot:
# gdatmodi = tdpy.gdatstrt()
# gdatmodi.this.indxelemfull = deepcopy(listindxelemfull[n])
# for r in range(len(indxstksassc)):
# calc_poststkscond(gdat, indxstksassc)
# gdatmodi.this.indxelemfull = [[] for l in gmod.indxpopl]
# for indxstkstemp in indxstksleft:
# indxsamptotlcntr = indxtupl[indxstkstemp][0]
# indxpoplcntr = indxtupl[indxstkstemp][1]
# indxelemcntr = indxtupl[indxstkstemp][2]
# gdatmodi.this.paragenrscalfull = gdat.listparagenrscalfull[indxsamptotlcntr, :]
# gdatmodi.this.indxelemfull[].append()
# plot_genemaps(gdat, gdatmodi, 'this', 'cntpdata', strgpdfn, indxenerplot=0, indxevttplot=0, cond=True)
cntr += 1
gdat.dictglob['poststkscond'] = []
gdat.dictglob['liststkscond'] = []
# for each condensed element
for r in range(len(indxstksassc)):
gdat.dictglob['liststkscond'].append([])
gdat.dictglob['liststkscond'][r] = {}
gdat.dictglob['poststkscond'].append([])
gdat.dictglob['poststkscond'][r] = {}
for strgfeat in gmod.namepara.genrelem:
gdat.dictglob['liststkscond'][r][strgfeat] = []
# for each associated sample associated with the central stacked sample
for k in range(len(indxstksassc[r])):
indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]
indxpoplcntr = indxtupl[indxstksassc[r][k]][1]
indxelemcntr = indxtupl[indxstksassc[r][k]][2]
for strgfeat in gmod.namepara.genrelem:
temp = getattr(gdat, 'list' + strgfeat)
if temp[indxsamptotlcntr][indxpoplcntr].size > 0:
temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]
gdat.dictglob['liststkscond'][r][strgfeat].append(temp)
for r in range(len(gdat.dictglob['liststkscond'])):
for strgfeat in gmod.namepara.genrelem:
arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)
gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))
            gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = np.median(arry, axis=0)
            gdat.dictglob['poststkscond'][r][strgfeat][1, ...] = np.percentile(arry, 16., axis=0)
            gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = np.percentile(arry, 84., axis=0)
gdat.numbstkscond = len(gdat.dictglob['liststkscond'])
gdat.indxstkscond = np.arange(gdat.numbstkscond)
gdat.prvl = np.empty(gdat.numbstkscond)
for r in gdat.indxstkscond:
gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])
gdat.prvl /= gdat.numbsamptotl
gdat.minmprvl = 0.
gdat.maxmprvl = 1.
retr_axis(gdat, 'prvl')
gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]
if gdat.makeplot:
pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
path = pathcond + 'histdist' + nameparagenrelem
            listtemp = np.copy(listdist[k].toarray()).flatten()
listtemp = listtemp[np.where(listtemp != 1e20)[0]]
tdpy.mcmc.plot_hist(path, listtemp, r'$\Delta \tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')
path = pathcond + 'histprvl'
tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')
gdat.prvlthrs = 0.1
gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]
gdat.numbprvlhigh = gdat.indxprvlhigh.size
def retr_conv(gdat, defl):
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
# temp
conv = abs(np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0) + np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) / 2.
conv = conv.flatten()
return conv
def retr_invm(gdat, defl):
# temp
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
invm = (1. - np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0)) * (1. - np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) - \
np.gradient(defl[:, :, 0], gdat.sizepixl, axis=1) * np.gradient(defl[:, :, 1], gdat.sizepixl, axis=0)
invm = invm.flatten()
return invm
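# retr_invm evaluates the determinant of the lensing Jacobian
# A = I - d(defl)/d(theta), i.e. the inverse magnification
#   1/mu = (1 - d defl_x/dx) (1 - d defl_y/dy) - (d defl_x/dy) (d defl_y/dx),
# on the Cartesian grid; critical curves are the zero-crossings of this map.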
def setp_indxswepsave(gdat):
gdat.indxswep = np.arange(gdat.numbswep)
gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)
gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)
gdat.boolsave[gdat.indxswepsave] = True
gdat.indxsampsave = np.zeros(gdat.numbswep, dtype=int) - 1
gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)
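# Sweep bookkeeping: of numbswep total sweeps, the first numbburn are discarded
# as burn-in and every factthin-th sweep afterwards is stored, yielding
# numbsamp saved states. A minimal sketch with assumed (non-default) sizes:
def _demo_indxswepsave():
    import numpy as np
    numbswep, numbburn, numbsamp, factthin = 100, 20, 8, 10
    indxswepsave = np.arange(numbburn, numbburn + numbsamp * factthin, factthin)
    boolsave = np.zeros(numbswep, dtype=bool)
    boolsave[indxswepsave] = True
    return indxswepsave  # array([20, 30, 40, 50, 60, 70, 80, 90])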
def retr_cntspnts(gdat, listposi, spec):
cnts = np.zeros((gdat.numbener, spec.shape[1]))
if gdat.boolbinsspat:
lgal = listposi[0]
bgal = listposi[1]
indxpixlpnts = retr_indxpixl(gdat, bgal, lgal)
else:
elin = listposi[0]
indxpixlpnts = np.zeros_like(elin, dtype=int)
for k in range(spec.shape[1]):
cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]
if gdat.enerdiff:
cnts *= gdat.deltener[:, None]
cnts = np.sum(cnts, axis=0)
return cnts
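# retr_cntspnts converts element spectra into expected counts by multiplying
# with the total exposure at each element's pixel and, for binned energy axes,
# the energy bin widths, then summing over energy:
#   cnts_k = sum_i spec_ik * expo_i(pix_k) * dE_i.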
def retr_mdencrit(gdat, adissour, adishost, adishostsour):
mdencrit = gdat.factnewtlght / 4. / np.pi * adissour / adishostsour / adishost
return mdencrit
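# retr_mdencrit is the critical surface mass density of the lens configuration,
#   Sigma_cr = (c^2 / 4 pi G) * D_s / (D_d * D_ds),
# where gdat.factnewtlght presumably plays the role of c^2 / G in the adopted
# units, and adissour, adishost, adishostsour are the source, lens, and
# lens-source angular diameter distances.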
def retr_massfrombein(gdat, adissour, adishost, adishostsour):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
massfrombein = np.pi * adishost**2 * mdencrit
return massfrombein
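# retr_massfrombein gives the conversion from the squared Einstein radius to
# the mass enclosed inside it, M(<theta_E) = pi * D_d^2 * Sigma_cr * theta_E^2,
# so the returned factor multiplies bein^2 elsewhere in the code.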
def retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
fracacutasca = acut / asca
factmcutfromdefs = np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(fracacutasca)
return factmcutfromdefs
def retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):
mscl = defs * np.pi * adishost**2 * mdencrit * asca
fracacutasca = acut / asca
mcut = mscl * retr_mcutfrommscl(fracacutasca)
return mcut
def retr_mcutfrommscl(fracacutasca):
mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))
return mcut
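# retr_mcutfrommscl maps the truncation-to-scale ratio tau = acut / asca to the
# fraction of the scale mass enclosed by the truncated profile,
#   f(tau) = tau^2 / (tau^2 + 1)^2 * ((tau^2 - 1) ln(tau) + tau pi - (tau^2 + 1)).
# A quick numerical check of the formula as written (tau = 10 is an arbitrary,
# illustrative choice):
def _demo_mcutfrommscl():
    import numpy as np
    fracacutasca = 10.
    mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * \
            ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))
    return mcut  # ~1.55 for tau = 10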
def retr_negalogt(varb):
    negalogt = np.sign(varb) * np.log10(np.fabs(varb))
return negalogt
def retr_gradmaps(gdat, maps):
# temp -- this does not work with vanishing exposure
maps = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))
grad = np.dstack((np.gradient(maps, gdat.sizepixl, axis=0), np.gradient(maps, gdat.sizepixl, axis=1))).reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
grad = grad.reshape((gdat.numbpixlcart, 2))
return grad
def retr_spatmean(gdat, inpt, boolcntp=False):
listspatmean = [[] for b in gdat.indxspatmean]
listspatstdv = [[] for b in gdat.indxspatmean]
for b, namespatmean in enumerate(gdat.listnamespatmean):
if boolcntp:
cntp = inpt[gdat.listindxcubespatmean[b]]
else:
cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix
spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix
if gdat.boolcorrexpo:
spatmean /= gdat.expototlmean
spatstdv /= gdat.expototlmean
if gdat.enerdiff:
spatmean /= gdat.deltener
spatstdv /= gdat.deltener
listspatmean[b] = spatmean
listspatstdv[b] = spatstdv
return listspatmean, listspatstdv
def retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):
grad = retr_gradmaps(gdat, maps)
defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)
prod = grad * defl
if cntpmodl is not None:
prod /= cntpmodl[:, None]
dotstemp = np.sum(prod, 1)
    if absv:
        dotstemp = np.fabs(dotstemp)
dots = np.mean(dotstemp)
return dots
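# retr_rele quantifies the 'relevance' of a perturber: the mean (optionally
# absolute) dot product between the image gradient and the deflection field it
# sources, optionally normalized by the model counts. Perturbers that deflect
# where the image changes rapidly score high on this statistic.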
def retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):
if strgvarb.startswith('cntpdata'):
varb = getattr(gdat, strgvarb)
elif strgvarb.startswith('histcntpdata'):
varb = getattr(gdat, strgvarb)
else:
if strgmodl == 'true':
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
varb = getattr(gmodstat, strgvarb)
if strgmodl == 'fitt':
if strgstat == 'this':
if strgmome == 'errr':
varb = getattr(gdatmodi, strgstat + 'errr' + strgvarb)
else:
varb = getattr(gdatmodi, strgstat + strgvarb)
if strgstat == 'pdfn':
varb = getattr(gdat, strgmome + strgpdfn + strgvarb)
if indxlist is not None:
varb = varb[indxlist]
if indxvarb is not None:
if strgmome == 'errr':
varb = varb[[slice(None)] + indxvarb]
else:
varb = varb[indxvarb]
return np.copy(varb)
def setp_indxpara(gdat, typesetp, strgmodl='fitt'):
print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))
gmod = getattr(gdat, strgmodl)
if typesetp == 'init':
if strgmodl == 'fitt':
gmod.lablmodl = 'Model'
if strgmodl == 'true':
gmod.lablmodl = 'True'
# transdimensional element populations
gmod.numbpopl = len(gmod.typeelem)
gmod.indxpopl = np.arange(gmod.numbpopl)
if gdat.typeexpr != 'user':
# background component
gmod.numbback = 0
gmod.indxback = []
for c in range(len(gmod.typeback)):
if isinstance(gmod.typeback[c], str):
if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
ordrexpa = int(gmod.typeback[c][8:])
numbexpa = 4 * ordrexpa**2
indxexpa = np.arange(numbexpa)
del gmod.typeback[c]
for k in indxexpa:
gmod.typeback.insert(c+k, namebfun + '%04d' % k)
gmod.numbback = len(gmod.typeback)
gmod.indxback = np.arange(gmod.numbback)
gmod.numbbacktotl = np.sum(gmod.numbback)
gmod.indxbacktotl = np.arange(gmod.numbbacktotl)
# galaxy components
gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)
# name of the generative element parameter used for the amplitude
gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]
gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.nameparagenrelemampl[l] = 'per0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.nameparagenrelemampl[l] = 'lum0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtline'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 1
elif gmod.typeelem[l].startswith('lghtpnts'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtgausbgrd'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l].startswith('clus'):
gmod.nameparagenrelemampl[l] = 'nobj'
gmod.indxparagenrelemampl[l] = 2
if len(gmod.nameparagenrelemampl[l]) == 0:
raise Exception('Amplitude feature undefined.')
for featpara in gdat.listfeatpara:
for strggrop in gdat.liststrggroppara:
setattr(gmod, 'list' + featpara + 'para' + strggrop, [])
if typesetp == 'finl':
# number of elements in the current state of the true model
if strgmodl == 'true':
gmod.numbelem = np.zeros(gmod.numbpopl)
for l in gmod.indxpopl:
gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)
gmod.numbelemtotl = np.sum(gmod.numbelem)
# element setup
## flag to calculate the kernel approximation errors
boolcalcerrr = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:
# temp
boolcalcerrr[l] = False
else:
boolcalcerrr[l] = False
setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)
# maximum number of elements for each population
gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)
# maximum number of elements summed over all populations
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
## sorting feature
nameparaelemsort = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# feature to be used to sort elements
if gmod.typeelem[l].startswith('lght'):
nameparaelemsort[l] = 'flux'
if gmod.typeelem[l] == 'lens':
nameparaelemsort[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
nameparaelemsort[l] = 'nobj'
## label extensions
gmod.lablelemextn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{fps}'
if gmod.typeelem[l] == 'lghtgausbgrd':
gmod.lablelemextn[l] = r'\rm{bgs}'
else:
if gmod.typeelem[l].startswith('lghtpntspuls'):
gmod.lablelemextn[l] = r'\rm{pul}'
if gmod.typeelem[l].startswith('lghtpntsagnn'):
gmod.lablelemextn[l] = r'\rm{agn}'
elif gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{pts}'
if gmod.typeelem[l] == 'lens':
gmod.lablelemextn[l] = r'\rm{sub}'
if gmod.typeelem[l].startswith('clus'):
gmod.lablelemextn[l] = r'\rm{cls}'
if gmod.typeelem[l].startswith('lghtline'):
gmod.lablelemextn[l] = r'\rm{lin}'
gmod.indxpoplgrid = [[] for y in gdat.indxgrid]
for y in gdat.indxgrid:
for indx, typeelemtemp in enumerate(gmod.typeelem):
            # foreground grid (image plane) -- the one where the data is measured
if y == 0:
if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):
gmod.indxpoplgrid[y].append(indx)
# foreground mass grid
if y == 1:
if typeelemtemp.startswith('lens'):
gmod.indxpoplgrid[y].append(indx)
# background grid (source plane)
if y == 2:
if typeelemtemp.endswith('bgrd'):
gmod.indxpoplgrid[y].append(indx)
indxgridpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for y in gdat.indxgrid:
if l in gmod.indxpoplgrid[y]:
indxgridpopl[l] = y
calcelemsbrt = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts'):
calcelemsbrt = True
if 'lghtgausbgrd' in gmod.typeelem:
calcelemsbrtbgrd = True
else:
calcelemsbrtbgrd = False
if gmod.boollenssubh:
calcelemdefl = True
else:
calcelemdefl = False
## element Boolean flags
gmod.boolelemlght = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.boolelemlght[l] = True
else:
gmod.boolelemlght[l] = False
gmod.boolelemlghtanyy = True in gmod.boolelemlght
gmod.boolelemlens = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lens'):
gmod.boolelemlens = True
gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):
gmod.boolelemsbrtdfnc[l] = True
else:
gmod.boolelemsbrtdfnc[l] = False
gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc
gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
gmod.boolelemdeflsubh[l] = True
else:
gmod.boolelemdeflsubh[l] = False
gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh
gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l].endswith('bgrd'):
gmod.boolelemsbrtextsbgrd[l] = True
else:
gmod.boolelemsbrtextsbgrd[l] = False
gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd
if gmod.boolelemsbrtextsbgrdanyy:
gmod.indxpopllens = 1
else:
gmod.indxpopllens = 0
gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':
gmod.boolelemsbrtpnts[l] = True
else:
gmod.boolelemsbrtpnts[l] = False
gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts
# temp -- because there is currently no extended source
gmod.boolelemsbrt = gmod.boolelemsbrtdfnc
gmod.boolelempsfn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':
gmod.boolelempsfn[l] = True
else:
gmod.boolelempsfn[l] = False
gmod.boolelempsfnanyy = True in gmod.boolelempsfn
spectype = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
spectype[l] = 'powr'
else:
spectype[l] = 'none'
setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)
minmgwdt = 2. * gdat.sizepixl
maxmgwdt = gdat.maxmgangdata / 4.
setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)
setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')
if gmod.boolelemlghtanyy:
# flux
if gdat.typeexpr == 'ferm':
minmflux = 1e-9
maxmflux = 1e-6
if gdat.typeexpr == 'tess':
minmflux = 1.
maxmflux = 1e3
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
minmflux = 1e4
maxmflux = 1e7
else:
minmflux = 3e-9
maxmflux = 1e-6
if gdat.typeexpr == 'gene':
minmflux = 0.1
maxmflux = 100.
if gdat.typeexpr == 'hubb':
minmflux = 1e-20
maxmflux = 1e-17
if gdat.typeexpr == 'fire':
minmflux = 1e-20
maxmflux = 1e-17
setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
if gdat.boolbinsener:
### spectral parameters
if gdat.typeexpr == 'ferm':
sind = [1., 3.]
minmsind = 1.
maxmsind = 3.
if gdat.typeexpr == 'chan':
minmsind = 0.4
maxmsind = 2.4
sind = [0.4, 2.4]
if gdat.typeexpr == 'hubb':
minmsind = 0.5
maxmsind = 2.5
sind = [0.4, 2.4]
if gdat.typeexpr != 'fire':
setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)
setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)
#### standard deviations should not be too small
setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)
for i in gdat.indxenerinde:
setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)
setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)
setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full')
setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)
setp_varb(gdat, 'dlosslop', limt=[-0.5, -3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)
setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
# construct background surface brightness templates from the user input
gmod.sbrtbacknorm = [[] for c in gmod.indxback]
gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)
for c in gmod.indxback:
gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gmod.typeback[c] == 'data':
gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)
gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100
elif isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]
            elif isinstance(gmod.typeback[c], list) and all(isinstance(valu, float) for valu in gmod.typeback[c]):
                # assumed convention: a two-element list gives [amplitude, spectral index] of a power-law template
                gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c][0]]), sind=np.array([gmod.typeback[c][1]]))[:, 0, None, None]
elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]
elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
                # note: namebfun and ordrexpa are assumed to persist from the 'init' pass that expanded the basis-function tags
                indxexpatemp = int(gmod.typeback[c][8:])
indxterm = indxexpatemp // ordrexpa**2
indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1
indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1
if namebfun == 'bfunfour':
ampl = 1.
func = gdat.meanpara.bgalcart
if namebfun == 'bfunwfou':
functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)
ampl = np.sqrt(functemp)
func = functemp
argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata
argsbgal = 2. * np.pi * indxexpaydat * func / gdat.maxmgangdata
if indxterm == 0:
termfrst = np.sin(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 1:
termfrst = np.sin(argslgal)
termseco = ampl * np.cos(argsbgal)
if indxterm == 2:
termfrst = np.cos(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 3:
termfrst = np.cos(argslgal)
termseco = ampl * np.cos(argsbgal)
gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \
np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
else:
path = gdat.pathinpt + gmod.typeback[c]
gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
if not gdat.boolforccart:
if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:
raise Exception('Provided background template must have the chosen image dimensions.')
gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))
if gdat.typepixl == 'cart' and gdat.boolforccart:
sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gmod.sbrtbacknorm[c] = sbrtbacknormtemp
# determine spatially uniform background templates
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:
gmod.boolunifback[c] = False
boolzero = True
gmod.boolbfun = False
for c in gmod.indxback:
            if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):
                raise Exception('Background templates must be positive-definite everywhere.')
if not np.isfinite(gmod.sbrtbacknorm[c]).all():
raise Exception('Background template is not finite.')
if np.amin(gmod.sbrtbacknorm[c]) > 0. or gmod.typeback[c] == 'data':
boolzero = False
if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):
gmod.boolbfun = True
if boolzero and not gmod.boolbfun:
            raise Exception('At least one background template must be positive everywhere.')
# temp -- does not take into account dark hosts
gmod.boolhost = gmod.typeemishost != 'none'
# type of PSF evaluation
if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
                # point sources exist and extended emission must also be convolved, so the PSF is evaluated over the full image
typeevalpsfn = 'full'
else:
                # only point sources exist, so the PSF is evaluated as a kernel around them
typeevalpsfn = 'kern'
else:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel, no point source exists
typeevalpsfn = 'conv'
else:
# the background is not convolved by a kernel, no point source exists
typeevalpsfn = 'none'
setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)
if gdat.typeverb > 1:
print('gmod.typeevalpsfn')
print(gmod.typeevalpsfn)
gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'
### PSF model
if gmod.typeevalpsfn != 'none':
if gmod.typemodlpsfn == 'singgaus':
numbpsfpform = 1
elif gmod.typemodlpsfn == 'singking':
numbpsfpform = 2
elif gmod.typemodlpsfn == 'doubgaus':
numbpsfpform = 3
elif gmod.typemodlpsfn == 'gausking':
numbpsfpform = 4
elif gmod.typemodlpsfn == 'doubking':
numbpsfpform = 5
gmod.numbpsfptotl = numbpsfpform
if gdat.boolpriopsfninfo:
for i in gdat.indxener:
for m in gdat.indxevtt:
meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]
stdvsigc = meansigc * 0.1
                        setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot=r'$\sigma$', scal='gaus', \
strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]
stdvgamc = meangamc * 0.1
setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking':
meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]
stdvsigt = meansigt * 0.1
setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)
meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]
stdvgamt = meangamt * 0.1
setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)
meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]
stdvpsff = meanpsff * 0.1
setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)
else:
if gdat.typeexpr == 'gene':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'ferm':
minmsigm = 0.1
maxmsigm = 10.
if gdat.typeexpr == 'hubb':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'chan':
minmsigm = 0.1 / gdat.anglfact
maxmsigm = 2. / gdat.anglfact
minmgamm = 1.5
maxmgamm = 20.
                setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot=r'$\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)
# background
## number of background parameters
numbbacp = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
numbbacp += 1
else:
numbbacp += gdat.numbener
## background parameter indices
gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)
indxenerbacp = np.zeros(numbbacp, dtype=int)
cntr = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
gmod.indxbackbacp[cntr] = c
cntr += 1
else:
for i in gdat.indxener:
indxenerbacp[cntr] = i
gmod.indxbackbacp[cntr] = c
cntr += 1
# indices of background parameters for each background component
gmod.indxbacpback = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]
# list of names of diffuse components
gmod.listnamediff = []
for c in gmod.indxback:
gmod.listnamediff += ['back%04d' % c]
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
gmod.listnamediff += ['hostisf%d' % e]
if gmod.boollens:
gmod.listnamediff += ['lens']
# list of names of emission components
listnameecom = deepcopy(gmod.listnamediff)
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:
if not 'dfnc' in listnameecom:
listnameecom += ['dfnc']
if not 'dfncsubt' in listnameecom:
listnameecom += ['dfncsubt']
gmod.listnameecomtotl = listnameecom + ['modl']
for c in gmod.indxback:
setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)
gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)
if gmod.boollens:
gmod.listnamegcom += ['bgrd']
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']
numbdiff = len(gmod.listnamediff)
convdiff = np.zeros(numbdiff, dtype=bool)
for k, namediff in enumerate(gmod.listnamediff):
if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):
if namediff.startswith('back'):
indx = int(namediff[-4:])
convdiff[k] = not gmod.boolunifback[indx]
else:
convdiff[k] = True
# element parameters that correlate with the statistical significance of the element
gmod.namepara.elemsign = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.elemsign[l] = 'flux'
if gmod.typeelem[l] == 'lens':
gmod.namepara.elemsign[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
gmod.namepara.elemsign[l] = 'nobj'
if gdat.typeverb > 0:
if strgmodl == 'true':
strgtemp = 'true'
if strgmodl == 'fitt':
strgtemp = 'fitting'
print('Building elements for the %s model...' % strgtemp)
# define the names and scalings of element parameters
gmod.namepara.genrelem = [[] for l in gmod.indxpopl]
gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] = ['elin']
gmod.listscalparagenrelem[l] = ['logt']
elif gmod.typespatdist[l] == 'diskscal':
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'dexp']
elif gmod.typespatdist[l] == 'gangexpo':
gmod.namepara.genrelem[l] = ['gang', 'aang']
gmod.listscalparagenrelem[l] = ['expo', 'self']
elif gmod.typespatdist[l] == 'glc3':
gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']
gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']
else:
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'self']
# amplitude
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['lum0']
gmod.listscalparagenrelem[l] += ['dpowslopbrek']
elif gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['per0']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
elif gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['flux']
gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]
elif gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['defs']
gmod.listscalparagenrelem[l] += ['powr']
elif gmod.typeelem[l].startswith('clus'):
gmod.namepara.genrelem[l] += ['nobj']
gmod.listscalparagenrelem[l] += ['powr']
# shape
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
gmod.namepara.genrelem[l] += ['gwdt']
gmod.listscalparagenrelem[l] += ['powr']
if gmod.typeelem[l] == 'lghtlinevoig':
gmod.namepara.genrelem[l] += ['sigm']
gmod.listscalparagenrelem[l] += ['logt']
gmod.namepara.genrelem[l] += ['gamm']
gmod.listscalparagenrelem[l] += ['logt']
# others
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['magf']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
gmod.namepara.genrelem[l] += ['geff']
gmod.listscalparagenrelem[l] += ['self']
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['dlos']
gmod.listscalparagenrelem[l] += ['powr']
if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):
if gmod.spectype[l] == 'colr':
for i in gdat.indxener:
if i == 0:
continue
gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]
gmod.listscalparagenrelem[l] += ['self']
else:
gmod.namepara.genrelem[l] += ['sind']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'curv':
gmod.namepara.genrelem[l] += ['curv']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'expc':
gmod.namepara.genrelem[l] += ['expc']
gmod.listscalparagenrelem[l] += ['self']
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
gmod.namepara.genrelem[l] += ['asca']
gmod.listscalparagenrelem[l] += ['self']
if gdat.variacut:
gmod.namepara.genrelem[l] += ['acut']
gmod.listscalparagenrelem[l] += ['self']
# names of element parameters for each scaling
gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]
for l in gmod.indxpopl:
for scaltype in gdat.listscaltype:
gmod.namepara.genrelemscal[l][scaltype] = []
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if scaltype == gmod.listscalparagenrelem[l][k]:
gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)
        # variables whose marginal distributions and pairwise correlations will be plotted
gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])
gmod.namepara.derielemodim[l] += ['deltllik']
if gdat.boolbinsspat:
if not 'lgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['lgal']
if not 'bgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['bgal']
if not 'gang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['gang']
if not 'aang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['aang']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.derielemodim[l] += ['cnts']
if gdat.typeexpr == 'ferm':
                    gmod.namepara.derielemodim[l] += ['sbrt0018']
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.derielemodim[l] += ['reds']
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
gmod.namepara.derielemodim[l] += ['mass']
gmod.namepara.derielemodim[l] += ['dlos']
if gmod.typeelem[l] == 'lens':
gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']
#for k in range(len(gmod.namepara.derielemodim[l])):
# gmod.namepara.derielemodim[l][k] += 'pop%d' % l
# check later
# temp
#if strgmodl == 'fitt':
# for q in gdat.indxrefr:
# if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:
# gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])
if gdat.typeverb > 1:
print('gmod.namepara.derielemodim')
print(gmod.namepara.derielemodim)
# derived element parameters
gmod.namepara.derielem = gmod.namepara.derielemodim[:]
if gdat.typeverb > 1:
print('gmod.namepara.derielem')
print(gmod.namepara.derielem)
# derived parameters
gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]
#gmod.listnameparaderitotl += gmod.namepara.scal
for namediff in gmod.listnamediff:
gmod.listnameparaderitotl += ['cntp' + namediff]
if gdat.typeverb > 1:
print('gmod.listnameparaderitotl')
print(gmod.listnameparaderitotl)
if strgmodl == 'fitt':
# add reference element parameters that are not available in the fitting model
gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
gmod.namepara.extrelem = [[] for l in gmod.indxpopl]
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for name in gdat.refr.namepara.elem[q]:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):
continue
if gmod.typeelem[l] == ('lens') and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):
continue
if not name in gmod.namepara.derielemodim[l]:
nametotl = name + gdat.listnamerefr[q]
if name == 'etag':
continue
gmod.namepara.derielemodim[l].append(nametotl)
if gdat.refr.numbelem[q] == 0:
continue
gdat.refr.namepara.elemonly[q][l].append(name)
if not nametotl in gmod.namepara.extrelem[l]:
gmod.namepara.extrelem[l].append(nametotl)
#if name == 'reds':
# for nametemp in ['lumi', 'dlos']:
# nametemptemp = nametemp + gdat.listnamerefr[q]
# if not nametemptemp in gmod.namepara.extrelem[l]:
# gmod.namepara.derielemodim[l].append(nametemp + gdat.listnamerefr[q])
# gmod.namepara.extrelem[l].append(nametemptemp)
if gdat.typeverb > 1:
print('gdat.refr.namepara.elemonly')
print(gdat.refr.namepara.elemonly)
if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpnts':
gmod.namepara.extrelem[l].append('lumiwo08')
gmod.namepara.derielemodim[l].append('lumiwo08')
if gdat.typeverb > 1:
print('gmod.namepara.extrelem')
print(gmod.namepara.extrelem)
# defaults
gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]
gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:
if l == 1:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
if l == 2:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:
for strgthis in liststrg:
if not strgthis in gmod.namepara.elem[l]:
gmod.namepara.elem[l].append(strgthis)
# temp
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] += ['spec']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['spec', 'specplot']
if gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['deflprof']
#gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]
#for l in gmod.indxpopl:
# if gmod.typeelem[l].startswith('clus'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']
# if gmod.typeelem[l] == 'clusvari':
# gmod.namepara.genrelemeval[l] += ['gwdt']
# if gmod.typeelem[l] == 'lens':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']
# if gmod.typeelem[l].startswith('lghtline'):
# gmod.namepara.genrelemeval[l] = ['elin', 'spec']
# elif gmod.typeelem[l] == 'lghtgausbgrd':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']
# elif gmod.typeelem[l].startswith('lght'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']
## element legends
lablpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
lablpopl[l] = 'FPS'
if gmod.typeelem[l] == 'lghtgausbgrd':
lablpopl[l] = 'BGS'
else:
if gmod.typeelem[l] == 'lghtpntspuls':
lablpopl[l] = 'Pulsar'
elif gmod.typeelem[l].startswith('lghtpntsagnn'):
lablpopl[l] = 'AGN'
elif gmod.typeelem[l].startswith('lghtpnts'):
lablpopl[l] = 'PS'
if gmod.typeelem[l] == 'lens':
lablpopl[l] = 'Subhalo'
if gmod.typeelem[l].startswith('clus'):
lablpopl[l] = 'Cluster'
if gmod.typeelem[l].startswith('lghtline'):
                lablpopl[l] = 'Line'
setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)
if strgmodl == 'true':
gmod.indxpoplassc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':
gmod.indxpoplassc[l] = [l]
else:
gmod.indxpoplassc[l] = gmod.indxpopl
# variables for which two dimensional histograms will be calculated
gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]
if gdat.boolplotelemcorr:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.derielemodim[l]:
gmod.namepara.genrelemcorr[l].append(strgfeat)
# number of element parameters
if gmod.numbpopl > 0:
gmod.numbparagenrelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
# number of generative element parameters for a single element of a specific population
gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])
# number of derived element parameters for a single element of a specific population
gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])
# number of element parameters for a single element of a specific population
gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])
# number of generative element parameters for all elements of a specific population
gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters up to the beginning of a population
gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])
# number of generative element parameters up to the end of a population
gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])
# number of derived element parameters for all elements of a specific population
gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.numbelem[l]
# number of element parameters for all elements of a specific population
gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.numbelem[l]
# number of generative element parameters summed over all populations
gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)
# number of derived element parameters summed over all populations
gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)
# number of element parameters summed over all populations
gmod.numbparaelemtotl = np.sum(gmod.numbparaelem)
gmod.indxparagenrelemsing = []
for l in gmod.indxpopl:
gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))
gmod.indxparaderielemsing = []
for l in gmod.indxpopl:
gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))
gmod.indxparaelemsing = []
for l in gmod.indxpopl:
gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))
# size of the auxiliary variable probability density vector
if gmod.maxmpara.numbelemtotl > 0:
gmod.numblpri = 3 + gmod.numbparagenrelem * gmod.numbpopl
else:
gmod.numblpri = 0
if gdat.penalpridiff:
gmod.numblpri += 1
indxlpri = np.arange(gmod.numblpri)
# append the population tags to element parameter names
#for l in gmod.indxpopl:
# gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]
gmod.boolcompposi = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)
if gmod.typeelem[l].startswith('lghtline'):
gmod.boolcompposi[l][0] = True
else:
gmod.boolcompposi[l][0] = True
gmod.boolcompposi[l][1] = True
# list of strings across all populations
## all (generative and derived) element parameters
gmod.numbparaelem = len(gmod.namepara.elem)
gmod.indxparaelem = np.arange(gmod.numbparaelem)
# flattened list of generative element parameters
gmod.listnameparagenfelem = []
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)
# concatenated list of flattened generative and derived element parameters
gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem
gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.numbparaelem[l] = len(gmod.namepara.elem[l])
numbdeflsubhplot = 2
numbdeflsingplot = numbdeflsubhplot
if gmod.numbparaelem > 0:
numbdeflsingplot += 3
gmod.convdiffanyy = True in convdiff
cntr = tdpy.cntr()
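# tdpy.cntr() is assumed to be a simple stateful counter whose incr() method returns the
# current index and advances it by one; it is used below to assign consecutive indices
# to the base parameters in the order they are defined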
if gmod.boollens:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
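# angular-diameter distance between the host (lens) and the source, valid for a spatially flat universe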
massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
# object of parameter indices
gmod.indxpara = tdpy.gdatstrt()
# define parameter indices
if gmod.numbparaelem > 0:
# number of elements
#gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
indx = cntr.incr()
setattr(gmod.indxpara, 'numbelempop%d' % l, indx)
#gmod.indxpara.numbelem[l] = indx
# hyperparameters
## mean number of elements
if gmod.typemodltran == 'pois':
#gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
indx = cntr.incr()
setattr(gmod.indxpara, 'meanelempop%d' % l, indx)
#gmod.indxpara.meanelem[l] = indx
## parameters parametrizing priors on element parameters
liststrgvarb = []
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):
if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':
liststrgvarb += [strgfeat + 'distscal']
if strgpdfnelemgenr == 'powr':
liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]
if strgpdfnelemgenr == 'dpow':
liststrgvarb += [strgfeat + 'distbrek']
liststrgvarb += [strgfeat + 'sloplowr']
liststrgvarb += [strgfeat + 'slopuppr']
if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':
liststrgvarb += [strgfeat + 'distmean']
if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':
liststrgvarb += [strgfeat + 'diststdv']
if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':
liststrgvarb += [strgfeat + 'distmean', strgfeat + 'diststdv']
for strgvarb in liststrgvarb:
setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)
for l in gmod.indxpopl:
strgpopl = 'pop%d' % l
if gmod.maxmpara.numbelem[l] > 0:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if gmod.listscalparagenrelem[l][k] == 'self':
continue
indx = cntr.incr()
if gmod.listscalparagenrelem[l][k] == 'dpow':
for nametemp in ['brek', 'sloplowr', 'slopuppr']:
strg = nametemp + nameparagenrelem
setattr(gmod.indxpara, strg, indx)
else:
if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':
strghypr = 'scal'
if gmod.listscalparagenrelem[l][k] == 'powr':
strghypr = 'slop'
if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'mean'
if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'stdv'
strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l
setattr(gmod.indxpara, strg, indx)
# group PSF parameters
if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':
for m in gdat.indxevtt:
for i in gdat.indxener:
setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking':
setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())
# named to match the 'psffe' prefix used when collecting the PSF parameter indices below
setattr(gmod.indxpara, 'psffen%02devt%d' % (i, m), cntr.incr())
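# e.g., for energy bin 0 and event type 0 these PSF parameters are registered under names
# such as 'sigcen00evt0', 'gamcen00evt0', 'sigten00evt0', 'gamten00evt0' and 'psffen00evt0'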
gmod.indxpara.psfp = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):
gmod.indxpara.psfp.append(valu)
gmod.indxpara.psfp = np.array(gmod.indxpara.psfp)
gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl
gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl
numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt
indxpsfpform = np.arange(numbpsfpform)
indxpsfptotl = np.arange(gmod.numbpsfptotl)
indxpsfp = np.arange(numbpsfp)
gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)
gmod.indxparapsfpinit = gmod.indxpara.psfp[0]
# group background parameters
gmod.indxpara.bacp = []
for c in gmod.indxback:
if gmod.boolspecback[c]:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04d' % c, indx)
gmod.indxpara.bacp.append(indx)
else:
for i in gdat.indxener:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)
gmod.indxpara.bacp.append(indx)
gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)
# temp
#gmod.indxpara.anglsour = []
#gmod.indxpara.anglhost = []
#gmod.indxpara.angllens = []
if gmod.typeemishost != 'none':
gmod.indxpara.specsour = []
gmod.indxpara.spechost = []
if gmod.boollens:
gmod.indxpara.lgalsour = cntr.incr()
gmod.indxpara.bgalsour = cntr.incr()
gmod.indxpara.fluxsour = cntr.incr()
if gdat.numbener > 1:
gmod.indxpara.sindsour = cntr.incr()
gmod.indxpara.sizesour = cntr.incr()
gmod.indxpara.ellpsour = cntr.incr()
gmod.indxpara.anglsour = cntr.incr()
if gmod.typeemishost != 'none' or gmod.boollens:
for e in gmod.indxsersfgrd:
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())
if gdat.numbener > 1:
setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())
if gmod.boollens:
setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())
if gmod.boollens:
gmod.indxpara.sherextr = cntr.incr()
gmod.indxpara.sangextr = cntr.incr()
gmod.indxpara.sour = []
if gmod.boollens and gmod.typeemishost == 'none':
raise Exception('Lensing cannot be modeled without host galaxy emission.')
# collect groups of parameters
if gdat.typeexpr == 'hubb':
gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']
for namecomplens in gmod.listnamecomplens:
setattr(gmod, 'liststrg' + namecomplens, [])
setattr(gmod.indxpara, namecomplens, [])
if gmod.boollens or gmod.typeemishost != 'none':
gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
if gmod.typeemishost != 'none':
gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']
if gdat.numbener > 1:
gmod.liststrghostlght += ['sindhost']
if gmod.boollens:
gmod.liststrghostlens += ['beinhost']
gmod.liststrgextr += ['sherextr', 'sangextr']
gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']
if gdat.numbener > 1:
gmod.liststrgsour += ['sindsour']
for strg, valu in gmod.__dict__.items():
if isinstance(valu, list) or isinstance(valu, np.ndarray):
continue
if gdat.typeexpr == 'hubb':
for namecomplens in gmod.listnamecomplens:
for strgtemp in getattr(gmod, 'liststrg' + namecomplens):
if strg[12:].startswith(strgtemp):
if isinstance(valu, list):
for valutemp in valu:
getattr(gmod.indxpara, namecomplens).append(valutemp)
else:
getattr(gmod.indxpara, namecomplens).append(valu)
# remove indxpara. from strg
strg = strg[12:]
if strg.startswith('fluxsour') or strg.startswith('sindsour'):
gmod.indxpara.specsour.append(valu)
if strg.startswith('fluxhost') or strg.startswith('sindhost'):
gmod.indxpara.spechost.append(valu)
if gmod.boollens or gmod.boolhost:
gmod.indxpara.host = gmod.indxpara.hostlght + gmod.indxpara.hostlens
gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr
## number of model spectral parameters for each population
#numbspep = np.empty(gmod.numbpopl, dtype=int)
#liststrgspep = [[] for l in range(gmod.numbpopl)]
#for l in gmod.indxpopl:
# if gdat.numbener > 1:
# liststrgspep[l] += ['sind']
# if gmod.spectype[l] == 'expc':
# liststrgspep[l] += ['expc']
# if gmod.spectype[l] == 'curv':
# liststrgspep[l] = ['curv']
# numbspep[l] = len(liststrgspep[l])
def setp_paragenrscalbase(gdat, strgmodl='fitt'):
'''
Setup labels and scales for base parameters
'''
print('setp_paragenrscalbase(): Building the %s model base parameter names and scales...' % strgmodl)
gmod = getattr(gdat, strgmodl)
listlablback = []
for nameback in gmod.listnameback:
if nameback == 'isot':
listlablback.append('Isotropic')
listlablback.append(r'$\mathcal{I}$')
if nameback == 'fdfm':
listlablback.append('FDM')
listlablback.append(r'$\mathcal{D}$')
if nameback == 'dark':
listlablback.append('NFW')
listlablback.append(r'$\mathcal{D}_{dark}$')
if nameback == 'part':
listlablback.append('Particle Back.')
listlablback.append(r'$\mathcal{I}_p$')
# background templates
listlablsbrt = deepcopy(listlablback)
numblablsbrt = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
listlablsbrt.append(gmod.lablpopl[l])
listlablsbrt.append(gmod.lablpopl[l] + ' subt')
numblablsbrt += 2
if gmod.boollens:
listlablsbrt.append('Source')
numblablsbrt += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listlablsbrt.append('Host %d' % e)
numblablsbrt += 1
if gmod.numbpopl > 0:
if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:
listlablsbrt.append('Uniform')
numblablsbrt += 1
listlablsbrtspec = ['Data']
listlablsbrtspec += deepcopy(listlablsbrt)
if len(listlablsbrt) > 1:
listlablsbrtspec.append('Total Model')
numblablsbrtspec = len(listlablsbrtspec)
# number of generative parameters per element, depends on population
#numbparaelem = gmod.numbparagenrelem + numbparaelemderi
# maximum total number of parameters
#numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem
#numbparaelemkind = gmod.numbparagenrbase
#for l in gmod.indxpopl:
# numbparaelemkind += gmod.numbparagenrelemsing[l]
#nameparagenrbase
#gmod.namepara.genrelem
#listnameparaderifixd
#listnameparaderielem
#gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem
#listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem
gmod.listindxparakindscal = {}
for scaltype in gdat.listscaltype:
gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]
#
## stack
## gmod.listnameparastck
#gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#
#gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase
#gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase
#for k in range(gmod.numbparaelem):
# for l in gmod.indxpopl:
# if k >= gmod.numbparagenrelemcuml[l]:
# indxpopltemp = l
# indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]
# gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]
# break
# gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)
# gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]
#
#
#if np.where(gmod.listscalpara == 0)[0].size > 0:
# print('gmod.listscalpara[gmod.indxparagenrbase]')
# print(gmod.listscalpara[gmod.indxparagenrbase])
# raise Exception('')
#
## labels and scales for variables
if gmod.boollens:
setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhdelt', r'$\rho_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhdeltbein', r'$\rho_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'fracsubhintg', r'f_{\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhdelt', r'f_{\rho,\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\rho,\rm{sub,E}}$')
for e in gmod.indxsersfgrd:
setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\rm{hst,%d,C}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\rm{hst,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\rm{hst,%d}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\rm{hst,E,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\rm{hst,E,%d}}$' % e)
for namevarb in ['fracsubh', 'masssubh']:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')
for e in gmod.indxsersfgrd:
setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')
# scalar variable setup
gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'
gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'
gdat.lablbooldfncsubt = 'H'
gdat.lablpriofactdoff = r'$\alpha_{p}$'
gmod.scalpriofactdoff = 'self'
gdat.minmreds = 0.
gdat.maxmreds = 1.5
gdat.minmmagt = 19.
gdat.maxmmagt = 28.
gmod.scalpara.numbelem = 'logt'
gmod.scalpara.lliktotl = 'logt'
gdat.lablener = 'E'
#gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)
# width of the Gaussian clusters
gdat.lablgwdt = r'\sigma_G'
gdat.lablgang = r'\theta'
gdat.lablaang = r'\phi'
gdat.labllgalunit = gdat.lablgangunit
gdat.lablbgalunit = gdat.lablgangunit
gdat.lablanglfromhost = r'\theta_{\rm{0,hst}}'
gdat.lablanglfromhostunit = gdat.lablgangunit
gdat.labldefs = r'\alpha_s'
gdat.lablflux = 'f'
gdat.lablnobj = 'p'
gdat.lablelin = r'\mathcal{E}'
gdat.lablsbrt = r'\Sigma'
gdat.labldeflprof = r'\alpha_a'
gdat.labldeflprofunit = u'$^{\prime\prime}$'
gdat.strgenerkevv = 'keV'
gdat.strgenergevv = 'GeV'
gdat.strgenerergs = 'erg'
gdat.strgenerimum = '\mu m^{-1}'
gdat.labldefsunit = u'$^{\prime\prime}$'
gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'
### labels for derived fixed dimensional parameters
if gdat.boolbinsener:
for i in gdat.indxener:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)
else:
gmod.lablrootpara.fracsdenmeandarkdfncsubt = 'f_{D/ST}'
### labels for background units
if gdat.typeexpr == 'ferm':
for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:
for labltemptemp in ['flux', 'sbrt']:
# define the label
if nameenerscaltype == 'en00':
strgenerscal = '%s' % labltemptemp
if nameenerscaltype == 'en01':
strgenerscal = 'E%s' % labltemptemp
if nameenerscaltype == 'en02':
strgenerscal = 'E^2%s' % labltemptemp
if nameenerscaltype == 'en03':
strgenerscal = '%s' % labltemptemp
labl = '%s' % strgenerscal
for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:
strgenerunit = getattr(gdat, 'strgener' + nameenerunit)
if nameenerscaltype == 'en00':
strgenerscalunit = '%s$^{-1}$' % strgenerunit
if nameenerscaltype == 'en01':
strgenerscalunit = ''
if nameenerscaltype == 'en02':
strgenerscalunit = '%s' % strgenerunit
if nameenerscaltype == 'en03':
strgenerscalunit = '%s' % strgenerunit
# define the label unit
for namesoldunit in ['ster', 'degr']:
if labltemptemp == 'flux':
lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'flux' + nameenerscaltype + nameenerunit + 'unit', lablunit)
else:
if namesoldunit == 'ster':
lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)
if namesoldunit == 'degr':
lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)
if gdat.boolbinsener:
gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')
gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')
gdat.lablexpo = r'$\epsilon$'
gdat.lablexpounit = 'cm$^2$ s'
gdat.lablprvl = '$p$'
gdat.lablreds = 'z'
gdat.lablmagt = 'm_R'
gdat.lablper0 = 'P_0'
gmod.scalper0plot = 'logt'
gdat.labldglc = 'd_{gc}'
gmod.scaldglcplot = 'logt'
gdat.labldlos = 'd_{los}'
gmod.scaldlosplot = 'logt'
if gdat.typeexpr == 'ferm':
gdat.labldlosunit = 'kpc'
gdat.labllumi = r'L_{\gamma}'
if gdat.typeexpr == 'chan':
gdat.labldlosunit = 'Mpc'
gdat.labllumi = r'L_{X}'
gdat.labllum0 = r'L_{X, 0}'
gdat.lablgeff = r'\eta_{\gamma}'
gmod.scalgeffplot = 'logt'
gmod.scallumiplot = 'logt'
gdat.labllumiunit = 'erg s$^{-1}$'
gdat.labllum0unit = 'erg s$^{-1}$'
gdat.lablthet = r'\theta_{gc}'
gmod.scalthetplot = 'self'
gdat.lablphii = r'\phi_{gc}'
gmod.scalphiiplot = 'self'
setattr(gmod.lablrootpara, 'magf', 'B')
setattr(gdat, 'scalmagfplot', 'logt')
setattr(gmod.lablrootpara, 'per1', 'P_1')
if gdat.typedata == 'inpt':
gdat.minmpara.per0 = 1e-3
gdat.maxmpara.per0 = 1e1
gdat.minmpara.per1 = 1e-20
gdat.maxmpara.per1 = 1e-10
gdat.minmpara.flux0400 = 1e-1
gdat.maxmpara.flux0400 = 1e4
setattr(gdat, 'scalper1plot', 'logt')
setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')
setattr(gdat, 'scalflux0400plot', 'logt')
for q in gdat.indxrefr:
setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], '\Delta_{%d}' % q)
gdat.lablsigm = '\sigma_l'
gdat.lablgamm = '\gamma_l'
gdat.lablbcom = '\eta'
gdat.lablinfopost = 'D_{KL}'
gdat.lablinfopostunit = 'nat'
gdat.lablinfoprio = 'D_{KL,pr}'
gdat.lablinfopriounit = 'nat'
gdat.labllevipost = '\ln P(D)'
gdat.labllevipostunit = 'nat'
gdat.lablleviprio = '\ln P_{pr}(D)'
gdat.labllevipriounit = 'nat'
gdat.lablsind = 's'
if gdat.boolbinsener:
for i in gdat.indxenerinde:
setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)
gdat.lablexpcunit = gdat.strgenerunit
gdat.labllliktotl = r'\ln P(D|M)'
gdat.labllpripena = r'\ln P(N)'
gdat.lablasca = r'\theta_s'
gdat.lablascaunit = gdat.lablgangunit
gdat.lablacut = r'\theta_c'
gdat.lablacutunit = gdat.lablgangunit
gdat.lablmcut = r'M_{c,n}'
gdat.lablmcutunit = r'$M_{\odot}$'
gdat.lablmcutcorr = r'\bar{M}_{c,n}'
gdat.lablmcutcorrunit = r'$M_{\odot}$'
gdat.lablspec = gdat.lablflux
gdat.lablspecunit = gdat.lablfluxunit
gdat.lablspecplot = gdat.lablflux
gdat.lablspecplotunit = gdat.lablfluxunit
gdat.lablcnts = 'C'
gdat.labldeltllik = r'\Delta_n \ln P(D|M)'
gdat.labldiss = r'\theta_{sa}'
gdat.labldissunit = gdat.lablgangunit
gdat.lablrele = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_l| \rangle'
gdat.lablrelc = r'\langle\vec{\alpha}_n \cdot \vec{\nabla} k_l \rangle'
gdat.lablreld = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_d| \rangle'
gdat.lablreln = r'\langle \Delta \theta_{pix} |\hat{\alpha}_n \cdot \vec{\nabla} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelm = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelk = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelf = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle / k_m'
for q in gdat.indxrefr:
for l in gmod.indxpopl:
setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))
setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.minmspec = 1e-2
gdat.maxmspec = 1e1
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
if gdat.typeexpr == 'ferm':
gdat.minmlumi = 1e32
gdat.maxmlumi = 1e36
elif gdat.typeexpr == 'chan':
if gdat.typedata == 'inpt':
gdat.minmlum0 = 1e42
gdat.maxmlum0 = 1e46
gdat.minmlumi = 1e41
gdat.maxmlumi = 1e45
try:
gdat.minmdlos
except:
if gdat.typeexpr == 'chan':
gdat.minmdlos = 1e7
gdat.maxmdlos = 1e9
else:
gdat.minmdlos = 6e3
gdat.maxmdlos = 1.1e4
if gdat.typeexpr == 'ferm':
gdat.minmcnts = 1e1
gdat.maxmcnts = 1e5
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
gdat.minmcnts = 1e4
gdat.maxmcnts = 1e8
else:
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'hubb':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'fire':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
gdat.minmspecplot = gdat.minmspec
gdat.maxmspecplot = gdat.maxmspec
gdat.minmdeltllik = 1.
gdat.maxmdeltllik = 1e3
gdat.minmdiss = 0.
gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)
gdat.minmrele = 1e-3
gdat.maxmrele = 1e1
gdat.minmreln = 1e-3
gdat.maxmreln = 1.
gdat.minmrelk = 1e-3
gdat.maxmrelk = 1.
gdat.minmrelf = 1e-5
gdat.maxmrelf = 1e-1
gdat.minmrelm = 1e-3
gdat.maxmrelm = 1e1
gdat.minmreld = 1e-3
gdat.maxmreld = 1e1
gdat.minmrelc = 1e-3
gdat.maxmrelc = 1.
gdat.minmmcut = 3e7
gdat.maxmmcut = 2e9
gdat.minmmcutcorr = gdat.minmmcut
gdat.maxmmcutcorr = gdat.maxmmcut
if gdat.boolbinsspat:
gdat.minmbein = 0.
gdat.maxmbein = 1. / gdat.anglfact
# scalar variables
if gdat.boolbinsspat:
gdat.minmdeflprof = 1e-3 / gdat.anglfact
gdat.maxmdeflprof = 0.1 / gdat.anglfact
#gdat.minmfracsubh = 0.
#gdat.maxmfracsubh = 0.3
#gmod.scalfracsubh = 'self'
#gdat.minmmasshost = 1e10
#gdat.maxmmasshost = 1e13
#gmod.scalmasshost = 'self'
#
#gdat.minmmasssubh = 1e8
#gdat.maxmmasssubh = 1e10
#gmod.scalmasssubh = 'self'
# collect groups of parameter indices into lists
## labels and scales for base parameters
gmod.nameparagenrbase = []
for name, k in gmod.indxpara.__dict__.items():
if not np.isscalar(k):
print('name')
print(name)
print('temp: no nonscalar should be here!')
continue
gmod.nameparagenrbase.append(name)
gmod.numbparagenrbase = len(gmod.nameparagenrbase)
gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)
gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]
## list of scalar variable names
gmod.namepara.scal = list(gmod.nameparagenrbase)
gmod.namepara.scal += ['lliktotl']
# derived parameters
print('Determining the list of derived, fixed-dimensional parameter names...')
gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]
gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]
gmod.namepara.genrelemflat = []
gmod.namepara.derielemflat = []
gmod.namepara.genrelemextdflat = []
gmod.namepara.derielemextdflat = []
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)
gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])
for k in gmod.indxparaderielemsing[l]:
gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)
gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])
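# e.g., a generative parameter 'flux' of population 0 appears as 'fluxpop0' in the flat list
# and as 'fluxpop00000', 'fluxpop00001', ... in the extended (per-element) list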
# list of element parameter names (derived and generative), counting label-degenerate element parameters only once
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])
gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])
gmod.namepara.elemflat = []
for l in gmod.indxpopl:
gmod.namepara.elemflat.extend(gmod.namepara.elem[l])
gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)
if gmod.boolelemlghtanyy:
for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:
if not strgfeat in gmod.namepara.genrelemdefa:
gmod.namepara.genrelemdefa.append(strgfeat)
# list of flattened generative element parameter names, counting label-degenerate element parameters only once
gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)
#gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)
gmod.inxparagenrscalelemkind = tdpy.gdatstrt()
gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)
gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)
# list of parameter names (derived and generative), counting label-degenerate element parameters only once, element lists flattened
gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparakind = len(gmod.namepara.kind)
gmod.indxparakind = np.arange(gmod.numbparakind)
# list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened
gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat
gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)
gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)
gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)
# list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened
gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat
gmod.numbparagenr = len(gmod.listnameparagenrscal)
gmod.indxparagenr = np.arange(gmod.numbparagenr)
# list of parameter names (derived and generative), element lists flattened
gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \
gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)
for e in gmod.indxsersfgrd:
gmod.namepara.scal += ['masshostisf%dbein' % e]
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masshostisf%d%sbein' % (e, strgcalcmasssubh)]
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
gmod.namepara.scal += ['lpripena']
if False and gmod.boolelemsbrtdfncanyy:
for strgbins in ['lowr', 'higr']:
gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]
gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]
for i in gdat.indxener:
gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]
gmod.namepara.scal += ['booldfncsubt']
if gmod.numbparaelem > 0:
for q in gdat.indxrefr:
if gdat.boolasscrefr[q]:
for l in gmod.indxpopl:
gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]
gmod.namepara.scal += ['fdispop%dpop%d' % (l, q)]
gmod.numbvarbscal = len(gmod.namepara.scal)
gmod.indxvarbscal = np.arange(gmod.numbvarbscal)
# determine total label
gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.listnameparaglob += ['cntpmodl']
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
if gmod.namepara.genrelem[l][g] not in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])
for k in gmod.indxparaderielemsing[l]:
if gmod.namepara.derielem[l][k] not in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.derielem[l][k])
for name in gmod.listnameparaglob:
lablroot = getattr(gmod.lablrootpara, name)
lablunit = getattr(gmod.lablunitpara, name)
labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
setattr(gmod.labltotlpara, name, labltotl)
# define fact
for l in gmod.indxpopl:
for k in gmod.indxparakind:
name = gmod.namepara.kind[k]
scal = getattr(gmod.scalpara, name)
if scal == 'self' or scal == 'logt':
minm = getattr(gmod.minmpara, name)
maxm = getattr(gmod.maxmpara, name)
if scal == 'self':
fact = maxm - minm
if scal == 'logt':
fact = np.log(maxm / minm)
if fact == 0:
print('name')
print(name)
raise Exception('')
setattr(gmod.factpara, name, fact)
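# e.g., a parameter with scal='logt', minm=1e-11 and maxm=1e-7 gets fact = np.log(1e4) ~ 9.21,
# the width of its prior support in log space; 'self' parameters get the linear width maxm - minm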
if gmod.numbparaelem > 0:
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1
else:
gmod.indxparagenrfulleleminit = -1
## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)
for featpara in gdat.listfeatparalist:
gmodfeat = getattr(gmod, featpara + 'para')
### elements
#for strgtypepara in gdat.liststrgtypepara:
# listname = getattr(gmod.namepara, strgtypepara + 'elem')
# listfeat = [[] for l in gmod.indxpopl]
# listfeatflat = []
# for l in gmod.indxpopl:
#
# numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]
# listfeat[l] = [[] for k in range(numb)]
# for k in range(numb):
# scal = getattr(gmod.scalpara, listname[l][k])
# if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):
# continue
# if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):
# continue
# if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):
# continue
#
# if strgtypepara == 'genr':
# strgextn = 'pop%d' % l
# else:
# strgextn = ''
# print('featpara')
# print(featpara)
# print('listname')
# print(listname)
# listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)
# listfeatflat.append(listfeat[l][k])
# setattr(gmodfeat, strgtypepara + 'elem', listfeat)
# setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)
### groups of parameters inside the parameter vector
### 'base': all fixed-dimensional generative parameters
### 'full': all generative parameters
for strggroppara in ['base', 'full']:
indx = getattr(gmod, 'indxparagenr' + strggroppara)
feat = [0. for k in indx]
for attr, valu in gmod.indxpara.__dict__.items():
if not np.isscalar(valu):
continue
scal = getattr(gmod.scalpara, attr)
if not (scal == 'self' or scal == 'logt') and featpara == 'fact':
continue
if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):
print('Mean or Std for non-Gaussian')
continue
if featpara == 'name':
feat[valu] = attr
else:
feat[valu] = getattr(gmodfeat, attr)
feat = np.array(feat)
setattr(gmodfeat, 'genr' + strggroppara, feat)
#print('gmod.minmpara')
#for attr, varb in gmod.minmpara.__dict__.items():
# print(attr, varb)
#print('gmod.maxmpara')
#for attr, varb in gmod.maxmpara.__dict__.items():
# print(attr, varb)
#print('gmod.scalpara')
#for attr, varb in gmod.scalpara.__dict__.items():
# print(attr, varb)
#raise Exception('')
## population groups
### number of elements
for strgvarb in ['numbelem', 'meanelem']:
listindxpara = []
if strgmodl == 'true':
listpara = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith(strgvarb + 'p'):
listindxpara.append(valu)
if strgmodl == 'true':
listpara.append(getattr(gmod.this, strg))
listindxpara = np.array(listindxpara)
setattr(gmod.indxpara, strgvarb, listindxpara)
if strgmodl == 'true':
listpara = np.array(listpara)
setattr(gmod, strgvarb, listpara)
### parameters of priors for element parameters
gmod.indxpara.prioelem = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg == 'dist' and np.isscalar(valu):
gmod.indxpara.prioelem.append(valu)
gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem)
### hyperparameters
if gmod.typemodltran == 'pois':
gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))
else:
gmod.indxpara.hypr = gmod.indxpara.prioelem
## generative base parameter indices for each scaling
gmod.listindxparagenrbasescal = dict()
for scaltype in gdat.listscaltype:
gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]
if gdat.booldiagmode:
if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:
raise Exception('')
def plot_lens(gdat):
# 'gmod' was referenced but never defined in the original function; the fitting model is assumed here
gmod = gdat.fitt
if gmod.boolelemdeflsubh:
xdat = gdat.binspara.angl[1:] * gdat.anglfact
lablxdat = gdat.labltotlpara.gang
listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact
listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact
listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact
listasym = [False, False, False]
listydat = []
for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):
listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)
for scalxdat in ['self', 'logt']:
path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \
lablydat=r'$\alpha_n$ [$^{\prime\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])
# pixel-convolution of the Sersic profile
# temp -- y axis labels are wrong, should be per solid angle
xdat = gdat.binspara.lgalsers * gdat.anglfact
for n in range(gdat.numbindxsers + 1):
for k in range(gdat.numbhalfsers + 1):
if k != 5:
continue
path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
#path = gdat.pathinitintr + 'sersprofcntr%04d%04d.pdf' % (n, k)
#tdpy.plot_gene(path, xdat, gdat.sersprofcntr[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
# separate file name for the log-log version below; the original reused the path above, overwriting the linear-axis plot
path = gdat.pathinitintr + 'sersprofdifflogt%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
xdat = gdat.binspara.angl * gdat.anglfact
listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact
listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact
listindx = np.array([4., 2., 4., 10.])
listydat = []
listlabl = []
for spec, size, indx in zip(listspec, listsize, listindx):
listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))
listlabl.append('$R_e = %.3g ^{\prime\prime}, n = %.2g$' % (size * gdat.anglfact, indx))
path = gdat.pathinitintr + 'sersprof.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \
listlegd=listlabl, listhlin=1e-7, limtydat=[1e-8, 1e0])
minmredshost = 0.01
maxmredshost = 0.4
minmredssour = 0.01
maxmredssour = 2.
numbreds = 200
retr_axis(gdat, 'redshost')
retr_axis(gdat, 'redssour')
gdat.meanpara.adishost = np.empty(numbreds)
for k in range(numbreds):
gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])
asca = 0.1 / gdat.anglfact
acut = 1. / gdat.anglfact
minmmass = np.zeros((numbreds + 1, numbreds + 1))
maxmmass = np.zeros((numbreds + 1, numbreds + 1))
for k, redshost in enumerate(gdat.binspara.redshost):
for n, redssour in enumerate(gdat.binspara.redssour):
if redssour > redshost:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)
maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)
#valulevl = np.linspace(7.5, 9., 5)
valulevl = [7.0, 7.3, 7.7, 8., 8.6]
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')
axis.set_xlabel(r'$z_{\rm{hst}}$')
axis.set_ylabel(r'$z_{\rm{src}}$')
axis.set_title(r'$M_{c,min}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsminm.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
valulevl = np.linspace(9., 11., 20)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')
axis.set_xlabel('$z_{hst}$')
axis.set_ylabel('$z_{src}$')
axis.set_title(r'$M_{c,max}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsmaxm.pdf'
plt.colorbar(imag)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. * gdat.maxmgangdata * 1e-3)
axis.set_xlabel('$z_h$')
axis.set_yscale('log')
axis.set_ylabel(r'$\lambda$ [kpc]')
path = gdat.pathinitintr + 'wlenreds.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
fracacutasca = np.logspace(-1., 2., 20)
mcut = retr_mcutfrommscl(fracacutasca)
axis.loglog(fracacutasca, mcut)
axis.set_xlabel(r'$\tau_n$')
axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')
axis.axhline(1., ls='--')
path = gdat.pathinitintr + 'mcut.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
def retr_listrtagprev(strgcnfg, pathpcat):
# list of PCAT run plot outputs
pathimag = pathpcat + '/imag/'
listrtag = fnmatch.filter(os.listdir(pathimag), '2*')
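# run tags are assumed to begin with a timestamp, hence the '2*' pattern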
listrtagprev = []
for rtag in listrtag:
strgstat = pathpcat + '/data/outp/' + rtag
if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and strgcnfg + '_' + rtag[16:].split('_')[-1] == rtag[16:]:
listrtagprev.append(rtag)
listrtagprev.sort()
return listrtagprev
def make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):
hand, labl = axis.get_legend_handles_labels()
legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)
legd.get_frame().set_fill(True)
legd.get_frame().set_facecolor('white')
def setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):
if popl == 'full':
indxpopltemp = gmod.indxpopl
elif popl != 'none':
indxpopltemp = [popl]
if ener == 'full':
indxenertemp = gdat.indxener
elif ener != 'none':
indxenertemp = [ener]
if evtt == 'full':
indxevtttemp = gdat.indxevtt
elif evtt != 'none':
indxevtttemp = [evtt]
# handle Sersic-component indices (missing from the original, mirroring the cases above);
# the indices are assumed to come from gmod.indxsersfgrd
if isfr == 'full':
indxisfrtemp = gmod.indxsersfgrd
elif isfr != 'none':
indxisfrtemp = [isfr]
if back == 'full':
gmod.indxbacktemp = gmod.indxback
elif isinstance(back, int):
gmod.indxbacktemp = np.array([back])
liststrgvarb = []
if iele != 'none':
for l in gmod.indxpopl:
if iele == 'full':
listiele = np.arange(gmod.maxmpara.numbelem[l])
else:
listiele = [iele]
for k in listiele:
liststrgvarb.append(strgvarb + 'pop%d%04d' % (l, k))
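# e.g., strgvarb='flux' with popl='full' and iele='full' yields names such as
# 'fluxpop00000', 'fluxpop00001', ... for each population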
if popl != 'none' and ener == 'none' and evtt == 'none' and back == 'none' and iele == 'none':
for l in indxpopltemp:
liststrgvarb.append(strgvarb + 'pop%d' % l)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr != 'none':
for e in indxisfrtemp:
liststrgvarb.append(strgvarb + 'isf%d' % e)
if popl == 'none' and ener != 'none' and evtt != 'none' and back == 'none':
for i in indxenertemp:
for m in indxevtttemp:
liststrgvarb.append(strgvarb + 'en%02devt%d' % (i, m))
if popl == 'none' and ener != 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'back%04den%02d' % (c, i))
if popl == 'none' and ener == 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
liststrgvarb.append(strgvarb + 'back%04d' % c)
if popl == 'none' and ener != 'none' and evtt == 'none' and back == 'none':
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'en%02d' % i)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr == 'none':
liststrgvarb.append(strgvarb)
if gdat.booldiagmode:
for strgvarb in liststrgvarb:
if liststrgvarb.count(strgvarb) != 1:
print('liststrgvarb')
print(liststrgvarb)
print('popl')
print(popl)
print('ener')
print(ener)
print('evtt')
print(evtt)
print('back')
print(back)
print('isfr')
print(isfr)
print('iele')
print(iele)
raise Exception('')
return liststrgvarb
def setp_varb(gdat, strgvarbbase, valu=None, minm=None, maxm=None, scal='self', lablroot=None, lablunit='', mean=None, stdv=None, cmap=None, numbbins=10, \
popl='none', ener='none', evtt='none', back='none', isfr='none', iele='none', \
boolinvr=False, \
strgmodl=None, strgstat=None, \
):
'''
Set up variable values across all models (true and fitting) as well as all populations, energy bins,
event bins, background components, and Sersic components
'''
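# a typical call, mirroring the usage elsewhere in this file:
# setp_varb(gdat, 'cmplpop0pop0', minm=0., maxm=1., lablroot='$C_{00}$')
# which defines the minimum, maximum, scaling, and label of the completeness of
# population 0 with respect to reference catalog 0 across all relevant models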
# determine the list of models
if strgmodl is None:
if gdat.typedata == 'mock':
liststrgmodl = ['true', 'fitt', 'plot']
else:
liststrgmodl = ['fitt', 'plot']
else:
if strgmodl == 'true' or strgmodl == 'plot' or strgmodl == 'refr':
liststrgmodl = [strgmodl]
else:
liststrgmodl = ['fitt', 'plot']
print('liststrgmodl')
print(liststrgmodl)
for strgmodl in liststrgmodl:
if strgmodl == 'plot':
gmod = gdat.fitt
gmodoutp = gdat
else:
gmod = getattr(gdat, strgmodl)
gmodoutp = gmod
# get the list of names of the variable
liststrgvarbnone = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, 'none')
if iele != 'none':
liststrgvarb = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, iele)
else:
liststrgvarb = liststrgvarbnone
# set the values of each variable in the list
for strgvarb in liststrgvarb:
if minm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.minmpara, strgvarb, minm)
if maxm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.maxmpara, strgvarb, maxm)
if mean is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, mean)
if stdv is not None:
# the original stored the standard deviation on meanpara; stdvpara is assumed to be the intended container
setp_varbcore(gdat, strgmodl, gmodoutp.stdvpara, strgvarb, stdv)
if valu is not None:
if strgstat is None:
print('strgvarb')
print(strgvarb)
print('strgmodl')
print(strgmodl)
print('valu')
print(valu)
print('')
setp_varbcore(gdat, strgmodl, gmodoutp, strgvarb, valu)
elif strgstat == 'this':
setp_varbcore(gdat, strgmodl, gmodoutp.this, strgvarb, valu)
if scal is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.scalpara, strgvarb, scal)
if lablroot is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablrootpara, strgvarb, lablroot)
if lablunit is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablunitpara, strgvarb, lablunit)
if cmap is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.cmappara, strgvarb, cmap)
setp_varbcore(gdat, strgmodl, gmodoutp.numbbinspara, strgvarb, numbbins)
# create limt, bins, mean, and delt
if (minm is not None and maxm is not None) or (mean is not None and stdv is not None):
# determine minima and maxima for Gaussian or log-Gaussian distributed parameters
if mean is not None:
minm = mean - gdat.numbstdvgaus * stdv
maxm = mean + gdat.numbstdvgaus * stdv
# uniformly-distributed
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsunif = np.linspace(minm, maxm, numbbins + 1)
if scal == 'logt' or scal == 'powr':
binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)
if gdat.booldiagmode:
if minm <= 0.:
raise Exception('')
if scal == 'asnh':
binsunif = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numbbins + 1)
if boolinvr:
binsunif = binsunif[::-1]
meanparaunif = (binsunif[1:] + binsunif[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanpara = meanparaunif
bins = binsunif
minmunif = minm
maxmunif = maxm
if scal == 'logt' or scal == 'powr':
meanpara = 10**meanparaunif
bins = 10**binsunif
minmunif = np.log10(minm)
maxmunif = np.log10(maxm)
if scal == 'asnh':
meanpara = np.sinh(meanparaunif)
bins = np.sinh(binsunif)
minmunif = np.arcsinh(minm)
maxmunif = np.arcsinh(maxm)
delt = np.diff(bins)
limt = np.array([minm, maxm])
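# worked example: scal='logt' with minm=1., maxm=100. and numbbins=2 gives
# binsunif = [0., 1., 2.] in log10 space, hence bins = [1., 10., 100.],
# meanpara = [10**0.5, 10**1.5] and delt = [9., 90.]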
# 'self' is not yet defined
if scal == 'asnh' or scal == 'logt' or scal == 'powr':
listvalutickmajr, listlabltickmajr, listvalutickminr, listlabltickminr = tdpy.retr_valulabltick(minm, maxm, scal)
setattr(gmodoutp.labltickmajrpara, strgvarb, listlabltickmajr)
setattr(gmodoutp.valutickmajrpara, strgvarb, listvalutickmajr)
setattr(gmodoutp.labltickminrpara, strgvarb, listlabltickminr)
setattr(gmodoutp.valutickminrpara, strgvarb, listvalutickminr)
#labltick = np.empty(gdat.numbtickcbar, dtype=object)
#for k in range(gdat.numbtickcbar):
# if scal == 'asnh':
# valutick[k] = np.sinh(tickunif[k])
# if scal == 'logt' or scal == 'powr':
# valutick[k] = 10**(tickunif[k])
# # avoid very small, but nonzero central values in the residual count color maps
# if strgcbar == 'cntpresi' and np.fabs(valutick[k]) < 1e-5:
# valutick[k] = 0.
# if strgcbar == 'cntpdata' and np.amax(valutick) > 1e3:
# labltick[k] = '%d' % valutick[k]
# else:
# labltick[k] = '%.3g' % valutick[k]
setattr(gmodoutp.limtpara, strgvarb, limt)
setattr(gmodoutp.binspara, strgvarb, bins)
setattr(gmodoutp.meanpara, strgvarb, meanpara)
setattr(gmodoutp.deltpara, strgvarb, delt)
def retr_ticklabltemp(gdat, strgcbar):
minm = getattr(gdat.minmpara, strgcbar)
maxm = getattr(gdat.maxmpara, strgcbar)
scal = getattr(gdat.scalpara, strgcbar)
numb = gdat.numbtickcbar - 1
retr_axis(gdat, strgcbar, numb=numb)
minmscal = minm
if scal == 'asnh':
minmscal = np.arcsinh(minmscal)
if scal == 'logt':
minmscal = np.log10(minmscal)
maxmscal = maxm
if scal == 'asnh':
maxmscal = np.arcsinh(maxmscal)
if scal == 'logt':
maxmscal = np.log10(maxmscal)
tickscal = np.linspace(minmscal, maxmscal, gdat.numbtickcbar)
labl = np.empty(gdat.numbtickcbar, dtype=object)
tick = np.copy(tickscal)
for k in range(gdat.numbtickcbar):
if scal == 'asnh':
tick[k] = np.sinh(tickscal[k])
elif scal == 'logt':
tick[k] = 10**(tickscal[k])
# avoid very small, but nonzero central values in the residual count color maps
if strgcbar == 'cntpresi' and np.fabs(tick[k]) < 1e-5:
tick[k] = 0.
if strgcbar == 'cntpdata' and np.amax(tick) > 1e3:
labl[k] = '%d' % tick[k]
else:
labl[k] = '%.3g' % tick[k]
setattr(gdat.tickpara, strgcbar, tick)
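# temp -- the tick labels computed above ('labl') are never stored; only the tick values
# are attached to gdat.tickpara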
def retr_axistemp(gdat, strgvarb, strgmodl=None, boolinvr=False):
if strgmodl is None:
listgdattemp = [gdat]
for strgmodl in gdat.liststrgmodl:
listgdattemp.append(getattr(gdat, strgmodl))
elif strgmodl == 'fitt' or strgmodl == 'true':
listgdattemp = [getattr(gdat, strgmodl)]
elif strgmodl == 'allm':
listgdattemp = []
for strgmodl in gdat.liststrgmodl:
listgdattemp.append(getattr(gdat, strgmodl))
for gdattemp in listgdattemp:
minm = getattr(gdattemp.minmpara, strgvarb)
maxm = getattr(gdattemp.maxmpara, strgvarb)
numb = getattr(gdattemp.numbbinspara, strgvarb)
scal = getattr(gdattemp.scalpara, strgvarb)
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsscal = np.linspace(minm, maxm, numb + 1)
if scal == 'logt':
print('minm')
print(minm)
print('maxm')
print(maxm)
print('strgvarb')
print(strgvarb)
binsscal = np.linspace(np.log10(minm), np.log10(maxm), numb + 1)
print('')
if gdat.booldiagmode:
if minm <= 0.:
raise Exception('')
if scal == 'asnh':
binsscal = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numb + 1)
if boolinvr:
binsscal = binsscal[::-1]
meanvarbscal = (binsscal[1:] + binsscal[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanvarb = meanvarbscal
bins = binsscal
if scal == 'logt':
meanvarb = 10**meanvarbscal
bins = 10**binsscal
if scal == 'asnh':
meanvarb = np.sinh(meanvarbscal)
bins = np.sinh(binsscal)
delt = np.diff(bins)
limt = np.array([np.amin(bins), np.amax(bins)])
setattr(gdattemp.limtpara, strgvarb, limt)
setattr(gdattemp.binspara, strgvarb, bins)
setattr(gdattemp.meanpara, strgvarb, meanvarb)
setattr(gdattemp.deltpara, strgvarb, delt)
def setp_varbcore(gdat, strgmodl, gdattemp, strgvarbtemp, valu):
# use the value provided by the user if the variable is already defined (and not None);
# otherwise fall back to the default passed in as 'valu'
valutemp = getattr(gdattemp, strgvarbtemp, None)
if valutemp is None:
setattr(gdattemp, strgvarbtemp, valu)
elif gdat.typeverb > 0:
print('Received custom value for %s, %s: %s' % (strgvarbtemp, strgmodl, valutemp))
def intp_sinc(gdat, lgal, bgal):
intpsinc = 4. * gdat.numbsidepsfn**2 * np.sum(gdat.temppsfn * sinc(gdat.numbsidepsfn * (gdat.gridpsfnlgal + lgal) - gdat.gridpsfnlgal) * \
sinc(gdat.numbsidepsfn * (gdat.gridpsfnbgal + bgal) - gdat.gridpsfnbgal))
return intpsinc
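# intp_sinc() evaluates the PSF template at a fractional pixel offset (lgal, bgal) via
# two-dimensional sinc (Whittaker-Shannon) interpolation over the precomputed PSF grid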
def retr_fluxbrgt(gdat, lgal, bgal, flux):
if lgal.size == 0:
fluxbrgt = np.array([0.])
fluxbrgtassc = np.array([0.])
else:
indxbrgt = np.argmax(flux)
fluxbrgt = flux[indxbrgt]
# 'fluxbrgtassc' was left undefined on this branch in the original; the flux of the
# brightest element is assumed here
fluxbrgtassc = fluxbrgt
return fluxbrgt, fluxbrgtassc
def init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot):
figrsize = (gdat.sizeimag, gdat.sizeimag)
figr, axis = plt.subplots(figsize=figrsize)
nameplot = strgplot
if gdat.numbener > 1:
nameplot += 'en%02d' % gdat.indxenerincl[indxenerplot]
if gdat.numbevtt > 1:
if indxevttplot == -1:
nameplot += 'evtA'
else:
nameplot += 'evt%d' % gdat.indxevttincl[indxevttplot]
if gdat.fitt.numbpopl > 1:
if indxpoplplot == -1:
nameplot += 'popA'
else:
nameplot += 'pop%d' % indxpoplplot
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)
print('gdat.fitt.labltotlpara.lgalpop0')
print(gdat.fitt.labltotlpara.lgalpop0)
print('gdat.fitt.labltotlpara.bgalpop0')
print(gdat.fitt.labltotlpara.bgalpop0)
axis.set_xlabel(gdat.fitt.labltotlpara.lgalpop0)
axis.set_ylabel(gdat.fitt.labltotlpara.bgalpop0)
titl = ''
if indxenerplot is not None and gdat.numbener > 1 and strgplot.endswith('cnts'):
titl = gdat.strgener[indxenerplot]
if indxevttplot is not None and gdat.numbevtt > 1 and strgplot.endswith('cnts'):
titl += ' ' + gdat.strgevtt[indxevttplot]
axis.set_title(titl)
return figr, axis, path
def draw_frambndr(gdat, axis):
outr = max(gdat.frambndrmodl, gdat.frambndrdata)
axis.set_xlim([-outr, outr])
axis.set_ylim([-outr, outr])
innr = min(gdat.frambndrmodl, gdat.frambndrdata)
axis.axvline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axvline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
def retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot=None, indxevttplot=-1, booltdim=False, imag=None):
draw_frambndr(gdat, axis)
# take the relevant energy and PSF bins
if indxenerplot is not None:
if indxevttplot == -1:
maps = np.sum(maps[indxenerplot, ...], axis=1)
else:
maps = maps[indxenerplot, :, indxevttplot]
# project the map to 2D
if gdat.typepixl == 'heal':
maps = tdpy.retr_cart(maps, indxpixlrofi=gdat.indxpixlrofi, numbsideinpt=gdat.numbsideheal, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata)
if gdat.typepixl == 'cart':
shap = [gdat.numbsidecart] + list(maps.shape)
shap[1] = gdat.numbsidecart
shapflat = list(maps.shape)
shapflat[0] = gdat.numbpixlfull
mapstemp = np.zeros(shapflat)
if maps.size == gdat.indxpixlrofi.size:
mapstemp[gdat.indxpixlrofi, ...] = maps
else:
mapstemp[:, ...] = maps
maps = mapstemp.reshape(shap).swapaxes(0, 1)
# temp -- this is needed to bring the Fermi-LAT map to the right direction
#maps = fliplr(maps)
# rescale the map
if strgmodl is not None:
gmod = getattr(gdat, strgmodl)
else:
gmod = gdat
scal = getattr(gdat.scalpara, strgcbar)
cmap = getattr(gdat.cmappara, strgcbar)
vmin = getattr(gdat.minmpara, strgcbar)
vmax = getattr(gdat.maxmpara, strgcbar)
if scal == 'asnh':
maps = np.arcsinh(maps)
if scal == 'logt':
maps = np.log10(maps)
if imag is None:
imag = axis.imshow(maps, cmap=cmap, origin='lower', extent=gdat.exttrofi, interpolation='nearest', vmin=vmin, vmax=vmax, alpha=gdat.alphmaps)
return imag
else:
imag.set_data(maps)
def make_cbar(gdat, axis, imag, strgvarb):
# make a color bar
valutickmajr = getattr(gdat.valutickmajrpara, strgvarb)
labltickmajr = getattr(gdat.labltickmajrpara, strgvarb)
print('valutickmajr')
print(valutickmajr)
print('labltickmajr')
print(labltickmajr)
cbar = plt.colorbar(imag, ax=axis, fraction=0.05, aspect=15)
cbar.set_ticks(valutickmajr)
cbar.set_ticklabels(labltickmajr)
return cbar
def make_legdmaps(gdat, strgstat, strgmodl, axis, mosa=False, assc=False):
gmod = getattr(gdat, strgmodl)
# transdimensional elements
if strgmodl == 'fitt' and (strgstat == 'pdfn' and gdat.boolcondcatl or strgstat == 'this') and gmod.numbparaelem > 0:
for l in gmod.indxpopl:
colr = retr_colr(gdat, strgstat, strgmodl, l)
if strgstat == 'pdfn':
labl = 'Condensed %s %s' % (gmod.legd, gmod.legdpopl[l])
else:
labl = 'Sample %s %s' % (gmod.legd, gmod.legdpopl[l])
if not gmod.maxmpara.numbelem[l] == 0:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=labl, marker=gmod.listelemmrkr[l], lw=gdat.mrkrlinewdth, color=colr)
for q in gdat.indxrefr:
if not np.amax(gdat.refr.numbelem[q]) == 0:
if assc:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=gdat.refr.lablhits[q], marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
else:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablelem[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
# fixed-dimensional objects
if strgmodl == 'fitt':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gmod.lablmodl, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gmod.lablmodl, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gdat.typedata == 'mock':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gdat.refr.labl, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gdat.refr.labl, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
temphand, temp = axis.get_legend_handles_labels()
numblabl = len(temp)
if numblabl == 4:
numbcols = 2
else:
numbcols = 3
if mosa:
axis.legend(bbox_to_anchor=[1., 1.15], loc='center', ncol=numbcols)
else:
axis.legend(bbox_to_anchor=[0.5, 1.15], loc='center', ncol=numbcols)
def supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot=-1, assc=False):
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
# associations with the reference elements
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] > 0:
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
reframpl = gdat.refr.dictelem[q][gdat.refr.nameparagenrelemampl[q]][0, :]
mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl, gdat.refr.nameparagenrelemampl[q])
lgal = np.copy(gdat.refr.dictelem[q]['lgal'][0, :])
bgal = np.copy(gdat.refr.dictelem[q]['bgal'][0, :])
numbelem = int(gdat.refr.numbelem[q])
if gdatmodi is not None and gmod.numbparaelem > 0 and assc:
### hit
indx = gdatmodi.this.indxelemrefrasschits[q][l]
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, label=gdat.refr.lablhits[q], \
        marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
### missed
indx = gdatmodi.this.indxelemrefrasscmiss[q][l]
else:
indx = np.arange(lgal.size)
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], \
lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
sizexoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
sizeyoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
if 'etag' in gdat.refr.namepara.elem[q]:
for k in range(indx.size):
axis.text(gdat.anglfact * lgal[indx[k]] + sizexoff, gdat.anglfact * bgal[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], \
verticalalignment='center', horizontalalignment='center', \
color='red', fontsize=1)
# temp -- generalize this to input refrlgalhost vs.
if gdat.typedata == 'mock':
## host galaxy position
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
lgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost, gdat.anglfact * bgalhost, facecolor='none', alpha=0.7, \
label='%s Host %d' % (gdat.refr.labl, e), s=300, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.boollens:
## host galaxy Einstein radius
for e in gmod.indxsersfgrd:
truelgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
truebgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
truebeinhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * truelgalhost, \
gdat.anglfact * truebgalhost), \
gdat.anglfact * truebeinhost, \
edgecolor=gdat.refr.colr, facecolor='none', lw=gdat.mrkrlinewdth))
if gmod.boollens:
## source galaxy position
axis.scatter(gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.lgalsour], \
gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.bgalsour], \
facecolor='none', \
alpha=0.7, \
#alpha=gdat.alphelem, \
label='%s Source' % gdat.refr.labl, s=300, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
# model catalog
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
if gdatmodi is not None:
if gmod.numbparaelem > 0:
colr = retr_colr(gdat, strgstat, strgmodl, l)
mrkrsize = retr_mrkrsize(gdat, strgmodl, gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[gmod.nameparagenrelemampl[l]][l]], gmod.nameparagenrelemampl[l])
if 'lgal' in gdatmodi.this.indxparagenrfullelem[l]:
lgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['lgal']]
bgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['bgal']]
else:
gang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['gang']]
aang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['aang']]
lgal, bgal = retr_lgalbgal(gang, aang)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, alpha=gdat.alphelem, label='Sample', marker=gmod.listelemmrkr[l], \
lw=gdat.mrkrlinewdth, color=colr)
## source
if gmod.boollens:
lgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.bgalsour]
axis.scatter(gdat.anglfact * lgalsour, gdat.anglfact * bgalsour, facecolor='none', \
alpha=gdat.alphelem, \
label='%s Source' % gmod.lablpara, s=300, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
## host
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e], facecolor='none', \
alpha=gdat.alphelem, \
label='%s Host' % gmod.lablpara, s=300, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.boollens:
beinhost = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e]), \
gdat.anglfact * beinhost, edgecolor=gmod.colr, facecolor='none', \
lw=gdat.mrkrlinewdth, ls='--'))
# temp
if strgstat == 'pdfn' and gdat.boolcondcatl and gmod.numbparaelem > 0:
lgal = np.zeros(gdat.numbprvlhigh)
bgal = np.zeros(gdat.numbprvlhigh)
ampl = np.zeros(gdat.numbprvlhigh)
cntr = 0
for r in gdat.indxstkscond:
if r in gdat.indxprvlhigh:
lgal[cntr] = gdat.dictglob['poststkscond'][r]['lgal'][0]
bgal[cntr] = gdat.dictglob['poststkscond'][r]['bgal'][0]
# temp -- this does not allow sources with different spectra to be assigned to the same stacked sample
ampl[cntr] = gdat.dictglob['poststkscond'][r][gmod.nameparagenrelemampl[l]][0]
cntr += 1
mrkrsize = retr_mrkrsize(gdat, strgmodl, ampl, gmod.nameparagenrelemampl[l])
colr = retr_colr(gdat, strgstat, strgmodl, l)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
label='Condensed', marker=gmod.listelemmrkr[l], color='black', lw=gdat.mrkrlinewdth)
for r in gdat.indxstkscond:
lgal = np.array([gdat.dictglob['liststkscond'][r]['lgal']])
bgal = np.array([gdat.dictglob['liststkscond'][r]['bgal']])
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
marker=gmod.listelemmrkr[l], color='black', alpha=0.1, lw=gdat.mrkrlinewdth)
def retr_colr(gdat, strgstat, strgmodl, indxpopl=None):
    
    gmod = getattr(gdat, strgmodl)
    
    if strgmodl == 'true':
        if indxpopl is None:
            colr = gdat.refr.colr
        else:
            colr = gdat.refr.colrelem[indxpopl]
    if strgmodl == 'fitt':
        if strgstat == 'this' or strgstat == 'pdfn':
            if indxpopl is None:
                colr = gmod.colr
            else:
                colr = gmod.colrelem[indxpopl]
        if strgstat == 'mlik':
            colr = 'r'
    
    return colr
def retr_levipost(listllik):
minmlistllik = np.amin(listllik)
levipost = np.log(np.mean(1. / np.exp(listllik - minmlistllik))) + minmlistllik
return levipost
def retr_infofromlevi(pmeallik, levi):
info = pmeallik - levi
return info
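# Illustrative sketch (not part of the original pipeline): how retr_levipost and
# retr_infofromlevi combine, given a chain of log-likelihood samples, to yield the
# harmonic-mean-style log-evidence and the information gain. The chain below is fake.
def chec_levi():
    
    # fake chain of log-likelihood samples
    listllik = np.random.randn(1000) * 3. + 100.
    
    # harmonic-mean-style estimator of the log-evidence
    levi = retr_levipost(listllik)
    
    # posterior-mean log-likelihood
    pmeallik = np.mean(listllik)
    
    # information gained going from the prior to the posterior
    info = retr_infofromlevi(pmeallik, levi)
    
    print('levi: %g, info: %g' % (levi, info))

#chec_levi()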
def retr_jcbn():
fluxpare, lgalpare, bgalpare, fluxauxi, lgalauxi, bgalauxi = sympy.symbols('fluxpare lgalpare bgalpare fluxauxi lgalauxi bgalauxi')
matr = sympy.Matrix([[ fluxpare, fluxauxi, 0, 0, 0, 0], \
[-fluxpare, 1 - fluxauxi, 0, 0, 0, 0], \
[-lgalauxi, 0, 1, 1 - fluxauxi, 0, 0], \
[-lgalauxi, 0, 1, -fluxauxi, 0, 0], \
[-bgalauxi, 0, 0, 0, 1, 1 - fluxauxi], \
[-bgalauxi, 0, 0, 0, 1, -fluxauxi]])
jcbn = matr.det()
return jcbn
# f1 = uf f0
# f2 = (1 - uf) f0
# x1 = x0 + (1 - uf) ux
# x2 = x0 - uf ux
# y1 = y0 + (1 - uf) uy
# y2 = y0 - uf uy
# f1/uf f1/f0 f1/x0 f1/ux f1/y0 f1/uy
# f2/uf f2/f0 f2/x0 f2/ux f2/y0 f2/uy
# x1/uf x1/f0 x1/x0 x1/ux x1/y0 x1/uy
# x2/uf x2/f0 x2/x0 x2/ux x2/y0 x2/uy
# y1/uf y1/f0 y1/x0 y1/ux y1/y0 y1/uy
# y2/uf y2/f0 y2/x0 y2/ux y2/y0 y2/uy
# f0 uf 0 0 0 0
# -f0 1 - uf 0 0 0 0
# -ux 0 1 1 - uf 0 0
# -ux 0 1 -uf 0 0
# -uy 0 0 0 1 1 - uf
# -uy 0 0 0 1 -uf
# f0
#retr_jcbn()
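# Quick symbolic check (illustrative, mirroring the commented-out call above): per the
# derivation in the comments, the determinant of the split/merge transformation should
# simplify to an expression proportional to the parent flux.
def chec_jcbn():
    
    jcbn = retr_jcbn()
    print('Split/merge Jacobian determinant:')
    print(sympy.simplify(jcbn))

#chec_jcbn()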
def retr_angldist(gdat, lgalfrst, bgalfrst, lgalseco, bgalseco):
# temp -- heal does not work when the dimension of lgalfrst is 1
if gdat.typepixl == 'heal':
dir1 = np.array([lgalfrst, bgalfrst])
dir2 = np.array([lgalseco, bgalseco])
angldist = hp.rotator.angdist(dir1, dir2)
else:
angldist = np.sqrt((lgalfrst - lgalseco)**2 + (bgalfrst - bgalseco)**2)
return angldist
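# Illustrative check of retr_angldist in the flat-sky ('cart') branch, where the
# angular distance reduces to the Euclidean separation. The gdat stub is a
# hypothetical stand-in; the 'heal' branch additionally requires healpy.
def chec_angldist():
    
    class gdatstub: pass
    gdat = gdatstub()
    gdat.typepixl = 'cart'
    # a 3-4-5 triangle in radians; expect 5e-3
    print(retr_angldist(gdat, 0., 0., 3e-3, 4e-3))

#chec_angldist()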
def retr_deflextr(gdat, indxpixlelem, sher, sang):
    
    # shear components gamma_1 = gamma cos(2 phi) and gamma_2 = gamma sin(2 phi)
    # (the original computed the cosine twice)
    factcosi = sher * np.cos(2. * sang)
    factsine = sher * np.sin(2. * sang)
    defllgal = factcosi * gdat.lgalgrid[indxpixlelem] + factsine * gdat.bgalgrid[indxpixlelem]
    deflbgal = factsine * gdat.lgalgrid[indxpixlelem] - factcosi * gdat.bgalgrid[indxpixlelem]
    
    return np.vstack((defllgal, deflbgal)).T
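# Illustrative check of retr_deflextr on a stand-in grid: an external shear deflection
# field vanishes at the origin and grows linearly with distance from it. The gdat stub
# is hypothetical; the real pipeline passes the full gdat object.
def chec_deflextr():
    
    class gdatstub: pass
    gdat = gdatstub()
    gdat.lgalgrid = np.linspace(-1., 1., 5)
    gdat.bgalgrid = np.linspace(-1., 1., 5)
    indxpixlelem = np.arange(5)
    defl = retr_deflextr(gdat, indxpixlelem, 0.1, np.pi / 4.)
    # the row for the central (origin) pixel should be zero
    print(defl)

#chec_deflextr()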
def readfile(path):
print('Reading %s...' % path)
filepick = open(path + '.p', 'rb')
filearry = h5py.File(path + '.h5', 'r')
gdattemptemp = pickle.load(filepick)
for attr in filearry:
setattr(gdattemptemp, attr, filearry[attr][()])
filepick.close()
filearry.close()
if 'gdatfinl' in path or 'gdatinit' in path:
if hasattr(gdattemptemp, 'edis') and gdattemptemp.edis is not None and hasattr(gdattemptemp, 'binsener'):
gdattemptemp.edisintp = sp.interpolate.interp1d(gdattemptemp.binsener, gdattemptemp.edis, fill_value='extrapolate')
gdattemptemp.adisobjt = sp.interpolate.interp1d(gdattemptemp.redsintp, gdattemptemp.adisintp, fill_value='extrapolate')
gdattemptemp.redsfromdlosobjt = sp.interpolate.interp1d(gdattemptemp.adisintp * gdattemptemp.redsintp, \
gdattemptemp.redsintp, fill_value='extrapolate')
return gdattemptemp
# gdatmodi (the sampler's worker state) is referenced below (show_paragenrscalfull,
# initcompfromstat); it is assumed to be provided by the caller when available
def init_stat(gdat, gdatmodi=None):
# construct the initial state
if gdat.typeverb > 0:
print('Initializing the sampler state...')
print('inittype')
print(gdat.inittype)
gmod = gdat.fitt
## initialization
### initialize the unit sample vector randomly
gmod.this.paragenrunitfull = np.random.rand(gmod.numbparagenrfull)
gmod.this.paragenrscalfull = np.empty(gmod.numbparagenrfull)
## impose user-specified initial state
### number of elements
## create dummy indxparagenrfullelem
gmod.this.indxparagenrfullelem = None
if gmod.numbparaelem > 0:
if gdat.inittype == 'refr':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gmod.paragenrunitfull[gmod.indxpara.numbelem[l]]
else:
for l in gmod.indxpopl:
if gmod.typemodltran == 'pois':
meanelemtemp = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, \
gmod.this.indxparagenrfullelem)[gmod.indxpara.meanelem[l]]
print('temp -- user input is not working for numbelem')
#namevarb = 'numbelempop%d' % l
#initvalu = getattr(gmod.init, namevarb)
#if initvalu > gmod.maxmpara.numbelem[l] or initvalu < gmod.minmpara.numbelem[l]:
# raise Exception('Bad initial number of elements...')
#gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = initvalu
if gmod.typemodltran == 'pois':
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = np.random.poisson(meanelemtemp)
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = round(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
min(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.maxmpara.numbelem[l])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
max(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.minmpara.numbelem[l])
gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]] = gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]]
if gdat.booldiagmode:
if gdat.typedata == 'mock' and gdat.inittype == 'refr':
for l in gmod.indxpopl:
if gmod.paragenrunitfull[gmod.indxpara.numbelem[l]] > gmod.maxmpara.numbelem[l]:
raise Exception('')
if gmod.numbparaelem > 0:
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.inittype == 'reco':
if gdat.namerecostat is not None:
strgcnfg = gdat.namerecostat
else:
strgcnfg = gdat.strgcnfg
path = gdat.pathoutp + 'stat_' + strgcnfg + '.h5'
if os.path.exists(path):
boolinitreco = True
thisfile = h5py.File(path, 'r')
if gdat.typeverb > 0:
print('Initializing from the state %s...' % path)
print('Likelihood:')
print(thisfile['lliktotl'][...])
# find the number of populations provided
maxmindxpopl = 0
for attr in thisfile:
    if attr.startswith('lgalpop'):
        indxpopltemp = int(attr[7])
        if indxpopltemp > maxmindxpopl:
            maxmindxpopl = indxpopltemp
numbpoplinpt = maxmindxpopl + 1

if numbpoplinpt != gmod.numbpopl:
    print('State file and fitting metamodel have different number of populations.')

# find the number of elements provided
cntr = np.zeros(numbpoplinpt, dtype=int)
for attr in thisfile:
    if attr.startswith('lgalpop'):
        indxpopltemp = int(attr[7])
        cntr[indxpopltemp] += 1
if gdat.typeverb > 0:
    print('Number of elements found:')
    print(cntr)
for attr in thisfile:
    for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
        if nameparagenrbase == attr:
            if nameparagenrbase.startswith('numbelem'):
                try:
                    indxpopltemp = int(nameparagenrbase[-1])
                    initnumbelem = getattr(gdat, 'initnumbelempop%d' % indxpopltemp)
                    print('Initial condition for the number of elements conflicts with the state file. Defaulting to the argument...')
                except Exception:
                    initnumbelem = thisfile[attr][()]
                gmod.this.paragenrunitfull[k] = initnumbelem
            else:
                gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', thisfile[attr][()], k)
                if gmod.this.paragenrunitfull[k] == 0.:
                    print('Warning: CDF is zero.')
            if not np.isfinite(thisfile[attr][()]):
                raise Exception('Retrieved state parameter is not finite.')
            if (gmod.numbparaelem == 0 or (gmod.numbparaelem > 0 and k not in gmod.indxpara.numbelem)) and \
                    (not np.isfinite(gmod.this.paragenrunitfull[k]) or gmod.this.paragenrunitfull[k] < 0. or \
                    gmod.this.paragenrunitfull[k] > 1.):
                raise Exception('CDF of the retrieved state parameter is bad.')
if gmod.numbparaelem > 0:
    for l in gmod.indxpopl:
        maxmnumbelem = getattr(gdat.fitt.maxm, 'numbelempop%d' % l)
        if gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] > maxmnumbelem:
            gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = maxmnumbelem
            if gdat.typeverb > 0:
                print('Tapering off the element list...')
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
if gdat.typeverb > 0:
print('gmod.this.paragenrunitfull[gmod.indxpara.numbelem]')
print(gmod.this.paragenrunitfull[gmod.indxpara.numbelem])
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if (gmod.this.paragenrunitfull == 0).all():
raise Exception('Bad initialization.')
if gmod.numbparaelem > 0 and gmod.this.indxparagenrfullelem is not None:
for nameparagenrelem in gmod.namepara.elem:
initcomp = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
initcomp[l] = np.empty(len(gmod.this.indxelemfull[l]))
for k in range(len(gmod.this.indxelemfull[l])):
namefiel = '%spop%d%04d' % (nameparagenrelem, l, k)
for attr in thisfile:
if namefiel == attr:
initcomp[l][k] = thisfile[namefiel][()]
setattr(gdat, 'init' + nameparagenrelem, initcomp)
initcompfromstat(gdat, gdatmodi, 'init')
thisfile.close()
else:
boolinitreco = False
if gdat.typeverb > 0:
print('Could not find the state file, %s, to initialize the sampler.' % path)
if gdat.inittype == 'refr':
if gdat.typedata == 'inpt':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gdat.refr.numbelem[l]
if gdat.typedata == 'mock':
    for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
        # match the fitting-model base parameter against the true model by name;
        # the original compared the name list against itself, which is vacuous, so
        # gdat.true is assumed to be the intended target here
        if not (gdat.inittype == 'pert' and nameparagenrbase.startswith('numbelem')) and \
                nameparagenrbase in gdat.true.nameparagenrbase:
            indxparatrue = np.where(gdat.true.nameparagenrbase == nameparagenrbase)[0]
            gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.true.this.paragenrscalfull[indxparatrue], k)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.typeverb > 1:
show_paragenrscalfull(gdat, gdatmodi)
if gmod.this.indxparagenrfullelem is not None:
print('Initializing elements from the reference element parameters...')
show_paragenrscalfull(gdat, gdatmodi)
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
show_paragenrscalfull(gdat, gdatmodi)
initcompfromstat(gdat, gdatmodi, 'refr')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
## impose user-specified individual initial values
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
    if nameparagenrbase.startswith('numbelem'):
        continue
    if gdat.inittype == 'reco' or gdat.inittype == 'refr' or gdat.inittype == 'pert':
        try:
            getattr(gdat, 'init' + nameparagenrbase)
            print('Conflicting initial state arguments detected, init keyword takes precedence.')
        except AttributeError:
            pass
    try:
        initvalu = getattr(gdat, 'init' + nameparagenrbase)
        gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', initvalu, k)
        if gdat.typeverb > 0:
            print('Received initial condition for %s: %.3g' % (nameparagenrbase, initvalu))
    except AttributeError:
        pass
## PSF
if gdat.initpsfp is not None:
print('Initializing the metamodel PSF from the provided initial state...')
if gdat.initpsfp.size != gmod.indxpara.psfp.size:
raise Exception('')
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
    if k in gmod.indxpara.psfp:
        gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.initpsfp[k-gmod.indxpara.psfp[0]], k)
if gdat.initpsfprefr:
print('Initializing the metamodel PSF from the reference state...')
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
    if k in gmod.indxpara.psfp:
        gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmod.psfpexpr[k-gmod.indxpara.psfp[0]], k)
if gdat.inittype == 'rand' or (gdat.inittype == 'reco' and not boolinitreco):
if gdat.typeverb > 0:
print('Initializing from a random state...')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
# check the initial unit sample vector for bad entries
if gmod.numbparaelem > 0:
indxsampdiff = np.setdiff1d(gmod.indxparagenrfull, gmod.indxpara.numbelem)
if np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])).any():
raise Exception('')
indxsampbaddlowr = np.where((gmod.this.paragenrunitfull[indxsampdiff] <= 0.) | np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])))[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull[indxsampdiff] >= 1.)[0]
indxsampbaddlowr = indxsampdiff[indxsampbaddlowr]
indxsampbadduppr = indxsampdiff[indxsampbadduppr]
else:
indxsampbaddlowr = np.where(gmod.this.paragenrunitfull <= 0.)[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull >= 1.)[0]
indxsampbadd = np.concatenate((indxsampbaddlowr, indxsampbadduppr))
if indxsampbadd.size > 0:
print('Initial value caused unit sample vector to go outside the unit interval...')
show_paragenrscalfull(gdat, gdatmodi, indxsampshow=indxsampbadd)
gmod.this.paragenrunitfull[indxsampbadd] = np.random.rand(indxsampbadd.size)
raise Exception('')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
indxbadd = np.where(np.logical_not(np.isfinite(gmod.this.paragenrscalfull)))[0]
if indxbadd.size > 0:
raise Exception('')
def writfile(gdattemp, path):
filepick = open(path + '.p', 'wb')
filearry = h5py.File(path + '.h5', 'w')
gdattemptemp = tdpy.gdatstrt()
for attr, valu in gdattemp.__dict__.items():
if attr.endswith('psfnintp'):
continue
if isinstance(valu, np.ndarray) and valu.dtype != np.dtype('O') and valu.dtype != np.dtype('<U4'):# or isinstance(valu, str) or \
#isinstance(valu, float) or isinstance(valu, bool) or isinstance(valu, int) or isinstance(valu, np.float):
filearry.create_dataset(attr, data=valu)
else:
# temp -- make sure interpolation objects are not written.
if attr != 'adisobjt' and attr != 'redsfromdlosobjt' and attr != 'edisintp':
setattr(gdattemptemp, attr, valu)
print('Writing to %s...' % path)
pickle.dump(gdattemptemp, filepick, protocol=pickle.HIGHEST_PROTOCOL)
filepick.close()
filearry.close()
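# Illustrative round trip through writfile/readfile: numpy arrays go to the HDF5 file,
# everything else is pickled. The stand-in object and path below are hypothetical;
# tdpy, h5py and pickle are assumed imported at module level, as writfile itself uses them.
def chec_writread(path='/tmp/gdattest'):
    
    gdattemp = tdpy.gdatstrt()
    gdattemp.maps = np.arange(9).reshape((3, 3)).astype(float)
    gdattemp.strgcnfg = 'test'
    writfile(gdattemp, path)
    gdattempread = readfile(path)
    assert np.allclose(gdattempread.maps, gdattemp.maps)

#chec_writread()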
def retr_deflcutf(angl, defs, asca, acut, asym=False):
fracanglasca = angl / asca
deflcutf = defs / fracanglasca
# second term in the NFW deflection profile
fact = np.ones_like(fracanglasca)
indxlowr = np.where(fracanglasca < 1.)[0]
indxuppr = np.where(fracanglasca > 1.)[0]
fact[indxlowr] = np.arccosh(1. / fracanglasca[indxlowr]) / np.sqrt(1. - fracanglasca[indxlowr]**2)
fact[indxuppr] = np.arccos(1. / fracanglasca[indxuppr]) / np.sqrt(fracanglasca[indxuppr]**2 - 1.)
if asym:
deflcutf *= np.log(fracanglasca / 2.) + fact
else:
fracacutasca = acut / asca
factcutf = fracacutasca**2 / (fracacutasca**2 + 1)**2 * ((fracacutasca**2 + 1. + 2. * (fracanglasca**2 - 1.)) * fact + \
np.pi * fracacutasca + (fracacutasca**2 - 1.) * np.log(fracacutasca) + np.sqrt(fracanglasca**2 + fracacutasca**2) * (-np.pi + (fracacutasca**2 - 1.) / fracacutasca * \
np.log(fracanglasca / (np.sqrt(fracanglasca**2 + fracacutasca**2) + fracacutasca))))
deflcutf *= factcutf
return deflcutf
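# Illustrative sanity check of the truncated-NFW deflection profile above: the
# deflection should decline well outside the truncation radius. Pure numpy, uses only
# retr_deflcutf; the parameter values are arbitrary.
def chec_deflcutf():
    
    angl = np.logspace(-2., 1., 100)
    defl = retr_deflcutf(angl, 1., 0.1, 1.)
    print('Deflection at the scale radius, truncation radius, and in the far field:')
    print(np.interp([0.1, 1., 10.], angl, defl))

#chec_deflcutf()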
def initchro(gdat, gdatmodi, name):
if gdatmodi is not None:
setattr(gdatmodi.this, 'chro' + name, gdat.functime())
def stopchro(gdat, gdatmodi, name):
if gdatmodi is not None:
setattr(gdatmodi.this, 'chro' + name, gdat.functime() - getattr(gdatmodi.this, 'chro' + name))
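# Illustrative use of the chronometer helpers: initchro stamps the start time and
# stopchro replaces it with the elapsed time under the same 'chro' + name attribute.
# The stubs below are hypothetical stand-ins for gdat and gdatmodi.
def chec_chro():
    
    import time
    
    class gdatstub: pass
    gdat = gdatstub()
    gdat.functime = time.time
    gdatmodi = gdatstub()
    gdatmodi.this = gdatstub()
    
    initchro(gdat, gdatmodi, 'test')
    time.sleep(0.01)
    stopchro(gdat, gdatmodi, 'test')
    print('Elapsed [s]: %g' % gdatmodi.this.chrotest)

#chec_chro()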
def retr_defl(gdat, indxpixlelem, lgal, bgal, angllens, ellp=None, angl=None, rcor=None, asca=None, acut=None):
# translate the grid
lgaltran = gdat.lgalgrid[indxpixlelem] - lgal
bgaltran = gdat.bgalgrid[indxpixlelem] - bgal
if acut is not None:
defs = angllens
angl = np.sqrt(lgaltran**2 + bgaltran**2)
defl = retr_deflcutf(angl, defs, asca, acut)
defllgal = lgaltran / angl * defl
deflbgal = bgaltran / angl * defl
else:
bein = angllens
# rotate the grid
lgalrttr = np.cos(angl) * lgaltran - np.sin(angl) * bgaltran
bgalrttr = np.sin(angl) * lgaltran + np.cos(angl) * bgaltran
axisrati = 1. - ellp
facteccc = np.sqrt(1. - axisrati**2)
factrcor = np.sqrt(axisrati**2 * lgalrttr**2 + bgalrttr**2)
defllgalrttr = bein * axisrati / facteccc * np.arctan(facteccc * lgalrttr / factrcor)
deflbgalrttr = bein * axisrati / facteccc * np.arctanh(facteccc * bgalrttr / factrcor)
# rotate the deflection vector back to the original basis
defllgal = np.cos(angl) * defllgalrttr + np.sin(angl) * deflbgalrttr
deflbgal = -np.sin(angl) * defllgalrttr + np.cos(angl) * deflbgalrttr
defl = np.vstack((defllgal, deflbgal)).T
return defl
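# Illustrative call of retr_defl in its truncated-NFW branch (acut given), on a small
# stand-in grid that avoids the singular origin pixel. The gdat stub is hypothetical;
# the real call sites pass the full gdat object.
def chec_defl():
    
    class gdatstub: pass
    gdat = gdatstub()
    numbside = 10
    lgal, bgal = np.meshgrid(np.linspace(-1., 1., numbside), np.linspace(-1., 1., numbside))
    gdat.lgalgrid = lgal.flatten()
    gdat.bgalgrid = bgal.flatten()
    indxpixlelem = np.arange(gdat.lgalgrid.size)
    
    # circular (non-elliptical) profile centered at the origin
    defl = retr_defl(gdat, indxpixlelem, 0., 0., 1., asca=0.1, acut=1.)
    # one (lgal, bgal) deflection pair per pixel
    print(defl.shape)

#chec_defl()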
def retr_lpriselfdist(gdat, strgmodl, feat, strgfeat):
    
    gmod = getattr(gdat, strgmodl)
    
    minm = getattr(gmod.minmpara, strgfeat)
    maxm = getattr(gmod.maxmpara, strgfeat)
    lpri = np.sum(np.log(pdfn_self(feat, minm, maxm)))
    
    return lpri
def retr_lprilogtdist(gdat, strgmodl, feat, strgfeat):
    
    gmod = getattr(gdat, strgmodl)
    
    minm = getattr(gmod.minmpara, strgfeat)
    maxm = getattr(gmod.maxmpara, strgfeat)
    lpri = np.sum(np.log(pdfn_logt(feat, minm, maxm)))
    
    return lpri
def retr_lpripowrdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
gmod = getattr(gdat, strgmodl)
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + strgfeat + 'pop%d' % l)]
lpri = np.sum(np.log(pdfn_powr(feat, minm, maxm, slop)))
return lpri
def retr_lpridpowdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    
    gmod = getattr(gdat, strgmodl)
    
    minm = getattr(gmod.minmpara, strgfeat)
    maxm = getattr(gmod.maxmpara, strgfeat)
    brek = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distbrek')[l]]
    sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + strgfeat)[l]]
    slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + strgfeat)[l]]
    lpri = np.sum(np.log(pdfn_dpow(feat, minm, maxm, brek, sloplowr, slopuppr)))
    
    return lpri
def retr_lprigausdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    
    gmod = getattr(gdat, strgmodl)
    
    distmean = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distmean')[l]]
    diststdv = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'diststdv')[l]]
    lpri = np.sum(np.log(pdfn_gaus(feat, distmean, diststdv)))
    
    return lpri
def retr_lpriigamdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    
    gmod = getattr(gdat, strgmodl)
    
    slop = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'slop')[l]]
    cutf = getattr(gmod, 'cutf' + strgfeat)
    lpri = np.sum(np.log(pdfn_igam(feat, slop, cutf)))
    
    return lpri
def traptdim(gdat, arry):
    
    # two-dimensional composite trapezoidal rule: corner samples get weight 1,
    # edge samples weight 2, and interior samples weight 4
    s1 = arry[0, 0] + arry[-1, 0] + arry[0, -1] + arry[-1, -1]
    s2 = np.sum(arry[1:-1, 0]) + np.sum(arry[1:-1, -1]) + np.sum(arry[0, 1:-1]) + np.sum(arry[-1, 1:-1])
    s3 = np.sum(arry[1:-1, 1:-1])
    summ = (s1 + 2*s2 + 4*s3) * gdat.apix
    
    return summ
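# Illustrative check of traptdim on a constant map: with the corner/edge/interior
# weights above, a unit map on an n x n grid sums to 4 * (n - 1)^2 * gdat.apix, so any
# constant factor cancels when the result is used to normalize a prior (see
# retr_spatprio below). The gdat stub is hypothetical.
def chec_traptdim():
    
    class gdatstub: pass
    gdat = gdatstub()
    gdat.apix = 1.
    numbside = 5
    arry = np.ones((numbside, numbside))
    summ = traptdim(gdat, arry)
    print('traptdim of a unit map: %g (expected %g)' % (summ, 4. * (numbside - 1)**2 * gdat.apix))

#chec_traptdim()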
def retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons=None):
pdfnspatprio = pdfnspatpriotemp
if spatdistcons is not None:
pdfnspatprio += spatdistcons
summ = traptdim(gdat, pdfnspatprio)
pdfnspatprio /= summ
lpdfspatprio = np.log(pdfnspatprio)
lpdfspatprioobjt = sp.interpolate.RectBivariateSpline(gdat.binspara.bgalcart, gdat.binspara.lgalcart, lpdfspatprio)
return lpdfspatprio, lpdfspatprioobjt
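# Illustrative use of retr_spatprio with a stand-in gdat: a Gaussian template is
# normalized into a spatial prior PDF and wrapped in a spline interpolator. The gdat
# stub (apix and the binspara.lgalcart/bgalcart grids) is hypothetical.
def chec_spatprio():
    
    class gdatstub: pass
    gdat = gdatstub()
    gdat.apix = 1.
    gdat.binspara = gdatstub()
    gdat.binspara.lgalcart = np.linspace(-1., 1., 20)
    gdat.binspara.bgalcart = np.linspace(-1., 1., 20)
    lgal, bgal = np.meshgrid(gdat.binspara.lgalcart, gdat.binspara.bgalcart)
    pdfnspatpriotemp = np.exp(-0.5 * (lgal**2 + bgal**2) / 0.25)
    
    lpdfspatprio, lpdfspatprioobjt = retr_spatprio(gdat, pdfnspatpriotemp)
    # log-prior density at the field center
    print(lpdfspatprioobjt(0., 0.))

#chec_spatprio()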
def retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=False):
if strgmodl == 'true':
gdatobjt = gdat.true
elif strgmodl == 'fitt' and boolinit:
gdatobjt = gdat.fitt
else:
gdatobjt = gdatmodi
return gdatobjt
def proc_samp(gdat, gdatmodi, strgstat, strgmodl, fast=False, boolinit=False):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=boolinit)
gmodstat = getattr(gdatobjt, strgstat)
initchro(gdat, gdatmodi, 'pars')
# grab the sample vector
indxpara = np.arange(gmodstat.paragenrscalfull.size)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.paragenrscalfull).all():
raise Exception('')
if gmod.typeevalpsfn != 'none' and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
psfp = gmodstat.paragenrscalfull[gmod.indxpara.psfp]
if gdat.booldiagmode:
if (psfp == 0).all():
raise Exception('')
setattr(gmodstat, 'psfp', psfp)
bacp = gmodstat.paragenrscalfull[gmod.indxpara.bacp]
if gmod.numbparaelem > 0:
# temp -- this may slow down execution
gmodstat.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodstat.indxelemfull, strgmodl)
gmodstat.numbelem = np.empty(gmod.numbpopl, dtype=int)
indxelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
indxelem[l] = np.arange(gmodstat.numbelem[l])
gmodstat.numbelem[l] = np.sum(gmodstat.numbelem[l])
gmodstat.numbelemtotl = np.sum(gmodstat.numbelem)
gmodstat.dictelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.dictelem[l] = dict()
for strgfeat in gmod.namepara.genrelemdefa:
gmodstat.dictelem[l][strgfeat] = []
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem] = gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]
if gdat.booldiagmode:
if ((abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) < 1e-100 ) & (abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) > 0.)).any():
raise Exception('')
if gmodstat.numbelem[l] != len(gmodstat.dictelem[l][nameparagenrelem]):
print('l')
print(l)
print('gmodstat.numbelem[l]')
print(gmodstat.numbelem[l])
print('gmodstat.dictelem')
print(gmodstat.dictelem)
print('nameparagenrelem')
print(nameparagenrelem)
raise Exception('')
if gdat.boolbinsener:
if gdat.typeverb > 2:
print('Calculating element spectra...')
initchro(gdat, gdatmodi, 'spec')
for l in gmod.indxpopl:
    if gmod.typeelem[l].startswith('lghtline'):
        if gmod.typeelem[l] == 'lghtlinevoig':
            gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
                    gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
        else:
            gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], \
                    edisintp=gdat.edisintp, spectype=gmod.spectype[l])
    else:
        sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
        gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
                expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'spec')
if gdat.typeverb > 2:
print('Element features:')
for l in gmod.indxpopl:
print('l')
print(l)
for strgfeat in gmod.namepara.genrelem[l]:
print(strgfeat)
print(gmodstat.dictelem[l][strgfeat])
if gdat.booldiagmode:
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if ((gmod.listscalparagenrelem[l][g] != 'gaus' and not gmod.listscalparagenrelem[l][g].startswith('lnor')) and \
        (gmod.listscalparagenrelem[l][g] != 'expo' and (gmodstat.dictelem[l][nameparagenrelem] < getattr(gmod.minmpara, nameparagenrelem)).any())) or \
        (gmodstat.dictelem[l][nameparagenrelem] > getattr(gmod.maxmpara, nameparagenrelem)).any():
print('l, g')
print(l, g)
print('nameparagenrelem')
print(nameparagenrelem)
print('gmodstat.dictelem[l][nameparagenrelem]')
summgene(gmodstat.dictelem[l][nameparagenrelem])
print('getattr(gmod.minmpara, nameparagenrelem)')
print(getattr(gmod.minmpara, nameparagenrelem))
print('getattr(gmod.maxmpara, nameparagenrelem)')
print(getattr(gmod.maxmpara, nameparagenrelem))
print('gmod.listscalparagenrelem[l][g]')
print(gmod.listscalparagenrelem[l][g])
raise Exception('')
# calculate element spectra
# temp
if gdat.booldiagmode:
for l in gmod.indxpopl:
    if gmod.typeelem[l] == 'lens':
        if gdat.variasca:
            indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['asca']] < 0.)[0]
            if indx.size > 0:
                raise Exception('Scale radii include negative values.')
        if gdat.variacut:
            indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['acut']] < 0.)[0]
            if indx.size > 0:
                raise Exception('Truncation radii include negative values.')
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
# evaluate horizontal and vertical position for elements whose position is a power law in image-centric radius
if gmod.typespatdist[l] == 'glc3':
gmodstat.dictelem[l]['dlos'], gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'] = retr_glc3(gmodstat.dictelem[l]['dglc'], \
gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'])
if gmod.typespatdist[l] == 'gangexpo':
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], = retr_lgalbgal(gmodstat.dictelem[l]['gang'], \
gmodstat.dictelem[l]['aang'])
if gdat.booldiagmode:
if gmodstat.numbelem[l] > 0:
if np.amin(gmodstat.dictelem[l]['lgal']) < gmod.minmlgal or \
np.amax(gmodstat.dictelem[l]['lgal']) > gmod.maxmlgal or \
np.amin(gmodstat.dictelem[l]['bgal']) < gmod.minmbgal or \
np.amax(gmodstat.dictelem[l]['bgal']) > gmod.maxmbgal:
raise Exception('Bad coordinates!')
if gmod.typespatdist[l] == 'los3':
gmodstat.dictelem[l]['dglc'], gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'] = retr_los3(gmodstat.dictelem[l]['dlos'], \
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
# evaluate flux for pulsars
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['lumi'] = retr_lumipuls(gmodstat.dictelem[l]['geff'], gmodstat.dictelem[l]['magf'], gmodstat.dictelem[l]['per0'])
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
gmodstat.dictelem[l]['lumi'] = gmodstat.dictelem[l]['lum0'] * (1. + gmodstat.dictelem[l]['reds'])**4
if gmod.typeelem[l] == 'lghtpntspuls' or gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['flux'] = retr_flux(gdat, gmodstat.dictelem[l]['lumi'], gmodstat.dictelem[l]['dlos'])
# evaluate spectra
if gmod.typeelem[l].startswith('lghtline'):
if gmod.typeelem[l] == 'lghtlinevoig':
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
else:
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], edisintp=gdat.edisintp, spectype=gmod.spectype[l])
else:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'pars')
### loglikelihood
initchro(gdat, gdatmodi, 'modl')
if gmod.boollens:
lgalsour = gmodstat.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gmodstat.paragenrscalfull[gmod.indxpara.bgalsour]
if gdat.typeverb > 2:
print('Evaluating the likelihood...')
# process a sample vector and the occupancy list to calculate secondary variables
if gmod.boollens:
fluxsour = gmodstat.paragenrscalfull[gmod.indxpara.fluxsour]
if gdat.numbener > 1:
sindsour = gmodstat.paragenrscalfull[gmod.indxpara.sindsour]
sizesour = gmodstat.paragenrscalfull[gmod.indxpara.sizesour]
ellpsour = gmodstat.paragenrscalfull[gmod.indxpara.ellpsour]
anglsour = gmodstat.paragenrscalfull[gmod.indxpara.anglsour]
if gmod.typeemishost != 'none':
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
fluxhost = [[] for e in gmod.indxsersfgrd]
if gdat.numbener > 1:
sindhost = [[] for e in gmod.indxsersfgrd]
sizehost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % e)]
bgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % e)]
fluxhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'fluxhostisf%d' % e)]
if gdat.numbener > 1:
sindhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sindhostisf%d' % e)]
sizehost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sizehostisf%d' % e)]
if gmod.boollens:
beinhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
beinhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % e)]
if gmod.typeemishost != 'none':
ellphost = [[] for e in gmod.indxsersfgrd]
anglhost = [[] for e in gmod.indxsersfgrd]
serihost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
ellphost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'ellphostisf%d' % e)]
anglhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'anglhostisf%d' % e)]
serihost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'serihostisf%d' % e)]
if gmod.boollens:
numbpixltemp = gdat.numbpixlcart
defl = np.zeros((numbpixltemp, 2))
# determine the indices of the pixels over which element kernels will be evaluated
if gdat.boolbinsspat:
if gmod.numbparaelem > 0:
listindxpixlelem = [[] for l in gmod.indxpopl]
listindxpixlelemconc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
listindxpixlelem[l], listindxpixlelemconc[l] = retr_indxpixlelemconc(gdat, strgmodl, gmodstat.dictelem, l)
if gmod.boollens:
sherextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sherextr')]
sangextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sangextr')]
## host halo deflection
initchro(gdat, gdatmodi, 'deflhost')
deflhost = [[] for e in gmod.indxsersfgrd]
indxpixlmiss = gdat.indxpixlcart
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the deflection field due to host galaxy %d' % e)
print('lgalhost[e]')
print(lgalhost[e])
print('bgalhost[e]')
print(bgalhost[e])
print('beinhost[e]')
print(beinhost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
deflhost[e] = retr_defl(gdat, indxpixlmiss, lgalhost[e], bgalhost[e], beinhost[e], ellp=ellphost[e], angl=anglhost[e])
if gdat.booldiagmode:
indxpixltemp = slice(None)
setattr(gmodstat, 'deflhostisf%d' % e, deflhost[e])
if gdat.typeverb > 2:
print('deflhost[e]')
summgene(deflhost[e])
defl += deflhost[e]
if gdat.typeverb > 2:
print('After adding the host deflection...')
print('defl')
summgene(defl)
if gdat.booldiagmode:
if not np.isfinite(deflhost).all():
raise Exception('')
stopchro(gdat, gdatmodi, 'deflhost')
## external shear
initchro(gdat, gdatmodi, 'deflextr')
deflextr = []
indxpixltemp = gdat.indxpixlcart
deflextr = retr_deflextr(gdat, indxpixltemp, sherextr, sangextr)
defl += deflextr
if gdat.typeverb > 2:
print('After adding the external deflection...')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'deflextr')
# Boolean flag to indicate that the object to convolve the image will be needed
boolneedpsfnconv = gdat.typepixl == 'cart' and (gmod.typeevalpsfn == 'conv' or gmod.typeevalpsfn == 'full')
## Boolean flag to indicate that the object to convolve the image will be constructed
boolcalcpsfnconv = strgmodl == 'true' or boolinit or gdat.boolmodipsfn
# get the convolution object
if boolneedpsfnconv and boolcalcpsfnconv:
initchro(gdat, gdatmodi, 'psfnconv')
if gdat.typeverb > 2:
print('Evaluating the PSF convolution kernel...')
psfnconv = [[[] for i in gdat.indxener] for m in gdat.indxevtt]
if gdat.typepixl == 'cart':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
for mm, m in enumerate(gdat.indxevtt):
for ii, i in enumerate(gdat.indxener):
if gmod.typemodlpsfn == 'singgaus':
sigm = psfp[i+m*gdat.numbener]
else:
sigm = fwhm[i, m] / 2.355
psfnconv[mm][ii] = AiryDisk2DKernel(sigm / gdat.sizepixl)
stopchro(gdat, gdatmodi, 'psfnconv')
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and gmod.numbparaelem > 0:
if strgmodl == 'true' or boolinit or gdat.boolmodipsfn:
if gdat.typepixl == 'heal':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gdat.typepixl == 'cart':
if gdat.kernevaltype == 'ulip':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
if gdat.booldiagmode:
if not np.isfinite(gmodstat.psfnintp(0.05)).all():
raise Exception('')
if gdat.kernevaltype == 'bspx':
    gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.anglcart.flatten(), gmod.typemodlpsfn, strgmodl)
    # side length of the upsampled kernel
    gdat.numbsidekernusam = 100
    # side length of the original kernel; factkernusam (the upsampling factor)
    # is assumed to be defined upstream
    gdat.numbsidekern = gdat.numbsidekernusam // factkernusam
    gdat.indxsidekern = np.arange(gdat.numbsidekern)
    # pad by one row and one column
    #psf = np.zeros((gdat.numbsidekernusam+1, gdat.numbsidekernusam+1))
    #psf[0:gdat.numbsidekernusam, 0:gdat.numbsidekernusam] = psf0
    # make design matrix for each factkernusam x factkernusam region
    nx = factkernusam + 1
    y, x = np.mgrid[0:nx, 0:nx] / float(factkernusam)
    x = x.flatten()
    y = y.flatten()
    kernmatrdesi = np.array([np.full(nx*nx, 1.), x, y, x*x, x*y, y*y, x*x*x, x*x*y, x*y*y, y*y*y]).T
    # output array of polynomial coefficients
    gmodstat.psfnintp = np.empty((gdat.numbsidekern, gdat.numbsidekern, kernmatrdesi.shape[1]))
    # solve p = kernmatrdesi psfnintp for psfnintp in the least-squares sense;
    # psf (the padded, upsampled kernel) is assumed to be defined upstream
    for iy in gdat.indxsidekern:
        for ix in gdat.indxsidekern:
            p = psf[iy*factkernusam:(iy+1)*factkernusam+1, ix*factkernusam:(ix+1)*factkernusam+1].flatten()
            gmodstat.psfnintp[iy, ix, :] = np.linalg.solve(np.dot(kernmatrdesi.T, kernmatrdesi), np.dot(kernmatrdesi.T, p))
else:
gmodstat.psfnintp = gdat.fitt.this.psfnintp
sbrt = dict()
for name in gmod.listnamediff:
sbrt[name] = []
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = []
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = []
if gmod.boolelemdeflsubhanyy:
deflsubh = []
# retrieve or initialize state variable
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = np.zeros_like(gdat.expo)
if gmod.boolelemdeflsubhanyy:
deflsubh = np.zeros((gdat.numbpixl, 2))
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = np.zeros_like(gdat.expo)
# element kernel evaluation
if gmod.boolelemsbrtdfncanyy:
initchro(gdat, gdatmodi, 'elemsbrtdfnc')
sbrt['dfnc'] = []
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
for k in range(gmodstat.numbelem[l]):
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
if gmod.typeelem[l] == 'clusvari':
sbrtdfnc[0, listindxpixlelem[l][k], 0] += gmodstat.dictelem[l]['nobj'][k] / 2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[listindxpixlelem[l][k]])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[listindxpixlelem[l][k]])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
if gmod.boolelempsfn[l]:
if gdat.typeverb > 2:
    print('sbrtdfnc')
    summgene(sbrtdfnc)
sbrtdfnc[:, listindxpixlelem[l][k], :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
if gmod.typeelem[l].startswith('lghtline'):
sbrtdfnc[:, 0, 0] += gmodstat.dictelem[l]['spec'][:, k]
sbrt['dfnc'] = sbrtdfnc
if gdat.booldiagmode:
if not np.isfinite(sbrtdfnc).all():
raise Exception('Element delta function brightness not finite.')
setattr(gmodstat, 'sbrtdfnc', sbrt['dfnc'])
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['dfnc'])
numbelemtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
numbelemtemp += np.sum(gmodstat.numbelem[l])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtdfnc')
if gmod.boolelemdeflsubhanyy:
initchro(gdat, gdatmodi, 'elemdeflsubh')
if gdat.typeverb > 2:
print('Perturbing subhalo deflection field')
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
for kk, k in enumerate(indxelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
if gmod.typeelemspateval[l] == 'locl':
indxpixl = listindxpixlelem[l][kk]
else:
indxpixl = gdat.indxpixl
deflsubh[indxpixl, :] += retr_defl(gdat, indxpixl, \
gmodstat.dictelem[l]['lgal'][kk], gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['defs'][kk], \
asca=asca, acut=acut)
# temp -- find out what is causing the features in the element convergence maps
#for kk, k in enumerate(indxelem[l]):
# indxpixlpnts = retr_indxpixl(gdat, gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['lgal'][kk])
# if deflsubh[listindxpixlelem[l][kk], :]
if gdat.typeverb > 2:
print('deflsubh')
summgene(deflsubh)
setattr(gmodstat, 'deflsubh', deflsubh)
if gdat.booldiagmode:
if not np.isfinite(deflsubh).all():
raise Exception('Element deflection is not finite.')
defl += deflsubh
if gdat.typeverb > 2:
print('After adding subhalo deflection to the total deflection')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'elemdeflsubh')
if gmod.boolelemsbrtextsbgrdanyy:
initchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if strgstat == 'this':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtgausbgrd':
for k in range(gmodstat.numbelem[l]):
sbrtextsbgrd[:, listindxpixlelem[l][k], :] += gmodstat.dictelem[l]['spec'][:, k, None, None] / \
2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[None, listindxpixlelem[l][k], None])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[None, listindxpixlelem[l][k], None])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
setattr(gmodstat, 'sbrtextsbgrd', sbrtextsbgrd)
sbrt['extsbgrd'] = []
sbrt['extsbgrd'] = sbrtextsbgrd
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['extsbgrd'])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if gdat.typeverb > 2:
print('Element related state variables after perturbations...')
if gmod.boolelemsbrtdfncanyy:
print('sbrtdfnc')
summgene(sbrtdfnc)
if gmod.boolelemdeflsubhanyy:
print('deflsubh')
summgene(deflsubh)
if gmod.boolelemsbrtextsbgrdanyy:
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
if gmod.boollens:
# lensed surface brightness
initchro(gdat, gdatmodi, 'sbrtlens')
if gdat.typeverb > 2:
print('Evaluating lensed surface brightness...')
if strgstat == 'this' or (gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy):
sbrt['bgrd'] = []
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
sbrt['bgrdgalx'] = []
if gdat.numbener > 1:
specsour = retr_spec(gdat, np.array([fluxsour]), sind=np.array([sindsour]))
if gdat.typeverb > 2:
print('sindsour')
print(sindsour)
else:
specsour = np.array([fluxsour])
if gdat.typeverb > 2:
print('lgalsour')
print(lgalsour)
print('bgalsour')
print(bgalsour)
print('sizesour')
print(sizesour)
print('ellpsour')
print(ellpsour)
print('anglsour')
print(anglsour)
print('fluxsour')
print(fluxsour)
print('specsour')
print(specsour)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.typeverb > 2:
print('Interpolating the background emission...')
# indxpixlelem[0] in the original is undefined here; the concatenated pixel indices
# of the background-grid population (listindxpixlelemconc[0]) are the plausible target
sbrt['bgrdgalx'] = retr_sbrtsers(gdat, gdat.lgalgrid[listindxpixlelemconc[0]], gdat.bgalgrid[listindxpixlelemconc[0]], \
        lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
if gdat.typeverb > 2:
print('sbrt[bgrdgalx]')
summgene(sbrt['bgrdgalx'])
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
sbrt['bgrd'] = sbrt['bgrdgalx'] + sbrtextsbgrd
sbrt['lens'] = np.empty_like(gdat.cntpdata)
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
sbrtbgrdobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
sbrt['bgrd'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
bgalprim = gdat.bgalgrid[listindxpixlelemconc[0]] - defl[listindxpixlelemconc[0], 1]
lgalprim = gdat.lgalgrid[listindxpixlelemconc[0]] - defl[listindxpixlelemconc[0], 0]
# temp -- T?
sbrt['lens'][ii, :, m] = sbrtbgrdobjt(bgalprim, lgalprim, grid=False).flatten()
else:
if gdat.typeverb > 2:
print('Not interpolating the background emission...')
sbrt['lens'] = retr_sbrtsers(gdat, gdat.lgalgrid - defl[gdat.indxpixl, 0], \
gdat.bgalgrid - defl[gdat.indxpixl, 1], \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
sbrt['bgrd'] = retr_sbrtsers(gdat, gdat.lgalgrid, \
gdat.bgalgrid, \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
setattr(gmodstat, 'sbrtlens', sbrt['lens'])
if gdat.booldiagmode:
if not np.isfinite(sbrt['lens']).all():
raise Exception('Lensed emission is not finite.')
if (sbrt['lens'] == 0).all():
raise Exception('Lensed emission is zero everywhere.')
stopchro(gdat, gdatmodi, 'sbrtlens')
### background surface brightness
sbrtback = []
# temp
#sbrtback = np.empty((numbback, gdat.numbener, indxpixlelem[yy].size, gdat.numbevtt))
# evaluate host galaxy surface brightness
if gmod.typeemishost != 'none':
initchro(gdat, gdatmodi, 'sbrthost')
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the host galaxy surface brightness...')
if gdat.numbener > 1:
spechost = retr_spec(gdat, np.array([fluxhost[e]]), sind=np.array([sindhost[e]]))
else:
spechost = np.array([fluxhost[e]])
if gdat.typeverb > 2:
print('lgalhost[e]')
print(lgalhost[e] * gdat.anglfact)
print('bgalhost[e]')
print(bgalhost[e] * gdat.anglfact)
print('spechost')
print(spechost)
print('sizehost[e]')
print(sizehost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
print('serihost[e]')
print(serihost[e])
sbrt['hostisf%d' % e] = retr_sbrtsers(gdat, gdat.lgalgrid, gdat.bgalgrid, lgalhost[e], \
bgalhost[e], spechost, sizehost[e], ellphost[e], anglhost[e], serihost[e])
setattr(gmodstat, 'sbrthostisf%d' % e, sbrt['hostisf%d' % e])
#sbrthost = sbrt['host']
if gdat.typeverb > 2:
for e in gmod.indxsersfgrd:
print('e')
print(e)
print('sbrt[hostisf%d]' % e)
summgene(sbrt['hostisf%d' % e])
stopchro(gdat, gdatmodi, 'sbrthost')
## model emission
initchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('Summing up the model emission...')
sbrt['modlraww'] = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))
for name in gmod.listnamediff:
if name.startswith('back'):
gmod.indxbacktemp = int(name[4:8])
if gdat.typepixl == 'heal' and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv') and not gmod.boolunifback[gmod.indxbacktemp]:
sbrttemp = getattr(gmod, 'sbrtbackhealfull')[gmod.indxbacktemp]
else:
sbrttemp = gmod.sbrtbacknorm[gmod.indxbacktemp]
if gmod.boolspecback[gmod.indxbacktemp]:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp]]
else:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp][gdat.indxener]][:, None, None]
sbrt['modlraww'] += sbrt[name]
if gdat.booldiagmode:
if np.amax(sbrttemp) == 0.:
raise Exception('')
if gdat.typeverb > 2:
print('name')
print(name)
print('sbrt[name]')
summgene(sbrt[name])
if gdat.typeverb > 2:
for ii, i in enumerate(gdat.indxener):
print('ii, i')
print(ii, i)
for mm, m in enumerate(gdat.indxevtt):
print('mm, m')
print(mm, m)
print('sbrt[modlraww][ii, :, mm]')
summgene(sbrt['modlraww'][ii, :, mm])
# convolve the model with the PSF
if gmod.convdiffanyy and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'):
sbrt['modlconv'] = []
# temp -- isotropic background proposals are unnecessarily entering this clause
if gdat.typeverb > 2:
print('Convolving the model image with the PSF...')
sbrt['modlconv'] = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
if gdat.strgcnfg == 'pcat_ferm_igal_mock_test':
print('Convolving ii, i, mm, m')
print(ii, i, mm, m)
if gdat.typepixl == 'cart':
if gdat.numbpixl == gdat.numbpixlcart:
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrt['modlraww'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)), \
psfnconv[mm][ii]).flatten()
else:
sbrtfull = np.zeros(gdat.numbpixlcart)
sbrtfull[gdat.indxpixlrofi] = sbrt['modlraww'][ii, :, mm]
sbrtfull = sbrtfull.reshape((gdat.numbsidecart, gdat.numbsidecart))
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrtfull, psfnconv[mm][ii]).flatten()[gdat.indxpixlrofi]
indx = np.where(sbrt['modlconv'][ii, :, mm] < 1e-50)
sbrt['modlconv'][ii, indx, mm] = 1e-50
if gdat.typepixl == 'heal':
sbrt['modlconv'][ii, :, mm] = hp.smoothing(sbrt['modlraww'][ii, :, mm], fwhm=fwhm[i, m])[gdat.indxpixlrofi]
sbrt['modlconv'][ii, :, mm][np.where(sbrt['modlraww'][ii, :, mm] <= 1e-50)] = 1e-50
setattr(gmodstat, 'sbrtmodlconv', sbrt['modlconv'])
# temp -- this could be made faster -- need the copy() statement because sbrtdfnc gets added to sbrtmodl afterwards
sbrt['modl'] = np.copy(sbrt['modlconv'])
else:
if gdat.typeverb > 2:
print('Skipping PSF convolution of the model...')
sbrt['modl'] = np.copy(sbrt['modlraww'])
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
## add PSF-convolved delta functions to the model
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gdat.typeverb > 2:
print('Adding delta functions into the model...')
print('sbrt[dfnc]')
summgene(sbrt['dfnc'])
sbrt['modl'] += sbrt['dfnc']
stopchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
### count map
initchro(gdat, gdatmodi, 'expo')
cntp = dict()
cntp['modl'] = retr_cntp(gdat, sbrt['modl'])
if gdat.booldiagmode:
setattr(gmodstat, 'cntpmodl', cntp['modl'])
stopchro(gdat, gdatmodi, 'expo')
# mock data specific
if strgmodl == 'true' and strgstat == 'this':
# generate count data
# draw Poisson counts for the whole (energy, pixel, event-type) cube at once
cntptemp = np.random.poisson(cntp['modl']).astype(float)
setattr(gdat, 'cntpdata', cntptemp)
if not gdat.boolsqzeexpo and np.amax(cntptemp) == 0:
print('cntp[modl]')
summgene(cntp['modl'])
print('gdat.boolsqzeexpo')
print(gdat.boolsqzeexpo)
print('cntptemp')
summgene(cntptemp)
raise Exception('Data is zero.')
proc_cntpdata(gdat)
## diagnostics
if gdat.booldiagmode:
frac = cntp['modl'] / np.mean(cntp['modl'])
if np.amin(frac) < -1e-3 and np.amin(cntp['modl']) < -0.1:
raise Exception('')
indxcubebadd = np.where(cntp['modl'] < 0.)[0]
if indxcubebadd.size > 0:
print('Warning! Model prediction is negative. Correcting to 1e-20...')
cntp['modl'][indxcubebadd] = 1e-20
stopchro(gdat, gdatmodi, 'modl')
# log-prior
initchro(gdat, gdatmodi, 'lpri')
if gdat.typeverb > 2:
print('Evaluating the prior...')
lpri = np.zeros(gmod.numblpri)
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
lpri[0] -= 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[l] * gmodstat.numbelem[l]
if gdat.penalpridiff:
sbrtdatapnts = gdat.sbrtdata - sbrt['dfnc']
if gdat.typepixl == 'heal':
raise Exception('')
if gdat.typepixl == 'cart':
psecodimdatapnts = np.empty((gdat.numbener, gdat.numbsidecarthalf, gdat.numbevtt))
psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, psfn, 0.5)
sigm = fwhm / 2.355
psecodimdatapntsprio = np.exp(-2. * gdat.meanpara.mpolodim[None, :, None] / (0.1 / sigm[:, None, :]))
lpridiff = 0.
for i in gdat.indxener:
for m in gdat.indxevtt:
psecdatapnts = retr_psec(gdat, sbrtdatapnts[i, :, m])
psecodimdatapnts[i, :, m] = retr_psecodim(gdat, psecdatapnts)
psecodimdatapnts[i, :, m] /= psecodimdatapnts[i, 0, m]
lpridiff += -0.5 * np.sum((psecodimdatapnts[i, :, m] - psecodimdatapntsprio[i, :, m])**2)
setattr(gmodstat, 'psecodimdatapntsen%02devt%d' % (i, m), psecodimdatapnts[i, :, m])
setattr(gmodstat, 'psecodimdatapntsprioen%02devt%d'% (i, m), psecodimdatapntsprio[i, :, m])
lpri[1] = lpridiff
setattr(gmodstat, 'lpridiff', lpridiff)
if gmod.typemodltran == 'pois':
meanelem = gmodstat.paragenrscalfull[gmod.indxpara.meanelem]
for l in gmod.indxpopl:
lpri[2] += retr_lprbpois(gmodstat.numbelem[l], meanelem[l])
for l in gmod.indxpopl:
for g, (strgfeat, strgpdfn) in enumerate(zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l])):
indxlpritemp = 3 + l * gmod.numbparagenrelem + g
lpri[indxlpritemp] = retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, gmodstat.paragenrscalfull, gmodstat.dictelem, gmodstat.numbelem)
lpritotl = np.sum(lpri)
if gdat.typeverb > 1:
print('lpritotl')
print(lpritotl)
### log-likelihood
initchro(gdat, gdatmodi, 'llik')
llik = retr_llik(gdat, strgmodl, cntp['modl'])
if gdat.typeverb > 2:
print('cntp[modl]')
summgene(cntp['modl'])
print('np.sum(cntp[modl], (1, 2))')
print(np.sum(cntp['modl'], (1, 2)))
print('np.sum(gdat.cntpdata, (1, 2))')
print(np.sum(gdat.cntpdata, (1, 2)))
if gdat.booldiagmode:
if not np.isfinite(llik).all():
raise Exception('Likelihood is not finite.')
gmodstat.lliktotl = np.sum(llik)
if gdat.booldiagmode:
if isinstance(gmodstat.lliktotl, np.ndarray):
raise Exception('')
if not np.isfinite(gmodstat.lliktotl).all():
raise Exception('')
numbdoff = gdat.numbdata - gmod.numbparagenrbase
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
numbdoff -= len(gmodstat.indxparagenrfullelem[l]['full'])
setattr(gmodstat, 'llik', llik)
setattr(gmodstat, 'llikmean', gmodstat.lliktotl / gdat.numbdata)
setattr(gmodstat, 'llikcmea', gmodstat.lliktotl / (gdat.numbdata - numbdoff))
if gdat.typeverb > 2:
print('llik')
summgene(llik)
if gdat.typeverb > 1:
print('gmodstat.lliktotl')
print(gmodstat.lliktotl)
stopchro(gdat, gdatmodi, 'llik')
lpostotl = lpritotl + gmodstat.lliktotl
if gdat.typeverb > 1:
print('lpostotl')
print(lpostotl)
setattr(gmodstat, 'lpritotl', lpritotl)
setattr(gmodstat, 'lliktotl', gmodstat.lliktotl)
setattr(gmodstat, 'lpostotl', lpostotl)
stopchro(gdat, gdatmodi, 'lpri')
if strgstat == 'next':
return
initchro(gdat, gdatmodi, 'tert')
setattr(gmodstat, 'lpri', lpri)
if gmod.numbparaelem > 0:
setattr(gmodstat, 'lpripena', lpri[0])
dicttert = {}
## load necessary variables
## derived variables
## residual count map
cntp['resi'] = []
cntp['resi'] = gdat.cntpdata - cntp['modl']
setattr(gmodstat, 'cntpmodl', cntp['modl'])
setattr(gmodstat, 'cntpresi', cntp['resi'])
setattr(gmodstat, 'llik', llik)
#if gmod.boollens:
# setattr(gmodstat, 'deflhost', deflhost)
if gmod.boollens:
setattr(gmodstat, 'defl', defl)
for e in gmod.indxsersfgrd:
# massfrombein (the Einstein-radius-to-mass conversion factor) is assumed to live on gdat
masshostbein = gdat.massfrombein * beinhost[e]**2
setattr(gmodstat, 'masshostisf%dbein' % e, masshostbein)
### sort with respect to deflection at scale radius
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
indxelemsortampl = np.argsort(gmodstat.dictelem[l][nameparaelemsort[l]])[::-1]
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem + 'sort'] = gmodstat.dictelem[l][nameparagenrelem][indxelemsortampl]
deflsing = np.zeros((gdat.numbpixlcart, 2, numbdeflsingplot))
conv = np.zeros((gdat.numbpixlcart))
convpsec = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecodim = np.zeros((gdat.numbsidecarthalf))
if gmod.numbparaelem > 0:
if boolelemlens:
gmod.indxpopllens = gmod.typeelem.index('lens')
numbdeflsing = 2
if gmod.numbparaelem > 0:
if boolelemlens:
if gmodstat.numbelem[gmod.indxpopllens] > 0:
    numbdeflsing += min(numbdeflsubhplot, gmodstat.numbelem[gmod.indxpopllens])
numbdeflsing += 1
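# single deflection components: the host galaxy (k == 0), external shear (k == 1), the remaining smooth component (k == 2), followed by the brightest subhalos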
for k in range(numbdeflsing):
indxpixltemp = gdat.indxpixlcart
if k == 0:
# temp -- should take other sersics into account
deflsing[indxpixltemp, :, k] = deflhost[0]
elif k == 1:
deflsing[indxpixltemp, :, k] = deflextr
elif k == 2:
deflsing[indxpixltemp, :, k] = defl - deflextr - deflhost[0]
else:
asca = gmodstat.dictelem[gmod.indxpopllens]['ascasort'][None, k-3]
acut = gmodstat.dictelem[gmod.indxpopllens]['acutsort'][None, k-3]
deflsing[listindxpixlelem[gmod.indxpopllens][k], :, k] = retr_defl(gdat, listindxpixlelem[gmod.indxpopllens][k], \
                        gmodstat.dictelem[gmod.indxpopllens]['lgalsort'][None, k-3], gmodstat.dictelem[gmod.indxpopllens]['bgalsort'][None, k-3], \
                        gmodstat.dictelem[gmod.indxpopllens]['defssort'][None, k-3], asca=asca, acut=acut)
# convergence
## total
conv[:] = retr_conv(gdat, defl)
convhost = np.zeros((gmod.numbsersfgrd, gdat.numbpixlcart))
for e in gmod.indxsersfgrd:
convhost[e, :] = retr_conv(gdat, deflhost[e])
### power spectrum
#### two dimensional
convpsec[:] = retr_psec(gdat, conv[:])
#### one dimensional
convpsecodim[:] = retr_psecodim(gdat, convpsec[:])
setattr(gmodstat, 'convpsec', convpsec)
setattr(gmodstat, 'convpsecodim', convpsecodim)
setattr(gmodstat, 'conv', conv[...])
for e in gmod.indxsersfgrd:
setattr(gmodstat, 'convisf%d' % e, convhost[e, ...])
## subhalos
if gmod.numbparaelem > 0:
if boolelemlens:
convelem = np.zeros((gdat.numbpixl))
convpsecelem = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecelemodim = np.zeros((gdat.numbsidecarthalf))
### convergence
convelem[:] = retr_conv(gdat, deflsubh)
### power spectrum
##### two dimensional
convpsecelem[:] = retr_psec(gdat, convelem[:])
##### one dimensional
convpsecelemodim[:] = retr_psecodim(gdat, convpsecelem[:])
setattr(gmodstat, 'convpsecelem', convpsecelem)
setattr(gmodstat, 'convpsecelemodim', convpsecelemodim)
setattr(gmodstat, 'convelem', convelem[...])
setattr(gmodstat, 'defl', defl)
### magnification
magn = np.empty((gdat.numbpixlcart))
histdefl = np.empty((gdat.numbdefl))
if gmod.numbparaelem > 0 and boolelemlens:
histdeflsubh = np.empty((gdat.numbdefl))
deflsingmgtd = np.zeros((gdat.numbpixlcart, numbdeflsingplot))
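# magnification is the reciprocal of the inverse magnification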
magn[:] = 1. / retr_invm(gdat, defl)
histdefl[:] = np.histogram(defl, bins=gdat.binspara.defl)[0]
if gmod.numbparaelem > 0:
if boolelemlens:
histdeflsubh[:] = np.histogram(deflsubh, bins=gdat.binspara.deflsubh)[0]
deflsingmgtd[:, :] = np.sqrt(np.sum(deflsing[...]**2, axis=1))
if gmod.numbparaelem > 0:
if boolelemlens:
setattr(gmodstat, 'histdeflsubh', histdeflsubh)
setattr(gmodstat, 'histdefl', histdefl)
setattr(gmodstat, 'magn', magn[...])
setattr(gmodstat, 'deflsing', deflsing[...])
setattr(gmodstat, 'deflsingmgtd', deflsingmgtd[...])
## element related
if gmod.numbparaelem > 0:
if gdat.numbpixl == 1:
for l in gmod.indxpopl:
for k in range(gmodstat.numbelem[l]):
setattr(gmodstat, 'speclinepop%d%04d' % (l, k), gmodstat.dictelem[l]['spec'][:, k])
if gdat.typedata == 'mock' and strgmodl == 'true' and gdat.numbpixl > 1:
gdat.refrlgal = [[] for l in gmod.indxpopl]
gdat.refrbgal = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gdat.refrlgal[l] = np.tile(gmodstat.dictelem[l]['lgal'], [3] + list(np.ones(gmodstat.dictelem[l]['lgal'].ndim, dtype=int)))
gdat.refrbgal[l] = np.tile(gmodstat.dictelem[l]['bgal'], [3] + list(np.ones(gmodstat.dictelem[l]['bgal'].ndim, dtype=int)))
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['per1'] = retr_per1(gmodstat.dictelem[l]['per0'], gmodstat.dictelem[l]['magf'])
if gmod.numbparaelem > 0:
if strgstat == 'this' or gdat.boolrefeforc and strgmodl == 'fitt':
# correlate the fitting model elements with the reference elements
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
indxelemrefrasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] == 0:
continue
indxelemfittmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
indxelemrefrmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
matrdist = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]))
for k in range(gmodstat.numbelem[l]):
# construct a matrix of angular distances between reference and fitting elements
if gmod.typeelem[l].startswith('lghtline'):
matrdist[:, k] = abs(gdat.refrelin[q][0, :] - gmodstat.dictelem[l]['elin'][k]) / gdat.refrelin[q][0, :]
else:
matrdist[:, k] = retr_angldist(gdat, gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k])
indxelemrefrmatr[:, k] = np.arange(gdat.refr.numbelem[q])
indxelemfittmatr[:, k] = k
matrdist = matrdist.flatten()
indxelemrefrmatr = indxelemrefrmatr.flatten()
indxelemfittmatr = indxelemfittmatr.flatten()
# take only angular separations smaller than some threshold
indxmatrthrs = np.where(matrdist < gdat.anglassc)
matrdist = matrdist[indxmatrthrs]
indxelemrefrmatr = indxelemrefrmatr[indxmatrthrs]
indxelemfittmatr = indxelemfittmatr[indxmatrthrs]
# sort the remaining associations with respect to distance
indxmatrsort = np.argsort(matrdist)
matrdist = matrdist[indxmatrsort]
indxelemrefrmatr = indxelemrefrmatr[indxmatrsort]
indxelemfittmatr = indxelemfittmatr[indxmatrsort]
for c in range(matrdist.size):
if indxelemrefrmatr[c] in indxelemrefrasschits[q][l] or indxelemfittmatr[c] in indxelemfittasschits[q][l]:
continue
indxelemrefrasschits[q][l].append(indxelemrefrmatr[c])
indxelemfittasschits[q][l].append(indxelemfittmatr[c])
indxelemrefrasschits[q][l] = np.array(indxelemrefrasschits[q][l])
indxelemfittasschits[q][l] = np.array(indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasschits', indxelemrefrasschits)
setattr(gmodstat, 'indxelemfittasschits', indxelemfittasschits)
indxelemrefrasscmiss = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasscfals = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
# indices of the reference elements not associated with the fitting model elements
if gdat.refr.numbelem[q] > 0:
indxelemrefrasscmiss[q][l] = np.setdiff1d(np.arange(gdat.refr.numbelem[q]), indxelemrefrasschits[q][l])
# indices of the fitting model elements not associated with the reference elements
if gmodstat.numbelem[l] > 0:
indxelemfittasscfals[q][l] = np.setdiff1d(np.arange(gmodstat.numbelem[l]), indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasscmiss', indxelemrefrasscmiss)
setattr(gmodstat, 'indxelemfittasscfals', indxelemfittasscfals)
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for l in gmod.indxpopl:
# collect the associated reference element parameter for each fitting element
for strgfeat in gdat.refr.namepara.elemonly[q][l]:
name = strgfeat + gdat.listnamerefr[q]
if strgfeat != 'spec' and strgfeat != 'specplot':
refrfeat = getattr(gdat.refr, strgfeat)
gmodstat.dictelem[l][name] = np.zeros(gmodstat.numbelem[l])
if len(refrfeat[q]) > 0 and len(indxelemrefrasschits[q][l]) > 0:
gmodstat.dictelem[l][name][indxelemfittasschits[q][l]] = refrfeat[q][0, indxelemrefrasschits[q][l]]
print('temp')
continue
# collect the error in the associated reference element amplitude
for strgfeat in gdat.listnameparaetotlelemcomm[q][l]:
refrfeat = getattr(gdat.refr, strgfeat)
if strgfeat == gmod.nameparagenrelemampl[l] and len(indxelemfittasschits[q][l]) > 0:
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]] = np.zeros(gmodstat.numbelem[l])
fittfeattemp = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
refrfeattemp = refrfeat[q][0, indxelemrefrasschits[q][l]]
if gdat.booldiagmode:
if not np.isfinite(refrfeattemp).all():
raise Exception('')
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]][indxelemfittasschits[q][l]] = 100. * (fittfeattemp - refrfeattemp) / refrfeattemp
if gdat.boolrefeforc and strgmodl == 'fitt':
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat in gdat.refr.namepara.elem[gdat.indxrefrforc[l]]:
if len(indxelemrefrasschits[gdat.indxrefrforc[l]][l]) == 0:
continue
refrfeat = getattr(gdat.refr, strgfeat)[gdat.indxrefrforc[l]][0, indxelemrefrasschits[gdat.indxrefrforc[l]][l]]
if len(gmodstat.dictelem[l][strgfeat]) == 0:
continue
lpritotl += -2. * np.sum(1e6 * (gmodstat.dictelem[l][strgfeat][indxelemfittasschits[gdat.indxrefrforc[l]][l]] - refrfeat)**2 / refrfeat**2)
# other tertiary variables
## chi-squared per degree of freedom
chi2doff = np.sum(cntp['resi']**2 / gdat.varidata) / numbdoff
if gdat.booldiagmode:
if not np.isfinite(cntp['resi']).all():
raise Exception('')
if not np.isfinite(numbdoff):
raise Exception('')
if not np.isfinite(chi2doff):
raise Exception('')
setattr(gmodstat, 'numbdoff', numbdoff)
setattr(gmodstat, 'chi2doff', chi2doff)
if gmod.boolelempsfn and gmod.numbparaelem > 0:
gmodstat.fwhmpsfn = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gmod.numbparaelem > 0:
### derived parameters
for l in gmod.indxpopl:
# luminosity
if gmod.boolelemlght[l] and 'flux' in gmod.namepara.genrelem[l]:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat.startswith('reds') and strgfeat != 'reds':
namerefr = strgfeat[-4:]
gmodstat.dictelem[l]['lumi' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
gmodstat.dictelem[l]['dlos' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
reds = gmodstat.dictelem[l]['reds' + namerefr]
indxgood = np.where(np.isfinite(gmodstat.dictelem[l]['reds' + namerefr]))[0]
if indxgood.size > 0:
# temp -- these units only work for energy units of keV
dlos = gdat.adisobjt(reds)
gmodstat.dictelem[l]['dlos' + namerefr][indxgood] = dlos
lumi = retr_lumi(gdat, gmodstat.dictelem[l]['flux'], dlos, reds)
gmodstat.dictelem[l]['lumi' + namerefr][indxgood] = lumi
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['mass'] = np.full([gmodstat.numbelem[l]], 3.)
if gdat.typeverb > 2:
print('l')
print(l)
if gdat.boolbinsspat:
#### radial and angular coordinates
gmodstat.dictelem[l]['gang'] = retr_gang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
gmodstat.dictelem[l]['aang'] = retr_aang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
if gmod.boolelemlght[l]:
#### number of expected counts
if gdat.boolbinsspat:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal']], gmodstat.dictelem[l]['spec'])
else:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['elin']], gmodstat.dictelem[l]['spec'])
#### delta log-likelihood
gmodstat.dictelem[l]['deltllik'] = np.zeros(gmodstat.numbelem[l])
if not (strgmodl == 'true' and gdat.checprio):
if gdat.typeverb > 2:
print('Calculating log-likelihood differences when removing elements from the model.')
for k in range(gmodstat.numbelem[l]):
# construct gdatmodi
gdatmoditemp = tdpy.gdatstrt()
gdatmoditemp.this = tdpy.gdatstrt()
gdatmoditemp.next = tdpy.gdatstrt()
gdatmoditemp.this.indxelemfull = gmodstat.indxelemfull
gdatmoditemp.this.paragenrscalfull = gmodstat.paragenrscalfull
gdatmoditemp.this.paragenrunitfull = gmodstat.paragenrunitfull
prop_stat(gdat, gdatmoditemp, strgmodl, deth=True, thisindxpopl=l, thisindxelem=k)
proc_samp(gdat, gdatmoditemp, 'next', strgmodl)#, boolinit=boolinit)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.lliktotl):
raise Exception('')
gdatobjttemp = retr_gdatobjt(gdat, gdatmoditemp, strgmodl)#, boolinit=boolinit)
nextlliktotl = gdatobjttemp.next.lliktotl
gmodstat.dictelem[l]['deltllik'][k] = gmodstat.lliktotl - nextlliktotl
if gdat.typeverb > 2:
print('deltllik calculation ended.')
# more derived parameters
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
### PSF FWHM
if gdat.typepixl == 'cart':
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
setattr(gmodstat, 'fwhm', fwhm)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gmod.numbparaelem > 0:
sbrt['dfnctotl'] = np.zeros_like(gdat.expo)
sbrt['dfncsubt'] = np.zeros_like(gdat.expo)
sbrt['dfncsupt'] = np.zeros_like(gdat.expo)
for l in gmod.indxpopl:
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'] = np.zeros_like(gdat.expo)
if gmod.boolelemsbrt[l]:
for k in range(gmodstat.numbelem[l]):
# read normalization from the element dictionary
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
# calculate imprint on the element surface brightness state variable
if gmod.boolelempsfn[l]:
sbrttemp = retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
indxpixltemp = listindxpixlelem[l][k]
if gmod.typeelem[l].startswith('lghtline'):
sbrttemp = gmodstat.dictelem[l]['spec'][:, k, None, None]
# add it to the state variable depending on the significance
sbrt['dfnctotl'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] > 35:
sbrt['dfncsupt'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] < 35:
sbrt['dfncsubt'][:, indxpixltemp, :] += sbrttemp
# calculate imprint without PSF truncation to calculate approximation errors
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'][:, :, :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, gdat.indxpixl)
setattr(gmodstat, 'sbrtdfncsubtpop%d' % l, sbrt['dfncsubt'])
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.booldiagmode:
numbtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtextsbgrd[l]:
numbtemp += np.sum(gmodstat.numbelem[l])
if numbtemp > 0 and (sbrtextsbgrd == 0.).all():
raise Exception('')
sbrt['bgrdexts'] = sbrtextsbgrd
#### count maps
cntp = dict()
for name in gmod.listnamegcom:
cntp[name] = retr_cntp(gdat, sbrt[name])
setattr(gmodstat, 'cntp' + name, cntp[name])
### spatial averages
sbrtmean = dict()
sbrtstdv = dict()
for name in gmod.listnamegcom:
sbrtmean[name], sbrtstdv[name] = retr_spatmean(gdat, sbrt[name])
for b in gdat.indxspatmean:
setattr(gmodstat, 'sbrt%smea%d' % (name, b), sbrtmean[name][b])
setattr(gmodstat, 'sbrt%sstd%d' % (name, b), sbrtstdv[name][b])
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
for i in gdat.indxener:
if 'dark' in gmod.listnamegcom:
fracsdenmeandarkdfncsubt = sbrtmean['dfncsubt'][0][0][i] / (sbrtmean['dfncsubt'][0][0][i] + sbrtmean['dark'][0][0][i])
else:
fracsdenmeandarkdfncsubt = 1.
setattr(gmodstat, 'fracsdenmeandarkdfncsubten%02d' % i, np.array([fracsdenmeandarkdfncsubt]))
if 'dark' in gmod.listnamegcom:
booldfncsubt = float(np.where(sbrtmean['dfncsubt'][0][0] > sbrtmean['dark'][0][0])[0].any())
else:
booldfncsubt = 1.
setattr(gmodstat, 'booldfncsubt', np.array([booldfncsubt]))
# find the 1-point function of the count maps of all emission components including the total emission
for name in gmod.listnamegcom:
    for m in gdat.indxevtt:
        for i in gdat.indxener:
            if gdat.numbener > 1:
                # rebuild the variable name for each energy and event-type bin
                namehistcntp = 'histcntp' + name
                if gdat.numbevtt > 1:
                    namehistcntp += 'evt%d' % m
                namehistcntp += 'en%02d' % i
                histcntp = np.histogram(cntp[name][i, :, m], bins=gdat.binspara.cntpmodl)[0]
                setattr(gmodstat, namehistcntp, histcntp)
                if False and i == 0 and m == 0 and (name == 'dfnc' or name == 'dfncsubt'):
                    for strgbins in ['lowr', 'higr']:
                        strgtemp = 'histcntp' + strgbins + name + 'en%02devt%d' % (i, m)
                        if strgbins == 'lowr':
                            setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[:gdat.numbtickcbar-1]))]))
                        else:
                            setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[gdat.numbtickcbar-1:]))]))
            else:
                histcntp = np.histogram(cntp[name][:, 0, m], bins=gdat.binspara.cntpmodl)[0]
                setattr(gmodstat, 'histcntp' + name + 'evt%d' % m, histcntp)
if gmod.boollens:
if strgmodl == 'true':
s2nr = cntp['lens'] / np.sqrt(cntp['modl'])
setattr(gmodstat, 's2nr', s2nr)
cntplensgrad = np.empty((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt, 2))
for i in gdat.indxener:
for m in gdat.indxevtt:
cntplenstemp = np.zeros(gdat.numbpixlcart)
cntplenstemp[gdat.indxpixlrofi] = cntp['lens'][i, :, m]
cntplensgrad[i, :, m, :] = retr_gradmaps(gdat, cntplenstemp) * gdat.sizepixl
cntplensgradmgtd = np.sqrt(np.sum(cntplensgrad**2, axis=3))
cntplensgrad *= gdat.sizepixl
indx = np.where(np.fabs(cntplensgrad) > 1. * gdat.sizepixl)
cntplensgrad[indx] = np.sign(cntplensgrad[indx]) * 1. * gdat.sizepixl
deflmgtd = np.sqrt(np.sum(defl**2, axis=1))
setattr(gmodstat, 'deflmgtd', deflmgtd)
setattr(gmodstat, 'cntplensgrad', cntplensgrad)
setattr(gmodstat, 'cntplensgradmgtd', cntplensgradmgtd)
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
#### spectra
if gdat.boolbinsspat:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['specplot'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], \
curv=gmodstat.dictelem[l]['curv'], expc=gmodstat.dictelem[l]['expc'], \
sindcolr=sindcolr, spectype=gmod.spectype[l], plot=True)
if gdat.typedata == 'inpt':
if gdat.typeexpr == 'ferm':
# temp
try:
gmodstat.dictelem[l]['sbrt0018'] = gdat.sbrt0018objt(gmodstat.dictelem[l]['bgal'], gmodstat.dictelem[l]['lgal'])
except Exception:
gmodstat.dictelem[l]['sbrt0018'] = gmodstat.dictelem[l]['bgal'] * 0.
if gmod.typeelem[l] == 'lens':
#### distance to the source
if gmod.boollens:
gmodstat.dictelem[l]['diss'] = retr_angldist(gdat, gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], lgalsour, bgalsour)
if gmod.boollenssubh:
gmodstat.dictelem[l]['deflprof'] = np.empty((gdat.numbanglfull, gmodstat.numbelem[l]))
gmodstat.dictelem[l]['mcut'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['rele'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reln'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relk'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relf'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reld'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relc'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relm'] = np.empty(gmodstat.numbelem[l])
# temp -- this can be placed earlier in the code
# temp -- assuming the first energy and event-type bins, consistent with the rele calculations below
cntplensobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
                                                            cntp['lens'][0, :, 0].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
for k in np.arange(gmodstat.numbelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
#### deflection profiles
gmodstat.dictelem[l]['deflprof'][:, k] = retr_deflcutf(gdat.meanpara.anglfull, gmodstat.dictelem[l]['defs'][k], asca, acut)
### truncated mass
gmodstat.dictelem[l]['mcut'][k] = retr_mcut(gdat, gmodstat.dictelem[l]['defs'][k], asca, acut, adishost, mdencrit)
#### dot product with the source flux gradient
# temp -- weigh the energy and PSF bins
gmodstat.dictelem[l]['rele'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relf'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, cntpmodl=cntp['modl'][0, :, 0])
deflelem = retr_defl(gdat, gdat.indxpixl, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], gmodstat.dictelem[l]['defs'][k], asca=asca, acut=acut)
bgalprim = gdat.bgalgrid - deflelem[:, 1]
lgalprim = gdat.lgalgrid - deflelem[:, 0]
gmodstat.dictelem[l]['relm'][k] = np.mean(abs(cntp['lens'][0, :, 0] - cntplensobjt(bgalprim, lgalprim, grid=False).flatten()))
gmodstat.dictelem[l]['relk'][k] = gmodstat.dictelem[l]['relm'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reln'][k] = gmodstat.dictelem[l]['rele'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reld'][k] = retr_rele(gdat, gdat.cntpdata[0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relc'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, absv=False) / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
### distribution of element parameters and features
#### calculate the model filter
listindxelemfilt = [[[] for l in gmod.indxpopl] for namefilt in gdat.listnamefilt]
for k, namefilt in enumerate(gdat.listnamefilt):
for l in gmod.indxpopl:
if namefilt == '':
listindxelemfilt[k][l] = np.arange(gmodstat.numbelem[l])
if namefilt == 'imagbndr':
listindxelemfilt[k][l] = np.where((np.fabs(gmodstat.dictelem[l]['lgal']) < gdat.maxmgangdata) & (np.fabs(gmodstat.dictelem[l]['bgal']) < gdat.maxmgangdata))[0]
if namefilt == 'deltllik':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['deltllik'] > 0.5 * gmod.numbparagenrelemsing[l])[0]
if namefilt == 'nrel':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['reln'] > 0.3)[0]
for l in gmod.indxpopl:
# histograms of element parameters
for namefrst in gmod.namepara.elem[l]:
## one dimensional
if namefrst[:-4] == 'etag':
continue
if namefrst == 'specplot' or namefrst == 'deflprof':
continue
elif namefrst == 'spec':
histfrst = np.zeros((gdat.numbbinsplot, gdat.numbener))
for i in gdat.indxener:
histfrst[:, i] = np.histogram(gmodstat.dictelem[l]['spec'][i, listindxelemfilt[0][l]], gdat.binspara.spec)[0]
elif namefrst == 'cnts':
histfrst = np.histogram(gmodstat.dictelem[l]['cnts'][listindxelemfilt[0][l]], gdat.binspara.cnts)[0]
else:
#elif not (namefrst == 'curv' and gmod.spectype[l] != 'curv' or namefrst == 'expc' \
# and gmod.spectype[l] != 'expc' or namefrst.startswith('sindarry') and \
# gmod.spectype[l] != 'colr'):
binsfrst = getattr(gdat.binspara, namefrst)
#if len(gmodstat.dictelem[l][namefrst]) > 0 and len(listindxelemfilt[0][l]) > 0:
histfrst = np.histogram(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], binsfrst)[0]
strgvarb = 'hist' + namefrst + 'pop%d' % l
setattr(gmodstat, strgvarb, histfrst)
#### two dimensional
for nameseco in gmod.namepara.elem[l]:
if namefrst == 'spec' or namefrst == 'specplot' or namefrst == 'deflprof' or \
nameseco == 'spec' or nameseco == 'specplot' or nameseco == 'deflprof':
continue
if not checstrgfeat(namefrst, nameseco):
continue
binsseco = getattr(gdat.binspara, nameseco)
histtdim = np.histogram2d(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], \
gmodstat.dictelem[l][nameseco][listindxelemfilt[0][l]], [binsfrst, binsseco])[0]
setattr(gmodstat, 'hist' + namefrst + nameseco + 'pop%d' % l, histtdim)
### priors on element parameters and features
for nameparagenrelem in gmod.namepara.genrelem[l]:
xdat = gmodstat.dictelem[l][nameparagenrelem]
minm = getattr(gmod.minmpara, nameparagenrelem + 'pop%d' % l)
maxm = getattr(gmod.maxmpara, nameparagenrelem + 'pop%d' % l)
scal = getattr(gmod.scalpara, nameparagenrelem + 'pop%d' % l)
booltemp = False
if scal.startswith('expo') or scal.startswith('dexp'):
if scal.startswith('expo'):
if scal == 'expo':
sexp = getattr(gmod, 'gangdistsexppop%d' % l)
else:
sexp = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
pdfn = pdfn_expo(xdat, maxm, sexp)
if scal.startswith('dexp'):
    # temp -- assuming the scale of the doubly exponential prior follows the 'distscal' naming convention
    sexp = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
    pdfn = pdfn_dexp(xdat, maxm, sexp)
booltemp = True
if scal.startswith('self') or scal.startswith('logt'):
if scal.startswith('self'):
pdfn = 1. / (maxm - minm) + np.zeros_like(xdat)
else:
pdfn = 1. / (np.log(maxm) - np.log(minm)) + np.zeros_like(xdat)
booltemp = True
# temp
if scal.startswith('powr'):
slop = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem + 'pop%d' % l)]
pdfn = pdfn_powr(xdat, minm, maxm, slop)
booltemp = True
if scal.startswith('dpowslopbrek'):
    # temp -- assuming the broken power-law prior parameters follow the 'distbrek', 'distsloplowr' and 'distslopuppr' naming convention
    brek = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distbrek')[l]]
    sloplowr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distsloplowr')[l]]
    slopuppr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distslopuppr')[l]]
    pdfn = pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr)
    booltemp = True
if scal == 'lnormeanstdv':
    # temp -- assuming the log-normal prior parameters follow the 'distmean' and 'diststdv' naming convention
    meanlnor = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
    stdvlnor = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
    pdfn = pdfn_lnor(xdat, meanlnor, stdvlnor)
    booltemp = True
if scal.startswith('igam'):
    # temp -- assuming the slope of the inverse-gamma prior follows the 'distslop' naming convention
    slop = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distslop')[l]]
    cutf = getattr(gdat, 'cutf' + nameparagenrelem)
    pdfn = pdfn_igam(xdat, slop, cutf)
    booltemp = True
if scal.startswith('gaus'):
    # this does not work for mismodeling
    meanvarb = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
    stdv = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
    # the same Gaussian density applies whether or not the parameter is an exponential cutoff
    pdfn = pdfn_gaus(xdat, meanvarb, stdv)
    booltemp = True
# temp -- meanelem will not be defined
#if booltemp:
# gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'] = gmodstat.numbelem[l] * pdfn * np.interp(xdat, xdatplot, delt)
#setattr(gmodstat, 'hist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
#if strgmodl == 'true':
# setattr(gmodstat, 'refrhist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
if gmodstat.numbelem[l] > 0:
## total truncated mass of the subhalo as a cross check
# temp -- generalize
asca = gmodstat.dictelem[l]['asca']
acut = gmodstat.dictelem[l]['acut']
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
masssubh = np.array([np.sum(factmcutfromdefs * gmodstat.dictelem[l]['defs'])])
## derived variables as a function of other derived variables
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpntspuls'):
massshel = np.empty(gdat.numbanglhalf)
for k in gdat.indxanglhalf:
indxelemshel = np.where((gdat.binspara.anglhalf[k] < gmodstat.dictelem[l]['gang']) & (gmodstat.dictelem[l]['gang'] < gdat.binspara.anglhalf[k+1]))
massshel[k] = np.sum(gmodstat.dictelem[l]['mass'][indxelemshel])
setattr(gmodstat, 'massshelpop%d' % l, massshel)
if gmod.boollens or gmod.numbparaelem > 0 and gmod.boollenssubh:
# find the host, subhalo masses and subhalo mass fraction as a function of halo-centric radius
listnametemp = gdat.liststrgcalcmasssubh
listnamevarbmass = []
listnamevarbmassscal = []
listnamevarbmassvect = []
for e in gmod.indxsersfgrd:
if gmod.boollenshost:
listnamevarbmassscal += ['masshosttotl']
for strgtemp in listnametemp:
listnamevarbmassvect.append('masshostisf%d' % e + strgtemp)
listnamevarbmassscal.append('masshostisf%d' % e + strgtemp + 'bein')
if gmod.numbparaelem > 0 and gmod.boollenssubh:
listnamevarbmassscal.append('masssubhtotl')
listnamevarbmassscal.append('fracsubhtotl')
for strgtemp in listnametemp:
listnamevarbmassvect.append('masssubh' + strgtemp)
listnamevarbmassvect.append('fracsubh' + strgtemp)
listnamevarbmassscal.append('masssubh' + strgtemp + 'bein')
listnamevarbmassscal.append('fracsubh' + strgtemp + 'bein')
for name in listnamevarbmassvect:
dicttert[name] = np.zeros(gdat.numbanglhalf)
if 'isf' in name:
indxisfrtemp = int(name.split('isf')[1][0])
angl = np.sqrt((gdat.meanpara.lgalcartmesh - lgalhost[indxisfrtemp])**2 + (gdat.meanpara.bgalcartmesh - bgalhost[indxisfrtemp])**2).flatten()
for k in gdat.indxanglhalf:
if name[4:8] == 'host':
convtemp = conv[:]
if name[4:8] == 'subh':
convtemp = convelem[:]
if name.endswith('delt'):
indxpixl = np.where((gdat.binspara.anglhalf[k] < angl) & (angl < gdat.binspara.anglhalf[k+1]))[0]
dicttert[name][k] = 1e6 * np.sum(convtemp[indxpixl]) * mdencrit * \
gdat.apix * adishost**2 / 2. / np.pi * gdat.deltanglhalf[k] / gdat.meanpara.anglhalf[k]
if name.endswith('intg'):
indxpixl = np.where(angl < gdat.meanpara.anglhalf[k])[0]
dicttert[name][k] = np.sum(convtemp[indxpixl]) * mdencrit * gdat.apix * adishost**2
if name[:4] == 'frac':
masshosttotl = 0.
for e in gmod.indxsersfgrd:
masshosttotl += dicttert['masshostisf%d' % e + name[-4:]][k]
if masshosttotl != 0.:
dicttert['fracsubh' + name[8:]][k] = dicttert['masssubh' + name[8:]][k] / masshosttotl
setattr(gmodstat, name, dicttert[name])
# interpolate the host, subhalo masses and subhalo mass fraction at the Einstein radius and save it as a scalar variable
dicttert[name + 'bein'] = np.interp(beinhost, gdat.meanpara.anglhalf, dicttert[name])
setattr(gmodstat, name + 'bein', dicttert[name + 'bein'])
#if gmod.numbparaelem > 0:
# ## copy element parameters to the global object
# feat = [[] for l in gmod.indxpopl]
# for l in gmod.indxpopl:
# feat[l] = dict()
# for strgfeat in gmod.namepara.genrelem[l]:
# if strgfeat[:-4] == 'etag':
# continue
# if len(gmodstat.dictelem[l][strgfeat]) > 0:
# if strgmodl == 'true':
# shap = list(np.ones(gmodstat.dictelem[l][strgfeat].ndim, dtype=int))
# feat[l][strgfeat] = np.tile(gmodstat.dictelem[l][strgfeat], [3] + shap)
# if strgmodl == 'fitt':
# feat[l][strgfeat] = gmodstat.dictelem[l][strgfeat]
#
# #for strgfeat in gmod.namepara.elem:
# # feattemp = [[] for l in gmod.indxpopl]
# # for l in gmod.indxpopl:
# # if strgfeat in gmod.namepara.genrelem[l]:
# # if strgfeat in feat[l]:
# # feattemp[l] = feat[l][strgfeat]
# # else:
# # feattemp[l] = np.array([])
# # setattr(gmodstat, strgfeat, feattemp)
# copy true state to the reference state
#if strgmodl == 'true':
# for name, valu in deepcopy(gdat.__dict__).items():
# if name.startswith('true'):
# #indx = name.find('pop')
# #if indx != -1 and not name.endswith('pop') and name[indx+3].isdigit():
# # namerefr = name.replace('pop%s' % name[indx+3], 'ref%s' % name[indx+3])
# #else:
# # namerefr = name
# #namerefr = name
# #namerefr = namerefr.replace('true', 'refr')
# name = name.replace('true', 'refr')
# setattr(gdat, name, valu)
if gmod.numbparaelem > 0 and gdat.priofactdoff != 0.:
if strgmodl == 'true':
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':
continue
reca = np.zeros(gdat.numbbinsplot) - 1.
indxelempars = np.where(gmodstat.dictelem[q]['deltllik'] > 2.5)[0]
refrhistpars = np.zeros(gdat.numbbinsplot) - 1.
histparaelem = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
indxrefrgood = np.where(histparaelem > 0)[0]
reca[indxrefrgood] = 0.
refrhistpars[indxrefrgood] = 0.
refrhist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
bins = getattr(gdat.binspara, strgfeat)
if len(indxelempars) > 0:
refrhistpars = np.histogram(gmodstat.dictelem[q][strgfeat][indxelempars], bins=bins)[0].astype(float)
if indxrefrgood.size > 0:
reca[indxrefrgood] = refrhistpars[indxrefrgood] / refrhist[indxrefrgood]
setattr(gmodstat, 'histpars' + strgfeat + 'pop%d' % q, refrhistpars)
setattr(gmodstat, 'reca' + strgfeat + 'pop%d' % q, reca)
print('gdat.rtagmock')
print(gdat.rtagmock)
if gdat.rtagmock is not None:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':# or strgfeat.startswith('aerr'):
continue
if strgfeat in gmod.namepara.genrelem[l]:
hist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % l)
reca = getattr(gdat.true.this, 'reca' + strgfeat + 'pop%d' % l)
histcorrreca = hist / reca
setattr(gmodstat, 'histcorrreca' + strgfeat + 'pop%d' % l, histcorrreca)
### Exclusive comparison with the true state
if strgmodl == 'fitt' and gdat.typedata == 'mock':
if gmod.boollens:
numbsingcomm = min(deflsing.shape[2], gmod.deflsing.shape[2])
deflsingresi = deflsing[0, ..., :numbsingcomm] - gmod.deflsing[..., :numbsingcomm]
deflsingresimgtd = np.sqrt(np.sum(deflsingresi**2, axis=1))
deflsingresiperc = 100. * deflsingresimgtd / gmod.deflsingmgtd[..., :numbsingcomm]
setattr(gmodstat, 'numbsingcomm', numbsingcomm)
setattr(gmodstat, 'deflsingresi', deflsingresi)
truedeflmgtd = getattr(gdat.true.this, 'deflmgtd')
truedefl = getattr(gdat.true.this, 'defl')
deflresi = defl - truedefl
deflresimgtd = np.sqrt(np.sum(deflresi**2, axis=1))
deflresiperc = 100. * deflresimgtd / truedeflmgtd
setattr(gmodstat, 'deflresi', deflresi)
setattr(gmodstat, 'deflresimgtd', deflresimgtd)
if gmod.numbparaelem > 0:
trueconvelem = getattr(gdat.true.this, 'convelem')
convelemresi = convelem[:] - trueconvelem
convelemresiperc = 100. * convelemresi / trueconvelem
setattr(gmodstat, 'convelemresi', convelemresi)
setattr(gmodstat, 'convelemresiperc', convelemresiperc)
truemagn = getattr(gdat.true.this, 'magn')
magnresi = magn[:] - truemagn
magnresiperc = 100. * magnresi / truemagn
setattr(gmodstat, 'magnresi', magnresi)
setattr(gmodstat, 'magnresiperc', magnresiperc)
if gmod.numbparaelem > 0:
# correlate the catalog sample with the reference catalog
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] > 0:
cmpl = np.array([float(len(indxelemrefrasschits[q][l])) / gdat.refr.numbelem[q]])
if gdat.booldiagmode:
if cmpl > 1. or cmpl < 0.:
raise Exception('')
else:
cmpl = np.array([-1.])
setattr(gmodstat, 'cmplpop%dpop%d' % (l, q), cmpl)
if gmodstat.numbelem[l] > 0:
fdis = np.array([float(indxelemfittasscfals[q][l].size) / gmodstat.numbelem[l]])
if gdat.booldiagmode:
if fdis > 1. or fdis < 0.:
raise Exception('')
else:
fdis = np.array([-1.])
setattr(gmodstat, 'fdispop%dpop%d' % (q, l), fdis)
# collect the associated fitting element parameter for each reference element
featrefrassc = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
featrefrassc[q][l] = dict()
for strgfeat in gdat.refr.namepara.elem[q]:
if not strgfeat in gmod.namepara.genrelem[l] or strgfeat in gdat.refr.namepara.elemonly[q][l]:
continue
if isinstance(gmodstat.dictelem[l][strgfeat], np.ndarray) and gmodstat.dictelem[l][strgfeat].ndim > 1:
continue
featrefrassc[q][l][strgfeat] = np.zeros(gdat.refr.numbelem[q]) + np.nan
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][strgfeat]) > 0:
featrefrassc[q][l][strgfeat][indxelemrefrasschits[q][l]] = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
name = strgfeat + 'asscpop%dpop%d' % (q, l)
setattr(gmodstat, name, featrefrassc[q][l][strgfeat])
# completeness
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
l = gdat.refr.indxpoplfittassc[q]
for nameparaelemfrst in gdat.refr.namepara.elem[q]:
if nameparaelemfrst.startswith('etag'):
continue
if nameparaelemfrst == 'spec' or nameparaelemfrst == 'specplot':
continue
refrfeatfrst = gdat.refr.dictelem[q][nameparaelemfrst][0, :]
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gdat.refr.namepara.elem[q]:
if nameparaelemfrst == nameparaelemseco:
continue
if nameparaelemseco.startswith('etag'):
continue
if nameparaelemseco == 'spec' or nameparaelemseco == 'specplot':
continue
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the cmpl array should depend on strgmodl
cmpltdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot)) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeattdim = getattr(gdat.refr, 'hist%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q))
refrfeatseco = gdat.refr.dictelem[q][nameparaelemseco][0, :]
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
refrhistfeattdimassc = np.histogram2d(refrfeatfrst[indxelemrefrasschits[q][l]], \
refrfeatseco[indxelemrefrasschits[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(refrhistfeattdim != 0.)
if indxgood[0].size > 0:
cmpltdim[indxgood] = refrhistfeattdimassc[indxgood].astype(float) / refrhistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((cmpltdim[indxgood] > 1.) | (cmpltdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q), cmpltdim)
cmplfrst = np.zeros(gdat.numbbinsplot) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeatfrst = getattr(gdat.refr, 'hist' + nameparaelemfrst + 'pop%d' % q)
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
refrhistfeatfrstassc = np.histogram(refrfeatfrst[indxelemrefrasschits[q][l]], bins=binsfeatfrst)[0]
indxgood = np.where(refrhistfeatfrst != 0.)[0]
if indxgood.size > 0:
cmplfrst[indxgood] = refrhistfeatfrstassc[indxgood].astype(float) / refrhistfeatfrst[indxgood]
if gdat.booldiagmode:
if np.where((cmplfrst[indxgood] > 1.) | (cmplfrst[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%spop%d' % (nameparaelemfrst, q), cmplfrst)
# false discovery rate
for l in gmod.indxpopl:
q = gmod.indxpoplrefrassc[l]
for nameparaelemfrst in gmod.namepara.elem[l]:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gmod.namepara.elem[l]:
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the fdis array should depend on strgmodl
fdistdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot))
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemseco]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
strgfeattdim = nameparaelemfrst + nameparaelemseco + 'pop%d' % l
fitthistfeattdim = getattr(gmodstat, 'hist' + strgfeattdim)
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
fitthistfeattdimfals = np.histogram2d(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], \
gmodstat.dictelem[l][nameparaelemseco][indxelemfittasscfals[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(fitthistfeattdim != 0.)
if indxgood[0].size > 0:
fdistdim[indxgood] = fitthistfeattdimfals[indxgood].astype(float) / fitthistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((fdistdim[indxgood] > 1.) | (fdistdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'fdis%s%spop%d' % (nameparaelemfrst, nameparaelemseco, l), fdistdim)
fdisfrst = np.zeros(gdat.numbbinsplot)
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
fitthistfeatfrstfals = np.histogram(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], bins=binsfeatfrst)[0]
fitthistfeatfrst = getattr(gmodstat, 'hist' + nameparaelemfrst + 'pop%d' % l)
indxgood = np.where(fitthistfeatfrst != 0.)[0]
if indxgood.size > 0:
fdisfrst[indxgood] = fitthistfeatfrstfals[indxgood].astype(float) / fitthistfeatfrst[indxgood]
if gdat.booldiagmode:
if np.where((fdisfrst[indxgood] > 1.) | (fdisfrst[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'fdis%spop%d' % (nameparaelemfrst, l), fdisfrst)
# temp
if strgmodl == 'true' and gdat.typeverb > 0:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
if np.where(minm > gmodstat.dictelem[l][strgfeat])[0].size > 0 or np.where(maxm < gmodstat.dictelem[l][strgfeat])[0].size > 0:
print('Warning: element parameter outside the plot limits.')
print('l')
print(l)
print('Feature: ')
print(strgfeat)
print('Plot minimum')
print(minm)
print('Plot maximum')
print(maxm)
if strgfeat == gmod.nameparagenrelemampl[l] and strgfeat in gmod.namepara.genrelem[l]:
gmod.indxparagenrelemtemp = gmod.namepara.genrelem[l].index(strgfeat)
if (gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp] != 'gaus' and not gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp].startswith('lnor')):
raise Exception('')
stopchro(gdat, gdatmodi, 'tert')
def retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, paragenrscalfull, dictelem, numbelem):
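    '''
    Return the log-prior of the generative element parameter strgfeat of population l, evaluated under the prior shape strgpdfn.
    '''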
gmod = getattr(gdat, strgmodl)
if strgpdfn == 'self':
minmfeat = getattr(gmod.minmpara, strgfeat)
maxmfeat = getattr(gmod.maxmpara, strgfeat)
lpri = numbelem[l] * np.log(1. / (maxmfeat - minmfeat))
if strgpdfn == 'logt':
lpri = retr_lprilogtdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'gaus':
lpri = retr_lprigausdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'dexp':
maxmbgal = getattr(gmod, 'maxmbgal')
gmod.indxpara.bgaldistscal = getattr(gmod.indxpara, 'bgaldistscalpop%d' % l)
lpri = np.sum(np.log(pdfn_dexp(dictelem[l]['bgal'], maxmbgal, paragenrscalfull[gmod.indxpara.bgaldistscal])))
if strgpdfn == 'expo':
maxmgang = getattr(gmod, 'maxmgang')
gang = retr_gang(dictelem[l]['lgal'], dictelem[l]['bgal'])
gmod.indxpara.gangdistscal = getattr(gmod.indxpara, 'gangdistscalpop%d' % l)
lpri = np.sum(np.log(pdfn_expo(gang, maxmgang, paragenrscalfull[gmod.indxpara.gangdistscal])))
    # account for the uniform prior on the azimuthal angle
    lpri += -numbelem[l] * np.log(2. * np.pi)
if strgpdfn == 'powr':
    lpri = retr_lpripowrdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'dpowslopbrek':
    lpri = retr_lpridpowdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'dsrcexpo':
    lpri += -np.sum(np.sqrt((dictelem[l]['lgal'] - lgalsour)**2 + (dictelem[l]['bgal'] - bgalsour)**2) / \
                                                                                getattr(gmod, 'dsrcdistsexppop%d' % l))
if strgpdfn.startswith('tmpl'):
    # construct the spatial prior interpolant before evaluating it
    if strgpdfn.endswith('cons'):
        pdfnspatpriotemp = getattr(gmod, 'pdfnspatpriotemp')
        spatdistcons = paragenrscalfull[getattr(gmod.indxpara, 'spatdistcons')]
        lpdfspatprio, lpdfspatprioobjt = retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons)
        lpdfspatpriointp = lpdfspatprioobjt(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart)
        lpdfspatpriointp = lpdfspatpriointp.T
        setattr(gmod, 'lpdfspatpriointp', lpdfspatpriointp)
        setattr(gmod, 'lpdfspatprioobjt', lpdfspatprioobjt)
    else:
        lpdfspatprioobjt = gmod.lpdfspatprioobjt
    # evaluate the spatial prior template at the element positions
    lpri = np.sum(lpdfspatprioobjt(dictelem[l]['bgal'], dictelem[l]['lgal'], grid=False))
return lpri
def checstrgfeat(strgfrst, strgseco):
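    '''
    Decide whether the ordered feature pair (strgfrst, strgseco) should be processed, returning True only if strgfrst sorts strictly before strgseco, so that each unordered pair is processed exactly once.
    '''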
numbfrst = len(strgfrst)
numbseco = len(strgseco)
numb = min(numbfrst, numbseco)
if strgfrst[:numb] < strgseco[:numb]:
booltemp = True
elif strgfrst[:numb] == strgseco[:numb]:
if numbfrst >= numbseco:
booltemp = False
else:
booltemp = True
else:
booltemp = False
return booltemp
def retr_pathoutprtag(pathpcat, rtag):
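    '''
    Return the output folder path for the run with tag rtag.
    '''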
pathoutprtag = pathpcat + '/data/outp/' + rtag + '/'
return pathoutprtag
def proc_finl(gdat=None, rtag=None, strgpdfn='post', listnamevarbproc=None, forcplot=False):
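    '''
    Final-process a run (or a list of runs to be tiled): aggregate the chains from all processes, compute convergence diagnostics, flatten the chains, and derive posterior (or prior) summary variables.
    '''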
gdatmock = None
print('proc_finl()')
if rtag is None:
rtag = gdat.rtag
# determine if the final-processing is nominal or tiling
if isinstance(rtag, list):
listrtagmodi = rtag
rtagfinl = tdpy.retr_strgtimestmp() + rtag[0][15:] + 'tile'
booltile = True
else:
listrtagmodi = [rtag]
rtagfinl = rtag
booltile = False
# determine if the gdatfinl object is available
boolgdatfinl = chec_statfile(pathpcat, rtagfinl, 'gdatfinlpost')
boolgdatfinlgood = False
if boolgdatfinl:
print('Final-processing has been performed previously.')
pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
path = pathoutprtag + 'gdatfinl' + strgpdfn
try:
gdat = readfile(path)
boolgdatfinlgood = True
except Exception:
print('gdatfinl object is corrupted.')
if boolgdatfinl and boolgdatfinlgood:
# read gdatfinl
pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
path = pathoutprtag + 'gdatfinl' + strgpdfn
gdatfinl = readfile(path)
if gdatfinl.fitt.numbparaelem > 0:
if gdatfinl.typedata == 'inpt':
if gdatfinl.boolcrex or gdatfinl.boolcrin:
if gdatfinl.rtagmock is not None:
path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'
gdatmock = readfile(path)
else:
if booltile:
gdatfinltile = tdpy.gdatstrt()
indxrtaggood = []
liststrgtile = []
listrtaggood = []
indxtiletemp = 0
for n, rtagmodi in enumerate(listrtagmodi):
# read gdatinit
boolgdatinit = chec_statfile(pathpcat, rtagmodi, 'gdatinit')
if not boolgdatinit:
if booltile:
print('Initial global object not found. Skipping...')
continue
else:
print('Initial global object not found. Quitting...')
return
pathoutprtag = retr_pathoutprtag(pathpcat, rtagmodi)
path = pathoutprtag + 'gdatinit'
gdatinit = readfile(path)
if booltile:
gdatfinltile = gdatinit
gdatfinl = gdatinit
else:
gdatfinl = gdatinit
pathoutprtagmodi = retr_pathoutprtag(pathpcat, rtagmodi)
listgdatmodi = []
for k in gdatinit.indxproc:
path = pathoutprtagmodi + 'gdatmodi%04d' % k + strgpdfn
listgdatmodi.append(readfile(path))
# erase
gdatdictcopy = deepcopy(gdatinit.__dict__)
for strg, valu in gdatdictcopy.items():
if strg.startswith('fitt.indxpara.'):
delattr(gdatinit, strg)
if gdatinit.boolmockonly:
print('Mock only run. Quitting final-processing...')
return
# read gdatmodi
print('rtagmodi')
print(rtagmodi)
boolgdatmodi = chec_statfile(pathpcat, rtagmodi, 'gdatmodipost')
if not boolgdatmodi:
print('Modified global object not found. Quitting final-processing...')
return
## list of other parameters to be flattened
gdatinit.liststrgvarbarryflat = deepcopy(listgdatmodi[0].liststrgvarbarry)
# temp
#for strg in ['memoresi']:
# gdatinit.liststrgvarbarryflat.remove(strg)
listparagenrscalfull = np.empty((gdatinit.numbsamptotl, gdatinit.fitt.maxmnumbpara))
if booltile:
gdatfinltile.pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
numbsamptotlrsmp = gdatinit.numbsamptotl
indxsamptotlrsmp = np.random.choice(gdatinit.indxsamptotl, size=gdatinit.numbsamptotl, replace=False)
# aggregate samples from the chains
if gdatinit.typeverb > 0:
print('Reading gdatmodi objects from all processes...')
timeinit = gdatinit.functime()
if gdatinit.typeverb > 0:
timefinl = gdatinit.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
if gdatinit.fitt.numbparaelem > 0:
if len(getattr(listgdatmodi[0], 'list' + strgpdfn + 'indxelemfull')) == 0:
print('Found an empty element list. Skipping...')
continue
if gdatinit.typeverb > 0:
print('Accumulating arrays...')
timeinit = gdatinit.functime()
for strgvarb in gdatinit.liststrgvarbarryflat:
for k in gdatinit.indxproc:
if k == 0:
shap = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb).shape
shap = [shap[0], gdatinit.numbproc] + list(shap[1:])
temp = np.zeros(shap) - 1
if len(shap) > 2:
temp[:, k, :] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)
else:
temp[:, k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, temp)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
if gdatfinl.typeverb > 0:
print('Accumulating lists...')
timeinit = gdatfinl.functime()
# lists of lists collected at each sample
for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
listtemp = [[[] for k in gdatfinl.indxproc] for j in gdatfinl.indxsamp]
for j in gdatfinl.indxsamp:
for k in gdatfinl.indxproc:
listtemp[j][k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)[j]
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
if not booltile:
## maximum likelihood sample
gdatfinl.maxmllikproc = np.empty(gdatfinl.numbproc)
gdatfinl.indxswepmaxmllikproc = np.empty(gdatfinl.numbproc, dtype=int)
gdatfinl.sampmaxmllikproc = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))
for k in gdatfinl.indxproc:
gdatfinl.maxmllikproc[k] = listgdatmodi[k].maxmllikswep
gdatfinl.indxswepmaxmllikproc[k] = listgdatmodi[k].indxswepmaxmllik
gdatfinl.sampmaxmllikproc[k, :] = listgdatmodi[k].sampmaxmllik
listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')
listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')
# Gelman-Rubin test
if gdatfinl.numbproc > 1:
if gdatfinl.typeverb > 0:
print('Computing the Gelman-Rubin TS...')
timeinit = gdatfinl.functime()
gdatfinl.gmrbparagenrscalbase = np.zeros(gdatfinl.fitt.numbparagenrbase)
gdatfinl.gmrbstat = np.zeros((gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))
for k in gdatfinl.fitt.indxparagenrbase:
gdatfinl.gmrbparagenrscalbase[k] = tdpy.mcmc.gmrb_test(listparagenrscalfull[:, :, k])
if not np.isfinite(gdatfinl.gmrbparagenrscalbase[k]):
gdatfinl.gmrbparagenrscalbase[k] = 0.
listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')
for i in gdatfinl.indxener:
for j in gdatfinl.indxpixl:
for m in gdatfinl.indxevtt:
gdatfinl.gmrbstat[i, j, m] = tdpy.mcmc.gmrb_test(listcntpmodl[:, :, i, j, m])
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
# calculate the autocorrelation of the chains
if gdatfinl.typeverb > 0:
print('Computing the autocorrelation of the chains...')
timeinit = gdatfinl.functime()
gdatfinl.atcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt, int(gdatfinl.numbparagenrfull / 2)))
gdatfinl.timeatcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))
gdatfinl.atcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara, int(gdatfinl.numbparagenrfull / 2)))
gdatfinl.timeatcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))
for k in gdatfinl.indxproc:
gdatfinl.atcrpara[k, :, :], gdatfinl.timeatcrpara[k, :] = tdpy.mcmc.retr_timeatcr(listparagenrscalfull[:, k, :], typeverb=gdatfinl.typeverb)
listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')
gdatfinl.atcrcntp[k, :], gdatfinl.timeatcrcntp[k, :] = tdpy.mcmc.retr_timeatcr(listcntpmodl[:, k, :, :, :], typeverb=gdatfinl.typeverb)
timeatcrcntpmaxm = np.amax(gdatfinl.timeatcrcntp)
gdatfinl.timeatcrcntpmaxm = np.amax(timeatcrcntpmaxm)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
setattr(gdatfinl, 'list' + strgpdfn + 'sampproc', np.copy(getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')))
# flatten the list chains from different walkers
for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
listtemp = []
listinpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
for j in gdatfinl.indxsamp:
for k in gdatfinl.indxproc:
listtemp.append(listinpt[j][k])
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)
# flatten the np.array chains from different walkers
for strgvarb in gdatinit.liststrgvarbarryflat:
inpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
shap = [inpt.shape[0] * inpt.shape[1]] + list(inpt.shape[2:])
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, inpt.reshape(shap))
listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')
listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')
if booltile:
liststrgtile.append(rtagmodi.split('_')[-2][-4:])
listrtaggood.append(rtagmodi)
indxrtaggood.append(n)
indxtiletemp += 1
if len(liststrgtile) == 1:
for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
refrfeattile = [[] for q in gdatfinl.indxrefr]
setattr(gdatfinl, 'refr' + strgfeat, refrfeattile)
for strgvarb in gdatfinl.liststrgvarbarrysamp:
if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
listvarb = []
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarb)
else:
hist = np.zeros_like(getattr(listgdatmodi[0], 'list' + strgpdfn + strgvarb))
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, hist)
for name, valu in gdatfinl.__dict__.items():
if name.startswith('refrhist'):
setattr(gdatfinl, name, np.zeros_like(getattr(gdatfinl, name)))
#for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
# refrfeattile = getattr(gdatfinl, 'refr' + strgfeat)
# #refrfeat = getattr(gdatfinl, 'refr' + strgfeat)
# refrfeat = [[] for q in gdatfinl.indxrefr]
# for q in gdatfinl.indxrefr:
# if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:
# refrfeat[q].append(refrfeattile[q])
for strgvarb in gdatfinl.liststrgvarbarrysamp:
if strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
# temp
if 'spec' in strgvarb:
continue
hist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
hist += getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
for name, valu in gdatfinl.__dict__.items():
if name.startswith('refrhist'):
hist = getattr(gdatfinl, name)
hist += getattr(gdatfinl, name)
print('Done with the tile number %d, run number %d...' % (indxtiletemp, n))
if booltile:
gdatfinl.pathplotrtag = gdatfinl.pathimag + rtagfinl + '/'
make_fold(gdatfinl)
indxrtaggood = np.array(indxrtaggood).astype(int)
numbrtaggood = indxrtaggood.size
numbtile = numbrtaggood
print('Found %d tiles with run tags:' % numbrtaggood)
for indxrtaggoodtemp in indxrtaggood:
print(rtag[indxrtaggoodtemp])
# concatenate reference elements from different tiles
#for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
# refrfeat = getattr(gdatfinl, 'refr' + strgfeat, refrfeat)
# for q in gdatfinl.indxrefr:
# if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:
# refrfeat[q] = np.concatenate(refrfeat[q], axis=1)
for strgvarb in gdatfinl.liststrgvarbarrysamp:
if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
listvarb = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
if 'assc' in strgvarb:
numbrefrelemtotl = 0
for k, varbrsmp in enumerate(listvarb):
numbrefrelemtotl += varbrsmp.shape[1]
shap = [gdatfinl.numbsamptotl, numbrefrelemtotl]
listvarbtemp = np.empty(shap)
cntr = 0
for k, varb in enumerate(listvarb):
listvarbtemp[:, cntr:cntr+varb.shape[1]] = varb
cntr += varb.shape[1]
else:
shap = [gdatfinl.numbsamptotl * numbtile] + list(listvarb[0].shape[1:])
listvarbtemp = np.empty(shap)
for k, varb in enumerate(listvarb):
listvarbtemp[k*gdatfinl.numbsamptotl:(k+1)*gdatfinl.numbsamptotl, ...] = varb
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarbtemp)
else:
# maximum likelihood sample
if gdatfinl.fitt.numbparaelem > 0:
listindxelemfull = getattr(gdatfinl, 'list' + strgpdfn + 'indxelemfull')
listllik = getattr(gdatfinl, 'list' + strgpdfn + 'llik')
listlliktotl = getattr(gdatfinl, 'list' + strgpdfn + 'lliktotl')
indxsamptotlmlik = np.argmax(np.sum(np.sum(np.sum(listllik, 3), 2), 1))
# copy the maximum likelihood sample
for strgvarb in listgdatmodi[0].liststrgvarbarrysamp:
setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik, ...])
for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik])
# temp -- don't gdatfinl.listllik and gdatfinl.listparagenrscalfull have the same dimensions?
gdatfinl.mlikparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')[indxsamptotlmlik, :]
#if gdatfinl.fitt.numbparaelem > 0:
# gdatfinl.mlikindxelemfull = listindxelemfull[indxsamptotlmlik]
gdatfinl.mlikparagenrscalbase = gdatfinl.mlikparagenrscalfull[gdatfinl.fitt.indxparagenrbase]
for k, nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):
    setattr(gdatfinl, 'mlik' + nameparagenrbase, gdatfinl.mlikparagenrscalbase[k])
# add execution times to the chain output
gdatfinl.timereal = np.zeros(gdatfinl.numbproc)
gdatfinl.timeproc = np.zeros(gdatfinl.numbproc)
for k in gdatfinl.indxproc:
gdatfinl.timereal[k] = listgdatmodi[k].timereal
gdatfinl.timeproc[k] = listgdatmodi[k].timeproc
# find the maximum likelihood and posterior over the chains
gdatfinl.indxprocmaxmllik = np.argmax(gdatfinl.maxmllikproc)
#gdatfinl.maxmlliktotl = gdatfinl.maxmllikproc[gdatfinl.indxprocmaxmllik]
gdatfinl.indxswepmaxmllik = gdatfinl.indxprocmaxmllik * gdatfinl.numbparagenrfull + gdatfinl.indxswepmaxmllikproc[gdatfinl.indxprocmaxmllik]
gdatfinl.sampmaxmllik = gdatfinl.sampmaxmllikproc[gdatfinl.indxprocmaxmllik, :]
if strgpdfn == 'post':
levipost = retr_levipost(listlliktotl)
setattr(gdatfinl, strgpdfn + 'levipost', levipost)
if strgpdfn == 'prio':
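    # estimate the log-evidence as the log of the prior-averaged likelihood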
leviprio = np.log(np.mean(np.exp(listlliktotl)))
setattr(gdatfinl, strgpdfn + 'leviprio', leviprio)
# parse the sample vector
listparagenrscalbase = listparagenrscalfull[:, gdatfinl.fitt.indxparagenrbase]
for k, nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):
    setattr(gdatfinl, 'list' + strgpdfn + nameparagenrbase, listparagenrscalbase[:, k])
setattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalbase', listparagenrscalbase)
if strgpdfn == 'post' and gdatfinl.checprio:
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
path = pathoutprtag + 'gdatfinlprio'
try:
gdatprio = readfile(path)
except Exception:
proc_finl(gdat=gdatfinl, strgpdfn='prio', listnamevarbproc=listnamevarbproc, forcplot=forcplot)
else:
gdatprio = None
# post process samples
## bin element parameters
if gdatfinl.typeverb > 0:
print('Binning the probabilistic catalog spatially...')
timeinit = gdatfinl.functime()
if not booltile:
if gdatfinl.fitt.numbparaelem > 0:
if gdatfinl.boolbinsspat:
histlgalbgalelemstkd = [[] for l in gdatfinl.fittindxpopl]
listlgal = getattr(gdatfinl, 'list' + strgpdfn + 'lgal')
listbgal = getattr(gdatfinl, 'list' + strgpdfn + 'bgal')
for l in gdatfinl.fittindxpopl:
if gdatfinl.fitttypeelem[l] != 'lghtline':
# the enclosing loop over the features to be stacked was missing here; the feature list below is an assumed fix
liststrgfeatstkd = gdatfinl.fitt.namepara.genrelem[l]
numb = len(liststrgfeatstkd)
histlgalbgalelemstkd[l] = np.zeros((gdatfinl.numbbgalpntsprob, gdatfinl.numblgalpntsprob, gdatfinl.numbbinsplot, numb))
temparry = np.concatenate([listlgal[n][l] for n in gdatfinl.indxsamptotl])
temp = np.empty((len(temparry), 3))
temp[:, 0] = temparry
temp[:, 1] = np.concatenate([listbgal[n][l] for n in gdatfinl.indxsamptotl])
for k, strgfeat in enumerate(liststrgfeatstkd):
temp[:, 2] = np.concatenate([getattr(gdatfinl, 'list' + strgpdfn + strgfeat)[n][l] for n in gdatfinl.indxsamptotl])
bins = getattr(gdatfinl, 'bins' + strgfeat)
histlgalbgalelemstkd[l][:, :, :, k] = np.histogramdd(temp, \
bins=(gdatfinl.binslgalpntsprob, gdatfinl.binsbgalpntsprob, bins))[0]
setattr(gdatfinl, strgpdfn + 'histlgalbgalelemstkd', histlgalbgalelemstkd)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
## construct a condensed catalog of elements
if gdatfinl.boolcondcatl and gdatfinl.fitt.numbparaelem > 0:
if gdatfinl.typeverb > 0:
print('Constructing a condensed catalog...')
timeinit = gdatfinl.functime()
retr_condcatl(gdatfinl)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
# construct lists of samples for each proposal type
listindxproptype = getattr(gdatfinl, 'list' + strgpdfn + 'indxproptype')
listboolpropaccp = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropaccp')
listboolpropfilt = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropfilt')
listindxsamptotlproptotl = []
listindxsamptotlpropfilt = []
listindxsamptotlpropaccp = []
listindxsamptotlpropreje = []
for n in gdatfinl.indxproptype:
indxsampproptype = np.where(listindxproptype == gdatfinl.indxproptype[n])[0]
listindxsamptotlproptotl.append(indxsampproptype)
listindxsamptotlpropaccp.append(np.intersect1d(indxsampproptype, np.where(listboolpropaccp)[0]))
listindxsamptotlpropfilt.append(np.intersect1d(indxsampproptype, np.where(listboolpropfilt)[0]))
listindxsamptotlpropreje.append(np.intersect1d(indxsampproptype, np.where(np.logical_not(listboolpropaccp))[0]))
if listindxsamptotlproptotl[n].size == 0:
accp = 0.
else:
accp = float(listindxsamptotlpropaccp[n].size) / listindxsamptotlproptotl[n].size
setattr(gdatfinl, 'accp' + gdatfinl.nameproptype[n], accp)
setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlproptotl', listindxsamptotlproptotl)
setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropaccp', listindxsamptotlpropaccp)
setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropreje', listindxsamptotlpropreje)
if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':
if gdatfinl.typedata == 'inpt':
if gdatfinl.boolcrex or gdatfinl.boolcrin:
if gdatfinl.rtagmock is not None:
path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'
gdatmock = readfile(path)
# posterior corrections
if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':
## perform corrections
if gdatfinl.typedata == 'inpt':
if gdatfinl.boolcrex or gdatfinl.boolcrin:
for gmod.namepara.genrelemvarbhist in gdatfinl.liststrgvarbhist:
strgvarb = gmod.namepara.genrelemvarbhist[0]
if gmod.namepara.genrelemvarbhist[1].startswith('aerr') or len(gmod.namepara.genrelemvarbhist[2]) > 0 and gmod.namepara.genrelemvarbhist[2].startswith('aerr'):
continue
if gmod.namepara.genrelemvarbhist[1] == 'spec' or gmod.namepara.genrelemvarbhist[1] == 'deflprof' or gmod.namepara.genrelemvarbhist[1] == 'specplot':
continue
if len(gmod.namepara.genrelemvarbhist[2]) > 0 and (gmod.namepara.genrelemvarbhist[2] == 'spec' or \
gmod.namepara.genrelemvarbhist[2] == 'deflprof' or gmod.namepara.genrelemvarbhist[2] == 'specplot'):
continue
## internal correction
listhist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
for qq in gdatmock.indxrefr:
l = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[1][0])
qq = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[2][0])
if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and \
(len(gmod.namepara.genrelemvarbhist[2]) == 0 or gmod.namepara.genrelemvarbhist[2][-4:] in gdatfinl.listnamerefr):
listhistincr = listhist
else:
if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and len(gmod.namepara.genrelemvarbhist[2]) > 0:
listcmpltrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (l, qq))], 2)
listfdistrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (qq, l))], 2)
elif len(gmod.namepara.genrelemvarbhist[2][:-4]) > 0 and gmod.namepara.genrelemvarbhist[2][-4:] in gdatfinl.listnamerefr:
listcmpltrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (l, qq))], 1)
listfdistrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (qq, l))], 1)
else:
listcmpltrue = getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[3][qq])
listfdistrue = getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[3][qq])
if len(gmod.namepara.genrelemvarbhist[2]) == 0:
listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
for k in gdatfinl.indxbinsplot:
listcmplboot[:, k] = np.random.choice(listcmpltrue[:, k], size=gdatfinl.numbsampboot)
listfdisboot[:, k] = np.random.choice(listfdistrue[:, k], size=gdatfinl.numbsampboot)
listhistboot[:, k] = np.random.choice(listhist[:, k], size=gdatfinl.numbsampboot)
else:
listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
for a in gdatfinl.indxbinsplot:
for b in gdatfinl.indxbinsplot:
listcmplboot[:, a, b] = np.random.choice(listcmpltrue[:, a, b], size=gdatfinl.numbsampboot)
listfdisboot[:, a, b] = np.random.choice(listfdistrue[:, a, b], size=gdatfinl.numbsampboot)
listhistboot[:, a, b] = np.random.choice(listhist[:, a, b], size=gdatfinl.numbsampboot)
indxbadd = np.where(listcmplboot == -1)
indxbaddzero = np.where(listcmplboot == 0.)
listhistincr = listhistboot / listcmplboot * (1. - listfdisboot)
listhistincr[indxbadd] = -1.5
listhistincr[indxbaddzero] = 1.5
listgdatmodi[0].liststrgchan += ['incr' + gmod.namepara.genrelemvarbhist[4][qq]]
setattr(gdatfinl, 'listpostincr' + gmod.namepara.genrelemvarbhist[4][qq], listhistincr)
## external correction
for q in gdatfinl.indxrefr:
nametemp = gmod.namepara.genrelemvarbhist[1]
if len(gmod.namepara.genrelemvarbhist[2]) > 0:
nametemp += gmod.namepara.genrelemvarbhist[2]
nametemp += 'pop%dpop%dpop%d' % (q, qq, l)
crexhist = getattr(gdatfinl, 'crex' + nametemp)
if crexhist is not None:
listhistexcr = listhistincr * crexhist
if crexhist.ndim == 1 and listhistincr.ndim == 3:
raise Exception('')
listgdatmodi[0].liststrgchan += ['excr' + nametemp]
setattr(gdatfinl, 'listpostexcr' + nametemp, listhistexcr)
# compute credible intervals
if gdatfinl.typeverb > 0:
print('Computing credible intervals...')
timeinit = gdatfinl.functime()
for strgchan in listgdatmodi[0].liststrgchan:
if booltile:
if strgchan in gdatfinl.liststrgvarbarryswep or strgchan in listgdatmodi[0].liststrgvarblistsamp:
continue
if not (strgchan.startswith('hist') or strgchan.startswith('incr') or strgchan.startswith('excr')):
continue
if gdatfinl.fitt.numbparaelem > 0 and strgchan in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
if 'spec' in strgchan:
continue
if strgchan == 'spec':
continue
listtemp = getattr(gdatfinl, 'list' + strgpdfn + strgchan)
if isinstance(listtemp, list):
if booltile:
continue
# ensure that transdimensional lists are not included
# temp
if strgchan in gdatfinl.fitt.namepara.genrelemtotl or strgchan == 'indxelemfull':
continue
pctltemp = []
pmeatemp = []
meditemp = []
errrtemp = []
stdvtemp = []
numb = len(listtemp[0])
for k in range(numb):
if isinstance(listtemp[0][k], list):
continue
shap = [gdatfinl.numbsamptotl] + list(listtemp[0][k].shape)
temp = np.zeros(shap)
for n in gdatfinl.indxsamptotl:
temp[n, ...] = listtemp[n][k]
pctltempsing = tdpy.retr_pctlvarb(temp)
pmeatempsing = np.mean(temp, axis=0)
meditempsing = pctltempsing[0, ...]
errrtempsing = tdpy.retr_errrvarb(pctltempsing)
stdvtempsing = np.std(temp, axis=0)
pctltemp.append(pctltempsing)
pmeatemp.append(pmeatempsing)
meditemp.append(meditempsing)
errrtemp.append(errrtempsing)
stdvtemp.append(stdvtempsing)
else:
# this is needed for finding posterior moments of features of associated reference elements
if 'asscref' in strgchan:
if listtemp.ndim != 2:
raise Exception('')
pmeatemp = np.zeros(listtemp.shape[1])
pctltemp = np.zeros([3] + [listtemp.shape[1]])
# temp -- this only works for 2D listtemp
for k in range(listtemp.shape[1]):
indxassc = np.where(np.isfinite(listtemp[:, k]))[0]
if indxassc.size > 0:
pctltemp[:, k] = tdpy.retr_pctlvarb(listtemp[indxassc, k])
pmeatemp[k] = np.mean(listtemp[indxassc, k])
else:
pctltemp = tdpy.retr_pctlvarb(listtemp)
pmeatemp = np.mean(listtemp, axis=0)
errrtemp = tdpy.retr_errrvarb(pctltemp)
stdvtemp = np.std(listtemp, axis=0)
meditemp = pctltemp[0, ...]
if strgchan in gdatfinl.listnamevarbcpct:
cpcttemp = np.empty([gdatfinl.numbsampcpct] + [3] + list(listtemp.shape[1:]))
for n in gdatfinl.indxsampcpct:
cpcttemp[n, ...] = tdpy.retr_pctlvarb(listtemp[:n+1, ...])
setattr(gdatfinl, 'pctl' + strgpdfn + strgchan, pctltemp)
setattr(gdatfinl, 'medi' + strgpdfn + strgchan, meditemp)
setattr(gdatfinl, 'pmea' + strgpdfn + strgchan, pmeatemp)
setattr(gdatfinl, 'errr' + strgpdfn + strgchan, errrtemp)
setattr(gdatfinl, 'stdv' + strgpdfn + strgchan, stdvtemp)
if strgchan in gdatfinl.listnamevarbcpct:
setattr(gdatfinl, 'cpct' + strgpdfn + strgchan, cpcttemp)
if not booltile:
pmealliktotl = getattr(gdatfinl, 'pmea' + strgpdfn + 'lliktotl')
stdvlliktotl = getattr(gdatfinl, 'stdv' + strgpdfn + 'lliktotl')
minmlliktotl = np.amin(listlliktotl)
maxmlliktotl = np.amax(listlliktotl)
skewlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**3)
kurtlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**4)
setattr(gdatfinl, 'minm' + strgpdfn + 'lliktotl', minmlliktotl)
setattr(gdatfinl, 'maxm' + strgpdfn + 'lliktotl', maxmlliktotl)
setattr(gdatfinl, 'skew' + strgpdfn + 'lliktotl', skewlliktotl)
setattr(gdatfinl, 'kurt' + strgpdfn + 'lliktotl', kurtlliktotl)
if strgpdfn == 'post':
infopost = retr_infofromlevi(pmealliktotl, levipost)
setattr(gdatfinl, strgpdfn + 'infopost', infopost)
if strgpdfn == 'post' and gdatfinl.checprio:
leviprio = getattr(gdatprio, 'prioleviprio')
infoprio = retr_infofromlevi(pmealliktotl, leviprio)
setattr(gdatfinl, strgpdfn + 'infoprio', infoprio)
bcom = maxmlliktotl - pmealliktotl
setattr(gdatfinl, strgpdfn + 'bcom', bcom)
listnametemp = ['lliktotl']
if gdatfinl.fitt.numbparaelem > 0:
listnametemp += ['lpripena']
for namevarbscal in listnametemp:
listtemp = getattr(gdatfinl, 'list' + strgpdfn + namevarbscal)
minm = np.amin(listtemp)
maxm = np.amax(listtemp)
setattr(gdatfinl, 'minm' + namevarbscal, minm)
setattr(gdatfinl, 'maxm' + namevarbscal, maxm)
setattr(gdatfinl, 'scal' + namevarbscal, 'self')
retr_axis(gdatfinl, namevarbscal)
if gdatfinl.checprio:
for strgvarb in gdatfinl.listnamevarbscal:
setp_pdfnvarb(gdatfinl, strgpdfn, strgvarb, strgvarb)
for l0 in gdatfinl.fittindxpopl:
for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
continue
setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)
for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
continue
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)
# calculate information gain
if strgpdfn == 'post':
for namevarbscal in gdatfinl.listnamevarbscal:
setp_info(gdatfinl, gdatprio, namevarbscal, namevarbscal)
for l0 in gdatfinl.fittindxpopl:
for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
continue
setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)
for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
continue
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
# flatten the arrays which have been collected at each sweep
#setattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'flat', getattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'totl').flatten())
if not booltile:
# memory usage
listmemoresi = getattr(gdatfinl, 'list' + strgpdfn + 'memoresi')
gdatfinl.meanmemoresi = np.mean(listmemoresi, 1)
gdatfinl.derimemoresi = (gdatfinl.meanmemoresi[-1] - gdatfinl.meanmemoresi[0]) / gdatfinl.numbswep
gdatfinl.timerealtotl = time.time() - gdatfinl.timerealtotl
gdatfinl.timeproctotl = time.process_time() - gdatfinl.timeproctotl
gdatfinl.timeproctotlswep = gdatfinl.timeproctotl / gdatfinl.numbswep
if gdatfinl.timeatcrcntpmaxm == 0.:
gdatfinl.timeprocnorm = 0.
else:
gdatfinl.timeprocnorm = gdatfinl.timeproctotlswep / gdatfinl.timeatcrcntpmaxm
# write the final gdat object
path = gdatfinl.pathoutprtag + 'gdatfinl' + strgpdfn
if gdatfinl.typeverb > 0:
print('Writing gdatfinl to %s...' % path)
writfile(gdatfinl, path)
filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')
filestat.write('gdatfinl%s written.\n' % strgpdfn)
filestat.close()
if not booltile:
if gdatfinl.typeverb > 0:
for k in gdatfinl.indxproc:
print('Process %d has been completed in %d real seconds, %d CPU seconds.' % (k, gdatfinl.timereal[k], gdatfinl.timeproc[k]))
print('Parent process has run in %d real seconds, %d CPU seconds.' % (gdatfinl.timerealtotl, gdatfinl.timeproctotl))
# temp -- force the post-processing stage to run on the posterior
gdatfinl.strgpdfn = 'post'
print('Checking whether post-processing plots already exist.')
booltemp = chec_statfile(pathpcat, rtagfinl, 'plotfinl')
if booltemp:
print('Final plots already exist. Skipping...')
else:
if strgpdfn == 'post' and gdatfinl.checprio:
path = pathoutprtag + 'gdatfinlprio'
gdatprio = readfile(path)
else:
gdatprio = None
if (gdatfinl.makeplot and getattr(gdatfinl, 'makeplotfinl' + strgpdfn)) or forcplot:
plot_finl(gdatfinl, gdatprio=gdatprio, strgpdfn=strgpdfn, gdatmock=gdatmock, booltile=booltile)
filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')
filestat.write('plotfinl%s written.\n' % strgpdfn)
filestat.close()
def retr_listgdat(listrtag, typegdat='finlpost'):
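'''Read the stored gdat object of the given type for each run tag and return them as a list.'''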
listgdat = []
for rtag in listrtag:
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
path = pathoutprtag + 'gdat%s' % typegdat
listgdat.append(readfile(path))
return listgdat
def make_fold(gdat):
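'''Construct the output folder hierarchy for the plots of each PDF type, phase, and diagnostic, and create the directories on disk.'''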
for strgpdfn in gdat.liststrgpdfn:
setattr(gdat, 'path' + strgpdfn, gdat.pathplotrtag + strgpdfn + '/')
path = getattr(gdat, 'path' + strgpdfn)
for nameseco in ['finl', 'fram', 'anim', 'opti']:
setattr(gdat, 'path' + strgpdfn + nameseco, path + nameseco + '/')
for nameseco in ['diag', 'lpac', 'varbscal', 'cond', 'varbscalproc']:
setattr(gdat, 'path' + strgpdfn + 'finl' + nameseco, path + 'finl/' + nameseco + '/')
for n in gdat.indxproptype:
setattr(gdat, 'path' + strgpdfn + 'finl' + gdat.nameproptype[n], path + 'finl/lpac/' + gdat.nameproptype[n] + '/')
for namethrd in ['hist', 'trac', 'join', 'cova']:
setattr(gdat, 'path' + strgpdfn + 'finlvarbscal' + namethrd, path + 'finl/varbscal/' + namethrd + '/')
for strgphas in gdat.liststrgphas + ['init']:
liststrgfold = getattr(gdat, 'liststrgfold' + strgphas)
for nameseco in liststrgfold:
if strgphas == 'init':
if nameseco == 'assc' or nameseco.startswith('cmpl') or nameseco.startswith('fdis'):
continue
setattr(gdat, 'path' + strgphas + nameseco[:-1], gdat.pathplotrtag + 'init/' + nameseco)
else:
setattr(gdat, 'path' + strgpdfn + strgphas + nameseco[:-1], path + strgphas + '/' + nameseco)
gdat.pathinfo = gdat.pathplotrtag + 'info/'
## make the directories
for attr, valu in gdat.__dict__.items():
if attr.startswith('path'):
os.system('mkdir -p %s' % valu)
def make_cmapdivg(strgcolrloww, strgcolrhigh):
funccolr = mpl.colors.ColorConverter().to_rgb
colrloww = funccolr(strgcolrloww)
colrhigh = funccolr(strgcolrhigh)
cmap = make_cmap([colrloww, funccolr('white'), 0.5, funccolr('white'), colrhigh])
return cmap
def make_cmap(seq):
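'''Build a linear segmented colormap from a sequence alternating RGB tuples and float anchor points in (0, 1).'''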
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mpl.colors.LinearSegmentedColormap('CustomMap', cdict)
def setp_pdfnvarb(gdat, strgpdfn, name, namefull, nameseco=None):
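'''Histogram the chain of a variable into a normalized PDF and attach it to gdat as a 'pdfn' attribute.'''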
# the chain of this variable was never retrieved here; the naming convention below is an assumed fix
listvarb = getattr(gdat, 'list' + strgpdfn + namefull)
if listvarb.ndim == 1:
shaptemp = [gdat.numbbinspdfn, 1]
else:
shaptemp = [gdat.numbbinspdfn] + list(listvarb.shape[1:])
pdfn = np.empty(shaptemp)
if listvarb.ndim == 1:
binsvarb = getattr(gdat.binspara, name)
deltvarb = getattr(gdat, 'delt' + name)
pdfn[:, 0] = np.histogram(listvarb, bins=binsvarb)[0].astype(float)
pdfn[:, 0] /= np.sum(pdfn[:, 0])
pdfn[:, 0] /= deltvarb
else:
# gmod is not defined in this scope; the fitting model is assumed
binsvarb = np.linspace(0, gdat.fitt.maxmpara.numbelemtotl, 51)
if listvarb.ndim == 2:
for k in range(listvarb.shape[1]):
pdfn[:, k] = np.histogram(listvarb[:, k], bins=binsvarb)[0].astype(float)
pdfn[:, k] /= np.sum(pdfn[:, k])
pdfn *= 50.
if listvarb.ndim == 3:
for k in range(listvarb.shape[1]):
for m in range(listvarb.shape[2]):
pdfn[:, k, m] = np.histogram(listvarb[:, k, m], bins=binsvarb)[0].astype(float)
pdfn[:, k, m] /= np.sum(pdfn[:, k, m])
pdfn *= 2500.
pdfn[pdfn < 1e-50] = 1e-50
setattr(gdat, 'pdfn' + strgpdfn + namefull, pdfn)
def setp_info(gdat, gdatprio, name, namefull, nameseco=None, namesecofull=None):
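'''Compute the Kullback-Leibler information density and total information gain between the posterior and prior of a variable, along with KS-test p-values, and attach them to gdat.'''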
listpost = getattr(gdat, 'listpost' + namefull)
listprio = getattr(gdatprio, 'listprio' + namefull)
pdfnpost = getattr(gdat, 'pdfnpost' + namefull)
pdfnprio = getattr(gdatprio, 'pdfnprio' + namefull)
if listpost.ndim == 3:
infodens = np.empty((gdat.numbbinspdfn, listpost.shape[1], listpost.shape[2]))
info = np.empty((listpost.shape[1], listpost.shape[2]))
pvks = np.empty((listpost.shape[1], listpost.shape[2]))
else:
if listpost.ndim == 1:
numbtemp = 1
else:
numbtemp = listpost.shape[1]
infodens = np.empty((gdat.numbbinspdfn, numbtemp))
info = np.empty(numbtemp)
pvks = np.empty(numbtemp)
if listpost.ndim == 1:
listpost = listpost[:, None]
listprio = listprio[:, None]
deltvarb = getattr(gdat, 'delt' + name)
else:
if listpost.ndim == 2:
deltvarb = 1. / 50
else:
deltvarb = 1. / 50**2
if listpost.ndim == 1 or listpost.ndim == 2:
for k in range(listpost.shape[1]):
infodens[:, k] = retr_infodens(pdfnpost[:, k], pdfnprio[:, k])
info[k] = np.sum(infodens[:, k] * deltvarb)
temp, pvks[k] = sp.stats.ks_2samp(listpost[:, k], listprio[:, k])
if listpost.ndim == 3:
for k in range(listpost.shape[1]):
for m in range(listpost.shape[2]):
infodens[:, k, m] = retr_infodens(pdfnpost[:, k, m], pdfnprio[:, k, m])
info[k, m] = np.sum(infodens[:, k, m] * deltvarb)
temp, pvks[k, m] = sp.stats.ks_2samp(listpost[:, k, m], listprio[:, k, m])
setattr(gdat, 'pvks' + namefull, pvks)
setattr(gdat, 'infodens' + namefull, infodens)
setattr(gdat, 'info' + namefull, info)
# check the state file
def chec_statfile(pathpcat, rtag, strggdat, typeverb=1):
print('Checking the state file %s for %s...' % (strggdat, rtag))
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
# check the status file
if not os.path.isfile(pathoutprtag + 'stat.txt'):
if typeverb > 0:
print('pathoutprtag')
print(pathoutprtag)
print('stat.txt not found.')
return False
# check the global object
filestat = open(pathoutprtag + 'stat.txt', 'r')
booltemp = False
linesrch = strggdat + ' written.\n'
for line in filestat:
if line == linesrch:
booltemp = True
filestat.close()
if not booltemp:
if typeverb > 0:
print('bad %s status.' % (strggdat))
return False
else:
return True
def retr_los3(dlos, lgal, bgal):
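'''Convert line-of-sight distance and Galactic coordinates to Galactocentric distance and angles; the constant 8.5e3 appears to be the assumed solar Galactocentric distance in pc.'''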
dglc = np.sqrt(8.5e3**2 + dlos**2 - 2. * dlos * 8.5e3 * np.cos(bgal) * np.cos(lgal))
thet = np.arccos(np.sin(bgal) * dlos / dglc)
phii = np.arcsin(np.sqrt(np.cos(bgal)**2 * dlos**2 + 8.5e3**2 - 2 * dlos * np.cos(bgal) * 8.5e3) / dglc)
return dglc, thet, phii
def retr_glc3(dglc, thet, phii):
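'''Inverse of retr_los3: convert Galactocentric spherical coordinates back to line-of-sight distance and Galactic coordinates.'''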
xpos = dglc * np.sin(thet) * np.cos(phii)
ypos = dglc * np.sin(thet) * np.sin(phii)
zpos = dglc * np.cos(thet)
dlos = np.sqrt(zpos**2 + xpos**2 + (8.5e3 - ypos)**2)
lgal = np.arctan2(8.5e3 - ypos, xpos) - np.pi / 2
bgal = np.arcsin(zpos / dlos)
return dlos, lgal, bgal
def retr_lumipuls(geff, magf, per0):
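'''Pulsar luminosity given the radiative efficiency geff, surface magnetic field magf, and spin period per0, scaling as expected for magnetic-dipole spin-down.'''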
# temp -- this is bolometric luminosity whereas dictelem[l]['flux'] is differential!
lumi = 9.6e33 * (geff / 0.2) * (magf / 10**8.5)**2 * (3e-3 / per0)**4
return lumi
def retr_lumi(gdat, flux, dlos, reds=None):
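'''Convert flux to luminosity for a source at line-of-sight distance dlos, optionally applying a redshift correction.'''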
lumi = flux * 4. * np.pi * dlos**2 * gdat.prsccmtr**2 / gdat.ergsgevv
# temp
# redshift correction
if reds is not None:
lumi *= (1. + reds)**2
return lumi
def retr_flux(gdat, lumi, dlos, reds=None):
flux = lumi / 4. / np.pi / dlos**2 / gdat.prsccmtr**2 * gdat.ergsgevv
# redshift correction, inverting the factor applied in retr_lumi (assumed completion of the empty branch)
if reds is not None:
flux /= (1. + reds)**2
return flux
def retr_per1(per0, magf):
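'''Period derivative (spin-down rate) of a pulsar given its spin period and surface magnetic field.'''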
per1 = 3.3e-20 * (magf / 10**8.5)**2 * (3e-3 / per0)
return per1
def retr_dlosgalx(lgal, bgal, dglc):
# temp -- this is obviously wrong
dlos = 8.5e3 - dglc
return dlos
def retr_arryfromlist(listtemp):
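'''Stack a list of equal-shape arrays into a single array along a new leading axis.'''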
shap = [len(listtemp)] + list(listtemp[0].shape)
arry = np.empty(shap)
for k in range(len(listtemp)):
arry[k, ...] = listtemp[k]
return arry
def proc_cntpdata(gdat):
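'''Prepare the data count maps: compute the variance and likelihood offsets, spatial averages, plotting limits, count histograms, and, for Cartesian pixelizations, the Cartesian count maps.'''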
# exclude voxels with vanishing exposure
## data counts
if gdat.typedata == 'inpt':
gdat.cntpdata = retr_cntp(gdat, gdat.sbrtdata)
# data variance
gdat.varidata = np.maximum(gdat.cntpdata, 1.)
# correct the likelihoods for the constant data dependent factorial
gdat.llikoffs = -sp.special.gammaln(gdat.cntpdata + 1)
## spatial average
gdat.sbrtdatamean, gdat.sbrtdatastdv = retr_spatmean(gdat, gdat.cntpdata, boolcntp=True)
# data count limits
minmcntpdata = np.amin(gdat.cntpdata)
maxmcntpdata = np.amax(gdat.cntpdata)
minm = minmcntpdata
maxm = maxmcntpdata
setp_varb(gdat, 'cntpdata', minm=minm, maxm=maxm, lablroot='$C_{D}$', scal='asnh', strgmodl='plot')
maxm = maxmcntpdata
minm = 1e-1 * minmcntpdata
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
setp_varb(gdat, 'cntpmodl', minm=minm, maxm=maxm, strgmodl=strgmodl, scal='asnh')
# residual limits
maxm = np.ceil(maxmcntpdata * 0.1)
minm = -np.ceil(maxmcntpdata * 0.1)
setp_varb(gdat, 'cntpresi', minm=minm, maxm=maxm, lablroot='$C_{R}$', scal='asnh', strgmodl='plot')
# 1-point function of the data counts
for m in gdat.indxevtt:
if gdat.numbpixl > 1:
for i in gdat.indxener:
histcntp = np.histogram(gdat.cntpdata[i, :, m], bins=gdat.binspara.cntpdata)[0]
setattr(gdat, 'histcntpdataen%02devt%d' % (i, m), histcntp)
else:
histcntp = np.histogram(gdat.cntpdata[:, 0, m], bins=gdat.binspara.cntpdata)[0]
setattr(gdat, 'histcntpdataevt%d' % m, histcntp)
# obtain cartesian versions of the maps
if gdat.typepixl == 'cart':
## data counts
gdat.cntpdatacart = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))
gdat.cntpdatacart[:, gdat.indxpixlrofi, :] = gdat.cntpdata
gdat.cntpdatacart = gdat.cntpdatacart.reshape((gdat.numbener, gdat.numbsidecart, gdat.numbsidecart, gdat.numbevtt))
def retr_infodens(pdfnpost, pdfnprio):
infodens = pdfnpost * np.log(pdfnpost / pdfnprio)
return infodens
def retr_llik(gdat, strgmodl, cntpmodl):
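'''Per-voxel log-likelihood of the model counts, either Poisson (up to the data-dependent constant stored in gdat.llikoffs) or Gaussian.'''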
if gdat.liketype == 'pois':
llik = gdat.cntpdata * np.log(cntpmodl) - cntpmodl
if gdat.liketype == 'gaus':
llik = -0.5 * (gdat.cntpdata - cntpmodl)**2 / gdat.varidata
return llik
def retr_mapsgaus(gdat, lgalgrid, bgalgrid, lgal, bgal, spec, size, ellp, angl):
'''Evaluate an elliptical Gaussian surface-brightness map on the given grid. The grid arguments were missing from the original signature and the use of spec for the amplitude is an assumed fix.'''
rttrmatr = np.array([[np.cos(angl), -np.sin(angl)], [np.sin(angl), np.cos(angl)]])
icovmatr = np.array([[1. / ((1. - ellp) * size)**2, 0.], [0., 1. / size**2]])
# rotate the offsets into the frame of the ellipse before applying the inverse covariance
posi = np.tensordot(rttrmatr, np.array([lgalgrid - lgal, bgalgrid - bgal]), (1, 0))
mapsgaus = spec * np.exp(-0.5 * np.sum(posi * np.tensordot(icovmatr, posi, (1, 0)), 0)) / size**2 / (1. - ellp)
return mapsgaus
def retr_sbrtsers(gdat, lgalgrid, bgalgrid, lgal, bgal, spec, size, ellp, angl, seri=np.array([4.])):
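'''Evaluate the surface brightness of a Sersic profile on the given grid, either by interpolating a precomputed pixel-convolved profile or by direct de Vaucouleurs evaluation.'''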
lgalrttr = (1. - ellp) * (np.cos(angl) * (lgalgrid - lgal) - np.sin(angl) * (bgalgrid - bgal))
bgalrttr = np.sin(angl) * (lgalgrid - lgal) + np.cos(angl) * (bgalgrid - bgal)
angl = np.sqrt(lgalrttr**2 + bgalrttr**2)
# interpolate pixel-convolved Sersic surface brightness
if gdat.typesers == 'intp':
shapinpt = angl.shape
inpt = np.empty(list(shapinpt) + [3])
inpt[..., 0] = angl
inpt[..., 1] = size
inpt[..., 2] = seri
sbrtsers = spec[:, None, None] * sp.interpolate.interpn((gdat.binspara.lgalsers, gdat.binspara.halfsers, gdat.binspara.indxsers), gdat.sersprof, inpt)[None, :, None]
# evaluate the de Vaucouleurs profile directly
if gdat.typesers == 'vauc':
sbrtsers = spec[:, None, None] * retr_sbrtsersnorm(angl, size)[None, :, None]
return sbrtsers
def retr_sbrtsersnorm(angl, halfsers, indxsers=4.):
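'''Surface brightness profile of a Sersic source normalized to unit total flux; factsers is the standard analytic approximation to the Sersic b_n coefficient.'''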
## this approximation works for 0.5 < indx < 10
factsers = 1.9992 * indxsers - 0.3271
## surface brightness profile at the half-light radius for a 1 erg cm^-2 s^-1 A^-1 source
if indxsers == 4.:
sbrthalf = 1. / 7.2 / np.pi / halfsers**2
else:
sbrthalf = 1. / 2. / np.pi / np.exp(factsers) * factsers**(2 * indxsers) / indxsers / sp.special.gamma(2. * indxsers) / halfsers**2
## surface brightness profile
sbrtsers = sbrthalf * np.exp(-factsers * ((angl / halfsers)**(1. / indxsers) - 1.))
return sbrtsers
def copytdgu(varb):
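'''Copy a variable, using np.copy for numpy arrays and a deep copy for everything else.'''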
if isinstance(varb, np.ndarray):
return np.copy(varb)
else:
return copy.deepcopy(varb)
def proc_anim(rtag):
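'''Collect the per-sweep frame plots of a run and stitch them into GIF animations with ImageMagick's convert.'''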
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
print('Making animations of frame plots for %s...' % rtag)
path = pathoutprtag + 'gdatinit'
gdat = readfile(path)
for strgpdfn in gdat.liststrgpdfn:
for nameextn in gdat.liststrgfoldanim:
pathframextn = gdat.pathimag + rtag + '/' + strgpdfn + '/fram/' + nameextn
pathanimextn = gdat.pathimag + rtag + '/' + strgpdfn + '/anim/' + nameextn
try:
listfile = fnmatch.filter(os.listdir(pathframextn), '*_swep*.pdf')
except Exception:
print('%s failed.' % pathframextn)
continue
listfiletemp = []
for thisfile in listfile:
listfiletemp.extend((thisfile.split('_')[0]).rsplit('/', 1))
listname = list(set(listfiletemp))
if len(listname) == 0:
continue
np.random.shuffle(listname)
for name in listname:
strgtemp = '%s*_swep*.pdf' % name
listfile = fnmatch.filter(os.listdir(pathframextn), strgtemp)
numbfile = len(listfile)
liststrgextn = []
for k in range(numbfile):
liststrgextn.append((listfile[k].split(name)[1]).split('_')[0])
liststrgextn = list(set(liststrgextn))
for k in range(len(liststrgextn)):
listfile = fnmatch.filter(os.listdir(pathframextn), name + liststrgextn[k] + '_swep*.pdf')
numbfile = len(listfile)
indxfilelowr = 0
if indxfilelowr < numbfile:
indxfileanim = np.arange(indxfilelowr, numbfile)
else:
continue
indxfileanim = np.random.choice(indxfileanim, replace=False, size=indxfileanim.size)
cmnd = 'convert -delay 20 -density 300 -quality 100 '
for n in range(indxfileanim.size):
cmnd += '%s%s ' % (pathframextn, listfile[indxfileanim[n]])
namegiff = '%s%s.gif' % (pathanimextn, name + liststrgextn[k])
cmnd += ' ' + namegiff
print('Processing %s' % namegiff)
if not os.path.exists(namegiff):
print('Run: %s, pdf: %s' % (rtag, strgpdfn))
print('Making %s animation...' % name)
os.system(cmnd)
else:
print('GIF already exists.')
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
filestat = open(pathoutprtag + 'stat.txt', 'a')
filestat.write('animfinl written.\n')
filestat.close()
def plot_samp(gdat, gdatmodi, strgstat, strgmodl, strgphas, strgpdfn='post', gdatmock=None, booltile=False):
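'''Produce the suite of diagnostic and posterior plots for a given model state and sample (frame, maximum-likelihood, or posterior).'''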
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
if not booltile:
if strgstat != 'pdfn':
gmodstat.numbelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
if gdatmodi is not None:
strgswep = '_%09d' % gdatmodi.cntrswep
else:
strgswep = ''
if not booltile:
# data count maps
if gdat.numbpixl > 1:
for i in gdat.indxener:
for m in gdat.indxevtt:
if gdat.boolmakeframcent and (i != gdat.numbener // 2 or m != gdat.numbevtt // 2):
continue
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpdata', i, m)
## residual count maps
for i in gdat.indxener:
for m in gdat.indxevtt:
if gdat.boolmakeframcent and (i != gdat.numbener // 2 or m != gdat.numbevtt // 2):
continue
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpresi', i, m)
if gdat.numbpixl > 1:
if gmod.numbparaelem > 0:
if gmod.boolelemlens:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelem', booltdim=True)
# temp -- restrict other plots to indxmodlelemcomp
if gdat.boolbinsener:
for specconvunit in gdat.listspecconvunit:
if not gmod.boolbfun:
plot_sbrt(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, specconvunit)
if gmod.boolapplpsfn:
plot_psfn(gdat, gdatmodi, strgstat, strgmodl)
setp_indxswepsave(gdat)
if gmod.numbparaelem > 0:
# element parameter histograms
if not (strgmodl == 'true' and gdat.typedata == 'inpt'):
limtydat = gdat.limtydathistfeat
for l in gmod.indxpopl:
strgindxydat = 'pop%d' % l
for nameparaderielemodim in gmod.namepara.derielemodim[l]:
if not (nameparaderielemodim == 'flux' or nameparaderielemodim == 'mcut' or \
nameparaderielemodim == 'deltllik' or nameparaderielemodim == 'defs' or nameparaderielemodim == 'nobj'):
continue
if gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt':
continue
indxydat = [l, slice(None)]
name = nameparaderielemodim
namepopl = nameparaderielemodim + 'pop%d' % l
lablxdat = getattr(gmod.labltotlpara, namepopl)
scalxdat = getattr(gmod.scalpara, namepopl)
limtxdat = getattr(gmod.limtpara, namepopl)
meanxdat = getattr(gdat.meanpara, name)
if gdat.numbpixl > 1:
listydattype = ['totl', 'sden']
else:
listydattype = ['totl']
for ydattype in listydattype:
## plot the surface density of elements
if ydattype == 'sden':
# plot the surface density of elements only for the amplitude feature
if nameparaderielemodim != gmod.nameparagenrelemampl:
continue
if gdat.sdenunit == 'degr':
lablydat = r'$\Sigma_{%s}$ [deg$^{-2}$]' % gmod.lablelemextn[l]
if gdat.sdenunit == 'ster':
lablydat = r'$\Sigma_{%s}$ [sr$^{-2}$]' % gmod.lablelemextn[l]
## plot the total number of elements
if ydattype == 'totl':
lablydat = r'$N_{%s}$' % gmod.lablelemextn[l]
if ydattype == 'totl' and gdat.rtagmock is not None:
listtypehist = ['hist', 'histcorrreca']
else:
listtypehist = ['hist']
boolhistprio = not booltile
for typehist in listtypehist:
if typehist == 'histcorrreca':
if gmod.numbparaelem == 0 or gdat.priofactdoff == 0.:
continue
if nameparaderielemodim == 'specplot' or nameparaderielemodim == 'spec' or nameparaderielemodim == 'deflprof':
continue
if nameparaderielemodim not in gmod.namepara.genrelem[l]:
continue
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'hist' + nameparaderielemodim + 'pop%d' % l, \
'mean' + nameparaderielemodim, scalydat='logt', lablxdat=lablxdat, \
lablydat=lablydat, histodim=True, ydattype=ydattype, \
scalxdat=scalxdat, meanxdat=meanxdat, limtydat=limtydat, \
limtxdat=limtxdat, boolhistprio=boolhistprio, \
#indxydat=indxydat, strgindxydat=strgindxydat, \
nameinte='histodim/', typehist=typehist)
if not booltile:
if gmod.numbparaelem > 0:
# element parameter correlations
for l in gmod.indxpopl:
if strgmodl != 'true' and gdat.boolinforefr and gdat.boolasscrefr:
for strgfeat in gmod.namepara.derielemodim[l]:
if not (strgfeat == 'flux' or strgfeat == 'mass' or strgfeat == 'deltllik' or strgfeat == 'nobj') and \
(gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
continue
for q in gdat.indxrefr:
if not l in gdat.refrindxpoplassc[q]:
continue
if gdat.refr.numbelem[q] == 0:
continue
if not strgfeat in gdat.refr.namepara.elem[q] or strgfeat in gdat.refr.namepara.elemonly[q][l]:
continue
plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat)
plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat, plotdiff=True)
if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
# plots
for i in gdat.indxener:
for m in gdat.indxevtt:
if gmod.numbpopl > 1:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpdata', i, m, indxpoplplot=l)
## histograms of the number of counts per pixel
limtxdat = [gdat.minmpara.cntpmodl, gdat.maxmpara.cntpmodl]
for nameecom in gmod.listnameecomtotl:
for m in gdat.indxevtt:
for i in gdat.indxener:
# rebuild the name in each iteration so the energy and PSF-class suffixes do not accumulate
name = 'histcntp' + nameecom
if gdat.numbener > 1:
name += 'en%02d' % (i)
if gdat.numbevtt > 1:
name += 'evt%d' % (m)
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \
name, 'meancntpdata', scalydat='logt', scalxdat='logt', lablxdat=gdat.lablcnts, histodim=True, \
lablydat='$N_{pix}$', limtydat=[0.5, gdat.numbener], limtxdat=limtxdat)
## highest amplitude element
# temp
if gmod.numbparaelem > 0:
# completeness and false discovery rate
if strgmodl != 'true' and gdat.boolasscrefr:
for strgclas in ['cmpl', 'fdis']:
nameinte = strgclas + 'odim/'
limtydat = [getattr(gdat, 'minm' + strgclas), getattr(gdat, 'maxm' + strgclas)]
for l in gmod.indxpopl:
for q in gdat.indxrefr:
if not l in gdat.refrindxpoplassc[q]:
continue
if (gdat.refr.numbelem[q] == 0 and strgclas == 'cmpl') or (gmod.numbparaelem == 0 and strgclas == 'fdis'):
continue
if strgclas == 'cmpl':
lablydat = getattr(gmod.lablpara, strgclas + 'pop%dpop%d' % (l, q))
strgindxydat = 'pop%dpop%d' % (l, q)
else:
lablydat = getattr(gmod.lablpara, strgclas + 'pop%dpop%d' % (q, l))
strgindxydat = 'pop%dpop%d' % (q, l)
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'etag':
continue
if strgclas == 'fdis' and not strgfeat in gmod.namepara.derielemodim[l]:
continue
if not strgfeat.startswith('spec') and not strgfeat.startswith('defl') \
and not strgfeat in gdat.refr.namepara.elemonly[q][l] and \
not (gdat.typedata == 'mock' and (strgfeat.endswith('pars') or strgfeat.endswith('nrel'))):
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgclas + strgfeat + strgindxydat, \
'mean' + strgfeat, lablxdat=lablxdat, \
lablydat=lablydat, \
#plottype='errr', \
scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \
omittrue=True, nameinte=nameinte)
if gmod.numbparaelem > 0:
alph = 0.1
if strgmodl == 'true':
pathtemp = gdat.pathinit
else:
if strgstat == 'this':
pathtemp = gdat.pathplotrtag + strgpdfn + '/fram/'
elif strgstat == 'mlik':
pathtemp = gdat.pathplotrtag + strgpdfn + '/finl/'
elif strgstat == 'pdfn':
pathtemp = gdat.pathplotrtag + strgpdfn + '/finl/'
colr = retr_colr(gdat, strgstat, strgmodl, indxpopl=None)
# transdimensional element parameters projected onto the data axes
if not (strgstat == 'pdfn' and not gdat.boolcondcatl):
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lght':
# PS spectra
if strgstat == 'pdfn':
specplot = [np.empty((gdat.numbenerplot, gdat.numbstkscond))]
for r in gdat.indxstkscond:
specplot[0][:, r] = gdat.dictglob['poststkscond'][r]['specplot'][0, :]
listxdat = []
listplottype = []
for k in range(specplot[l].shape[-1]):
listxdat.append(gdat.meanpara.enerplot)
listplottype.append('lghtline')
for specconvunit in gdat.listspecconvunit:
listydat = []
for k in range(specplot[l].shape[-1]):
specplottemp = specplot[l]
if strgmodl == 'true':
specplottemp = np.copy(specplottemp[0, :, k])
else:
specplottemp = np.copy(specplottemp[:, k])
if specconvunit[0] == 'en01':
specplottemp *= gdat.meanpara.enerplot
if specconvunit[0] == 'en02':
specplottemp *= gdat.meanpara.enerplot**2
if specconvunit[0] == 'en03':
# temp
pass
listydat.append(specplottemp)
lablydat = getattr(gmod.lablpara, 'flux' + specconvunit[0] + specconvunit[1] + 'totl')
strgtemp = specconvunit[0] + specconvunit[1]
if specconvunit[0] == 'en03':
strgtemp += specconvunit[2]
path = pathtemp + strgstat + 'specpop%d%s%s.pdf' % (l, strgtemp, strgswep)
limtydat = [np.amin(gdat.minmspec), np.amax(gdat.maxmspec)]
tdpy.plot_gene(path, listxdat, listydat, scalxdat='logt', scalydat='logt', \
lablxdat=gdat.lablenertotl, colr=colr, alph=alph, \
plottype=listplottype, limtxdat=[gdat.minmener, gdat.maxmener], lablydat=lablydat, \
limtydat=limtydat)
if gmod.boollenssubh:
## deflection profiles
if gdat.boolvariasca and gdat.boolvariacut:
lablxdat = gdat.labltotlpara.gang
if strgstat == 'pdfn':
deflprof = [np.empty((gdat.numbanglfull, gdat.numbstkscond))]
asca = [np.empty(gdat.numbstkscond)]
acut = [np.empty(gdat.numbstkscond)]
for r in gdat.indxstkscond:
deflprof[0][:, r] = gdat.dictglob['poststkscond'][r]['deflprof'][0, :]
asca[0][r] = gdat.dictglob['poststkscond'][r]['asca'][0]
acut[0][r] = gdat.dictglob['poststkscond'][r]['acut'][0]
for l in range(len(deflprof)):
xdat = gdat.meanpara.anglfull * gdat.anglfact
listydat = []
listvlinfrst = []
listvlinseco = []
if 'deflprof' in gmod.typeelem[l]:
if strgmodl == 'true':
deflproftemp = deflprof[l][0, :, :]
else:
deflproftemp = deflprof[l]
for k in range(deflprof[l].shape[-1]):
listydat.append(deflproftemp[:, k] * gdat.anglfact)
if strgmodl == 'true':
ascatemp = asca[l][0, k]
acuttemp = acut[l][0, k]
else:
ascatemp = asca[l][k]
acuttemp = acut[l][k]
listvlinfrst.append(ascatemp * gdat.anglfact)
listvlinseco.append(acuttemp * gdat.anglfact)
beinhost = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'paragenrscalfull', strgpdfn, indxvarb=gmod.indxpara.beinhost)
listydat.append(xdat * 0. + gdat.anglfact * beinhost)
path = pathtemp + strgstat + 'deflsubhpop%d%s.pdf' % (l, strgswep)
limtydat = [1e-3, 1.]
limtxdat = [1e-3, 1.]
tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', \
lablxdat=lablxdat, drawdiag=True, limtydat=limtydat, \
limtxdat=limtxdat, colr=colr, alph=alph, lablydat=r'$\alpha$ [$^{\prime\prime}$]', \
listvlinfrst=listvlinfrst, listvlinseco=listvlinseco)
if gdat.typedata == 'mock':
# pulsar masses
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
lablxdat = gdat.labltotlpara.gang
limtydat = [gdat.minmmassshel, gdat.maxmmassshel]
lablydat = gdat.lablmassshel
name = 'massshelpop%d' % l
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
if gmod.boollens:
## radial mass budget
lablxdat = gdat.lablanglfromhosttotl
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
# host mass
for e in gmod.indxsersfgrd:
strgsersfgrd = 'isf%d' % e
limtydat = [gdat.minmmcut, getattr(gdat, 'plotmaxmmasshost' + strgsersfgrd + strgcalcmasssubh + 'bein')]
lablydat = getattr(gmod.lablpara, 'masshost' + strgsersfgrd + strgcalcmasssubh + 'totl')
name = 'masshost%s%s' % (strgsersfgrd, strgcalcmasssubh)
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
if gmod.boolelemdeflsubhanyy:
# subhalo masses
limtydat = [gdat.minmmcut, getattr(gdat, 'plotmaxmmasssubh' + strgcalcmasssubh + 'bein')]
lablydat = getattr(gmod.lablpara, 'masssubh' + strgcalcmasssubh + 'totl')
name = 'masssubh%s' % (strgcalcmasssubh)
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
# subhalo mass fraction
limtydat = [1e-3, 0.1]
lablydat = getattr(gmod.lablpara, 'fracsubh' + strgcalcmasssubh + 'totl')
name = 'fracsubh%s' % (strgcalcmasssubh)
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
alph = 0.1
if gdat.boolmodipsfn and gmod.boolelempsfnanyy:
## PSF radial profile
for i in gdat.indxener:
for m in gdat.indxevtt:
indxydat = [i, slice(None), m]
strgindxydat = 'en%02devt%d' % (i, m)
lablxdat = gdat.labltotlpara.gang
limtydat = np.array([1e-3, 1e3]) * gdat.anglfact**2
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'psfn', \
'binsangl', indxydat=indxydat, strgindxydat=strgindxydat, scalydat='logt', \
lablxdat=lablxdat, lablydat=r'$\mathcal{P}$', limtydat=limtydat)
# internally and externally corrected element parameter histograms
if gdat.typedata == 'inpt' and strgstat == 'pdfn' and gdat.rtagmock is not None:
limtydat = gdat.limtydathistfeat
for l in gmod.indxpopl:
strgindxydat = 'pop%d' % l
for strgfeat in gmod.namepara.derielemodim[l]:
if strgfeat.startswith('aerr') or strgfeat == 'specplot' or strgfeat == 'spec' or strgfeat == 'deflprof':
continue
lablydat = r'$N_{%s}$' % gmod.lablelemextn[l]
for namecorr in ['incr', 'excr']:
nameinte = namecorr + 'odim/'
for qq in gdatmock.indxrefr:
if namecorr == 'excr':
if not strgfeat in gmod.namepara.extrelem[l]:
continue
q = gdat.listnamerefr.index(strgfeat[-4:])
if getattr(gdat, 'crex' + strgfeat + 'pop%dpop%dpop%d' % (q, qq, l)) is None:
continue
name = namecorr + strgfeat + 'pop%dpop%dpop%d' % (q, qq, l)
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'mean' + strgfeat, scalydat='logt', lablxdat=lablxdat, \
lablydat=lablydat, histodim=True, ydattype='totl', \
scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \
nameinte=nameinte)
else:
if strgfeat in gmod.namepara.extrelem[l]:
continue
name = namecorr + strgfeat + 'pop%dpop%d' % (qq, l)
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'mean' + strgfeat, scalydat='logt', lablxdat=lablxdat, \
lablydat=lablydat, histodim=True, ydattype='totl', \
scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \
nameinte=nameinte)
if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
if gmod.numbparaelem > 0:
# element parameter correlations
liststrgelemtdimvarb = getattr(gdat, 'liststrgelemtdimvarb' + strgphas)
for strgelemtdimtype in gdat.liststrgelemtdimtype:
for strgelemtdimvarb in liststrgelemtdimvarb:
if strgelemtdimvarb.startswith('cmpl'):
continue
for l0 in gmod.indxpopl:
for strgfrst in gmod.namepara.genrelem[l0]:
if strgfrst.startswith('spec') or strgfrst == 'specplot' or strgfrst == 'deflprof':
continue
for strgseco in gmod.namepara.genrelem[l0]:
if strgseco.startswith('spec') or strgseco == 'specplot' or strgseco == 'deflprof':
continue
if not checstrgfeat(strgfrst, strgseco):
continue
if strgelemtdimvarb.startswith('hist'):
strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%d' % l0
plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
l0, strgfrst + 'pop%d' % l0, \
strgseco + 'pop%d' % l0, \
strgtotl, strgpdfn=strgpdfn)
else:
if booltile:
continue
if strgfrst.startswith('aerr') or strgseco.startswith('aerr'):
continue
if strgelemtdimvarb.startswith('fdis'):
for q in gdat.indxrefr:
strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%d' % (q, l0)
plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)
elif strgelemtdimvarb.startswith('excr') or strgelemtdimvarb.startswith('incr'):
for qq in gdatmock.indxrefr:
if strgelemtdimvarb.startswith('excr'):
for q in gdat.indxrefr:
if getattr(gdat, 'crex' + strgfrst + strgseco + 'pop%dpop%dpop%d' % (q, qq, l0)) is None:
continue
strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%dpop%d' % (q, qq, l0)
plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)
else:
if strgfrst[-4:] in gdat.listnamerefr and strgseco[-4:] in gdat.listnamerefr:
continue
strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%d' % (qq, l0)
plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)
if not (gdat.typedata == 'mock' and (gmod.numbelemtotl == 0 or gmod.maxmpara.numbelemtotl == 0)):
for q in gdat.indxrefr:
if strgphas == 'init' and gdat.typedata == 'mock':
continue
if booltile:
continue
for l0 in gmod.indxpopl:
for refrstrgfrst in gdat.refr.namepara.elem[q]:
if refrstrgfrst == 'spec' or refrstrgfrst == 'specplot' or refrstrgfrst == 'deflprof' or refrstrgfrst == 'etag':
continue
if refrstrgfrst in gdat.refr.namepara.elemonly[q][l0]:
continue
for refrstrgseco in gdat.refr.namepara.elem[q]:
if refrstrgseco in gdat.refr.namepara.elemonly[q][l0]:
continue
if refrstrgseco == 'spec' or refrstrgseco == 'specplot' or refrstrgseco == 'deflprof' or refrstrgseco == 'etag':
continue
if not checstrgfeat(refrstrgfrst, refrstrgseco):
continue
if refrstrgfrst.startswith('aerr') or refrstrgseco.startswith('aerr') or refrstrgfrst == 'specplot' or refrstrgseco == 'specplot':
continue
strgtotl = 'cmpl' + refrstrgfrst + refrstrgseco + 'pop%dpop%d' % (l0, q)
plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, 'bind', 'cmpl', \
q, refrstrgfrst + 'pop%d' % l0, refrstrgseco + 'pop%d' % l0, strgtotl, strgpdfn=strgpdfn)
if not booltile:
if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
# data and model count scatter
for m in gdat.indxevttplot:
if gdat.numbpixl > 1:
for i in gdat.indxener:
plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, m, indxenerplot=i)
else:
plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, m)
## spatial priors
# temp
if gdat.numbpixl > 1:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
# use a distinct loop variable so the per-feature prior type does not shadow the outer strgpdfn
for strgfeat, strgpdfnmodu in zip(gmod.namepara.genrelemmodu[l], gmod.liststrgpdfnmodu[l]):
if strgpdfnmodu == 'tmplreln':
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'lpdfspatpriointp', booltdim=True)
if strgpdfnmodu == 'tmplgaum':
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'lpdfspatpriointp', booltdim=True)
# model count maps
## backgrounds
if gdat.numbpixl > 1:
for i in gdat.indxener:
for m in gdat.indxevtt:
for c in gmod.indxback:
if gmod.boolbfun:
continue
if not gmod.boolunifback[c]:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpback%04d' % c, i, m, strgcbar='cntpdata')
## count error
if strgmodl != 'true':
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.boolcalcerrr[l]:
for i in gdat.indxener:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntperrr', i, -1, strgcbar='cntpresi')
## diffuse components
for i in gdat.indxener:
for k, name in enumerate(gmod.listnamediff):
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntp%s' % (name), i, strgcbar='cntpdata')
## model count maps
for i in gdat.indxener:
for m in gdat.indxevtt:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpmodl', i, m, strgcbar='cntpdata')
# likelihood
if strgmodl != 'true':
for i in gdat.indxener:
for m in gdat.indxevtt:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'llik', i, m, strgcbar='llikmaps')
if gmod.boollens:
## lensing signal to noise
if strgmodl == 'true':
for i in gdat.indxener:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 's2nr', i, -1)
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magn', booltdim=True)
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'conv', booltdim=True)
for i in gdat.indxener:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplens', i, strgcbar='cntpdata', booltdim=True)
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplensgradmgtd', i, strgcbar='cntpdata', booltdim=True)
if gdat.penalpridiff:
for i in gdat.indxener:
for m in gdat.indxevtt:
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \
'psecodimdatapntsen%02devt%d' % (i, m), 'meanmpolodim', lablxdat='$l$', lablydat='$P_{resi}(l)$', \
limtydat=[1e-2, 2.], scalxdat='logt', scalydat='logt')
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'psecodimdatapntsprioen%02devt%d' % (i, m), 'meanmpolodim', lablxdat='$l$', \
lablydat='$P_{prio}(l)$', limtydat=[1e-2, 2.], scalxdat='logt', scalydat='logt')
if gmod.boollens:
indxydat = [slice(None)]
strgindxydat = ''
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convpsecodim', 'meanwvecodim', lablxdat='$k$ [1/kpc]', lablydat='$P(k)$', limtydat=[1e-1, 1e2], \
scalxdat='logt', scalydat='logt', indxydat=indxydat, strgindxydat=strgindxydat)
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'histdefl', 'meandefl', \
scal='self', lablxdat=r'$\alpha$ [arcsec]', lablydat=r'$N_{pix}$', \
strgindxydat=strgindxydat, indxydat=indxydat, histodim=True)
if gmod.numbparaelem > 0 and gmod.boolelemdeflsubhanyy:
indxydat = [slice(None)]
strgindxydat = ''
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convpsecelemodim', 'meanwvecodim', lablxdat='$k$ [1/kpc]', lablydat='$P_{sub}(k)$', \
strgindxydat=strgindxydat, indxydat=indxydat, limtydat=[1e-5, 1e-1], scalxdat='logt', scalydat='logt')
plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'histdeflsubh', 'meandeflsubh', scal='self', lablxdat=r'$\alpha$ [arcsec]', \
strgindxydat=strgindxydat, indxydat=indxydat, lablydat=r'$N_{pix}$', histodim=True)
if gmod.boollens:
for i in gdat.indxener:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrd', i, -1, strgcbar='cntpdata')
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrdgalx', i, -1, strgcbar='cntpdata')
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrdexts', i, -1, strgcbar='cntpdata')
# gradient of the lens emission
for i in gdat.indxener:
for m in gdat.indxevtt:
plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplensgrad', indxenerplot=i, indxevttplot=m)
if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
if gmod.boollens:
# overall deflection field
plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, multfact=0.1)
# deflection field due to individual lenses
# numbdeflsingplot was undefined in this scope; the gdat attribute is an assumption
for k in range(gdat.numbdeflsingplot):
if k == 0:
multfact = 0.1
elif k == 1:
multfact = 1.
elif k >= 2:
multfact = 10.
plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, indxdefl=k, multfact=multfact)
# residual deflection field
if strgmodl == 'fitt' and gdat.typedata == 'mock':
plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, nameparagenrelem='resi', multfact=100.)
if strgstat != 'pdfn':
# numbsingcomm was undefined in this scope; the gdat attribute is an assumption
for k in range(gdat.numbsingcomm):
plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, nameparagenrelem='resi', indxdefl=k, multfact=100.)
if gdat.numbpixl > 1:
if gmod.numbparaelem > 0:
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelemresi', booltdim=True)
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelemresiperc', booltdim=True)
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magnresi', booltdim=True)
plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magnresiperc', booltdim=True)
def dele_rtag(rtag):
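'''Delete the output data and image directories associated with a run tag.'''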
pathdata = pathpcat + '/data/outp/'
pathimag = pathpcat + '/imag/'
cmnd = 'rm -rf %s%s' % (pathdata, rtag)
print(cmnd)
os.system(cmnd)
cmnd = 'rm -rf %s%s' % (pathimag, rtag)
os.system(cmnd)
print(cmnd)
def plot_infopvks(gdat, gdatprio, name, namefull, nameseco=None):
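'''Plot the information gain and KS-test p-values between the prior and posterior of a variable, in one or two dimensions.'''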
# gmod is referenced below but never defined in this scope; the fitting model is assumed
gmod = gdat.fitt
pvks = getattr(gdat, 'pvks' + namefull)
info = getattr(gdat, 'info' + namefull)
path = gdat.pathinfo + 'info' + namefull
if nameseco is not None:
indxpoplfrst = int(namefull[-1])
# the bin centers, scalings, and limits of the two features were never retrieved here; the attribute names below are assumptions
varbfrst = getattr(gdat, 'mean' + name)
varbseco = getattr(gdat, 'mean' + nameseco)
scalfrst = getattr(gdat, 'scal' + name)
scalseco = getattr(gdat, 'scal' + nameseco)
limtfrst = getattr(gdat, 'limt' + name)
limtseco = getattr(gdat, 'limt' + nameseco)
# information gain
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.pcolor(varbfrst, varbseco, info, cmap='Greys')
plt.colorbar(imag)
plot_sigmcont(gdat.fitt, '', axis, name, indxpoplfrst, strgseco=nameseco)
if scalfrst == 'logt':
axis.set_xscale('log')
if scalseco == 'logt':
axis.set_yscale('log')
axis.set_xlabel(getattr(gdat.labltotlpara, name))
axis.set_ylabel(getattr(gdat.labltotlpara, nameseco))
axis.set_xlim(limtfrst)
axis.set_ylim(limtseco)
plt.tight_layout()
plt.savefig(path)
plt.close(figr)
# KS test p value
pathpvkstdim = gdat.pathinfo + 'pvks' + namefull
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.pcolor(varbfrst, varbseco, pvks, cmap='Greys')
plt.colorbar(imag)
plot_sigmcont(gdat.fitt, '', axis, name, indxpoplfrst, strgseco=nameseco)
if scalfrst == 'logt':
axis.set_xscale('log')
if scalseco == 'logt':
axis.set_yscale('log')
axis.set_xlabel(getattr(gdat.labltotlpara, name))
axis.set_ylabel(getattr(gdat.labltotlpara, nameseco))
axis.set_xlim(limtfrst)
axis.set_ylim(limtseco)
plt.tight_layout()
plt.savefig(pathpvkstdim)
plt.close(figr)
elif name != namefull:
lablydat = '$D_{KL}$'
lablxdat = getattr(gmod.lablpara, name + 'totl')
xdat = getattr(gdat, 'mean' + name)
ydat = getattr(gdat, 'info' + namefull)
tdpy.mcmc.plot_plot(path, xdat, ydat, lablxdat, lablydat, scal)
ydat = getattr(gdat, 'pvks' + namefull)
pathpvks = gdat.pathinfo + 'pvks' + namefull
tdpy.mcmc.plot_plot(pathpvks, xdat, ydat, lablxdat, '$p_{KS}$', scal)
else:
# horizontal axis
xdat = getattr(gdat, 'mean' + name)
lablxdat = getattr(gmod.lablpara, name + 'totl')
# scaling
scal = getattr(gdat, 'scal' + name)
# common title
titl = '$D_{KL} = %.3g$, KS = %.3g $\sigma$' % (info, pvks)
# DKL density
pathdinf = gdat.pathinfo + 'dinf' + namefull
ydat = getattr(gdat, 'infodens' + namefull)
lablydat = r'$\rho_{D_{KL}}$'
tdpy.mcmc.plot_plot(pathdinf, xdat, ydat, lablxdat, lablydat, scal, titl=titl)
# prior and posterior PDFs
pathpdfn = gdat.pathinfo + 'pdfn' + namefull
lablydat = r'$P$'
ydat = [getattr(gdat, 'pdfnpost' + namefull), getattr(gdatprio, 'pdfnprio' + namefull)]
legd = ['$P$(%s|$D$)' % lablxdat, '$P$(%s)' % lablxdat]
tdpy.mcmc.plot_plot(pathpdfn, xdat, ydat, lablxdat, lablydat, scal, colr=['k', 'k'], linestyl=['-', '--'], legd=legd, titl=titl)
def plot_finl(gdat=None, gdatprio=None, rtag=None, strgpdfn='post', gdatmock=None, booltile=None):
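'''Produce the final post-processing plots: log-acceptance diagnostics, prior-posterior comparisons, Gelman-Rubin statistics, autocorrelations, and proposal efficiencies.'''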
if gdat.typeverb > 0:
print('plot_finl()')
print('Producing postprocessing plots...')
timetotlinit = gdat.functime()
gdat.strgbest = 'ML'
# gmod is referenced below but never defined in this scope; the fitting model is assumed
gmod = gdat.fitt
if not booltile:
# terms in the log-acceptance probability
listindxsamptotlproptotl = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlproptotl')
listindxsamptotlpropaccp = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlpropaccp')
listindxsamptotlpropreje = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlpropreje')
for n in gdat.indxproptype:
pathbase = getattr(gdat, 'path' + strgpdfn + 'finl%s' % gdat.nameproptype[n])
for k in gdat.indxtermlacp:
varb = getattr(gdat, 'list' + strgpdfn + gdat.listnametermlacp[k])
labl = gdat.listlabltermlacp[k]
if listindxsamptotlproptotl[n].size > 0 and (varb[listindxsamptotlproptotl[n]] != 0.).any():
path = pathbase + gdat.listnametermlacp[k] + 'totl'
tdpy.mcmc.plot_trac(path, varb[listindxsamptotlproptotl[n]], labl, titl=gdat.nameproptype[n] + ', Total')
if listindxsamptotlpropaccp[n].size > 0 and (varb[listindxsamptotlpropaccp[n]] != 0.).any():
path = pathbase + gdat.listnametermlacp[k] + 'accp'
tdpy.mcmc.plot_trac(path, varb[listindxsamptotlpropaccp[n]], labl, titl=gdat.nameproptype[n] + ', Accepted')
if listindxsamptotlpropreje[n].size > 0 and (varb[listindxsamptotlpropreje[n]] != 0.).any():
path = pathbase + gdat.listnametermlacp[k] + 'reje'
tdpy.mcmc.plot_trac(path, varb[listindxsamptotlpropreje[n]], labl, titl=gdat.nameproptype[n] + ', Rejected')
if gdat.checprio and strgpdfn == 'post' and not booltile:
# this works only for scalar variables -- needs to be generalized to all variables
if gdatprio is None:
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
path = pathoutprtag + 'gdatfinlprio'
gdatprio = readfile(path)
for namevarbscal in gmod.namepara.scal:
plot_infopvks(gdat, gdatprio, namevarbscal, namevarbscal)
for l in gmod.indxpopl:
for strgfeatfrst in gmod.namepara.genrelem[l]:
if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
continue
plot_infopvks(gdat, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l)
for strgfeatseco in gmod.namepara.genrelem[l]:
if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
continue
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
plot_infopvks(gdat, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l, nameseco=strgfeatseco)
listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')
listparagenrscalbase = getattr(gdat, 'list' + strgpdfn + 'paragenrscalbase')
listboolpropfilt = getattr(gdat, 'list' + strgpdfn + 'boolpropfilt')
listmemoresi = getattr(gdat, 'list' + strgpdfn + 'memoresi')
listindxproptype = getattr(gdat, 'list' + strgpdfn + 'indxproptype')
listsampproc = getattr(gdat, 'list' + strgpdfn + 'sampproc')
# Gelman-Rubin test
pathdiag = getattr(gdat, 'path' + strgpdfn + 'finldiag')
if gdat.numbproc > 1:
if np.isfinite(gdat.gmrbstat).all():
if gdat.typeverb > 0:
print('Gelman-Rubin TS...')
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
minm = min(np.amin(gdat.gmrbstat), np.amin(gdat.gmrbparagenrscalbase))
maxm = max(np.amax(gdat.gmrbstat), np.amax(gdat.gmrbparagenrscalbase))
bins = np.linspace(minm, maxm, 40)
axis.hist(gdat.gmrbstat.flatten(), bins=bins, label='Data proj.')
axis.hist(gdat.gmrbparagenrscalbase, bins=bins, label='Fixed dim.')
axis.set_xlabel('PSRF')
axis.set_ylabel('$N_{stat}$')
plt.tight_layout()
figr.savefig(pathdiag + 'gmrbhist.pdf')
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.plot(gmod.indxparagenrbase, gdat.gmrbparagenrscalbase)
axis.set_xticklabels(gmod.labltotlpara.genrbase)
axis.set_ylabel('PSRF')
plt.tight_layout()
figr.savefig(pathdiag + 'gmrbparagenrscalbase.pdf')
plt.close(figr)
for i in gdat.indxener:
for m in gdat.indxevtt:
maps = gdat.gmrbstat[i, :, m]
path = pathdiag + 'gmrbdataen%02devt%d.pdf' % (i, m)
tdpy.plot_maps(path, maps, indxpixlrofi=gdat.indxpixlrofi, numbpixl=gdat.numbpixlfull, typepixl=gdat.typepixl, \
minmlgal=gdat.anglfact*gdat.minmlgal, maxmlgal=gdat.anglfact*gdat.maxmlgal, \
minmbgal=gdat.anglfact*gdat.minmbgal, maxmbgal=gdat.anglfact*gdat.maxmbgal)
else:
print('Inappropriate Gelman-Rubin test statistics encountered.')
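# For reference, the PSRF plotted above is the Gelman-Rubin statistic; a minimal
# sketch for a chain array chan of shape (numbproc, numbsamp) would be
#     wvar = np.mean(np.var(chan, axis=1, ddof=1))
#     bvar = chan.shape[1] * np.var(np.mean(chan, axis=1), ddof=1)
#     vest = (1. - 1. / chan.shape[1]) * wvar + bvar / chan.shape[1]
#     psrf = np.sqrt(vest / wvar)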
# plot autocorrelation
if gdat.typeverb > 0:
print('Autocorrelation...')
tdpy.mcmc.plot_atcr(pathdiag, gdat.atcrcntp[0, 0, 0, 0, :], gdat.timeatcrcntp[0, 0, 0, 0], strgextn='cntp')
tdpy.mcmc.plot_atcr(pathdiag, gdat.atcrpara[0, 0, :], gdat.timeatcrpara[0, 0], strgextn='para')
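# The autocorrelation times reported below are assumed to be integrated
# estimates; for a normalized autocorrelation function atcr over lags,
#     timeatcr = 1. + 2. * np.sum(atcr[1:])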
print('Autocorrelation times:')
# assumed to iterate over the base parameter names, matching the ordering of gdat.timeatcrpara
for k, namepara in enumerate(gmod.namepara.genrbase):
print('%s %g' % (namepara, np.mean(gdat.timeatcrpara[:, k])))
# plot proposal efficiency
if gdat.typeverb > 0:
print('Acceptance ratio...')
numbtimemcmc = 20
binstimemcmc = np.linspace(0., gdat.numbswep, numbtimemcmc)
numbtick = 2
sizefigrydat = 4. * gdat.numbproptype
figr, axgr = plt.subplots(gdat.numbproptype, 1, figsize=(12., sizefigrydat), sharex='all')
if gdat.numbproptype == 1:
axgr = [axgr]
for n, axis in enumerate(axgr):
histtotl = axis.hist(listindxsamptotlproptotl[n], bins=binstimemcmc)[0]
histaccp = axis.hist(listindxsamptotlpropaccp[n], bins=binstimemcmc)[0]
axis.set_ylabel('%s' % gdat.nameproptype[n])
if n == gdat.numbproptype - 1:
axis.set_xlabel('$i_{samp}$')
plt.tight_layout()
figr.savefig(pathdiag + 'accpratiproptype.pdf')
plt.close(figr)
if gdat.typeverb > 0:
print('Proposal execution times...')
## time performance
#listchro = np.empty((gdat.numbswep, gdat.numbchro))
#listchro = []
#for k, name in enumerate(gdat.listnamechro):
# #listchro[:, k] = getattr(gdat, 'list' + strgpdfn + 'chro' + name).flatten() * 1e3
# listchro.append(getattr(gdat, 'list' + strgpdfn + 'chro' + name).flatten() * 1e3)
#pathdiag = getattr(gdat, 'path' + strgpdfn + 'finldiag')
#figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))
#axis.violin(listchro)
#axis.set_yscale('log')
#axis.set_ylabel('$t$ [ms]')
#axis.set_xticklabels(gdat.listlablchro)
#axis.axvline(mean(chro), ls='--', alpha=0.2, color='black')
#figr.savefig(pathdiag + 'chro.pdf' % gdat.listnamechro[k])
#plt.close(figr)
# temp
gdat.lablpmea = 'Mean'
# posterior versions of the frame plots
plot_samp(gdat, None, 'pdfn', 'fitt', 'finl', strgpdfn=strgpdfn, gdatmock=gdatmock, booltile=booltile)
if booltile:
return
if gmod.numbparaelem > 0:
if gdat.typeverb > 0:
print('A mosaic of samples...')
## mosaic of images of posterior catalogs
if gdat.numbpixl > 1:
plot_mosa(gdat, strgpdfn)
## randomly selected trandimensional parameters
if gmod.numbparaelem > 0:
if gdat.typeverb > 0:
print('Transdimensional parameters...')
# choose the parameters based on persistence
stdvlistsamptran = np.std(listparagenrscalfull[:, gmod.indxsamptrap], axis=0)
indxtrapgood = np.where(stdvlistsamptran > 0.)[0]
gmod.numbparaelemgood = indxtrapgood.size
gmod.numbparaelemplot = min(3, gmod.numbparaelemgood)
if gmod.numbparaelemplot > 0:
indxtrapplot = np.sort(np.random.choice(gmod.indxsamptrap[indxtrapgood], size=gmod.numbparaelemplot, replace=False))
path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')
tdpy.mcmc.plot_grid(path, 'listelemfrst', listparagenrscalfull[:, gmod.indxsamptrap[:3]], [gmod.lablpara[k] for k in gmod.indxsamptrap[:3]])
tdpy.mcmc.plot_grid(path, 'listsamp', listparagenrscalfull[:, indxtrapplot], ['%d' % k for k in indxtrapplot])
tdpy.mcmc.plot_grid(path, 'listsamp', listparagenrscalfull[:, indxtrapplot], [gmod.lablpara[k] for k in indxtrapplot])
if gdat.typeverb > 0:
print('Scalar variables...')
# scalar variables
## trace and marginal distribution of each parameter
for name in gmod.namepara.scal:
if gdat.typeverb > 0:
print('Working on %s...' % name)
scal = getattr(gdat, 'scal' + name)
corr = getattr(gdat, 'corr' + name)
if corr is None:
truepara = None
else:
truepara = getattr(gdat, 'corr' + name)
listvarb = getattr(gdat, 'list' + strgpdfn + name)
if listvarb.ndim != 1:
if listvarb.shape[1] == 1:
listvarb = listvarb[:, 0]
else:
raise Exception('')
mlik = getattr(gdat, 'mlik' + name)
# total axis label for the parameter (assumed stored on gmod.labltotlpara, as elsewhere in this module)
labltotl = getattr(gmod.labltotlpara, name)
path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscaltrac') + name
tdpy.mcmc.plot_trac(path, listvarb, labltotl, truepara=truepara, scalpara=scal, listvarbdraw=[mlik], listlabldraw=[''], listcolrdraw=['r'])
path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalhist') + name
tdpy.mcmc.plot_hist(path, listvarb, labltotl, truepara=truepara, scalpara=scal, listvarbdraw=[mlik], listlabldraw=[''], listcolrdraw=['r'])
for nameseco in gmod.namepara.scal:
if name == nameseco:
continue
if gdat.typeverb > 0:
print('Working on correlation of %s with %s...' % (name, nameseco))
pathjoin = getattr(gdat, 'path' + strgpdfn + 'finlvarbscaljoin')
# retrieve the second parameter's chain and metadata, mirroring the outer loop
scalseco = getattr(gdat, 'scal' + nameseco)
corrseco = getattr(gdat, 'corr' + nameseco)
listvarbseco = getattr(gdat, 'list' + strgpdfn + nameseco)
mlikseco = getattr(gdat, 'mlik' + nameseco)
labltotlseco = getattr(gmod.labltotlpara, nameseco)
if corrseco is None:
trueparaseco = None
else:
trueparaseco = getattr(gdat, 'corr' + nameseco)
if listvarbseco.ndim != 1:
if listvarbseco.shape[1] == 1:
listvarbseco = listvarbseco[:, 0]
else:
raise Exception('')
listjoin = np.vstack((listvarb, listvarbseco)).T
tdpy.mcmc.plot_grid(pathjoin, name + nameseco, listjoin, [labltotl, labltotlseco], scalpara=[scal, scalseco], truepara=[truepara, trueparaseco], \
join=True, listvarbdraw=[np.array([mlik, mlikseco])])
if gdat.typeverb > 0:
print('Fixed dimensional parameter covariance...')
### covariance
## overall
path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')
truepara = gmod.corrparagenrscalbase
mlikpara = gdat.mlikparagenrscalbase
tdpy.mcmc.plot_grid(path, 'paragenrscalbase', listparagenrscalbase, gmod.labltotlpara.genrbasetotl, truepara=truepara, listvarbdraw=[mlikpara])
# stacked posteriors binned in position and flux
if gmod.numbparaelem > 0 and gdat.numbpixl > 1:
liststrgbins = ['quad', 'full']
for l in gmod.indxpopl:
plot_histlgalbgalelemstkd(gdat, strgpdfn, l, 'cumu')
for strgbins in liststrgbins:
plot_histlgalbgalelemstkd(gdat, strgpdfn, l, strgbins, gmod.namepara.elemsign[l])
if gdat.typeverb > 0:
print('Prior and likelihood...')
for strgpdfntemp in ['lpritotl', 'lliktotl']:
if strgpdfntemp == 'lpritotl':
labltemp = r'\ln P(M)'
if strgpdfntemp == 'lliktotl':
labltemp = r'\ln P(D|M)'
labl = r'$%s$' % labltemp
path = getattr(gdat, 'path' + strgpdfn + 'finl') + strgpdfntemp
varb = getattr(gdat, 'list' + strgpdfn + strgpdfntemp)
tdpy.mcmc.plot_hist(path, varb, labl)
listvarbdraw = []
listlabldraw = []
listcolrdraw = []
if gdat.typedata == 'mock':
listvarbdraw += [getattr(gdat.true, strgpdfntemp)]
listlabldraw += ['True model']
listcolrdraw += [gdat.refr.colr]
tdpy.mcmc.plot_trac(path, getattr(gdat, 'list' + strgpdfn + strgpdfntemp), labl, \
listvarbdraw=listvarbdraw, listlabldraw=listlabldraw, listcolrdraw=listcolrdraw)
# plot resident memory
figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))
axis.plot(gdat.indxswep, np.mean(listmemoresi, 1) / float(2**30))
axis.set_ylabel(r'$M$ [GB]')
axis.set_xlabel(r'$i_{samp}$')
plt.tight_layout()
figr.savefig(pathdiag + 'memoresi.pdf')
plt.close(figr)
timetotlfinl = gdat.functime()
if gdat.typeverb > 0:
print('Plots and animations are produced in %.3g seconds.' % (timetotlfinl - timetotlinit))
def plot_sbrt(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, specconvunit):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
for b, namespatmean in enumerate(gdat.listnamespatmean):
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
# plot reference spectra
if gdat.listprefsbrtlabltotl is not None:
for k in range(len(gdat.listprefsbrtlabltotl)):
if gdat.listprefsbrttype[k] == 'shad':
factenerrefr = [[] for a in range(3)]
for a in range(3):
factenerrefr[a] = retr_factener(specconvunit[0], gdat.listprefsbrtener[k][a])
axis.plot(gdat.listprefsbrtener[k][0], gdat.listprefsbrtsbrt[k][0] * factenerrefr[0], color='m', label=gdat.listprefsbrtlabltotl[k])
enerpoly = np.empty(gdat.listprefsbrtener[k][1].size + gdat.listprefsbrtener[k][2].size)
enerpoly[:gdat.listprefsbrtener[k][1].size] = gdat.listprefsbrtener[k][1]
enerpoly[gdat.listprefsbrtener[k][1].size:] = gdat.listprefsbrtener[k][2][::-1]
sbrtpoly = np.empty(gdat.listprefsbrtener[k][1].size + gdat.listprefsbrtener[k][2].size)
sbrtpoly[:gdat.listprefsbrtener[k][1].size] = gdat.listprefsbrtsbrt[k][1] * factenerrefr[1]
sbrtpoly[gdat.listprefsbrtener[k][1].size:] = gdat.listprefsbrtsbrt[k][2][::-1] * factenerrefr[2][::-1]
axis.fill(enerpoly, sbrtpoly, color='m', alpha=0.5)
else:
factenerrefr = retr_factener(specconvunit[0], gdat.listprefsbrtener[k][1])
axis.errorbar(gdat.listprefsbrtener[k][1], gdat.listprefsbrtsbrt[k][1] * factenerrefr, label=gdat.listprefsbrtlabltotl[k], color='m')
if strgmodl == 'true':
liststrgmodl = [strgmodl]
listgdatobjt = [gdat]
if strgmodl == 'fitt' and (strgstat == 'this' or strgstat == 'pdfn'):
if gdat.typedata == 'mock':
liststrgmodl = [strgmodl, 'true']
listgdatobjt = [gdatobjt, gdat]
else:
liststrgmodl = [strgmodl]
listgdatobjt = [gdatobjt]
numbstrgstattemp = len(liststrgmodl)
for a in range(numbstrgstattemp):
indxploteleminit = []
indxplotelemendd = []
# number of transdimensional elements to be overplotted
numbelemtemp = 0
if gdat.numbpixl == 1 and strgstat != 'pdfn':
if liststrgmodl[a] == 'fitt':
numbelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
numbelemtemp += np.sum(gmodstat.numbelem[l])
else:
for q in gdat.indxrefr:
numbelemtemp += np.sum(gdat.refr.numbelem[q])
numbplot = numblablsbrtspec + numbelemtemp
listydat = np.zeros((numbplot, gdat.numbener))
listyerr = np.zeros((2, numbplot, gdat.numbener))
cntr = 0
cntrdata = cntr
## data
listydat[cntr, :] = gdat.sbrtdatamean[b]
listyerr[:, cntr, :] = gdat.sbrtdatastdv[b]
cntr += 1
for c in gmod.indxback:
listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtback%04dmea%d' % (c, b), strgpdfn)
if strgstat == 'pdfn':
listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtback%04dmea%d' % (c, b), strgpdfn, strgmome='errr')
cntr += 1
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy and not (liststrgmodl[a] == 'true' and gdat.refr.numbelemtotl == 0):
listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncmea%d' % (b), strgpdfn)
if strgstat == 'pdfn':
listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncmea%d' % (b), strgpdfn, strgmome='errr')
cntr += 1
listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncsubtmea%d' % (b), strgpdfn)
if strgstat == 'pdfn':
listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncsubtmea%d' % (b), strgpdfn, strgmome='errr')
cntr += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrthostisf%dmea%d' % (e, b), strgpdfn)
if strgstat == 'pdfn':
listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], \
'sbrthostisf%dmea%d' % (e, b), strgpdfn, strgmome='errr')
cntr += 1
if gmod.boollens:
listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtlensmea%d' % (b), strgpdfn)
if strgstat == 'pdfn':
listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtlensmea%d' % (b), strgpdfn, strgmome='errr')
cntr += 1
if gdat.numbpixl == 1 and strgstat != 'pdfn':
cntrline = cntr
indxploteleminit.append(cntr)
for l in gmod.indxpopl:
if liststrgmodl[a] == 'true':
for k in range(gmod.numbelem[l]):
listydat[cntr, :] = getattr(listgdatobjt[a], liststrgmodl[a] + 'spec')[l][0, :, k]
if cntr == cntrline:
listlablsbrtspec = listlablsbrtspec[:cntr] + ['Lines'] + listlablsbrtspec[cntr:]
else:
listlablsbrtspec = listlablsbrtspec[:cntr] + [None] + listlablsbrtspec[cntr:]
cntr += 1
if k == gmod.numbelem[l] - 1:
indxplotelemendd.append(k)
else:
for k in range(gmodstat.numbelem[l]):
listydat[cntr, :] = getattr(listgdatobjt[a], strgstat + 'spec')[l][:, k]
if cntr == cntrline:
listlablsbrtspec = listlablsbrtspec[:cntr] + ['Lines'] + listlablsbrtspec[cntr:]
else:
listlablsbrtspec = listlablsbrtspec[:cntr] + [None] + listlablsbrtspec[cntr:]
cntr += 1
if k == gmodstat.numbelem[l] - 1:
indxplotelemendd.append(k)
## total model
if numblablsbrt > 1:
listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtmodlmea%d' % (b), strgpdfn)
if strgstat == 'pdfn':
listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtmodlmea%d' % (b), strgpdfn, strgmome='errr')
cntr += 1
if liststrgmodl[a] == 'true':
listyerr = np.zeros((2, numbplot, gdat.numbener))
# plot energy spectra of the data, background model components and total background
if gdat.numbener > 1:
listmrkr = ['o', '>', 's', 'h', '*', 'p', 'x']
for k in range(100):
listmrkr.append('x')
# determine the energy scaling factor
if specconvunit[0] == 'en00':
factener = 1.
elif specconvunit[0] == 'en01':
factener = gdat.meanpara.ener
elif specconvunit[0] == 'en02':
factener = gdat.meanpara.ener**2
elif specconvunit[0] == 'en03':
# temp -- conversion to integrated flux is not implemented yet
factener = 1.
#indxenerintv = np.where((gdat.meanpara.ener < specconvunit[4]) & (gdat.meanpara.ener > specconvunit[3]))[0]
#ener = np.concatenate((np.array([specconvunit[3]]), gdat.meanpara.ener[indxenerintv], np.array([specconvunit[4]])))
#
#for k in range(3):
# if k == 0:
# ydattemp =
# ydatminmener = np.interp(specconvunit[3], gdat.meanpara.ener, ydat)
# ydatmaxmener = np.interp(specconvunit[4], gdat.meanpara.ener, ydat)
# ydat = np.concatenate((np.array([ydatminmener]), ydat[indxenerintv], np.array([ydatmaxmener])))
# ydat = np.trapz(ydat, gdat.meanpara.ener)
#
#yerrminmener = np.interp(specconvunit[3], gdat.meanpara.ener, yerr, axis=1)
#yerrmaxmener = np.interp(specconvunit[4], gdat.meanpara.ener, yerr, axis=1)
#ydat = np.stack((np.array([yerrminmener]), ydat[indxenerintv], np.array([yerrmaxmener])))
#
#
#yerr = np.trapz(yerr, gdat.meanpara.ener)
xdat = gdat.meanpara.ener
cntr = 0
for k in range(listydat.shape[0]):
mrkr = listmrkr[cntr]
if k == cntrdata:
colr = 'black'
alph = 1.
linestyl = '-'
else:
colr = retr_colr(gdat, strgstat, liststrgmodl[a], indxpopl=None)
linestyl = '--'
alph = 0.5
ydat = np.copy(listydat[k, :])
yerr = np.copy(listyerr[:, k, :])
ydat *= factener
yerr *= factener
if k == cntrdata and a > 0:
continue
if liststrgmodl[a] == 'fitt':
labl = listlablsbrtspec[k]
else:
labl = None
temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, color=colr, marker=mrkr, ls=linestyl, markersize=10, alpha=alph, label=labl)
for caps in listcaps:
caps.set_markeredgewidth(1)
if gdat.numbpixl == 1 and strgstat != 'pdfn':
if cntr != cntrline or k in indxplotelemendd:
cntr += 1
else:
cntr += 1
if gdat.numbener > 1:
axis.set_xlim([np.amin(gdat.binspara.ener), np.amax(gdat.binspara.ener)])
if gdat.typeexpr == 'chan':
factminm = 1e-1
factmaxm = 1e2
elif gdat.typeexpr == 'ferm':
factminm = 1e1
factmaxm = 1e-1
else:
factminm = 1e-4
factmaxm = 1e0
minmydat = factminm * gdat.factylimtbrt[0] * np.amax(listydat[cntrdata, :] * factener)
maxmydat = factmaxm * gdat.factylimtbrt[1] * np.amax(listydat[cntrdata, :] * factener)
limtydat = [minmydat, maxmydat]
axis.set_ylim(limtydat)
axis.set_yscale('log')
axis.set_xlabel(gdat.lablenertotl)
axis.set_xscale('log')
labl = getattr(gmod.lablpara, 'sbrt' + specconvunit[0] + specconvunit[1] + 'stertotl')
axis.set_ylabel(labl)
make_legd(axis, numbcols=2)
plt.tight_layout()
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'sdenmean%s%s%s' % (namespatmean, specconvunit[0], specconvunit[1]))
figr.savefig(path)
plt.close(figr)
def retr_factener(strgconvunit, ener):
if strgconvunit == 'en00':
factener = np.ones_like(ener)
elif strgconvunit == 'en01':
factener = ener
elif strgconvunit == 'en02':
factener = ener**2
elif strgconvunit == 'en03':
# temp -- conversion to integrated flux is not implemented yet
factener = np.ones_like(ener)
return factener
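# Example usage (a sketch; spec is a hypothetical spectrum array):
#     ener = np.logspace(-1., 1., 10)
#     sedn = spec * retr_factener('en02', ener)
# weights a photon spectrum by E^2 to obtain an SED-like representation.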
def plot_pdfntotlflux():
minm = 1e-9
maxm = 10e-9
numbvarb = 90
numbparagenrfull = 100000
numbbins = 40
alph = 0.5
binssing = np.linspace(minm, maxm, numbvarb + 1)
meansing = (binssing[:-1] + binssing[1:]) / 2.
deltsing = binssing[1:] - binssing[:-1]
binsdoub = np.linspace(2. * minm, 2. * maxm, 2 * numbvarb)
meandoub = (binsdoub[:-1] + binsdoub[1:]) / 2.
deltdoub = binsdoub[1:] - binsdoub[:-1]
bins = np.linspace(minm, 2. * maxm, 2 * numbvarb + 1)
arry = np.empty((2, numbparagenrfull))
minmslop = 1.5
maxmslop = 3.
numbslop = 4
sloparry = np.linspace(minmslop, maxmslop, numbslop)
for n in range(numbslop):
slop = sloparry[n]
for k in range(2):
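# inverse-transform sampling of a power law with index -slop on [minm, maxm]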
arry[k, :] = (np.random.rand(numbparagenrfull) * (maxm**(1. - slop) - minm**(1. - slop)) + minm**(1. - slop))**(1. / (1. - slop))
totl = np.sum(arry, 0)
powrprob = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop)) * meansing**(-slop)
convprob = np.convolve(powrprob, powrprob) * deltdoub[0]
indxdoub = np.where(meandoub <= maxm)[0]
convprobpoly = np.polyval(np.polyfit(meandoub[indxdoub], convprob[indxdoub], 8), meandoub[indxdoub])
figr, axis = plt.subplots()
axis.hist(arry[k, :], bins=bins, alpha=alph, label='$f_1$ (Sampled)', color='b')
axis.hist(totl, bins=bins, alpha=alph, label='$f_0$ (Sampled)', color='g')
axis.plot(meansing, powrprob * numbparagenrfull * deltsing, label='$f_1$ (Analytic)', color='b')
axis.plot(meandoub, convprob * numbparagenrfull * deltdoub[0], label='$f_0$ (Numerically convolved)', color='g')
axis.plot(meandoub[indxdoub], convprobpoly * numbparagenrfull * deltdoub[indxdoub], label='$f_0$ (Fit)', color='r')
axis.set_ylim([0.5, numbparagenrfull])
axis.set_xlabel('$f$')
axis.set_xlim([np.amin(bins), np.amax(bins)])
axis.set_xscale('log')
axis.set_yscale('log')
axis.set_ylabel('$N_{samp}$')
make_legd(axis)
plt.tight_layout()
pathfold = os.environ["TDGU_DATA_PATH"] + '/imag/powrpdfn/'
figr.savefig(pathfold + 'powrpdfn%04d.pdf' % n)
plt.close(figr)
def savefigr(gdat, gdatmodi, figr, path):
#if gdatmodi is not None and gdat.numbproc > 1:
# gdatmodi.lock.acquire()
# print 'Process %d acquiring the lock...' % gdatmodi.indxprocwork
plt.savefig(path)
#if gdatmodi is not None and gdat.numbproc > 1:
# gdatmodi.lock.release()
# print 'Process %d releasing the lock...' % gdatmodi.indxprocwork
def plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, indxpoplfrst, strgfrst, \
strgseco, strgtotl, strgmome='pmea', strgpdfn='post'):
gmod = getattr(gdat, strgmodl)
sizelarg = 10
sizesmll = 1
if strgstat == 'pdfn':
lablmome = getattr(gdat, 'labl' + strgmome)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
if strgmodl == 'fitt':
colrtemp = gmod.colrelem[indxpoplfrst]
if strgstat == 'pdfn':
labl = gdat.lablsampdist + ' ' + lablmome
if strgelemtdimtype == 'bind':
varb = getattr(gdat, strgmome + strgpdfn + strgtotl)
varbfrst = getattr(gdat.binspara, strgfrst)
varbseco = getattr(gdat.binspara, strgseco)
if strgtotl.startswith('hist') or strgtotl.startswith('exr') or strgtotl.startswith('incr') or np.amax(varb) <= 0.:
normtdim = None
else:
normtdim = mpl.colors.LogNorm(0.5, vmax=np.amax(varb))
imag = axis.pcolor(varbfrst, varbseco, varb.T, cmap='Blues', label=labl, norm=normtdim)
make_cbar(gdat, axis, imag)
else:
if gdat.boolcondcatl:
varbfrst = np.zeros(gdat.numbprvlhigh)
varbseco = np.zeros(gdat.numbprvlhigh)
cntr = 0
for r in gdat.indxstkscond:
if r in gdat.indxprvlhigh:
varbfrst[cntr] = gdat.dictglob['poststkscond'][r][strgfrst][indxpoplfrst]
varbseco[cntr] = gdat.dictglob['poststkscond'][r][strgseco][indxpoplfrst]
cntr += 1
axis.scatter(varbfrst, varbseco, alpha=gdat.alphelem, color=colrtemp, label=gdat.lablparagenrscalfull)
if strgstat == 'this' or strgstat == 'mlik':
if strgelemtdimtype == 'bind':
meanfrst = getattr(gdat.binspara, strgfrst)
meanseco = getattr(gdat.binspara, strgseco)
hist = getattr(gdatmodi, strgstat + strgtotl)
if strgtotl.startswith('hist') or strgtotl.startswith('exr') or strgtotl.startswith('incr') or np.amax(hist) <= 0.:
normtdim = None
else:
normtdim = mpl.colors.LogNorm(0.5, vmax=np.amax(hist))
imag = axis.pcolor(meanfrst, meanseco, hist.T, cmap='Blues', label=gdat.lablparagenrscalfull, alpha=gdat.alphhist, norm=normtdim)
else:
varbfrst = getattr(gdatmodi.this, strgfrst)[indxpoplfrst]
varbseco = getattr(gdatmodi.this, strgseco)[indxpoplfrst]
if len(varbfrst) == 0 or len(varbseco) == 0:
# fall back to a point near the lower limits (limits are assumed to live on gmod.limtpara, as used below)
varbfrst = np.array([getattr(gmod.limtpara, strgfrst)[0] * 0.1])
varbseco = np.array([getattr(gmod.limtpara, strgseco)[0] * 0.1])
axis.scatter(varbfrst, varbseco, alpha=gdat.alphelem, color=colrtemp, label=gdat.lablparagenrscalfull)
# reference elements
if strgfrst[-4:] in gdat.listnamerefr:
strgfrsttemp = strgfrst[-4:]
else:
strgfrsttemp = strgfrst
if strgseco[-4:] in gdat.listnamerefr:
strgsecotemp = strgseco[-4:]
else:
strgsecotemp = strgseco
if hasattr(gdat.refr, strgfrsttemp) and hasattr(gdat.refr, strgsecotemp):
for q in gdat.indxrefr:
if strgfrsttemp in gdat.refr.namepara.elem[q] and strgsecotemp in gdat.refr.namepara.elem[q]:
refrvarbfrst = getattr(gdat.refr, strgfrsttemp)[q]
refrvarbseco = getattr(gdat.refr, strgsecotemp)[q]
if len(refrvarbfrst) == 0 or len(refrvarbseco) == 0:
refrvarbfrst = np.array([getattr(gmod.limtpara, strgfrst)[0] * 0.1])
refrvarbseco = np.array([getattr(gmod.limtpara, strgseco)[0] * 0.1])
axis.scatter(refrvarbfrst, refrvarbseco, alpha=gdat.alphelem, color=gdat.refr.colrelem[q], label=gdat.refr.lablelem[q], s=sizelarg)
plot_sigmcont(gdat, strgmodl, axis, strgfrst, indxpoplfrst, strgseco=strgseco)
scalfrst = getattr(gmod.scalpara, strgfrst)
scalseco = getattr(gmod.scalpara, strgseco)
if scalfrst == 'logt':
axis.set_xscale('log')
if scalseco == 'logt':
axis.set_yscale('log')
axis.set_xlabel(getattr(gmod.labltotlpara, strgfrst))
axis.set_ylabel(getattr(gmod.labltotlpara, strgseco))
axis.set_xlim(getattr(gmod.limtpara, strgfrst))
axis.set_ylim(getattr(gmod.limtpara, strgseco))
make_legd(axis)
plt.tight_layout()
if strgstat == 'pdfn':
strgmometemp = strgmome
else:
strgmometemp = ''
nameinte = strgelemtdimvarb + 'tdim/'
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, '%s%s' % (strgmometemp, strgtotl), nameinte=nameinte)
savefigr(gdat, gdatmodi, figr, path)
plt.close(figr)
def plot_sigmcont(gdat, strgmodl, axis, strgfrst, indxpoplfrst, strgseco=None):
gmod = getattr(gdat, strgmodl)
if strgfrst == 'deltllik' or strgseco == 'deltllik':
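# The lines drawn below appear to mark thresholds on the log-likelihood
# improvement obtained from chi-squared quantiles (cf. Wilks' theorem), one
# per p-value in gdat.pvalcont, with the per-element parameter count as the
# degrees of freedom.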
for pval in gdat.pvalcont:
if strgfrst == 'deltllik':
deltlliksigm = scipy.stats.chi2.ppf(1. - pval, gmod.numbparagenrelemsing[indxpoplfrst])
axis.axvline(deltlliksigm, ls='--', color='black', alpha=0.2)
if strgseco == 'deltllik':
deltlliksigm = scipy.stats.chi2.ppf(1. - pval, gmod.numbparagenrelemsing[indxpoplfrst])
axis.axhline(deltlliksigm, ls='--', color='black', alpha=0.2)
def plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgydat, strgxdat, typehist='hist', \
indxrefrplot=None, indxydat=None, strgindxydat=None, indxxdat=None, strgindxxdat=None, plottype='none', \
meanxdat=None, \
scal=None, scalxdat=None, scalydat=None, limtxdat=None, limtydat=None, omittrue=False, nameinte='', \
lablxdat='', lablydat='', histodim=False, offslegd=None, booltdim=False, ydattype='totl', boolhistprio=True):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
if strgydat[-8:-5] == 'pop':
boolelem = True
else:
boolelem = False
if scal is None:
if scalxdat is None:
scalxdat = 'linr'
if scalydat is None:
scalydat = 'linr'
else:
scalxdat = scal
scalydat = scal
if histodim:
figrsize = (gdat.plotsize, 0.8 * gdat.plotsize)
else:
figrsize = (gdat.plotsize, gdat.plotsize)
figr, axis = plt.subplots(figsize=figrsize)
if booltdim:
xdat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgxdat, strgpdfn)
ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn)
else:
xdat = getattr(gdat.meanpara, strgxdat[4:])
if typehist == 'histcorrreca':
ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], strgpdfn)
else:
ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn)
if indxxdat is not None:
xdat = xdat[indxxdat]
if indxydat is not None:
ydat = ydat[indxydat]
xerr = np.zeros((2, xdat.size))
if booltdim:
axis.scatter(xdat, ydat, alpha=gdat.alphelem, color=gmod.colr, label=gdat.lablparagenrscalfull)
else:
if histodim:
# both element and non-element features read their bin widths and edges from
# gdat (the two original branches were identical)
deltxdat = getattr(gdat.deltpara, strgxdat[4:])
binsxdat = getattr(gdat.binspara, strgxdat[4:])
xdattemp = binsxdat[:-1] + deltxdat / 2.
if strgmodl == 'fitt':
if boolelem:
if strgydat.startswith('cmpl'):
labl = gmod.lablelem[int(strgydat[-5])]
colr = gmod.colrelem[int(strgydat[-5])]
else:
labl = gmod.lablelem[int(strgydat[-1])]
colr = gmod.colrelem[int(strgydat[-1])]
else:
labl = gmod.labl
colr = gmod.colr
if strgstat == 'pdfn':
if typehist == 'histcorrreca':
yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], strgpdfn, strgmome='errr')
else:
yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn, strgmome='errr')
if indxydat is not None:
yerr = yerr[[slice(None)] + indxydat]
# label (the sampled distribution, for histograms and other variables alike)
labl = gdat.lablsampdist
# draw points
indxerrr = np.where((yerr[0, :] > 0.) | (yerr[1, :] > 0.))[0]
if indxerrr.size > 0:
labltemp = None
else:
labltemp = labl
temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, xerr=xerr, label=labl, \
marker='o', ls='', markersize=5, color=colr, lw=1, capsize=5)
# draw error-bar caps
if indxerrr.size > 0:
temp, listcaps, temp = axis.errorbar(xdat[indxerrr], ydat[indxerrr], yerr=yerr[:, indxerrr], xerr=xerr[:, indxerrr], \
marker='o', ls='', markersize=5, color=colr, lw=1, capsize=5)
for caps in listcaps:
caps.set_markeredgewidth(1)
elif strgstat == 'this' or strgstat == 'mlik':
if strgstat == 'this':
labl = gdat.lablsamp
else:
labl = gdat.lablmlik
if histodim:
axis.bar(xdattemp, ydat, deltxdat, label=gdat.lablparagenrscalfull, alpha=0.5, linewidth=1, edgecolor=colr)
else:
if plottype == 'errr':
yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn, strgmome='errr')
if indxydat is not None:
yerr = yerr[[slice(None)] + indxydat]
temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, xerr=xerr, \
marker='o', ls='', markersize=5, label=labl, lw=1, capsize=5, color=colr)
for caps in listcaps:
caps.set_markeredgewidth(1)
else:
axis.plot(xdat, ydat, label=gdat.lablparagenrscalfull, alpha=0.5, color=colr)
# reference histogram
if not omittrue:
for q in gdat.indxrefr:
if boolelem:
if strgydat[-12:-8] in gdat.listnamerefr:
name = 'refr' + strgydat[:-12] + 'pop%d' % q + strgydat[-4:]
else:
name = 'refr' + strgydat[:-8] + 'pop%d' % q + strgydat[-4:]
else:
name = 'refr' + strgydat
if not hasattr(gdat, name):
continue
ydattemp = getattr(gdat, name)
ydat = ydattemp
if indxydat is not None:
ydat = ydat[indxydat]
if strgydat[-8:-5] == 'pop':
labl = gdat.refr.lablelem[q]
colr = gdat.refr.colrelem[q]
else:
labl = gdat.refr.labl
colr = gdat.refr.colr
if histodim:
axis.bar(xdattemp, ydat, deltxdat, color=colr, label=labl, alpha=gdat.alphhist, linewidth=1, edgecolor=colr)
else:
axis.plot(xdat, ydat, color=colr, label=labl, alpha=gdat.alphline)
try:
if histodim:
if typehist == 'histcorrreca':
reca = getattr(gdat.true, 'reca' + strgydat[4:])
axis.plot(xdattemp, 10. * reca, color='purple', label='PTFN', alpha=gdat.alphline)
except:
pass
if not boolelem:
break
# external reference histogram
if histodim and strgydat == 'histfluxpop0':
try:
if gdat.listprefhistfluxlabl is not None:
for k in range(len(gdat.listprefhistfluxlabl)):
if gdat.listprefhistfluxtype[k] == 'shad':
axis.plot(gdat.listprefhistfluxflux[k][0], gdat.listprefhistfluxhist[k][0], color='m', label=gdat.listprefhistfluxlabl[k])
enerpoly = np.empty(gdat.listprefhistfluxflux[k][1].size + gdat.listprefhistfluxflux[k][2].size)
enerpoly[:gdat.listprefhistfluxflux[k][1].size] = gdat.listprefhistfluxflux[k][1]
enerpoly[gdat.listprefhistfluxflux[k][1].size:] = gdat.listprefhistfluxflux[k][2][::-1]
sbrtpoly = np.empty(gdat.listprefhistfluxflux[k][1].size + gdat.listprefhistfluxflux[k][2].size)
sbrtpoly[:gdat.listprefhistfluxflux[k][1].size] = gdat.listprefhistfluxhist[k][1]
sbrtpoly[gdat.listprefhistfluxflux[k][1].size:] = gdat.listprefhistfluxhist[k][2][::-1]
axis.fill(enerpoly, sbrtpoly, color='m', alpha=0.5)
else:
axis.errorbar(gdat.listprefhistfluxflux[k], gdat.listprefhistfluxhist[k], label=gdat.listprefhistfluxlabl[k], color='m')
except:
pass
if strgydat.startswith('histcntp'):
ydattemp = getattr(gmodstat, strgydat)
axis.bar(xdattemp, ydattemp, deltxdat, color='black', label='Data', alpha=gdat.alphhist, linewidth=1, edgecolor='black')
# axis scales
if scalxdat == 'logt':
axis.set_xscale('log')
if scalydat == 'logt':
if np.where(ydat > 0.)[0].size > 0:
axis.set_yscale('log')
# axis labels
axis.set_xlabel(lablxdat)
axis.set_ylabel(lablydat)
# superimpose prior on the feature
ptch = None
line = None
if strgydat.startswith('hist') and strgydat != 'histdefl' and strgydat != 'histdeflelem' and boolhistprio:
if strgydat[-8:-5] == 'pop':
strgtemp = strgydat[4:-8]
if strgtemp in gmod.namepara.genrelem[int(strgydat[-5])]:
xdatprio = getattr(gmod, strgxdat + 'prio')
if gdat.typedata == 'mock' and not omittrue:
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
if strgtemp in gmod.namepara.genrelem[q]:
truexdatprio = getattr(gdat.true, strgxdat + 'prio')
trueydatsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'true', strgydat + 'prio', strgpdfn)
axis.plot(truexdatprio, trueydatsupr, ls='-', alpha=gdat.alphline, color=gdat.refr.colrelem[q])
if strgmodl != 'true':
ydatsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'fitt', strgydat + 'prio', strgpdfn)
if strgstat == 'pdfn':
yerrsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'fitt', strgydat + 'prio', strgpdfn, strgmome='errr')
labl = gdat.lablsampdist + ' hyper-distribution'
ptch, line = tdpy.plot_braz(axis, xdatprio, ydatsupr, yerr=yerrsupr, lcol='lightgrey', dcol='grey', labltotl=labl)
else:
axis.plot(xdatprio, ydatsupr, ls='--', alpha=gdat.alphline, color=gmod.colrelem[int(strgydat[-5])])
for name, valu in gdat.refr.__dict__.items():
if name[8:12] == 'hist' and name[12:16] == strgydat[4:] and name[16:19] == 'pop' and int(name[-1]) == int(strgydat[-1]):
colr = getattr(gdat, name + 'colr')
linestyl = getattr(gdat, name + 'linestyl')
axis.plot(valu[0, :], valu[1, :], ls=linestyl, color=colr)
if strgydat.startswith('hist') and strgydat[4:-8] == 'deltllik':
plot_sigmcont(gdat, strgmodl, axis, strgxdat[4:], int(strgydat[-1]))
if indxydat is not None:
strgydat += strgindxydat
if indxxdat is not None:
strgxdat += strgindxxdat
if limtxdat is not None:
axis.set_xlim(limtxdat)
else:
axis.set_xlim([np.amin(xdat), np.amax(xdat)])
if limtydat is not None:
axis.set_ylim([limtydat[0], limtydat[1]])
else:
axis.set_ylim([np.amin(ydat), np.amax(ydat)])
if ydattype != 'totl':
strgydat += ydattype
try:
make_legd(axis, offs=offslegd, ptch=ptch, line=line)
except:
print('Legend failed when')
print('strgstat')
print(strgstat)
print('strgmodl')
print(strgmodl)
print('strgydat')
print(strgydat)
raise Exception('')
plt.tight_layout()
if typehist == 'histcorrreca':
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], nameinte=nameinte)
else:
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgydat, nameinte=nameinte)
savefigr(gdat, gdatmodi, figr, path)
plt.close(figr)
def plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat, plotdiff=False):
if plotdiff:
figrsize = (gdat.plotsize, 0.7 * gdat.plotsize)
else:
figrsize = (gdat.plotsize, gdat.plotsize)
figr, axis = plt.subplots(1, 1, figsize=figrsize)
gmod = getattr(gdat, strgmodl)
# plotting metadata for the feature; these are assumed to follow the module's
# naming conventions (scalpara on the model, binspara on gdat, keyed by feature name)
scal = getattr(gmod.scalpara, strgfeat)
binsplot = getattr(gdat.binspara, strgfeat)
minmplot = np.amin(binsplot)
maxmplot = np.amax(binsplot)
# prepare data to be plotted
xdat = np.copy(getattr(gdat.refr, strgfeat)[q][0, :])
xerr = tdpy.retr_errrvarb(getattr(gdat.refr, strgfeat)[q])
ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgfeat + 'asscpop%dpop%d' % (q, l), strgpdfn)
yerr = np.zeros((2, ydat.size))
if strgstat == 'pdfn':
yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgfeat + 'asscpop%dpop%d' % (q, l), strgpdfn, strgmome='errr')
if plotdiff:
ydat = 100. * (ydat - xdat) / xdat
# handle the case when there is a single reference element
if yerr.ndim == 1:
ydat = np.array([ydat])
yerr = yerr[:, None]
# plot all associations
if plotdiff:
indx = np.where(ydat > -100.)[0]
else:
indx = np.where(ydat > 0.)[0]
if indx.size > 0:
axis.errorbar(xdat[indx], ydat[indx], ls='', yerr=yerr[:, indx], xerr=xerr[:, indx], lw=1, marker='o', markersize=5, color='black')
# temp -- plot associations inside the comparison area
if plotdiff:
axis.axhline(0., ls='--', alpha=gdat.alphline, color='black')
else:
axis.plot(binsplot, binsplot, ls='--', alpha=gdat.alphline, color='black')
lablxdat = getattr(gmod.lablpara, strgfeat + 'refr')
lablydat = getattr(gmod.lablpara, strgfeat + 'paragenrscalfull')
axis.set_xlabel(lablxdat)
axis.set_ylabel(lablydat)
boollogtxaxi = False
boollogtyaxi = False
if indx.size > 0 and scal == 'logt':
if not plotdiff:
axis.set_yscale('log')
boollogtyaxi = True
axis.set_xscale('log')
boollogtxaxi = True
if plotdiff:
limtydat = np.array([-100., 100.])
else:
limtydat = np.array([minmplot, maxmplot])
limtxdat = [minmplot, maxmplot]
# overplot text
if 'etag' in gdat.refr.namepara.elem[q]:
for k in range(indx.size):
if boollogtxaxi:
sizexoff = 0.01 * xdat[indx[k]]
else:
sizexoff = 0.01 * (limtxdat[1] - limtxdat[0])
if boollogtyaxi:
sizeyoff = 0.01 * ydat[indx[k]]
else:
sizeyoff = 0.01 * (limtydat[1] - limtydat[0])
axis.text(xdat[indx[k]] + sizexoff, ydat[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], verticalalignment='center', horizontalalignment='center', \
color='red', fontsize=1)
axis.set_ylim(limtydat)
axis.set_xlim(limtxdat)
plt.tight_layout()
if plotdiff:
strgtype = 'diff'
else:
strgtype = ''
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'scatassc' + strgfeat + '%spop%dpop%d' % (strgtype, q, l), nameinte='assc')
savefigr(gdat, gdatmodi, figr, path)
plt.close(figr)
def plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, indxevttplot, indxenerplot=None):
gmod = getattr(gdat, strgmodl)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'cntpmodl', strgpdfn)
if indxenerplot is None:
xdat = gdat.cntpdata[:, :, indxevttplot].flatten()
ydat = ydat[:, :, indxevttplot].flatten()
nameplot = 'scatcntpevt%d' % (indxevttplot)
if strgstat == 'pdfn':
indxvarb = [slice(None), slice(None), indxevttplot]
else:
xdat = gdat.cntpdata[indxenerplot, :, indxevttplot]
ydat = ydat[indxenerplot, :, indxevttplot]
nameplot = 'scatcntpen%02devt%d' % (indxenerplot, indxevttplot)
if strgstat == 'pdfn':
indxvarb = [indxenerplot, slice(None), indxevttplot]
if strgstat == 'pdfn':
yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'cntpmodl', strgpdfn, strgmome='errr', indxvarb=indxvarb)
colr = gmod.colr
if strgstat == 'pdfn':
axis.errorbar(xdat, ydat, yerr=yerr, marker='o', ls='', markersize=5, color=gmod.colr, capsize=5)
else:
axis.plot(xdat, ydat, marker='o', ls='', markersize=5, color=gmod.colr)
gdat.limtcntpdata = [gdat.binspara.cntpdata[0], gdat.binspara.cntpdata[-1]]
axis.set_xlim(gdat.limtcntpdata)
axis.set_ylim(gdat.limtcntpdata)
axis.set_ylabel('$k^{modl}$')
axis.set_xlabel('$k^{data}$')
axis.set_xscale('log')
axis.set_yscale('log')
plt.tight_layout()
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)
savefigr(gdat, gdatmodi, figr, path)
plt.close(figr)
def plot_indxprox(gdat):
numbbins = 40
numbfluxprox = len(gdat.indxpixlprox)
bins = np.empty((numbfluxprox, numbbins + 1))
indxpixlproxsize = np.empty((numbfluxprox, gdat.numbpixlfull))
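# gdat.indxpixlprox[h][j] is assumed to list the pixels within the evaluation
# radius of pixel j for flux bin h; entries that are not arrays (a sentinel
# for 'all pixels') fall back to the full pixel count in the loop below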
for h in gdat.indxprox:
for j in gdat.indxpixlfull:
try:
indxpixlproxsize[h, j] = gdat.indxpixlprox[h][j].size
except:
indxpixlproxsize[h, j] = gdat.numbpixlfull
bins[h, :] = np.logspace(np.log10(np.amin(indxpixlproxsize[h, :])), np.log10(np.amax(indxpixlproxsize[h, :])), numbbins + 1)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
for h in gdat.indxprox:
axis.hist(indxpixlproxsize[h, :], bins=bins[h, :], log=True, label='Flux bin %d' % h, alpha=gdat.alphhist)
axis.set_xscale('log')
axis.axvline(gdat.numbpixlfull, label='ROI', ls='--')
axis.set_xlabel('Number of pixels')
axis.set_ylabel("Number of tables")
make_legd(axis)
plt.tight_layout()
figr.savefig(gdat.pathplotrtag + 'init/indxprox.pdf')
plt.close()
def plot_psfn_type(gdat):
devi = np.linspace(0., 5., 100)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
singgaus = retr_singgaus(devi, 0.25)
axis.plot(devi, singgaus, label='Single Gaussian')
singking = retr_singking(devi, 0.25, 10.)
axis.plot(devi, singking, label='Single King')
doubgaus = retr_doubgaus(devi, 0.1, 0.25, 1.)
axis.plot(devi, doubgaus, label='Double Gaussian')
gausking = retr_gausking(devi, 0.1, 0.25, 1., 10.)
axis.plot(devi, gausking, label='Gaussian + King')
doubking = retr_doubking(devi, 0.1, 0.25, 10., 1., 5.)
axis.plot(devi, doubking, label='Double King')
make_legd(axis)
axis.set_xscale('log')
axis.set_yscale('log')
axis.set_ylim([1e-3, None])
def plot_evidtest(gdat):
minmgain = -1.
maxmgain = 5.
minmdevi = 0.
maxmdevi = 5.
gain = np.linspace(minmgain, maxmgain, 100)
devi = np.linspace(minmdevi, maxmdevi, 100)
evid = np.log(np.sqrt(1. + np.exp(2. * gain[None, :])) * np.exp(-devi[:, None]**2 / 2. / (1. + 1. / np.exp(2. * gain[None, :]))))
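# log-evidence ratio in favor of the lower-dimensional model as a function of
# information gain g and goodness of fit d; the expression above is
#     ln K = ln sqrt(1 + e^{2g}) - d^2 / (2 (1 + e^{-2g}))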
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
figr.suptitle('Log-Bayesian Evidence For Lower-Dimension Model', fontsize=18)
imag = axis.imshow(evid, extent=[minmgain, maxmgain, minmdevi, maxmdevi], cmap='winter', origin='lower')
cset1 = plt.contourf(gain, devi, evid, cmap='winter')
axis.set_xlabel('Information gain')
axis.set_ylabel('Goodness of fit')
plt.colorbar(imag, ax=axis, fraction=0.03)
plt.tight_layout()
figr.savefig(gdat.pathplotrtag + 'evidtest.pdf')
plt.close(figr)
def plot_histlgalbgalelemstkd(gdat, strgpdfn, indxpoplplot, strgbins, strgfeat=None):
if strgfeat is not None:
numbparaplot = gdat.numbbinsplot
else:
numbparaplot = 1
if strgbins == 'cumu':
numbrows = 1
numbcols = 1
else:
numbcols = 2
if strgbins == 'full':
numbrows = numbparaplot // 2
else:
numbrows = 2
histlgalbgalelemstkd = getattr(gdat, strgpdfn + 'histlgalbgalelemstkd')
# the fitting model metadata (used for labels below; cf. plot_init())
gmod = gdat.fitt
figr, axgr = plt.subplots(numbrows, numbcols, figsize=(numbcols * gdat.plotsize, numbrows * gdat.plotsize), sharex='all', sharey='all')
if numbrows == 1:
axgr = [axgr]
for a, axrw in enumerate(axgr):
if numbcols == 1:
axrw = [axrw]
for b, axis in enumerate(axrw):
if strgfeat is not None:
h = a * 2 + b
if strgbins == 'full':
indxlowr = h
indxuppr = h + 1
elif strgbins == 'cumu':
indxlowr = 0
indxuppr = numbparaplot
else:
if h < 3:
indxlowr = 2 * h
indxuppr = 2 * (h + 1)
else:
indxlowr = 2 * h
indxuppr = numbparaplot
temp = np.sum(histlgalbgalelemstkd[indxpoplplot][:, :, indxlowr:indxuppr], 2).T
else:
temp = np.sum(np.sum(histlgalbgalelemstkd[indxpoplplot], 2), 2).T
if np.where(temp > 0.)[0].size > 0:
imag = axis.imshow(temp, interpolation='nearest', origin='lower', cmap='BuPu', \
extent=gdat.exttrofi, norm=mpl.colors.LogNorm(vmin=0.5, vmax=None))
else:
imag = axis.imshow(temp, interpolation='nearest', origin='lower', cmap='BuPu', extent=gdat.exttrofi)
if strgfeat is not None:
bins = getattr(gdat.binspara, strgfeat)
# superimpose reference elements
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
# temp -- backcomp
reframpl = getattr(gdat.refr, gdat.refr.nameparagenrelemampl[q])
if strgfeat in gdat.refr.namepara.elem[q]:
refrfeat = getattr(gdat.refr, strgfeat)[q]
if len(refrfeat) > 0:
indxelem = np.where((bins[indxlowr] < refrfeat[0, :]) & (refrfeat[0, :] < bins[indxuppr]))[0]
else:
indxelem = np.array([])
else:
indxelem = np.arange(gdat.refr.numbelem[q])
# temp -- backcomp; marker sizes are assumed to follow the fitting model's conventions
mrkrsize = retr_mrkrsize(gdat, 'fitt', reframpl[q][0, indxelem], gdat.refr.nameparagenrelemampl[q])
if indxelem.size > 0:
axis.scatter(gdat.anglfact * gdat.refr.dictelem[q]['lgal'][0, indxelem], gdat.anglfact * gdat.refr.dictelem[q]['bgal'][0, indxelem], \
s=mrkrsize, alpha=gdat.alphelem, marker=gdat.refrlistmrkrhits[q], lw=2, color=gdat.refr.colrelem[q])
if a == numbrows - 1:
axis.set_xlabel(gdat.labllgaltotl)
else:
axis.set_xticklabels([])
if b == 0:
axis.set_ylabel(gdat.lablbgaltotl)
else:
axis.set_yticklabels([])
draw_frambndr(gdat, axis)
if strgbins != 'cumu':
lablfeat = getattr(gmod.lablpara, strgfeat)
titl = tdpy.mexp(bins[indxlowr]) + ' < $%s$ < ' % lablfeat + tdpy.mexp(bins[indxuppr])
axis.set_title(titl)
if strgfeat is not None:
lablfeattotl = getattr(gmod.lablpara, strgfeat + 'totl')
plt.figtext(0.5, 0.95, '%s' % lablfeattotl, ha='center', va='center')
axiscomm = figr.add_axes([0.87, 0.2, 0.02, 0.6])
cbar = figr.colorbar(imag, cax=axiscomm)
plt.subplots_adjust()
#plt.subplots_adjust(left=0.18, top=.9, right=0.82, bottom=0.15, hspace=0.08, wspace=0.08)
if strgbins == 'cumu':
strgtemp = ''
else:
strgtemp = strgfeat
path = getattr(gdat, 'path' + strgpdfn + 'finl') + 'histlgalbgalelemstkd%s%spop%d' % (strgbins, strgtemp, indxpoplplot) + '.pdf'
figr.savefig(path)
plt.close(figr)
def plot_king(gdat):
angl = np.rad2deg(gdat.binspara.angl)
figr, axgr = plt.subplots(1, 2, figsize=(2 * gdat.plotsize, gdat.plotsize))
figr.suptitle('King Function', fontsize=20)
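# Each panel varies one parameter of the King profile; retr_singking is
# assumed to implement the Fermi-LAT-style parameterization
#     K(x; sigma, gamma) = (1 / (2 pi sigma^2)) * (1 - 1/gamma)
#                          * (1 + x^2 / (2 gamma sigma^2))**(-gamma)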
for k, axis in enumerate(axgr):
if k == 0:
sigmlist = [0.25]
gammlist = [1.01, 2.5, 10.]
else:
sigmlist = [0.1, 0.25, 1.]
gammlist = [2.]
for sigm in sigmlist:
for gamm in gammlist:
axis.plot(angl, retr_singking(angl, sigm, gamm), label=r'$\sigma = %.4g, \gamma = %.3g$' % (sigm, gamm))
make_legd(axis)
axis.set_yscale('log')
axis.set_xlabel(gdat.labltotlpara.gang)
axis.set_ylabel(r'$\mathcal{K}$')
plt.tight_layout()
figr.savefig(gdat.pathplotrtag + 'king.pdf')
plt.close(figr)
def plot_intr(gdat):
if gdat.typeverb > 0:
print('Making PCAT introductory plots...')
#plot_grap(plottype='meta', typeverb=1)
plot_grap(plottype='lght0000', typeverb=1)
#plot_grap(plottype='lght0001', typeverb=1)
#plot_grap(plottype='lght0002', typeverb=1)
#plot_grap(plottype='lght0003', typeverb=1)
#plot_grap(plottype='lens0000', typeverb=1)
plot_grap(plottype='lens0001', typeverb=1)
with plt.xkcd():
from matplotlib import patheffects
mpl.rcParams['path.effects'] = [patheffects.withStroke(linewidth=0)]
figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))
catl = np.arange(80)
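# pss is assumed to be scipy.stats.poisson (imported at the module level);
# the curve below is a two-component Poisson mixture over catalog indices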
probcatl = pss.pmf(catl, 30.) + 0.5 * pss.pmf(catl, 60.)
axis.plot(catl, probcatl)
axis.set_xticks([10, 30, 60])
axis.set_xticklabels(["Crackpot's Catalog", "Best-fit catalog", "Not-so-best-fit catalog"])
axis.set_yticks([])
text = axis.set_title("Exploring the catalog space with Probabilistic cataloging")
text.set_position([.5, 1.05])
axis.set_xlabel('Catalog index')
axis.set_ylabel("Probability")
axis.tick_params(axis='x', colors='#B6E954')
axis.tick_params(axis='y', colors='#B6E954')
axis.spines['bottom'].set_color('#B6E954')
axis.spines['top'].set_color('#B6E954')
axis.spines['right'].set_color('#B6E954')
axis.spines['left'].set_color('#B6E954')
axis.yaxis.label.set_color('#B6E954')
axis.xaxis.label.set_color('#B6E954')
axis.title.set_color('#B6E954')
axis.set_facecolor('black')
figr.set_facecolor('black')
plt.tight_layout()
figr.savefig(gdat.pathimag + 'talkintr.pdf', facecolor=figr.get_facecolor())
plt.close()
def plot_psfn(gdat, gdatmodi, strgstat, strgmodl):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
for i in gdat.indxener:
for m in gdat.indxevtt:
for k in range(gdat.numbprox + 1):
if k == 0 or k == gdat.numbprox:
alph = 1.
colr = 'b'
if k == 0:
labl = 'Dimmest PS'
else:
labl = 'Brightest PS'
else:
alph = 0.2
labl = None
colr = 'black'
axis.plot(gdat.binspara.angl * gdat.anglfact, gdat.binspara.prox[k] * gmodstat.psfn[i, :, m], label=labl, color=colr, alpha=alph)
axis.set_xlim([np.amin(gdat.binspara.angl) * gdat.anglfact, np.amax(gdat.binspara.angl) * gdat.anglfact])
if k > 0:
axis.axvline(gdat.anglfact * gdat.maxmangleval[k-1], ls='--', alpha=alph, color=colr)
axis.set_yscale('log')
axis.set_xlabel(gdat.labltotlpara.gang)
axis.set_ylabel(gdat.lablsbrttotl)
limt = gdat.specfraceval * np.amax(gdat.binspara.prox[0] * gmodstat.psfn[i, :, m])
if limt != 0.:
axis.axhline(limt, color='red', ls=':', label='Flux floor')
make_legd(axis)
plt.tight_layout()
name = 'psfn'
if gdat.numbener > 1:
name += 'en%02d' % i
if gdat.numbevtt > 1:
name += 'evt%d' % m
figr.savefig(gdat.pathinit + name + '.pdf')
plt.close(figr)
def plot_mosa(gdat, strgpdfn):
# empty global object
gdatmodi = tdpy.gdatstrt()
# the fitting model metadata (gmod is referenced below; cf. plot_init())
gmod = gdat.fitt
listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')
listparagenrunitfull = getattr(gdat, 'list' + strgpdfn + 'paragenrunitfull')
numbrows = 3
numbcols = 2
numbsampmosa = numbrows * numbcols
if numbsampmosa <= gdat.numbsamptotl:
indxsampmosa = np.random.choice(gdat.indxsamptotl, size=numbsampmosa, replace=False)
for l in gmod.indxpopl:
for i in gdat.indxener:
for m in gdat.indxevttplot:
figr, axgr = plt.subplots(numbrows, numbcols, figsize=(numbcols * gdat.plotsize, numbrows * gdat.plotsize))
for a, axrw in enumerate(axgr):
for b, axis in enumerate(axrw):
n = indxsampmosa[numbcols*a+b]
gdatmodi.this.paragenrscalfull = listparagenrscalfull[n, :].flatten()
gdatmodi.this.paragenrunitfull = listparagenrunitfull[n, :].flatten()
if gmod.numbparaelem > 0:
gdatmodi.this.indxelemfull = getattr(gdat, 'list' + strgpdfn + 'indxelemfull')[n]
proc_samp(gdat, gdatmodi, 'this', 'fitt')
if a == numbrows - 1:
axis.set_xlabel(gdat.labllgaltotl)
else:
axis.set_xticklabels([])
if b == 0:
axis.set_ylabel(gdat.lablbgaltotl)
else:
axis.set_yticklabels([])
imag = retr_imag(gdat, axis, gdat.cntpdata, '', 'fitt', 'cntpdata', i, m)
supr_fram(gdat, gdatmodi, 'this', 'fitt', axis, l)
if gdat.boolbinsener:
plt.figtext(0.5, 0.93, gdat.strgener[i], ha='center', va='center')
axiscomm = figr.add_axes([0.92, 0.1, 0.02, 0.8])
cbar = figr.colorbar(imag, cax=axiscomm)
cbar.set_ticks(gdat.valutickmajrpara.cntpdata)
cbar.set_ticklabels(gdat.labltickmajrpara.cntpdata)
plt.subplots_adjust()
#plt.subplots_adjust(left=0.1, top=.91, hspace=0.03, wspace=0.1, bottom=0.09)
if l == 1:
strg = ''
else:
strg = 'pop%d' % l
pathfinl = getattr(gdat, 'path' + strgpdfn + 'finl')
if m is None:
path = pathfinl + 'mosa' + strg + 'en%02dA.pdf' % (gdat.indxenerincl[i])
else:
path = pathfinl + 'mosa' + strg + 'en%02devtt%d.pdf' % (gdat.indxenerincl[i], gdat.indxevttincl[m])
figr.savefig(path)
plt.close(figr)
else:
if gdat.typeverb > 0:
print('Skipping the mosaic plot...')
def plot_grap(plottype, typeverb=0):
import networkx as nx
figr, axis = plt.subplots(figsize=(6, 6))
grap = nx.DiGraph()
if plottype == 'meta':
listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'magenta']
if plottype == 'lens0001':
listcolr = ['olive', 'olive', 'black', 'magenta', 'magenta', 'magenta', 'magenta', 'magenta', 'olive', 'olive', 'olive', 'olive', 'olive', \
r'black', 'olive', 'black']
if plottype == 'lght0000':
listcolr = [r'olive', r'black', r'magenta', r'magenta', 'magenta', r'magenta', r'olive', r'olive', r'black', r'olive', r'olive', r'black', r'olive']
if plottype == 'lght0001':
listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'olive', 'olive', 'magenta', 'magenta', 'magenta', 'magenta', 'black']
if plottype == 'lght0002':
listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'olive', 'olive', 'olive', 'magenta', \
'magenta', 'magenta', 'magenta', 'magenta', 'black']
if plottype == 'lght0003':
listcolr = ['black', 'black', 'black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', \
'olive', 'olive', 'magenta', 'magenta', 'magenta', 'magenta']
if plottype == 'lens0000':
listcolr = ['olive', 'black', 'black', 'olive', 'olive', 'olive', 'olive', 'black', 'olive', 'magenta', 'magenta', 'magenta']
if plottype.startswith('meta'):
grap.add_edges_from([ \
('meanelem', 'numbelem'), \
('modl','data'), \
('psfp', 'modl'), \
('feat','modl'), \
('numbelem','feat'), \
('amplslop', 'ampl'), \
])
if plottype.startswith('lght') or plottype.startswith('lens'):
grap.add_edges_from([ \
('meanelem', 'numbelem'), \
('modl','data'), \
('psfp', 'modl'), \
('bacp', 'modl'), \
('lgal','modl'), \
('bgal','modl'), \
('numbelem','lgal'), \
('numbelem','bgal'), \
])
if plottype.startswith('lght'):
grap.add_edges_from([ \
('amplslop', 'ampl'), \
('ampl', 'modl'), \
('numbelem','ampl'), \
('numbelem', 'sind'), \
('sind','modl'), \
])
if plottype.startswith('lens'):
grap.add_edges_from([ \
('lenp', 'modl'), \
('defsslop', 'defs'), \
('defs', 'modl'), \
('numbelem','defs'), \
])
if plottype == 'lens0001':
grap.add_edges_from([ \
('asca', 'modl'), \
('numbelem','asca'), \
('acut', 'modl'), \
('numbelem','acut'), \
])
if plottype == 'lght0001' or plottype == 'lght0002':
grap.add_edges_from([ \
('sinddistmean', 'sind'), \
])
if plottype == 'lght0002':
grap.add_edges_from([ \
('numbelem', 'expc'), \
('expc', 'modl'), \
])
if plottype == 'lght0003':
grap.add_edges_from([ \
('spatdistcons', 'lgal'), \
('spatdistcons', 'bgal'), \
])
labl = {}
if plottype.startswith('lens'):
nameelem = r'\rm{sub}'
else:
nameelem = r'\rm{pts}'
if plottype.startswith('lght') and (plottype == 'lght0001' or plottype == 'lght0002'):
labl['numbelem'] = r'$\vec{N}_{%s}$' % nameelem
labl['meanelem'] = r'$\vec{\mu}_{%s}$' % nameelem
else:
labl['numbelem'] = '$N_{%s}$' % nameelem
labl['meanelem'] = r'$\mu_{%s}$' % nameelem
if plottype.startswith('lght'):
if plottype == 'lght0000' or plottype == 'lght0003':
labl['amplslop'] = r'$\alpha$'
else:
labl['amplslop'] = r'$\vec{\alpha}$'
if plottype.startswith('lens'):
labl['defsslop'] = r'$\beta$'
if plottype == 'lght0001' or plottype == 'lght0002':
labl['sinddistmean'] = r'$\vec{\beta}$'
if plottype == 'lght0003':
labl['spatdistcons'] = r'$\gamma$'
if plottype.startswith('lens'):
labl['lenp'] = r'$\vec{\chi}$'
labl['psfp'] = r'$\vec{\eta}$'
labl['bacp'] = r'$\vec{A}$'
labl['lgal'] = r'$\vec{\theta_1}$'
labl['bgal'] = r'$\vec{\theta_2}$'
if plottype.startswith('meta'):
labl['feat'] = r'$\vec{\xi}$'
else:
if plottype.startswith('lght'):
labl['sind'] = r'$\vec{s}$'
labl['ampl'] = r'$\vec{f}$'
else:
labl['defs'] = r'$\vec{\alpha_{\rm{s}}}$'
if plottype == 'lens0001':
labl['asca'] = r'$\vec{\theta_{\rm{s}}}$'
labl['acut'] = r'$\vec{\theta_{\rm{c}}}$'
if plottype == 'lght0002':
labl['expc'] = r'$\vec{E_{\rm{c}}}$'
labl['modl'] = r'$M_D$'
labl['data'] = r'$D$'
posi = nx.circular_layout(grap)
if plottype == 'lght0001' or plottype == 'lght0002':
posi['sinddistmean'] = np.array([0.4, 0.15])
if plottype == 'lght0003':
posi['spatdistcons'] = np.array([-0.2, 0.15])
if plottype.startswith('lght'):
posi['numbelem'] = np.array([0., 0.075])
posi['meanelem'] = np.array([0., 0.15])
posi['amplslop'] = np.array([0.2, 0.15])
if plottype.startswith('lens'):
posi['numbelem'] = np.array([-0.1, 0.075])
posi['meanelem'] = np.array([-0.1, 0.15])
posi['defsslop'] = np.array([0.1, 0.15])
if plottype.startswith('lght'):
if plottype == 'lght0002':
posi['psfp'] = np.array([0.7, -0.0])
posi['bacp'] = np.array([0.9, -0.0])
else:
posi['psfp'] = np.array([0.5, -0.0])
posi['bacp'] = np.array([0.7, -0.0])
if plottype == 'lens0000':
posi['psfp'] = np.array([0.3, -0.0])
posi['bacp'] = np.array([0.5, -0.0])
posi['lenp'] = np.array([0.7, -0.0])
if plottype == 'lens0001':
posi['psfp'] = np.array([0.7, -0.0])
posi['bacp'] = np.array([0.9, -0.0])
posi['lenp'] = np.array([1.1, -0.0])
posi['lgal'] = np.array([-0.3, -0.0])
posi['bgal'] = np.array([-0.1, -0.0])
if plottype.startswith('lght'):
posi['ampl'] = np.array([0.1, -0.0])
posi['sind'] = np.array([0.3, -0.0])
if plottype == 'lght0002':
posi['expc'] = np.array([0.5, -0.0])
if plottype.startswith('lens'):
posi['defs'] = np.array([0.1, -0.0])
if plottype == 'lens0001':
posi['asca'] = np.array([0.3, -0.0])
posi['acut'] = np.array([0.5, -0.0])
posi['modl'] = np.array([0., -0.075])
posi['data'] = np.array([0., -0.15])
if typeverb > 0:
# grap.edges() is not indexable in networkx >= 2, so materialize it first
listedge = list(grap.edges())
numb = max(len(listedge), len(listcolr))
for k in range(numb):
try:
print('%15s %15s %15s' % (listedge[k][0], listedge[k][1], listcolr[k]))
except IndexError:
print('unequal')
size = 1000
nx.draw(grap, posi, labels=labl, ax=axis, edgelist=[], nodelist=[])
nx.draw_networkx_edges(grap, posi, ax=axis, labels=labl, edge_color=listcolr)
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['modl', 'data'], node_color='grey', node_size=size)
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['numbelem'], node_color='b', node_size=size)
if plottype.startswith('lght'):
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['meanelem', 'amplslop'], node_color='r', node_size=size)
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'ampl', 'sind'], node_color='g', node_size=size)
if plottype == 'lght0001' or plottype == 'lght0002':
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['sinddistmean'], node_color='r', node_size=size)
if plottype == 'lght0002':
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['expc'], node_color='g', node_size=size)
if plottype == 'lght0003':
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['spatdistcons'], node_color='r', node_size=size)
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['psfp', 'bacp'], node_color='y', node_size=size)
if plottype.startswith('lens'):
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['meanelem', 'defsslop'], node_color='r', node_size=size)
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lenp'], node_color='y', node_size=size)
if plottype == 'lens0000':
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'defs'], node_color='g', node_size=size)
if plottype == 'lens0001':
nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'defs', 'asca', 'acut'], node_color='g', node_size=size)
pathplot = pathpcat + '/imag/'
plt.tight_layout()
figr.savefig(pathplot + 'grap%s.pdf' % plottype)
plt.close(figr)
def plot_3fgl_thrs(gdat):
path = pathpcat + '/detthresh_P7v15source_4years_PL22.fits'
fluxthrs = astropy.io.fits.getdata(path, 0)
bgalfgl3 = np.linspace(-90., 90., 481)
lgalfgl3 = np.linspace(-180., 180., 960)
bgalexpo = np.linspace(-90., 90., 400)
lgalexpo = np.linspace(-180., 180., 800)
#fluxthrs = interp2d(lgalfgl3, bgalfgl3, fluxthrs)(lgalexpo, bgalexpo)
fluxthrs = griddata([lgalfgl3, bgalfgl3], fluxthrs, [gdat.lgalheal])
cntsthrs = fluxthrs * gdat.expo
jbgal = np.where(abs(bgalexpo) < 10.)[0]
jlgal = np.where(abs(lgalexpo) < 10.)[0]
extent = [-10, 10, -10, 10]
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.set_xlabel(gdat.labllgaltotl)
axis.set_ylabel(gdat.lablbgaltotl)
imag = plt.imshow(fluxthrs[np.amin(jbgal):np.amax(jbgal)+1, np.amin(jlgal):np.amax(jlgal)+1], origin='lower', cmap='Reds', extent=extent)
plt.colorbar(imag, fraction=0.05)
plt.tight_layout()
figr.savefig(gdat.pathplotrtag + 'thrs.pdf')
plt.close(figr)
def plot_init(gdat):
print('Making initial plots...')
gmod = gdat.fitt
# make initial plots
if gdat.makeplot:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if (gmod.typeelemspateval[l] == 'locl' and gmod.maxmpara.numbelem[l] > 0) and gdat.numbpixl > 1:
plot_indxprox(gdat)
for i in gdat.indxener:
for m in gdat.indxevtt:
if gdat.typedata == 'mock' and gmod.boollens:
figr, axis, path = init_figr(gdat, None, 'post', 'cntpmodlraww', 'this', 'true', i, m, -1)
imag = retr_imag(gdat, axis, gmod.cntpmodlraww, 'this', 'true', 'cntpdata', i, m, booltdim=True)
make_cbar(gdat, axis, imag, 0, tick=gdat.valutickmajrpara.cntpdata, labltotl=gdat.lablcntpdata)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
if gdat.boolcorrexpo:
gdat.lablnumbpixl = r'$N_{\rm{pix}}$'
gdat.limtexpo = [gdat.minmpara.expo, gdat.maxmpara.expo]
if gdat.boolbinsener:
path = gdat.pathinit + 'expototlmean.pdf'
tdpy.plot_gene(path, gdat.meanpara.ener, gdat.expototlmean, scalxdat='logt', scalydat='logt', lablxdat=gdat.lablenertotl, \
lablydat=gdat.lablexpototl, limtydat=gdat.limtexpo)
for m in gdat.indxevtt:
for i in gdat.indxener:
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.hist(gdat.expo[i, :, m], gdat.binspara.expo)
axis.set_xlabel(gdat.labltotlpara.expo)
axis.set_ylabel(gdat.labltotlpara.numbpixl)
axis.set_xscale('log')
axis.set_yscale('log')
plt.tight_layout()
name = 'histexpo'
if gdat.numbener > 1:
name += 'en%02d' % i
if gdat.numbevtt > 1:
name += 'evt%d' % m
path = gdat.pathinit + name + '.pdf'
figr.savefig(path)
plt.close(figr)
if gdat.numbpixl > 1:
for i in gdat.indxener:
for m in gdat.indxevtt:
figr, axis, path = init_figr(gdat, None, 'post', 'expo', '', '', i, m, -1)
imag = retr_imag(gdat, axis, gdat.expo, None, None, 'expo', i, m)
make_cbar(gdat, axis, imag, i)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
def plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \
strgvarb='defl', nameparagenrelem='', indxdefl=None, indxpoplplot=-1, multfact=1., indxenerplot=None, indxevttplot=None):
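# Plot the deflection field as a quiver map, optionally restricted to a single
# deflection component (indxdefl) and scaled by multfact.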
if indxdefl is not None:
strgvarb += 'sing'
strgvarb = strgvarb + nameparagenrelem
defl = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn)
defl *= multfact
if indxenerplot is not None:
defl = defl[indxenerplot, :, indxevttplot, ...]
if indxdefl is not None:
defl = defl[..., indxdefl]
strgvarb += '%04d' % indxdefl
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
figr, axis, path = init_figr(gdat, gdatmodi, strgpdfn, strgvarb, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot)
make_legdmaps(gdat, strgstat, strgmodl, axis)
draw_frambndr(gdat, axis)
defllgal = defl[:, :, 0]
deflbgal = defl[:, :, 1]
fact = 4
ptch = axis.quiver(gdat.anglfact * gdat.lgalgridcart[::fact, ::fact], gdat.anglfact * gdat.bgalgridcart[::fact, ::fact], \
gdat.anglfact * defllgal[::fact, ::fact], gdat.anglfact * deflbgal[::fact, ::fact], scale_units='xy', angles='xy', scale=1)
supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis)
plt.subplots_adjust(left=0.2, bottom=0.15, top=0.75, right=0.85)
savefigr(gdat, gdatmodi, figr, path)
plt.close(figr)
def plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgvarb, indxenerplot=None, indxevttplot=-1, strgcbar=None, \
booltdim=False, indxpoplplot=-1, strgmome='pmea'):
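# Plot a generic map (e.g., data or model counts) for the given state, model
# and PDF, with a color bar, legends and, optionally, the elements overplotted.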
gmod = getattr(gdat, strgmodl)
if strgcbar is None:
strgcbar = strgvarb
# construct the string for the map
if strgvarb == 'cntpdata':
strgplot = strgvarb
else:
if strgstat == 'post':
strgtemp = strgmome + strgpdfn
else:
strgtemp = ''
strgplot = strgtemp + strgvarb
figr, axis, path = init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot)
maps = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn)
imag = retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot, indxevttplot, booltdim=booltdim)
make_cbar(gdat, axis, imag, strgvarb)
make_legdmaps(gdat, strgstat, strgmodl, axis)
if gdat.boolsuprelem:
supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot)
if gdat.typeverb > 1:
print('strgvarb')
print(strgvarb)
plt.tight_layout()
savefigr(gdat, gdatmodi, figr, path)
plt.close(figr)
def init( \
# user interaction
## type of verbosity
typeverb=1, \
## path in which PCAT data lives
pathpcat=None, \
# miscellaneous
## type of PDF to sample from
strgpdfn='post', \
# data
## type of data
### 'mock': simulated data
### 'inpt': input data
### 'real': real data retrieved from databases
typedata=None, \
## type of experiment
typeexpr='user', \
# diagnostics
## Boolean to enter the diagnostic mode
booldiagmode=True, \
## squeeze exposure to check the low sample limit
boolsqzeexpo=False, \
## explode exposure to check the large sample limit
boolexplexpo=False, \
## squeeze proposal scale to check the acceptance ratio
boolsqzeprop=False, \
## explode proposal scale to check the acceptance ratio
boolexplprop=False, \
## Boolean to thin down the data
boolthindata=False, \
## factor by which to thin down the data
factthin=None, \
# reference catalog
## Boolean to use the reference catalogs to associate
boolasscrefr=None, \
# sampling
## Boolean flag to make burn-in tempered
boolburntmpr=False, \
## number of sweeps
numbswep=100000, \
## number of samples
numbsamp=None, \
## number of initial sweeps to be burned
numbburn=None, \
# output
## Boolean to make condensed catalog
boolcondcatl=True, \
refrlabltotl=None, \
refrlablpopl=None, \
fittlablpopl=None, \
# numpy RNG seed
seedtype=0, \
## Boolean flag to re-seed each chain separately
boolseedchan=True, \
## optional deterministic seed for sampling element parameters
seedelem=None, \
indxevttincl=None, \
indxenerincl=None, \
listmask=None, \
# number of samples for Bootstrap
numbsampboot=None, \
listnamefeatsele=None, \
# type of mask for the exposure map
typemaskexpo='ignr', \
# type of exposure
## 'cons': constant
## 'file': provided in a file
typeexpo='cons', \
# maximum spatial distance out to which element kernel will be evaluated
maxmangleval=None, \
# initial state
initpsfprefr=False, \
initpsfp=None, \
# evaluate the likelihood inside circles around elements
typeelemspateval=None, \
namestattrue=None, \
# plotting
## Boolean flag to make the frame plots short
boolshrtfram=True, \
boolrefeforc=False, \
indxrefrforc=None, \
## Boolean to overplot the elements
boolsuprelem=True, \
## Boolean to plot the correlation between elements
boolplotelemcorr=True, \
## Boolean flag to vary the PSF
boolmodipsfn=False, \
# name of the configuration
strgcnfg=None, \
# model
## number of spatial dimensions
numbspatdims=2, \
# hyperparameters
fittampldisttype=None, \
# metamodel settings
## kernel evaluation type
kernevaltype='ulip', \
# photometric model
## base parameters
### Sersic type
typesers='vauc', \
## transdimensional parameters (elements)
### vary projected scale radius
variasca=True, \
### vary projected cutoff radius
variacut=True, \
# prior
penalpridiff=False, \
priotype='logt', \
priofactdoff=None, \
# initialization
## initialization type
inittype=None, \
loadvaripara=False, \
# save the state of the MCMC
savestat=False, \
namesavestat=None, \
# recover the state from a previous run
namerecostat=None, \
forcsavestat=False, \
# proposals
## Boolean flag to turn on proposals on element parameters
boolpropcomp=True, \
boolpropcova=True, \
propwithsing=True, \
# type of covariance estimation
typeopti='none', \
# modes of operation
## only generate and plot mock data
boolmockonly=False, \
## perform an additional run sampling from the prior
checprio=False, \
strgexprsbrt=None, \
anglassc=None, \
nameexpr=None, \
# likelihood dependent
## exposure map
expo=None, \
lgalprio=None, \
bgalprio=None, \
minmcntpdata=None, \
strgexpo=None, \
# number of processors
numbproc=None, \
# likelihood function
liketype='pois', \
# user-defined likelihood function
retr_llik=None, \
anlytype=None, \
lgalcntr=0., \
bgalcntr=0., \
maxmangl=None, \
# spatial grid
## type of spatial pixelization
typepixl=None, \
## Boolean flag to force Cartesian spatial grid
boolforccart=False, \
# number of pixels on a side in the Cartesian grid
numbsidecart=None, \
# Nside in Healpix
numbsideheal=256, \
allwfixdtrue=True, \
asscmetrtype='dist', \
# plotting
numbswepplot=None, \
# Boolean flag to make the frame plots only for the central energy and PSF bin
boolmakeframcent=True, \
makeplot=True, \
makeplotinit=True, \
makeplotfram=True, \
makeplotfinlprio=True, \
makeplotfinlpost=True, \
makeplotintr=False, \
scalmaps='asnh', \
makeanim=True, \
strgenerfull=None, \
strgexprname=None, \
strganglunit=None, \
strganglunittext=None, \
anglfact=None, \
limtydathistfeat=None, \
# model
# emission
## elements
## PSF
specfraceval=None, \
numbangl=1000, \
binsangltype='logt', \
numbsidepntsprob=100, \
listprefsbrtsbrt=None, \
listprefsbrtener=None, \
listprefsbrtlabltotl=None, \
lablgangunit=None, \
labllgal=None, \
lablbgal=None, \
lablfluxunit=None, \
lablflux=None, \
strgenerunit=None, \
indxenerfull=None, \
indxevttfull=None, \
binsenerfull=None, \
asymfluxprop=False, \
## Boolean flag to make the PSF model informed
boolpriopsfninfo=False, \
## spectral
# lensing
fittrelnpowr=0., \
# temp
margfactmodl=1., \
maxmgangdata=None, \
# proposals
stdvprophypr=0.01, \
stdvproppsfp=0.1, \
stdvpropbacp=0.01, \
stdvproplenp=1e-4, \
stdvlgal=0.001, \
stdvbgal=0.001, \
stdvflux=0.001, \
stdvspep=0.001, \
stdvspmrsind=0.2, \
varistdvlbhl=True, \
rtagmock=None, \
## transdimensional proposal probabilities
probtran=None, \
probspmr=None, \
# when proposing from the covariance, fracproprand should be very small!
fracproprand=0., \
# standard deviation of the Gaussian from which the angular splitting will be drawn for splits and merges
radispmr=None, \
defa=False, \
**args \
):
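# Usage sketch (hypothetical; assumes the module is importable as pcat and the
# argument values are purely illustrative):
#
#     import pcat
#     gdat = pcat.init(typeexpr='gene', typedata='mock', numbswep=100000)
#
# When defa=True, init() returns the constructed global object without sampling.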
# preliminary setup
# construct the global object
gdat = tdpy.gdatstrt()
for attr, valu in locals().items():
if '__' not in attr and attr != 'gdat':
setattr(gdat, attr, valu)
# copy all provided inputs to the global object
for strg, valu in args.items():
setattr(gdat, strg, valu)
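# At this point every keyword argument of init() (including any extra keyword
# passed through args) is an attribute of gdat, e.g., gdat.numbswep, so
# downstream routines only need the global object.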
# PCAT folders
if gdat.pathpcat is None:
gdat.pathpcat = os.environ["PCAT_DATA_PATH"] + '/'
if gdat.pathpcat[-1] != '/':
gdat.pathpcat += '/'
gdat.pathdata = gdat.pathpcat + 'data/'
gdat.pathdataopti = gdat.pathdata + 'opti/'
gdat.pathimag = gdat.pathpcat + 'imag/'
gdat.pathoutp = gdat.pathdata + 'outp/'
gdat.pathinpt = gdat.pathdata + 'inpt/'
# list of parameter groups
gdat.liststrggroppara = ['genrbase', 'genrelem', 'derifixd', 'derielem', 'genrelemextd', 'derielemextd', 'kind', 'full']
# list of parameter features to be turned into lists
gdat.listfeatparalist = ['minm', 'maxm', 'fact', 'scal', 'lablroot', 'lablunit', 'stdv', 'labltotl', 'name']
# list of parameter features
gdat.listfeatpara = gdat.listfeatparalist + ['limt', 'bins', 'delt', 'numb', 'indx', 'cmap', 'mean', 'tick', 'numbbins', 'valutickmajr', 'labltickmajr', 'valutickminr', 'labltickminr']
# run tag
gdat.strgswep = '%d' % (gdat.numbswep)
## time stamp
gdat.strgtimestmp = tdpy.retr_strgtimestmp()
## name of the configuration function
if gdat.strgcnfg is None:
gdat.strgcnfg = inspect.stack()[1][3]
gdat.strgvers = 'v0.3'
if gdat.typeverb > 0:
print('PCAT %s started at %s.' % (gdat.strgvers, gdat.strgtimestmp))
print('Configuration %s' % gdat.strgcnfg)
# string describing the number of sweeps
gdat.strgnumbswep = '%d' % gdat.numbswep
# output paths
gdat.rtag = retr_rtag(gdat.strgcnfg, gdat.strgnumbswep)
gdat.pathoutprtag = retr_pathoutprtag(gdat.pathpcat, gdat.rtag)
# physical constants
gdat.prsccmtr = 3.086e18
gdat.ergsgevv = 624.151
gdat.factnewtlght = 2.09e13 # Msun / pc
gdat.listnamepdir = ['forw', 'reve']
gdat.listlablpdir = ['f', 'r']
# number of standard deviations around mean of Gaussian-distributed variables
gdat.numbstdvgaus = 4.
# start the timer
gdat.timerealtotl = time.time()
gdat.timeproctotl = time.process_time()
# list of parameter types
## 'genr': generative parameters
## 'deri': derived parameters
gdat.liststrgtypepara = ['genr', 'deri']
booltemp = chec_statfile(gdat.pathpcat, gdat.rtag, 'gdatmodi')
if booltemp:
print('gdatmodi already exists. Skipping...')
else:
# create output folder for the run
os.system('mkdir -p %s' % gdat.pathoutprtag)
# write the list of arguments to file
fram = inspect.currentframe()
listargs, temp, temp, listargsvals = inspect.getargvalues(fram)
fileargs = open(gdat.pathoutprtag + 'cmndargs.txt', 'w')
fileargs.write('PCAT call arguments\n')
for args in listargs:
fileargs.write('%s = %s\n' % (args, listargsvals[args]))
fileargs.close()
# write the list of arguments to file
fileargs = open(gdat.pathoutprtag + 'args.txt', 'w')
fileargs.write('PCAT call arguments\n')
for args in listargs:
fileargs.write('%20s %s\n' % (args, listargsvals[args]))
fileargs.close()
# defaults
if gdat.typedata is None:
if gdat.strgexprsbrt is None:
gdat.typedata = 'mock'
else:
gdat.typedata = 'inpt'
print('gdat.typedata')
print(gdat.typedata)
# list of models
gdat.liststrgmodl = []
if gdat.typedata == 'mock':
gdat.liststrgmodl += ['true']
gdat.liststrgmodl += ['fitt']
gdat.refr = tdpy.gdatstrt()
gdat.listgmod = []
for strgmodl in gdat.liststrgmodl + ['refr']:
setattr(gdat, strgmodl, tdpy.gdatstrt())
gmod = getattr(gdat, strgmodl)
for strgstat in ['this', 'next']:
setattr(gmod, strgstat, tdpy.gdatstrt())
for strgfeatpara in gdat.listfeatpara:
setattr(gmod, strgfeatpara + 'para', tdpy.gdatstrt())
gdat.listgmod += [gmod]
for strgfeatpara in gdat.listfeatpara:
setattr(gdat, strgfeatpara + 'para', tdpy.gdatstrt())
## number of processes
gdat.strgproc = os.uname()[1]
if gdat.numbproc is None:
gdat.numbproc = 1
if gdat.typedata == 'inpt' and gdat.rtagmock is not None:
print('Will use %s to account for selection effects.' % gdat.rtagmock)
gdat.pathoutprtagmock = retr_pathoutprtag(gdat.pathpcat, gdat.rtagmock)
## number of burned sweeps
if gdat.numbburn is None:
print('gdat.numbswep')
print(gdat.numbswep)
gdat.numbburn = int(gdat.numbswep / 10)
print('gdat.numbburn')
print(gdat.numbburn)
# burn-in
gdat.factburntmpr = 0.75
gdat.numbburntmpr = gdat.factburntmpr * gdat.numbburn
if (gdat.boolsqzeprop or gdat.boolexplprop) and gdat.typeopti == 'hess':
raise Exception('Proposal scale squeezing or explosion cannot be used with Hessian-based optimization.')
print('gdat.boolpriopsfninfo')
print(gdat.boolpriopsfninfo)
print('gdat.typeexpr')
print(gdat.typeexpr)
## factor by which to thin the sweeps to get samples
if gdat.factthin is not None and gdat.numbsamp is not None:
raise Exception('Both factthin and numbsamp cannot be provided at the same time.')
elif gdat.factthin is None and gdat.numbsamp is None:
gdat.factthin = int(np.ceil(1e-3 * (gdat.numbswep - gdat.numbburn)))
gdat.numbsamp = int((gdat.numbswep - gdat.numbburn) / gdat.factthin)
elif gdat.numbsamp is not None:
gdat.factthin = int((gdat.numbswep - gdat.numbburn) / gdat.numbsamp)
elif gdat.factthin is not None:
gdat.numbsamp = int((gdat.numbswep - gdat.numbburn) / gdat.factthin)
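# Worked example with the defaults: numbswep=100000 gives
# numbburn = 100000 / 10 = 10000, factthin = ceil(1e-3 * 90000) = 90 and
# numbsamp = 90000 / 90 = 1000 samples per chain.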
if not isinstance(gdat.numbsamp, int) or not isinstance(gdat.factthin, int) or \
not isinstance(gdat.numbburn, int) or not isinstance(gdat.numbswep, int):
print('gdat.numbsamp')
print(gdat.numbsamp)
print('gdat.factthin')
print(gdat.factthin)
print('gdat.numbburn')
print(gdat.numbburn)
print('gdat.numbswep')
print(gdat.numbswep)
raise Exception('Number of samples is not an integer.')
# samples to be saved
gdat.indxsamp = np.arange(gdat.numbsamp)
# samples to be saved from all chains
gdat.numbsamptotl = gdat.numbsamp * gdat.numbproc
gdat.indxsamptotl = np.arange(gdat.numbsamptotl)
gdat.numbsweptotl = gdat.numbswep * gdat.numbproc
if gdat.typeverb > 0:
print('%d sweeps will be performed, discarding the first %d as burn-in. The chain will be thinned by a factor of %d.' % \
(gdat.numbswep, gdat.numbburn, gdat.factthin))
print('The resulting chain will contain %d samples per chain and %d samples in total.' % (gdat.numbsamp, gdat.numbsamptotl))
if gdat.anlytype is None:
if gdat.typeexpr == 'chan':
gdat.anlytype = 'home'
elif gdat.typeexpr == 'ferm':
gdat.anlytype = 'rec8pnts'
else:
gdat.anlytype = 'nomi'
if gdat.priofactdoff is None:
gdat.priofactdoff = 1.
# experiment defaults
if gdat.typeexpr == 'ferm':
gdat.lablenerunit = 'GeV'
if gdat.typeexpr == 'chan':
gdat.lablenerunit = 'keV'
if gdat.typeexpr == 'gene':
gdat.lablenerunit = ''
if gdat.typeexpr == 'fire':
gdat.lablenerunit = r'$\mu$m$^{-1}$'
if gdat.typeexpr == 'ferm':
if gdat.anlytype[4:8] == 'pnts':
bins = np.logspace(np.log10(0.3), np.log10(10.), 4)
if gdat.anlytype[4:8] == 'back':
bins = np.logspace(np.log10(0.3), np.log10(300.), 31)
if gdat.typeexpr == 'chan':
if gdat.anlytype.startswith('home'):
bins = np.array([0.5, 0.91, 1.66, 3.02, 5.49, 10.])
if gdat.anlytype.startswith('extr'):
bins = np.array([0.5, 2., 8.])
if gdat.anlytype.startswith('spec'):
bins = np.logspace(np.log10(0.5), np.log10(10.), 21)
if gdat.typeexpr == 'fire':
bins = np.logspace(np.log10(1. / 2.5e-6), np.log10(1. / 0.8e-6), 31)
if gdat.typeexpr == 'hubb':
# temp
#bins = np.array([500., 750, 1000.])
bins = np.array([750, 1000.])
if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'fire' or gdat.typeexpr == 'hubb':
setp_varb(gdat, 'enerfull', bins=bins)
setp_varb(gdat, 'numbpixl', lablroot='$N_{pix}$')
if gdat.expo is not None:
setp_varb(gdat, 'expo', minm=np.amin(gdat.expo), maxm=np.amax(gdat.expo), lablroot=r'$\epsilon$', cmap='OrRd', scal='logt')
# energy band string
if gdat.strgenerfull is None:
if gdat.typeexpr == 'tess':
gdat.strgenerfull = ['T']
if gdat.typeexpr == 'sdss':
gdat.strgenerfull = ['z-band', 'i-band', 'r-band', 'g-band', 'u-band']
if gdat.typeexpr == 'hubb':
#gdat.strgenerfull = ['F606W', 'F814W']
gdat.strgenerfull = ['F814W']
if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'fire':
gdat.strgenerfull = []
for i in range(len(gdat.binspara.enerfull) - 1):
gdat.strgenerfull.append('%.3g %s - %.3g %s' % (gdat.binspara.enerfull[i], gdat.lablenerunit, gdat.binspara.enerfull[i+1], gdat.lablenerunit))
if gdat.typeexpr == 'gene':
gdat.strgenerfull = ['']
## PSF class
if gdat.indxevttfull is None:
if gdat.typeexpr == 'ferm':
gdat.indxevttfull = np.arange(2)
else:
gdat.indxevttfull = np.arange(1)
if gdat.indxevttincl is None:
if gdat.typeexpr == 'ferm':
gdat.indxevttincl = np.array([0, 1])
else:
gdat.indxevttincl = np.arange(1)
if gdat.indxevttincl is not None:
gdat.evttbins = True
else:
gdat.evttbins = False
if gdat.evttbins:
gdat.numbevtt = gdat.indxevttincl.size
gdat.numbevttfull = gdat.indxevttfull.size
else:
gdat.numbevtt = 1
gdat.numbevttfull = 1
gdat.indxevttincl = np.array([0])
gdat.indxevtt = np.arange(gdat.numbevtt)
# Boolean flag to indicate that the data are binned in energy
if gdat.typeexpr == 'gene':
gdat.boolbinsener = False
else:
gdat.boolbinsener = True
if gdat.boolbinsener:
gdat.numbenerfull = len(gdat.strgenerfull)
else:
gdat.numbenerfull = 1
gdat.indxenerfull = np.arange(gdat.numbenerfull)
if gdat.typepixl is None:
if gdat.typeexpr == 'ferm':
gdat.typepixl = 'heal'
else:
gdat.typepixl = 'cart'
if gdat.boolbinsener:
gdat.meanpara.enerfull = np.sqrt(gdat.binspara.enerfull[1:] * gdat.binspara.enerfull[:-1])
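# The bin centers are geometric means, appropriate for logarithmically spaced
# energy bins; e.g., bin edges [0.3, 1., 3.] GeV yield centers [0.55, 1.73] GeV.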
setp_varb(gdat, 'boolmodipsfn', valu=False, strgmodl='fitt')
# default values for model types
print('Starting to determine the default values for model types using setp_varbvalu()...')
if gdat.typeexpr == 'hubb':
typeemishost = 'sers'
else:
typeemishost = 'none'
setp_varb(gdat, 'typeemishost', valu=typeemishost)
setp_varb(gdat, 'lliktotl', lablroot='$L$')
### background type
#### template
if gdat.typeexpr == 'ferm':
if gdat.anlytype == 'bfun':
gdat.ordrexpa = 10
gdat.numbexpasing = gdat.ordrexpa**2
gdat.numbexpa = gdat.numbexpasing * 4
gdat.indxexpa = np.arange(gdat.numbexpa)
typeback = ['bfun%04d' % k for k in gdat.indxexpa]
else:
typeback = [1., 'sbrtfdfmsmthrec8pntsnorm.fits']
if gdat.typeexpr == 'chan':
# particle background
if gdat.anlytype.startswith('spec'):
# temp -- this is fake!
sbrtparttemp = np.array([70.04, 70.04, 12.12, 15.98, 10.79, 73.59, 73.59])
binsenerpart = np.logspace(np.log10(0.5), np.log10(10.), 6)
meanenerpart = np.sqrt(binsenerpart[:-1] * binsenerpart[1:])
meanenerparttemp = np.concatenate((np.array([0.5]), meanenerpart, np.array([10.])))
typebacktemp = np.interp(gdat.meanpara.enerfull, meanenerparttemp, sbrtparttemp)
if gdat.anlytype.startswith('home') :
typebacktemp = 1.
#typebacktemp = np.array([70.04, 12.12, 15.98, 10.79, 73.59]) / 70.04
if gdat.anlytype.startswith('extr'):
#typebacktemp = 'sbrtchanback' + gdat.anlytype + '.fits'
typebacktemp = 1.
if gdat.anlytype.startswith('spec'):
typeback = [[1e2, 2.], typebacktemp]
else:
typeback = [1., typebacktemp]
if gdat.typeexpr == 'hubb':
typeback = [1.]
if gdat.typeexpr == 'tess':
typeback = [1.]
if gdat.typeexpr == 'gene':
typeback = [1.]
if gdat.typeexpr == 'fire':
typeback = [1.]
if gdat.typeexpr != 'user':
setp_varb(gdat, 'typeback', valu=typeback)
if gdat.typeexpr == 'hubb':
numbsersfgrd = 1
else:
numbsersfgrd = 0
setp_varb(gdat, 'numbsersfgrd', valu=numbsersfgrd)
if gdat.typeexpr == 'gene':
typeelem = ['clus']
if gdat.typeexpr == 'ferm':
typeelem = ['lghtpnts']
if gdat.typeexpr == 'tess':
typeelem = ['lghtpnts']
if gdat.typeexpr == 'chan':
typeelem = ['lghtpnts']
if gdat.typeexpr == 'hubb':
typeelem = ['lghtpnts', 'lens', 'lghtgausbgrd']
if gdat.typeexpr == 'fire':
typeelem = ['lghtlineabso']
if gdat.typeexpr == 'user':
typeelem = ['user']
setp_varb(gdat, 'typeelem', valu=typeelem)
print('gdat.fitt.typeelem')
print(gdat.fitt.typeelem)
### PSF model
#### angular profile
if gdat.typeexpr == 'ferm':
typemodlpsfn = 'doubking'
if gdat.typeexpr == 'chan':
typemodlpsfn = 'singking'
if gdat.typeexpr == 'sdss':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'hubb':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'tess':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'gene':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'fire':
typemodlpsfn = None
if gdat.typeexpr != 'user':
setp_varb(gdat, 'typemodlpsfn', valu=typemodlpsfn)
#### background names
listnameback = ['isot']
if gdat.typeexpr == 'ferm':
listnameback.append('fdfm')
#if gdat.typeexpr == 'chan':
# listnameback.append('part')
setp_varb(gdat, 'listnameback', valu=listnameback)
if gdat.strgpdfn == 'prio':
gdat.lablsampdist = 'Prior'
if gdat.strgpdfn == 'post':
gdat.lablsampdist = 'Posterior'
for strgmodl in gdat.liststrgmodl:
# set up the indices of the model
setp_indxpara(gdat, 'init', strgmodl=strgmodl)
if gdat.numbswepplot is None:
gdat.numbswepplot = 50000
gdat.numbplotfram = gdat.numbswep / gdat.numbswepplot
#setp_varb(gdat, 'colr', valu='mediumseagreen', strgmodl='refr')
setp_varb(gdat, 'colr', valu='b', strgmodl='fitt')
if gdat.typedata == 'mock':
setp_varb(gdat, 'colr', valu='g', strgmodl='true')
#gdat.refr.colr = 'mediumseagreen'
#gdat.fitt.colr = 'deepskyblue'
gdat.minmmass = 1.
gdat.maxmmass = 10.
if gdat.checprio:
gdat.liststrgpdfn = ['prio', 'post']
else:
gdat.liststrgpdfn = ['post']
gdat.lablmass = 'M'
gdat.minmmassshel = 1e1
gdat.maxmmassshel = 1e5
gdat.lablmassshel = '$M_r$'
gdat.lablcurv = r'\kappa'
gdat.lablexpc = r'E_{c}'
gmod.scalcurvplot = 'self'
gmod.scalexpcplot = 'self'
#gdat.minmper0 = 1e-3
#gdat.maxmper0 = 1e1
#
#gdat.minmmagf = 10**7.5
#gdat.maxmmagf = 10**16
# temp -- automate this eventually
#gmod.minmper0 = gdat.minmper0
#gmod.maxmper0 = gdat.maxmper0
#gmod.minmmagf = gdat.minmmagf
#gmod.maxmmagf = gdat.maxmmagf
gdat.fitt.listelemmrkr = ['+', '_', '3']
gdat.true.listmrkrhits = ['x', '|', '4']
gdat.true.listmrkrmiss = ['s', 'o', 'p']
gdat.true.listlablmiss = ['s', 'o', 'p']
# list of scalings
gdat.listscaltype = ['self', 'logt', 'atan', 'gaus', 'pois', 'expo']
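# Scaling codes (meanings inferred from their usage in this module): 'self'
# (linear), 'logt' (logarithmic), 'atan' (arctangent-compressed), 'gaus'
# (Gaussian), 'pois' (Poisson) and 'expo' (exponential).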
# number of grids
gdat.numbgrid = 1
gdat.indxgrid = np.arange(gdat.numbgrid)
if gdat.typepixl == 'heal' and gdat.boolforccart:
raise Exception('Cartesian forcing can only be used when typepixl is cart.')
gdat.liststrgphas = ['fram', 'finl', 'anim']
gdat.liststrgelemtdimtype = ['bind']
# lensing
## list of strings indicating different methods of calculating the subhalo mass fraction
gdat.liststrgcalcmasssubh = ['delt', 'intg']
# input data
if gdat.typedata == 'inpt':
path = gdat.pathinpt + gdat.strgexprsbrt
gdat.sbrtdata = astropy.io.fits.getdata(path)
if gdat.typepixl == 'heal' or (gdat.typepixl == 'cart' and gdat.boolforccart):
if gdat.sbrtdata.ndim != 3:
raise Exception('exprsbrtdata should be a 3D numpy array if the pixelization is HealPix.')
else:
if gdat.sbrtdata.ndim != 4:
raise Exception('exprsbrtdata should be a 4D numpy array if the pixelization is Cartesian.')
if gdat.typepixl == 'cart' and not gdat.boolforccart:
gdat.sbrtdata = gdat.sbrtdata.reshape((gdat.sbrtdata.shape[0], -1, gdat.sbrtdata.shape[3]))
gdat.numbenerfull = gdat.sbrtdata.shape[0]
if gdat.typepixl == 'heal':
gdat.numbpixlfull = gdat.sbrtdata.shape[1]
elif gdat.boolforccart:
gdat.numbpixlfull = gdat.numbsidecart**2
else:
gdat.numbpixlfull = gdat.sbrtdata.shape[1] * gdat.sbrtdata.shape[2]
gdat.numbevttfull = gdat.sbrtdata.shape[2]
if gdat.typepixl == 'heal':
# temp
gdat.numbsidecart = 100
gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)
gdat.numbsideheal = int(np.sqrt(gdat.numbpixlfull / 12))
if gdat.typeexpr == 'hubb':
gdat.hubbexpofact = 1.63050e-19
if gdat.strgexpo is None:
if gdat.typeexpr == 'ferm':
gdat.strgexpo = 'expofermrec8pntsigal0256.fits'
if gdat.typeexpo is None:
if gdat.typeexpr == 'ferm':
gdat.typeexpo = 'file'
else:
gdat.typeexpo = 'cons'
if gdat.typeverb > 1:
print('gdat.strgexpo')
print(gdat.strgexpo)
## generative model
# the factor to convert radians (i.e., internal angular unit of PCAT) to the angular unit that will be used in the output (i.e., plots and tables)
if gdat.anglfact is None:
if gdat.typeexpr == 'ferm':
gdat.anglfact = 180. / np.pi
if gdat.typeexpr == 'tess':
gdat.anglfact = 60 * 180. / np.pi
if gdat.typeexpr == 'sdss' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'hubb':
gdat.anglfact = 3600 * 180. / np.pi
if gdat.typeexpr == 'sche' or gdat.typeexpr == 'gene':
gdat.anglfact = 1.
if gdat.numbsidecart is not None and gdat.typepixl == 'cart' and not gdat.boolforccart and isinstance(gdat.strgexpo, str):
raise Exception('numbsidecart argument should not be provided when strgexpo is a file name and the pixelization is Cartesian.')
if gdat.typepixl == 'heal' or (gdat.typepixl == 'cart' and gdat.boolforccart):
if gdat.numbsidecart is None:
gdat.numbsidecart = 100
# exposure
gdat.boolcorrexpo = gdat.expo is not None
if gdat.typeexpo == 'cons':
if gdat.typedata == 'mock':
if gdat.numbsidecart is None:
gdat.numbsidecart = 100
if gdat.typedata == 'mock':
if gdat.typepixl == 'heal':
gdat.expo = np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gdat.typepixl == 'cart':
gdat.expo = np.ones((gdat.numbenerfull, gdat.numbsidecart**2, gdat.numbevttfull))
if gdat.typedata == 'inpt':
gdat.expo = np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gdat.typeexpo == 'file':
path = gdat.pathinpt + gdat.strgexpo
if gdat.typeverb > 0:
print('Reading %s...' % path)
gdat.expo = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
gdat.expo = gdat.expo.reshape((gdat.expo.shape[0], -1, gdat.expo.shape[-1]))
if gdat.numbsidecart is None:
# temp -- gdat.numbsidecart takes the value of the region 0
if np.sqrt(gdat.expo.shape[1]) % 1. != 0.:
raise Exception('The exposure map must have a square number of pixels when numbsidecart is inferred from it.')
gdat.numbsidecart = int(np.sqrt(gdat.expo.shape[1]))
if gdat.typedata == 'mock':
if gdat.typepixl == 'cart':
gdat.numbpixlfull = gdat.numbsidecart**2
if gdat.typepixl == 'heal':
gdat.numbpixlfull = 12 * gdat.numbsideheal**2
# initialization type
if gdat.inittype is None:
gdat.inittype = 'rand'
if gdat.typeexpr != 'user':
# Boolean flag to indicate binning in space
gdat.boolbinsspat = gdat.numbpixlfull != 1
print('gdat.boolbinsspat')
print(gdat.boolbinsspat)
if gdat.boolcorrexpo and np.amin(gdat.expo) == np.amax(gdat.expo) and not isinstance(gdat.strgexpo, float):
raise Exception('Bad input exposure map.')
if gdat.boolbinsspat:
if gdat.typepixl == 'cart' and isinstance(gdat.strgexpo, float) and gdat.typedata == 'inpt':
if np.sqrt(gdat.sbrtdata.shape[1]) % 1. != 0.:
raise Exception('The input surface brightness data must have a square number of pixels when numbsidecart is inferred from them.')
gdat.numbsidecart = int(np.sqrt(gdat.sbrtdata.shape[1]))
gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)
if gdat.typepixl == 'cart':
gdat.numbpixlcart = gdat.numbsidecart**2
### spatial extent of the data
if gdat.maxmgangdata is None:
if gdat.typeexpr == 'chan':
gdat.maxmgangdata = 0.492 / gdat.anglfact * gdat.numbsidecarthalf
if gdat.typeexpr == 'ferm':
gdat.maxmgangdata = 15. / gdat.anglfact
if gdat.typeexpr == 'tess':
gdat.maxmgangdata = 20. / gdat.anglfact
if gdat.typeexpr == 'hubb':
gdat.maxmgangdata = 2. / gdat.anglfact
if gdat.typeexpr == 'gene':
gdat.maxmgangdata = 1. / gdat.anglfact
print('gdat.numbsidecart')
print(gdat.numbsidecart)
print('gdat.maxmgangdata')
print(gdat.maxmgangdata)
# pixelization
if gdat.typepixl == 'cart':
gdat.apix = (2. * gdat.maxmgangdata / gdat.numbsidecart)**2
if gdat.typepixl == 'heal':
temp, temp, temp, gdat.apix = tdpy.retr_healgrid(gdat.numbsideheal)
gdat.sizepixl = np.sqrt(gdat.apix)
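# For the Cartesian grid the pixel solid angle follows from the side length:
# e.g., a 100 x 100 grid spanning +-maxmgangdata has pixels of side
# 2 * maxmgangdata / 100, and apix is the square of that side.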
# factor by which to multiply the y axis limits of the surface brightness plot
if gdat.numbpixlfull == 1:
gdat.factylimtbrt = [1e-4, 1e7]
else:
gdat.factylimtbrt = [1e-4, 1e3]
# grid
gdat.minmlgaldata = -gdat.maxmgangdata
gdat.maxmlgaldata = gdat.maxmgangdata
gdat.minmbgaldata = -gdat.maxmgangdata
gdat.maxmbgaldata = gdat.maxmgangdata
if gdat.typepixl == 'cart' and gdat.boolforccart:
if gdat.typedata == 'inpt':
sbrtdatatemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtdatatemp[i, :, m] = tdpy.retr_cart(gdat.sbrtdata[i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gdat.sbrtdata = sbrtdatatemp
if gdat.boolcorrexpo:
expotemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
expotemp[i, :, m] = tdpy.retr_cart(gdat.expo[i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gdat.expo = expotemp
gdat.sdenunit = 'degr'
gdat.factergskevv = 1.6e-9
if gdat.typeexpr == 'ferm':
gdat.listspecconvunit = [['en02', 'gevv']]
if gdat.typeexpr == 'chan':
gdat.listspecconvunit = [['en00', 'kevv'], ['en02', 'kevv'], ['en02', 'ergs'], ['en03', 'ergs', '0520', 0.5, 2.], \
['en03', 'ergs', '0210', 2., 10.], \
['en03', 'ergs', '0510', 0.5, 10.], \
['en03', 'ergs', '0208', 2., 8.], \
['en03', 'ergs', '0508', 0.5, 8.], \
['en03', 'ergs', '0207', 2., 7.], \
['en03', 'ergs', '0507', 0.5, 7.]]
if gdat.typeexpr == 'hubb':
gdat.listspecconvunit = [['en03', 'ergs']]
if gdat.typeexpr == 'fire':
gdat.listspecconvunit = [['en00', 'imum']]
# temp
#if gdat.typeexpr == 'chan' and (gdat.anlytype.startswith('home') or gdat.anlytype.startswith('extr')):
# gmod.lablpopl = ['AGN', 'Galaxy']
if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'fire':
gdat.enerdiff = True
if gdat.typeexpr == 'hubb' or gdat.typeexpr == 'gene' or gdat.typeexpr == 'tess':
gdat.enerdiff = False
if gdat.indxenerincl is None:
# default
if gdat.boolbinsener:
gdat.indxenerincl = np.arange(gdat.binspara.enerfull.size - 1)
if gdat.typeexpr == 'ferm':
if gdat.anlytype[4:8] == 'pnts':
gdat.indxenerincl = np.arange(3)
if gdat.anlytype[4:8] == 'back':
gdat.indxenerincl = np.arange(30)
if gdat.typeexpr == 'chan':
if gdat.anlytype.startswith('home'):
gdat.indxenerincl = np.arange(5)
if gdat.anlytype.startswith('extr'):
gdat.indxenerincl = np.arange(2)
if gdat.typeexpr == 'hubb':
gdat.indxenerincl = np.array([0])
#gdat.indxenerincl = np.array([1])
#gdat.indxenerincl = np.array([0, 1])
if gdat.typeexpr == 'gene':
gdat.indxenerincl = np.array([0])
if gdat.indxenerincl is None:
gdat.numbener = 1
else:
gdat.numbener = gdat.indxenerincl.size
gdat.indxener = np.arange(gdat.numbener, dtype=int)
if gdat.indxenerincl is None:
gdat.indxenerincl = gdat.indxener
if gdat.boolbinsener:
gdat.indxenerinclbins = np.empty(gdat.numbener+1, dtype=int)
gdat.indxenerinclbins[0:-1] = gdat.indxenerincl
gdat.indxenerinclbins[-1] = gdat.indxenerincl[-1] + 1
gdat.indxenerpivt = 0
gdat.numbenerplot = 100
gdat.strgener = [gdat.strgenerfull[k] for k in gdat.indxenerincl]
gdat.binspara.ener = gdat.binspara.enerfull[gdat.indxenerinclbins]
gdat.meanpara.ener = np.sqrt(gdat.binspara.ener[1:] * gdat.binspara.ener[:-1])
gdat.deltener = gdat.binspara.ener[1:] - gdat.binspara.ener[:-1]
gdat.minmener = gdat.binspara.ener[0]
gdat.maxmener = gdat.binspara.ener[-1]
retr_axis(gdat, 'ener')
gdat.limtener = [np.amin(gdat.binspara.ener), np.amax(gdat.binspara.ener)]
if gdat.boolbinsener:
if gdat.numbener > 1:
gdat.enerpivt = gdat.meanpara.ener[gdat.indxenerpivt]
# energy bin indices other than that of the pivot bin
gdat.indxenerinde = np.setdiff1d(gdat.indxener, gdat.indxenerpivt)
# temp
if gdat.typeexpr == 'chan':
gdat.edis = 0.3 * np.sqrt(gdat.binspara.ener) / 2.35
gdat.edisintp = sp.interpolate.interp1d(gdat.binspara.ener, gdat.edis, fill_value='extrapolate')
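# A sketch of the assumed energy resolution: sigma_E = 0.3 * sqrt(E) / 2.35,
# i.e., a FWHM of 0.3 * sqrt(E) keV converted to a standard deviation via
# 2.35 ~ 2 * sqrt(2 * ln 2).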
else:
gdat.edis = None
gdat.edisintp = None
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
setp_varb(gdat, 'cntpmodl', lablroot='$C_{M}$', scal='asnh', strgmodl=strgmodl)
# number of elements
if strgmodl == 'true':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
numbelem = 25
else:
numbelem = 5
setp_varb(gdat, 'numbelem', minm=0, maxm=10, lablroot='N', scal='pois', valu=numbelem, popl=l, strgmodl=strgmodl, strgstat='this')
if strgmodl == 'fitt':
setp_varb(gdat, 'numbelem', minm=0, maxm=10, lablroot='N', scal='pois', popl='full', strgmodl=strgmodl)
## hyperparameters
setp_varb(gdat, 'typemodltran', valu='drct', strgmodl=strgmodl)
if gmod.typemodltran == 'pois':
setp_varb(gdat, 'meanelem', minm=0.1, maxm=1000., scal='logt', popl='full', strgmodl=strgmodl)
#### boolean flag background
if gdat.typeexpr != 'user':
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
boolspecback = [True, True]
else:
boolspecback = [False, False]
else:
boolspecback = [False for k in gmod.indxback]
setp_varb(gdat, 'boolspecback', valu=boolspecback, strgmodl=strgmodl)
typeelemspateval = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# these element types slow down execution!
if gmod.typeelem[l] == 'lens' or gmod.typeelem[l].startswith('lghtline') or gmod.typeelem[l] == 'clusvari' or gmod.typeelem[l] == 'lghtgausbgrd':
typeelemspateval[l] = 'full'
else:
typeelemspateval[l] = 'locl'
setp_varb(gdat, 'typeelemspateval', valu=typeelemspateval, strgmodl=strgmodl)
gmod.minmpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
gmod.maxmpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = int(getattr(gmod.maxmpara, 'numbelempop%d' % l))
gmod.minmpara.numbelem[l] = int(getattr(gmod.minmpara, 'numbelempop%d' % l))
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
gmod.minmpara.numbelemtotl = np.sum(gmod.minmpara.numbelem)
# spatial distribution type
typespatdist = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
typespatdist[l] = 'unif'
setp_varb(gdat, 'typespatdist', valu=typespatdist, strgmodl=strgmodl)
# flux distribution type
typeprioflux = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# temp -- this can assign powr to populations whose flux is not drawn from a power law!
if gmod.typeelem[l].startswith('lght'):
typeprioflux[l] = 'powr'
else:
typeprioflux[l] = None
setp_varb(gdat, 'typeprioflux', valu=typeprioflux, strgmodl=strgmodl)
if gdat.strgexprname is None:
if gdat.typeexpr == 'chan':
gdat.strgexprname = 'Chandra'
if gdat.typeexpr == 'ferm':
gdat.strgexprname = 'Fermi-LAT'
if gdat.typeexpr == 'hubb':
gdat.strgexprname = 'HST'
if gdat.typeexpr == 'sche':
gdat.strgexprname = 'XXXXX'
if gdat.typeexpr == 'gene':
gdat.strgexprname = 'TGAS-RAVE'
if gdat.lablgangunit is None:
if gdat.typeexpr == 'ferm':
gdat.lablgangunit = '$^o$'
if gdat.typeexpr == 'gene':
gdat.lablgangunit = ''
if gdat.typeexpr == 'sdss' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'hubb':
gdat.lablgangunit = '$^{\prime\prime}$'
if gdat.labllgal is None:
if gdat.typeexpr == 'gene':
gdat.labllgal = r'L_{z}'
else:
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0 and gdat.bgalcntr == 0:
gdat.labllgal = r'l'
else:
gdat.labllgal = r'\theta_1'
if gdat.lablbgal is None:
if gdat.typeexpr == 'gene':
gdat.lablbgal = r'E_k'
else:
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0 and gdat.bgalcntr == 0:
gdat.lablbgal = r'b'
else:
gdat.lablbgal = r'\theta_2'
if gdat.strgenerunit is None:
if gdat.typeexpr == 'ferm':
gdat.strgenerunit = 'GeV'
gdat.nameenerunit = 'gevv'
if gdat.typeexpr == 'chan':
gdat.strgenerunit = 'keV'
gdat.nameenerunit = 'kevv'
if gdat.typeexpr == 'gene':
gdat.strgenerunit = ''
gdat.nameenerunit = ''
if gdat.typeexpr == 'hubb':
gdat.strgenerunit = 'erg'
gdat.nameenerunit = 'ergs'
if gdat.typeexpr == 'fire':
gdat.strgenerunit = r'$\mu$m$^{-1}$'
gdat.nameenerunit = 'imum'
if gdat.nameexpr is None:
if gdat.typeexpr == 'ferm':
gdat.nameexpr = 'Fermi-LAT'
if gdat.typeexpr == 'sdss':
gdat.nameexpr = 'SDSS'
if gdat.typeexpr == 'chan':
gdat.nameexpr = 'Chandra'
if gdat.typeexpr == 'hubb':
gdat.nameexpr = 'HST'
if gdat.typeexpr == 'gaia':
gdat.nameexpr = 'Gaia'
## Lensing
if gdat.radispmr is None:
if gdat.typeexpr == 'ferm':
gdat.radispmr = 0.6 / gdat.anglfact
if gdat.typeexpr == 'hubb':
gdat.radispmr = 0.15 / gdat.anglfact
if gdat.typeexpr == 'tess':
gdat.radispmr = 1. / gdat.anglfact
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.radispmr = 0.1
else:
gdat.radispmr = 0.2 / gdat.anglfact
if gdat.typeexpr == 'sdss':
gdat.radispmr = 0.5 / gdat.anglfact
if gdat.typeexpr == 'gene':
gdat.radispmr = 0.2
print('gdat.radispmr')
print(gdat.radispmr)
if gdat.anglassc is None:
gdat.anglassc = 5. * gdat.radispmr
print('gdat.anglassc')
print(gdat.anglassc)
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
if gdat.boolbinsspat:
if gdat.typeexpr == 'chan' or gdat.typeexpr == 'sdss':
numbpsfpform = 0
gmod.numbpsfptotl = 0
if gdat.typeexpr == 'chan':
retr_psfpchan(gmod)
if gdat.typeexpr == 'ferm':
retr_psfpferm(gmod)
if gdat.typeexpr == 'sdss':
retr_psfpsdss(gmod)
if gdat.typeexpr == 'hubb':
retr_psfphubb(gmod)
if gdat.typeexpr == 'tess':
retr_psfptess(gmod)
if gdat.typeexpr == 'gene':
retr_psfpsdyn(gmod)
# model evaluation approximation error tolerance in units of the fraction of the lowest PS flux
if gdat.specfraceval is None:
if gdat.typeexpr == 'ferm':
gdat.specfraceval = 0.5
else:
gdat.specfraceval = 0.1
gdat.binspara.lgalcart = np.linspace(gdat.minmlgaldata, gdat.maxmlgaldata, gdat.numbsidecart + 1)
gdat.binspara.bgalcart = np.linspace(gdat.minmbgaldata, gdat.maxmbgaldata, gdat.numbsidecart + 1)
gdat.meanpara.lgalcart = (gdat.binspara.lgalcart[0:-1] + gdat.binspara.lgalcart[1:]) / 2.
gdat.meanpara.bgalcart = (gdat.binspara.bgalcart[0:-1] + gdat.binspara.bgalcart[1:]) / 2.
# reference elements
gdat.numbrefr = 0
if gdat.typedata == 'mock':
gdat.numbrefr = gmod.numbpopl
if gdat.typedata == 'inpt':
if gdat.typeexpr == 'ferm':
gdat.numbrefr = 2
if gdat.typeexpr == 'chan':
gdat.numbrefr = 2
print('gdat.numbrefr')
print(gdat.numbrefr)
gdat.indxrefr = np.arange(gdat.numbrefr)
if gdat.boolasscrefr is None:
gdat.boolasscrefr = [True for q in gdat.indxrefr]
gdat.listnamerefr = []
gdat.refr.nameparagenrelemampl = [[] for q in gdat.indxrefr]
gdat.refr.namepara.elem = [[] for q in gdat.indxrefr]
gdat.refr.namepara.elemodim = [[] for q in gdat.indxrefr]
gdat.boolinforefr = False
gdat.listpathwcss = []
gdat.numbpixllgalshft = []
gdat.numbpixlbgalshft = []
gdat.refrindxpoplassc = [[] for q in gdat.indxrefr]
# temp -- this allows up to 3 reference populations
gdat.true.colrelem = ['darkgreen', 'olivedrab', 'mediumspringgreen']
# temp -- this allows up to 3 reference populations
gdat.fitt.colrelem = ['royalblue', 'dodgerblue', 'navy']
if gdat.typedata == 'mock':
gdat.boolinforefr = True
gdat.listnamerefr = ['moc%d' % l for l in gmod.indxpopl]
gdat.indxrefr = np.arange(gdat.numbrefr)
if gdat.typedata == 'inpt':
if gdat.typeexpr == 'ferm':
gdat.boolinforefr = True
retr_refrferminit(gdat)
for q in gdat.indxrefr:
gdat.refrindxpoplassc[q] = gmod.indxpopl
if gdat.typeexpr == 'chan':
gdat.boolinforefr = True
retr_refrchaninit(gdat)
for q in gdat.indxrefr:
gdat.refrindxpoplassc[q] = gmod.indxpopl
for q in gdat.indxrefr:
if 'lgal' in gdat.refr.namepara.elem[q] and 'bgal' in gdat.refr.namepara.elem[q]:
gdat.refr.namepara.elem[q] += ['gang', 'aang']
for strgfeat in gdat.refr.namepara.elem[q]:
setattr(gdat.refr, strgfeat, [[] for q in gdat.indxrefr])
if gdat.typeexpr == 'ferm':
retr_refrfermfinl(gdat)
if gdat.typeexpr == 'chan':
retr_refrchanfinl(gdat)
if gdat.typeexpr == 'hubb':
boollenshost = True
else:
boollenshost = False
setp_varb(gdat, 'boollenshost', valu=boollenshost)
if gdat.typeexpr == 'hubb':
boollenssubh = True
else:
boollenssubh = False
setp_varb(gdat, 'boollenssubh', valu=boollenssubh)
if gdat.typeexpr == 'hubb':
boollens = True
else:
boollens = False
setp_varb(gdat, 'boollens', valu=boollens)
if gdat.typeexpr == 'hubb':
boolemishost = True
else:
boolemishost = False
setp_varb(gdat, 'boolemishost', valu=boolemishost)
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
## names of the variables for which cumulative posteriors will be plotted
if gmod.boollenssubh:
gmod.listnamevarbcpct = ['convelem']
else:
gmod.listnamevarbcpct = []
# the adis in the file is kpc
fileh5py = h5py.File(gdat.pathdata + 'inpt/adis.h5','r')
gdat.redsintp = fileh5py['reds'][()]
gdat.adisintp = fileh5py['adis'][()] * 1e6 # [pc]
gdat.adisobjt = sp.interpolate.interp1d(gdat.redsintp, gdat.adisintp, fill_value='extrapolate')
gdat.redsfromdlosobjt = sp.interpolate.interp1d(gdat.adisintp * gdat.redsintp, gdat.redsintp, fill_value='extrapolate')
fileh5py.close()
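# adisobjt interpolates the angular diameter distance (in pc) as a function of
# redshift; redsfromdlosobjt inverts the product adis * reds back to redshift.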
setp_varb(gdat, 'lgal', minm=-10., maxm=10., lablroot='$l$')
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
if gdat.typedata == 'mock':
if gmod.boollenshost:
setp_varb(gdat, 'redshost', valu=0.2, strgmodl='true')
setp_varb(gdat, 'redssour', valu=1., strgmodl='true')
setp_indxpara(gdat, 'finl', strgmodl='true')
### background parameters
if gdat.typeexpr == 'chan':
if gdat.anlytype.startswith('extr'):
meanbacpbac1 = 1.
else:
meanbacpbac1 = 70.04
stdvbacpbac1 = 1e-5 * meanbacpbac1
setp_varb(gdat, 'bacp', mean=meanbacpbac1, stdv=stdvbacpbac1, back=1, scal='gaus', strgmodl='true')
if gdat.numbpixlfull == 1:
bacp = [1e0, 1e2]
setp_varb(gdat, 'bacp', limt=bacp, back=0)
else:
bacp = [1e-1, 1e3]
setp_varb(gdat, 'bacp', limt=bacp, ener='full', back=0)
if gdat.numbpixlfull == 1:
bacp = 10.
setp_varb(gdat, 'bacp', valu=bacp)
else:
setp_varb(gdat, 'bacp', valu=170., back=0, ener=0)
setp_varb(gdat, 'bacp', valu=17.4, back=0, ener=1)
setp_varb(gdat, 'bacp', valu=27., back=0, ener=2)
setp_varb(gdat, 'bacp', valu=11.8, back=0, ener=3)
setp_varb(gdat, 'bacp', valu=101., back=0, ener=4)
if gdat.typeexpr == 'ferm':
if 'ferm_bubb' in gdat.strgcnfg:
setp_varb(gdat, 'bacp', limt=[1e-10, 1e10], ener='full', back='full')
else:
# isotropic + unresolved
setp_varb(gdat, 'bacp', limt=[1e-7, 1e-2], ener=0, back=0)
setp_varb(gdat, 'bacp', limt=[1e-9, 1e-3], ener=1, back=0)
setp_varb(gdat, 'bacp', limt=[1e-10, 1e-4], ener=2, back=0)
# diffuse
setp_varb(gdat, 'bacp', limt=[1e-6, 1e-2], ener=0, back=1)
setp_varb(gdat, 'bacp', limt=[1e-7, 1e-3], ener=1, back=1)
setp_varb(gdat, 'bacp', limt=[1e-8, 1e-4], ener=2, back=1)
# dark
setp_varb(gdat, 'bacp', limt=[1e-11, 1e-4], ener=0, back=2)
setp_varb(gdat, 'bacp', limt=[1e-11, 1e-4], ener=1, back=2)
setp_varb(gdat, 'bacp', limt=[1e-11, 1e-4], ener=2, back=2)
setp_varb(gdat, 'bacp', valu=5e-6, ener=0, back=0)
setp_varb(gdat, 'bacp', valu=2e-8, ener=1, back=0)
setp_varb(gdat, 'bacp', valu=2e-9, ener=2, back=0)
setp_varb(gdat, 'bacp', valu=1e-5, ener=4, back=0)
setp_varb(gdat, 'bacp', valu=1e-4, ener=0, back=1)
setp_varb(gdat, 'bacp', valu=1e-5, ener=1, back=1)
setp_varb(gdat, 'bacp', valu=7e-7, ener=2, back=1)
setp_varb(gdat, 'bacp', valu=3e-8, ener=4, back=1)
# Fourier basis
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
for c in gmod.indxback:
if isinstance(typeback[c], str):
if 'bfun' in typeback[c]:
setp_varb(gdat, 'bacp', limt=[1e-10, 1e10], ener='full', back=c)
if gdat.typeexpr == 'hubb':
bacp = [1e-10, 1e-6]
if gdat.typeexpr == 'gene':
setp_varb(gdat, 'bacp', minm=1e-1, maxm=1e3, valu=1e1, lablroot='$A$', scal='logt', ener=0, back=0, strgmodl=strgmodl)
if gdat.typeexpr == 'fire':
bacp = [1e-1, 1e1]
if gdat.typeexpr == 'tess':
bacp = [1e-1, 1e1]
setp_varb(gdat, 'bacp', limt=bacp, ener='full', back=0)
if gdat.typeexpr == 'hubb':
bacp = 2e-7
if gdat.typeexpr == 'chan':
bacp = 1.
if gdat.numbpixlfull == 1:
setp_varb(gdat, 'bacp', valu=bacp, back=0)
else:
setp_varb(gdat, 'bacp', valu=bacp, ener='full', back=0)
# particle background
if gdat.typeexpr == 'chan':
bacp = 70.04
setp_varb(gdat, 'bacp', valu=bacp, back=1)
# particle background
#if gdat.typeexpr == 'chan':
# if gdat.anlytype == 'spec':
# bacp = [1e-8, 1e-6]
# else:
# bacp = [1e-1, 1e2]
# setp_varb(gdat, 'bacp', limt=bacp, back=1)
### element parameter boundaries
#### spatial
if gdat.boolbinsspat:
if gdat.typeexpr == 'ferm':
minmgang = 1e-1 / gdat.anglfact
else:
minmgang = 1e-2 / gdat.anglfact
setp_varb(gdat, 'minmgang', valu=minmgang, popl='full', strgmodl=strgmodl)
# parameter defaults
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
enertemp = np.sqrt(gdat.limtener[0] * gdat.limtener[1])
# temp -- these should depend on population index
setp_varb(gdat, 'elin', limt=gdat.limtener, strgmodl=strgmodl)
setp_varb(gdat, 'sigm', limt=np.array([1e-1, 1e0]) * enertemp, strgmodl=strgmodl)
setp_varb(gdat, 'gamm', limt=np.array([1e-1, 1e0]) * enertemp, strgmodl=strgmodl)
if gdat.boolbinsspat:
minmdefs = 0.003 / gdat.anglfact
setp_varb(gdat, 'minmdefs', valu=minmdefs, strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
if gdat.boolbinsspat:
maxmdefs = 1. / gdat.anglfact
setp_varb(gdat, 'maxmdefs', valu=maxmdefs, strgmodl=strgmodl)
# true model parameters
if gdat.typedata == 'mock':
gmod.numbelem = np.zeros(gmod.numbpopl, dtype=int)
if gmod.typemodltran == 'pois':
for l in gmod.indxpopl:
setattr(gdat.true.this, 'meanelempop%d' % l, getattr(gdat.true.this, 'numbelempop%d' % l))
gmod.numbelem[l] = getattr(gdat.true.this, 'numbelempop%d' % l)
if gmod.numbelem[l] > gmod.maxmpara.numbelem[l]:
raise Exception('True number of elements is larger than maximum.')
gdat.stdvhostsour = 0.04 / gdat.anglfact
## distribution
### flux
if gmod.boollenssubh:
### projected scale radius
limtasca = np.array([0., 0.1]) / gdat.anglfact
setp_varb(gdat, 'asca', minm=limtasca[0], maxm=limtasca[1])
### projected cutoff radius
limtacut = np.array([0., 2.]) / gdat.anglfact
setp_varb(gdat, 'acut', minm=limtacut[0], maxm=limtacut[1])
if gdat.boolbinsspat:
setp_varb(gdat, 'gangdisttype', valu=['self'], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typespatdist[l] == 'gangexpo':
setp_varb(gdat, 'maxmgang', valu=gmod.maxmlgal, strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
gangdistsexp = 5. / gdat.anglfact
setp_varb(gdat, 'gangdistsexp', valu=gangdistsexp, strgmodl=strgmodl, popl=l)
if gmod.typespatdist[l] == 'dsrcexpo':
if gdat.typeexpr == 'hubb':
dsrcdistsexp = 0.5 / gdat.anglfact
setp_varb(gdat, 'dsrcdistsexp', valu=dsrcdistsexp, strgmodl=strgmodl, popl=l)
if strgmodl == 'true':
if gmod.boollenshost or boolemishost:
setp_varb(gdat, 'lgalhost', mean=0., stdv=gdat.stdvhostsour, strgmodl='true', isfr='full')
setp_varb(gdat, 'bgalhost', mean=0., stdv=gdat.stdvhostsour, strgmodl='true', isfr='full')
if gmod.boollens:
setp_varb(gdat, 'lgalsour', mean=0., stdv=gdat.stdvhostsour, strgmodl='true')
setp_varb(gdat, 'bgalsour', mean=0., stdv=gdat.stdvhostsour, strgmodl='true')
if strgmodl == 'fitt':
if gmod.boollenshost or boolemishost:
setp_varb(gdat, 'lgalhost', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt', isfr='full')
setp_varb(gdat, 'bgalhost', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt', isfr='full')
if gmod.boollens:
setp_varb(gdat, 'lgalsour', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt')
setp_varb(gdat, 'bgalsour', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt')
if gmod.boollens:
setp_varb(gdat, 'redshost', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'redssour', limt=[0.5, 1.5], strgmodl=strgmodl)
setp_varb(gdat, 'fluxsour', limt=np.array([1e-22, 1e-17]), strgmodl=strgmodl)
setp_varb(gdat, 'sindsour', limt=np.array([0., 4.]), strgmodl=strgmodl)
setp_varb(gdat, 'sizesour', limt=[0.1 / gdat.anglfact, 2. / gdat.anglfact], strgmodl=strgmodl)
setp_varb(gdat, 'ellpsour', limt=[0., 0.5], strgmodl=strgmodl)
setp_varb(gdat, 'redshost', valu=0.2, strgmodl=strgmodl)
setp_varb(gdat, 'redssour', valu=1., strgmodl=strgmodl)
if gmod.boollenshost or boolemishost:
setp_varb(gdat, 'fluxhost', limt=np.array([1e-20, 1e-15]), isfr='full', strgmodl=strgmodl)
setp_varb(gdat, 'sindhost', limt=np.array([0., 4.]), isfr='full', strgmodl=strgmodl)
setp_varb(gdat, 'sizehost', limt=[0.1 / gdat.anglfact, 4. / gdat.anglfact], isfr='full', strgmodl=strgmodl)
setp_varb(gdat, 'beinhost', limt=[0.5 / gdat.anglfact, 2. / gdat.anglfact], isfr='full', strgmodl=strgmodl)
setp_varb(gdat, 'ellphost', limt=[0., 0.5], isfr='full', strgmodl=strgmodl)
setp_varb(gdat, 'anglhost', limt=[0., np.pi], isfr='full', strgmodl=strgmodl)
if strgmodl == 'fitt':
setp_varb(gdat, 'serihost', limt=[1., 8.], isfr='full', strgmodl=strgmodl)
if strgmodl == 'true':
setp_varb(gdat, 'serihost', valu=4., isfr='full', strgmodl=strgmodl)
setp_varb(gdat, 'serihost', limt=[1., 8.], isfr='full', strgmodl=strgmodl)
if gmod.boollens:
setp_varb(gdat, 'sherextr', limt=[0., 0.1], strgmodl=strgmodl)
setp_varb(gdat, 'anglsour', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'sangextr', limt=[0., np.pi], strgmodl=strgmodl)
# temp -- to be removed
#gmod.factlgal = gmod.maxmlgal - gmod.minmlgal
#gmod.factbgal = gmod.maxmbgal - gmod.minmbgal
#gmod.minmaang = -np.pi
#gmod.maxmaang = pi
# loglikelihood difference for each element
setp_varb(gdat, 'deltllik', lablroot=r'$\Delta \log L$', minm=1., maxm=100., strgmodl=strgmodl)
for l in gmod.indxpopl:
setp_varb(gdat, 'deltllik', lablroot=r'$\Delta \log L$', minm=1., maxm=100., popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'deltllik', lablroot=r'$\Delta \log L$', minm=1., maxm=100., popl=l, strgmodl=strgmodl, iele='full')
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
meanslop = 1.9
stdvslop = 0.5
scal = 'gaus'
else:
minmslop = 0.5
maxmslop = 3.
scal = 'logt'
name = 'slopprio' + gmod.nameparagenrelemampl[l]
if scal == 'gaus':
setp_varb(gdat, name, mean=meanslop, stdv=stdvslop, scal=scal, lablroot=r'$\alpha$', popl=l, strgmodl=strgmodl)
elif gmod.typeelem[l].startswith('clus'):
setp_varb(gdat, name, valu=2., minm=minmslop, maxm=maxmslop, scal=scal, lablroot=r'$\alpha$', popl=l, strgmodl=strgmodl)
else:
setp_varb(gdat, name, minm=minmslop, maxm=maxmslop, scal=scal, lablroot=r'$\alpha$', popl=l, strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
setp_varb(gdat, 'gwdtslop', limt=[0.5, 4.], scal='logt', popl=l, strgmodl=strgmodl)
if gdat.typeexpr != 'user':
if gdat.boolbinsspat:
setp_varb(gdat, 'spatdistcons', valu=1e-3, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', valu=1.1, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'bgaldistscal', valu=2. / gdat.anglfact, popl=l, strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'sloplowrprioflux', valu=1.5, popl=l)
setp_varb(gdat, 'slopupprprioflux', valu=2.5, popl=l)
setp_varb(gdat, 'brekprioflux', valu=1e-9, popl=l)
if gmod.typeelem[l] == 'lghtpnts':
setp_varb(gdat, 'slopprioflux', valu=2.2, popl=l, strgmodl=strgmodl)
if gmod.typeelem[l].startswith('lghtline'):
setp_varb(gdat, 'slopprioflux', valu=2., popl=l, strgmodl=strgmodl)
if gmod.typeelem[l] == 'lens':
setp_varb(gdat, 'defsslop', valu=1.9, popl=l, strgmodl=strgmodl)
if gmod.typeelem[l] == 'lens':
setp_varb(gdat, 'ascadistmean', valu=0.05 / gdat.anglfact, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'ascadiststdv', valu=0.04 / gdat.anglfact, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'acutdistmean', valu=1. / gdat.anglfact, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'acutdiststdv', valu=0.04 / gdat.anglfact, popl=l, strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
setp_varb(gdat, 'gwdtslop', valu=2., popl=l, strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
sinddistmean = 2.15
if gdat.typeexpr == 'chan':
sinddistmean = 1.
if gdat.typeexpr == 'hubb':
sinddistmean = 1.
if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'hubb':
setp_varb(gdat, 'sinddistmean', valu=sinddistmean, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'sinddiststdv', valu=0.5, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', valu=2., popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', valu=0.2, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', valu=2., popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', valu=0.2, popl=l, strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'per0distmean', valu=3e-3, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', valu=0.3, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', valu=10**8.5, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', valu=0.7, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', valu=2., popl=l, strgmodl=strgmodl)
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlosslop', valu=-2., popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', valu=0.5, popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', valu=1.5, popl=l, strgmodl=strgmodl)
if gmod.boollenshost:
setp_varb(gdat, 'beinhost', valu=1.5 / gdat.anglfact)
setp_varb(gdat, 'sizesour', valu=0.3 / gdat.anglfact)
setp_varb(gdat, 'sizehost', valu=1. / gdat.anglfact)
setp_varb(gdat, 'ellpsour', valu=0.2)
setp_varb(gdat, 'fluxsour', valu=1e-18)
setp_varb(gdat, 'sindsour', valu=1.5)
setp_varb(gdat, 'fluxhost', valu=1e-16)
setp_varb(gdat, 'sindhost', valu=2.5)
setp_varb(gdat, 'ellphost', valu=0.2)
setp_varb(gdat, 'sangextr', valu=np.pi / 2.)
setp_varb(gdat, 'serihost', valu=4.)
if gdat.typeexpr != 'user':
if gdat.boolbinsspat:
minm = -gdat.maxmgangdata
maxm = gdat.maxmgangdata
for l in gmod.indxpopl:
setp_varb(gdat, 'lgal', minm=minm, maxm=maxm, lablroot='$l$', strgmodl=strgmodl)
setp_varb(gdat, 'bgal', minm=minm, maxm=maxm, lablroot='$b$', strgmodl=strgmodl)
setp_varb(gdat, 'lgal', minm=minm, maxm=maxm, lablroot='l_{gal}', popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'bgal', minm=minm, maxm=maxm, lablroot='b_{gal}', popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lgal', minm=minm, maxm=maxm, lablroot='l_{gal}', popl=l, iele='full', strgmodl=strgmodl)
setp_varb(gdat, 'bgal', minm=minm, maxm=maxm, lablroot='b_{gal}', popl=l, iele='full', strgmodl=strgmodl)
minm = 0.1
maxm = 10.
for l in gmod.indxpopl:
if strgmodl == 'fitt':
setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N')
setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N', strgmodl=strgmodl)
setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N', popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N', popl=l, iele='full', strgmodl=strgmodl)
if gdat.boolbinsspat:
for l in gmod.indxpopl:
setp_varb(gdat, 'aang', minm=-np.pi, maxm=np.pi, lablroot=r'$\theta$', strgmodl=strgmodl)
setp_varb(gdat, 'aang', minm=-np.pi, maxm=np.pi, lablroot=r'$\theta$', popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'aang', minm=-np.pi, maxm=np.pi, lablroot=r'$\theta$', popl=l, strgmodl=strgmodl, iele='full')
setp_varb(gdat, 'gang', minm=0, maxm=gdat.maxmgangdata, lablroot=r'$\psi$', strgmodl=strgmodl)
setp_varb(gdat, 'gang', minm=0, maxm=gdat.maxmgangdata, lablroot=r'$\psi$', popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gang', minm=0, maxm=gdat.maxmgangdata, lablroot=r'$\psi$', popl=l, strgmodl=strgmodl, iele='full')
# copy the true model to the inference model if the inference model parameter has not been specified
#temp = deepcopy(gdat.__dict__)
#for strg, valu in temp.items():
# if strg.startswith('true') and not strg[4:].startswith('indx'):
# try:
# valumodl = getattr(gdat.fitt, strg[4:])
# if valumodl is None:
# raise
# if gdat.typeverb > 1:
# print 'Received custom input for ' + strg[4:]
# except:
# setattr(gdat.fitt, strg[4:], getattr(gdat, strg))
# check inputs
if gdat.numbburn > gdat.numbswep:
raise Exception('Bad number of burn-in sweeps.')
if gdat.factthin > gdat.numbswep - gdat.numbburn or gdat.factthin < 1:
raise Exception('Bad thinning factor.')
if gdat.typepixl == 'heal' and gdat.numbspatdims > 2:
raise Exception('More than 2 spatial dimensions require Cartesian binning.')
if gdat.defa:
return gdat
if gdat.typeverb > 0:
if gdat.boolburntmpr:
print('Warning: Tempered burn-in.')
if gdat.typedata == 'inpt':
gdat.minmpara.sind = -1.
gdat.maxmpara.sind = 2.
gdat.minmpara.curv = -1.
gdat.maxmpara.curv = 1.
gdat.minmpara.expc = 0.1
gdat.maxmpara.expc = 10.
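# default limits for the spectral parameters (power-law index, curvature, and cutoff energy) when the data are provided externally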
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'etag' or strgfeat == 'gang' or strgfeat == 'aang':
continue
refrfeat = getattr(gdat.refr, strgfeat)
if len(refrfeat[q]) == 0 or refrfeat[q].ndim < 2:
raise Exception('')
if gdat.typedata != 'mock':
gdat.refr.numbelem = np.zeros(gdat.numbrefr, dtype=int)
for strgmodl in gdat.liststrgmodl:
# set up the indices of the fitting model
setp_indxpara(gdat, 'finl', strgmodl=strgmodl)
# construct the model
setp_paragenrscalbase(gdat, strgmodl=strgmodl)
gmod = getattr(gdat, strgmodl)
if strgmodl == 'true':
# transfer the true model to the reference model
#for strg, valu in gdat.true.__dict__.items():
# setattr(gdat.refr, strg, valu)
for name in ['listmrkrmiss', 'listlablmiss', 'colr', 'colrelem', 'namepara', 'nameparagenrelemampl', 'numbelem']:
setattr(gdat.refr, name, getattr(gdat.true, name))
gdat.refr.indxpoplfittassc = gdat.fitt.indxpopl
gdat.fitt.indxpoplrefrassc = gdat.fitt.indxpopl
# to be deleted
# determine total label
#for name in ['expo', 'numbpixl']:
# lablroot = getattr(gdat.lablrootpara, name)
# lablunit = getattr(gdat.lablunitpara, name)
# labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
# setattr(gdat.labltotlpara, name, labltotl)
# set the reference model to true model
# derived lens parameter minima and maxima
print('Defining minima and maxima for derived parameters...')
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
for e in gmod.indxsersfgrd:
strgsersfgrd = 'isf%d' % e
setp_varb(gdat, 'masshost' + strgsersfgrd + 'bein', limt=[1e7, 1e14], strgmodl=strgmodl)
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
setp_varb(gdat, 'masshost' + strgsersfgrd + strgcalcmasssubh + 'bein', limt=[1e7, 1e14], strgmodl=strgmodl)
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
setp_varb(gdat, 'masssubh' + strgsersfgrd + strgcalcmasssubh + 'bein', limt=[1e7, 1e10], strgmodl=strgmodl)
setp_varb(gdat, 'fracsubh' + strgsersfgrd + strgcalcmasssubh + 'bein', limt=[0., 1.], strgmodl=strgmodl)
gdat.typeelem = []
gdat.typeelemspateval = []
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
for typeelemtemp in gmod.typeelem:
if not typeelemtemp in gdat.typeelem:
gdat.typeelem.append(typeelemtemp)
for typeelemspatevaltemp in gmod.typeelemspateval:
if not typeelemspatevaltemp in gdat.typeelemspateval:
gdat.typeelemspateval.append(typeelemspatevaltemp)
for strgvarb in ['boolelempsfn']:
varbcomm = False
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
varb = getattr(gmod, strgvarb)
varbcomm = varbcomm or varb
setattr(gdat, strgvarb + 'anyy', varbcomm)
#gdat.fitt.namepara.genrelemtagg = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
#for q in gdat.indxrefr:
# for strgfeat in gdat.refr.namepara.elem[q]:
# for l in gmod.indxpopl:
# gdat.fitt.namepara.genrelemtagg[q][l].append(strgfeat + gdat.listnamerefr[q])
gdat.listnamevarbstat = ['paragenrscalfull', 'paragenrunitfull', 'indxelemfull', 'lliktotl', 'llik', 'lpritotl', 'lpri']
if gdat.typepixl == 'cart' and (gmod.typeevalpsfn == 'conv' or gmod.typeevalpsfn == 'full'):
gdat.listnamevarbstat += ['psfnconv']
if gmod.boolelemsbrtdfncanyy:
gdat.listnamevarbstat += ['sbrtdfnc']
if gmod.boolelemsbrtextsbgrdanyy:
gdat.listnamevarbstat += ['sbrtextsbgrd']
if gmod.boollens:
gdat.listnamevarbstat += ['sbrtlens']
if gmod.boollens or gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
if gmod.boollens:
gdat.listnamevarbstat += ['deflhostisf%d' % e]
if gmod.typeemishost != 'none':
gdat.listnamevarbstat += ['sbrthostisf%d' % e]
if gmod.convdiffanyy and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'):
gdat.listnamevarbstat += ['sbrtmodlconv']
if gmod.boolelemdeflsubhanyy:
gdat.listnamevarbstat += ['deflsubh']
# paths
## data
gdat.pathpixlcnvt = gdat.pathdata + 'pixlcnvt/'
gdat.pathprox = gdat.pathdata + 'prox/'
## plot
gdat.pathplotrtag = gdat.pathimag + gdat.rtag + '/'
gdat.pathinit = gdat.pathplotrtag + 'init/'
gdat.pathinitintr = gdat.pathinit + 'intr/'
if gdat.boolbinsspat:
gdat.ascaglob = 0.05 / gdat.anglfact
gdat.acutglob = 1. / gdat.anglfact
gdat.cutfdefs = 3e-3 / gdat.anglfact
# plotting
gdat.lablsampdist = 'Posterior'
gdat.lablparagenrscalfull = 'Sample'
gdat.lablmlik = 'Maximum likelihood'
gdat.lablmedi = 'Median'
gdat.lablpmea = 'Mean'
gdat.lablstdv = 'Std. dev.'
# number of samples for which cumulative posterior will be calculated
gdat.numbsampcpct = 10
gdat.indxsampcpct = np.arange(gdat.numbsampcpct)
# p value contours
gdat.pvalcont = [0.317, 0.0455, 2.7e-3, 6e-5, 1.3e-6]
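# these levels roughly correspond to the two-sided Gaussian tail probabilities at 1 through 5 sigma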
## number of bins in histogram plots
gdat.numbbinsplot = 20
gdat.indxbinsplot = np.arange(gdat.numbbinsplot)
## number of bins in hyperprior plots
gdat.numbbinsplotprio = 100
# temp
if gdat.typedata == 'inpt':
gmod = gdat.fitt
for l in gmod.indxpopl:
for strgpdfn in gmod.listscalparagenrelem[l]:
if strgpdfn.startswith('gaum') and gmod.lgalprio is None and gmod.bgalprio is None:
raise Exception('If typespatdist is "gaus", spatial coordinates of the prior catalog should be provided via lgalprio and bgalprio.')
# temp -- have these definitions separate for all features
# feature plotting factors and scalings
gdat.dictglob = {}
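# chronometer registry: names and labels of the code stages whose execution times are tracked during sampling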
gdat.listnamechro = ['totl', 'prop', 'diag', 'save', 'plot', 'proc', 'pars', 'modl', 'llik', 'sbrtmodl']
gdat.listlablchro = ['Total', 'Proposal', 'Diagnostics', 'Save', 'Plot', 'Process', 'Parse', 'Model', 'Likelihood', 'Total emission']
if gmod.numbparaelem > 0:
gdat.listnamechro += ['spec']
gdat.listlablchro += ['Spectrum calculation']
if gmod.boollens:
gdat.listnamechro += ['deflzero', 'deflhost', 'deflextr', 'sbrtlens', 'sbrthost']
gdat.listlablchro += ['Array initialization', 'Host Deflection', 'External deflection', 'Lensed emission', 'Host emission']
if gmod.boolelemsbrtdfncanyy:
gdat.listnamechro += ['elemsbrtdfnc']
gdat.listlablchro += ['Dfnc S Brght']
if gmod.boolelemdeflsubhanyy:
gdat.listnamechro += ['elemdeflsubh']
gdat.listlablchro += ['Subh Defl']
if gmod.boolelemsbrtextsbgrdanyy:
gdat.listnamechro += ['elemsbrtextsbgrd']
gdat.listlablchro += ['Bkg Exts S Brght']
booltemp = False
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
booltemp = booltemp or gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'
if booltemp:
gdat.listnamechro += ['psfnconv']
gdat.listlablchro += ['Img for PSF Conv.']
gdat.listnamechro += ['expo', 'lpri', 'tert']
gdat.listlablchro += ['Exposure', 'Prior', 'Tertiary']
gdat.numbchro = len(gdat.listnamechro)
if gdat.typedata != 'mock':
if gmod.boolelemlghtanyy and gdat.typeexpr == 'ferm' and gdat.maxmgangdata == 20. / gdat.anglfact:
path = gdat.pathinpt + 'sbrt0018.png'
# note: scipy.ndimage.imread was removed in SciPy 1.2; imageio.v2.imread is a common replacement
gdat.sbrt0018 = sp.ndimage.imread(path, flatten=True)
gdat.sbrt0018 -= np.amin(gdat.sbrt0018)
gdat.sbrt0018 /= np.amax(gdat.sbrt0018)
binslgaltemp = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.sbrt0018.shape[1])
binsbgaltemp = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.sbrt0018.shape[0])
gdat.sbrt0018objt = sp.interpolate.RectBivariateSpline(binsbgaltemp, binslgaltemp, gdat.sbrt0018)
# log-prior register
## indices of split and merge term
indxlprispme = -1
## number of elements
numb = 0
for l in gmod.indxpopl:
numb += len(gmod.namepara.genrelem[l])
# process index
gdat.indxproc = np.arange(gdat.numbproc)
if gmod.boollens or gdat.typedata == 'mock' and gmod.boollens:
retr_axis(gdat, 'mcut')
retr_axis(gdat, 'bein')
# angular deviation
gdat.numbanglhalf = 10
gdat.indxanglhalf = np.arange(gdat.numbanglhalf)
retr_axis(gdat, 'anglhalf')
gdat.numbanglfull = 1000
gdat.indxanglfull = np.arange(gdat.numbanglfull)
gdat.minmpara.anglfull = 0.
gdat.maxmpara.anglfull = 3. * gdat.maxmgangdata
retr_axis(gdat, 'anglfull')
# temp
#gdat.binspara.anglcosi = np.sort(np.cos(gdat.binspara.angl))
# temp
#gdat.meshbackener = np.meshgrid(gdat.gmod.indxback, gdat.indxener, indexing='ij')
# plotting
## the normalized offset for text annotation of point sources in the frames
gdat.offstextimag = gdat.maxmgangdata * 0.05
## figure size
gdat.plotsize = 6
## size of the images
gdat.sizeimag = 1.3 * gdat.plotsize
## label of the models
gdat.fitt.lablmodl = 'Model'
if gdat.typedata == 'mock':
gdat.refr.lablmodl = 'True'
else:
gdat.refr.lablmodl = 'Ref'
# element parameters common between the fitting and reference models
gdat.namepara.elemcomm = [[[] for l in gdat.fitt.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gdat.fitt.indxpopl:
for strgfeat in gdat.fitt.listnameparatotlelem[l]:
if strgfeat in gdat.refr.namepara.elem[q]:
gdat.namepara.elemcomm[q][l].append(strgfeat)
if gdat.typedata == 'mock':
gdat.refr.indxpopl = gdat.true.indxpopl
gdat.refr.lablpopl = gdat.true.lablpopl
for strgmodl in ['refr', 'fitt']:
gmod = getattr(gdat, strgmodl)
print('strgmodl')
print(strgmodl)
# labels of elements
lablelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
lablelem[l] = gmod.lablmodl + ' ' + gmod.lablpopl[l]
setp_varb(gdat, 'lablelem', valu=lablelem, strgmodl=strgmodl)
lablelemmiss = [[] for l in gmod.indxpopl]
lablelemhits = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
lablelemmiss[l] = gmod.lablelem[l] + ' miss'
lablelemhits[l] = gmod.lablelem[l] + ' hit'
setp_varb(gdat, 'lablelemmiss', valu=lablelemmiss, strgmodl=strgmodl)
setp_varb(gdat, 'lablelemhits', valu=lablelemhits, strgmodl=strgmodl)
lablhost = gmod.lablmodl + ' host'
setp_varb(gdat, 'lablhost', valu=lablhost, strgmodl=strgmodl)
lablsour = gmod.lablmodl + ' sour'
setp_varb(gdat, 'lablsour', valu=lablsour, strgmodl=strgmodl)
## PSF class indices for which images will be plotted
if gdat.numbevtt == 1:
gdat.indxevttplot = gdat.indxevtt
else:
gdat.indxevttplot = np.concatenate((np.array([-1]), gdat.indxevtt))
gdat.numbenerevtt = gdat.numbener * gdat.numbevtt
# temp
gdat.boolintpanglcosi = False
if gdat.boolthindata:
gdat.factdatathin = 10
if gdat.typepixl != 'cart' or gdat.numbsidecart % gdat.factdatathin != 0:
raise Exception('Cannot thin the data.')
gdat.indxpixlkeep = gdat.indxpixlfull[::gdat.factdatathin]
gdat.numbpixlkeep = gdat.indxpixlkeep.size
gdat.indxpixlkill = np.setdiff1d(gdat.indxpixlfull, gdat.indxpixlkeep)
gdat.numbsidecart = int(gdat.numbsidecart / gdat.factdatathin)
gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)
gdat.lgalgrid = gdat.lgalgrid[gdat.indxpixlkeep]
gdat.bgalgrid = gdat.bgalgrid[gdat.indxpixlkeep]
gdat.indxpixlfull = gdat.indxpixlfull[gdat.indxpixlkeep]
# the function to measure time
# temp
gdat.strgfunctime = 'clck'
if gdat.strgfunctime == 'clck':
# time.clock was removed in Python 3.8; process_time is the closest replacement
gdat.functime = time.process_time
if gdat.strgfunctime == 'time':
gdat.functime = time.time
## longitude
gdat.numblgalpntsprob = gdat.numbsidepntsprob
gdat.numbbgalpntsprob = gdat.numbsidepntsprob
gdat.binspara.lgalpntsprob = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.numbsidepntsprob + 1)
gdat.binspara.bgalpntsprob = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.numbsidepntsprob + 1)
gdat.indxlgalpntsprob = np.arange(gdat.numblgalpntsprob)
gdat.indxbgalpntsprob = np.arange(gdat.numbbgalpntsprob)
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
if gmod.boollens or gdat.typedata == 'mock' and gmod.boollens:
retr_axis(gdat, 'defl')
retr_axis(gdat, 'deflsubh')
# lensing problem setup
## number of deflection components to plot
gdat.binspara.lgalcartmesh, gdat.binspara.bgalcartmesh = np.meshgrid(gdat.binspara.lgalcart, gdat.binspara.bgalcart, indexing='ij')
gdat.meanpara.lgalcartmesh, gdat.meanpara.bgalcartmesh = np.meshgrid(gdat.meanpara.lgalcart, gdat.meanpara.bgalcart, indexing='ij')
if gdat.typepixl == 'cart':
gdat.sizepixl = np.sqrt(gdat.apix)
gdat.indxsidecart = np.arange(gdat.numbsidecart)
gdat.indxpixlrofi = np.arange(gdat.numbpixlcart)
gdat.indxsidemesh = np.meshgrid(gdat.indxsidecart, gdat.indxsidecart, indexing='ij')
gdat.lgalgrid = gdat.meanpara.lgalcart[gdat.indxsidemesh[0].flatten()]
gdat.bgalgrid = gdat.meanpara.bgalcart[gdat.indxsidemesh[1].flatten()]
gdat.shapcart = (gdat.numbsidecart, gdat.numbsidecart)
gdat.lgalgridfull = np.copy(gdat.lgalgrid)
gdat.bgalgridfull = np.copy(gdat.bgalgrid)
gdat.lgalgridcart = gdat.lgalgrid.reshape(gdat.shapcart)
gdat.bgalgridcart = gdat.bgalgrid.reshape(gdat.shapcart)
gdat.indxpent = np.meshgrid(gdat.indxener, gdat.indxsidecart, gdat.indxsidecart, gdat.indxevtt, indexing='ij')
if gdat.typepixl == 'heal':
lgalheal, bgalheal, gdat.numbpixlfull, gdat.apix = tdpy.retr_healgrid(gdat.numbsideheal)
lgalheal = np.deg2rad(lgalheal)
bgalheal = np.deg2rad(bgalheal)
gdat.indxpixlrofi = np.where((np.fabs(lgalheal) < gdat.maxmgangdata) & (np.fabs(bgalheal) < gdat.maxmgangdata))[0]
gdat.indxpixlrofimarg = np.where((np.fabs(lgalheal) < 1.2 * gdat.maxmgangdata) & (np.fabs(bgalheal) < 1.2 * gdat.maxmgangdata))[0]
gdat.lgalgrid = lgalheal
gdat.bgalgrid = bgalheal
gdat.indxpixlfull = np.arange(gdat.numbpixlfull)
if gdat.typepixl == 'cart':
gdat.indxpixlcart = np.arange(gdat.numbpixlcart)
if gdat.evttbins:
# PSF class string
gdat.strgevtt = []
for m in gdat.indxevtt:
gdat.strgevtt.append('PSF%d' % gdat.indxevttincl[m])
# power spectra
if gdat.typepixl == 'cart':
setp_varb(gdat, 'anglodim', minm=0., maxm=1., boolinvr=True)
setp_varb(gdat, 'mpolodim', minm=0., maxm=1.)
#retr_axis(gdat, 'anglodim', boolinvr=True)
#retr_axis(gdat, 'mpolodim')
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
gdat.numbwvecodim = gdat.numbsidecart
gdat.minmanglodim = 0.
gdat.maxmanglodim = 2. * gdat.maxmgangdata
gdat.minmmpolodim = 0.
gdat.maxmmpolodim = 1. / 2. / gdat.sizepixl
if gmod.boollens or gdat.typedata == 'mock' and gmod.boollens:
# temp -- this should take the minima and maxima of adishost of the true metamodel into account
gdat.minmwvecodim = gdat.minmmpolodim / np.amax(gmod.adishost)
gdat.maxmwvecodim = gdat.maxmmpolodim / np.amin(gmod.adishost)
gdat.minmwlenodim = gdat.minmanglodim * np.amin(gmod.adishost)
gdat.maxmwlenodim = gdat.maxmanglodim * np.amax(gmod.adishost)
retr_axis(gdat, 'wvecodim', strgmodl=strgmodl)
retr_axis(gdat, 'wlenodim', strgmodl=strgmodl, boolinvr=True)
gdat.meanpara.wveclgal, gdat.meanpara.wvecbgal = np.meshgrid(gdat.meanpara.wvecodim, gdat.meanpara.wvecodim, indexing='ij')
gdat.meanpara.wvec = np.sqrt(gdat.meanpara.wveclgal**2 + gdat.meanpara.wvecbgal**2)
gdat.meanpara.mpollgal, gdat.meanpara.mpolbgal = np.meshgrid(gdat.meanpara.mpolodim, gdat.meanpara.mpolodim, indexing='ij')
gdat.meanpara.mpol = np.sqrt(gdat.meanpara.mpollgal**2 + gdat.meanpara.mpolbgal**2)
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
# element parameter vector indices
gmod.indxparagenrelemlgal = 0
gmod.indxparagenrelembgal = 1
gmod.indxparagenrelemflux = 2
gmod.indxparagenrelemsind = 3
gmod.indxparagenrelemcurv = 4
# curvature and the cutoff energy appear to share slot 4, since a given spectral model uses at most one of them
gmod.indxparagenrelemexpc = 4
# check the exposure map data structure
if gdat.boolcorrexpo:
booltemp = False
if gdat.expo.ndim != 3:
booltemp = True
if gdat.typepixl == 'cart' and gdat.expo.shape[1] != gdat.numbpixlcart:
booltemp = True
if booltemp:
raise Exception('Exposure does not have the right data structure. It should be a list of 3D np.arrays.')
if gdat.boolsqzeexpo:
gdat.expo *= 1e-10
if gdat.boolexplexpo:
gdat.expo *= 1e10
if gdat.boolthindata:
#gdat.expo[:, gdat.indxpixlkill, :] = 0.
expotemp = np.copy(gdat.expo[:, gdat.indxpixlfull[::gdat.factdatathin], :])
sbrttemp = np.copy(gdat.sbrtdata[:, gdat.indxpixlfull[::gdat.factdatathin], :])
gdat.expo = expotemp
gdat.sbrtdata = sbrttemp
# only include desired energy and PSF class bins
gdat.indxcubeincl = np.meshgrid(gdat.indxenerincl, gdat.indxpixlfull, gdat.indxevttincl, indexing='ij')
## exposure
if gdat.boolcorrexpo:
# temp -- for some reason lists of np.arrays require manual processing
gdat.expo = gdat.expo[tuple(gdat.indxcubeincl)]
if gdat.typedata == 'inpt':
gdat.sbrtdata = gdat.sbrtdata[tuple(gdat.indxcubeincl)]
## backgrounds
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
gmod.sbrtbacknormincl = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.sbrtbacknormincl[c] = gmod.sbrtbacknorm[c][tuple(gdat.indxcubeincl)]
# obtain cartesian versions of the maps
#if gdat.typepixl == 'cart':
# gdat.expocart = gdat.expo.reshape((gdat.numbener, gdat.numbsidecart, gdat.numbsidecart, gdat.numbevtt))
# for strgmodl in gdat.liststrgmodl:
# gmod.sbrtbacknormcart = []
# for c in getattr(gmod, 'gmod.indxback'):
# gmod.sbrtbacknormcart.append(gmod.sbrtbacknorm[c].reshape((gdat.numbener, gdat.numbsidecart, gdat.numbsidecart, gdat.numbevtt)))
# mask the exposure map
if gdat.listmask is not None:
for mask in gdat.listmask:
if mask[0] == 'sqre':
indxpixlmask = np.where((gdat.lgalgrid > mask[1]) & (gdat.lgalgrid < mask[2]) & (gdat.bgalgrid > mask[3]) & (gdat.bgalgrid < mask[4]))[0]
if mask[0] == 'circ':
indxpixlmask = np.where(np.sqrt((gdat.lgalgrid - mask[1])**2 + (gdat.bgalgrid - mask[2])**2) < mask[3])[0]
if mask[0] == 'hstr':
indxpixlmask = np.where((gdat.bgalgrid > mask[1]) & (gdat.bgalgrid < mask[2]))[0]
if gdat.typemaskexpo == 'zero':
gdat.expo[:, indxpixlmask, :] = 0.
if gdat.typemaskexpo == 'ignr':
gdat.expo[:, indxpixlmask, :] = 1e-49
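# pixels whose exposure is zeroed out are excluded from the ROI further below; 'ignr' instead retains them with a vanishingly small exposure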
# plotting
## ROI
if gdat.boolbinsspat:
gdat.exttrofi = np.array([gdat.minmlgaldata, gdat.maxmlgaldata, gdat.minmbgaldata, gdat.maxmbgaldata])
gdat.exttrofi *= gdat.anglfact
gdat.frambndrdata = gdat.maxmgangdata * gdat.anglfact
## marker size
gdat.minmmrkrsize = 100
gdat.maxmmrkrsize = 500
## marker line width
gdat.mrkrlinewdth = 3
## marker opacity
gdat.alphhist = 0.5
gdat.alphline = 0.5
gdat.alphbndr = 0.5
gdat.alphelem = 1.
gdat.alphmaps = 1.
# number of colorbar ticks in the maps
gdat.numbtickcbar = 11
## color bars
gdat.minmlpdfspatpriointp = np.log(1. / 2. / gdat.maxmgangdata) - 10.
gdat.maxmlpdfspatpriointp = np.log(1. / 2. / gdat.maxmgangdata) + 10.
gmod.scallpdfspatpriointp = 'self'
gdat.cmaplpdfspatpriointp = 'PuBu'
gdat.minmllikmaps = -10.
gdat.maxmllikmaps = 0.
gmod.scalllikmaps = 'asnh'
gdat.cmapllikmaps = 'YlGn'
gdat.minmperc = 0.
gdat.maxmperc = 1e2
gdat.scalperc = 'asnh'
gdat.cmapperc = 'afmhot'
gdat.minmpercresi = -1e2
gdat.maxmpercresi = 1e2
gdat.scalpercresi = 'asnh'
gdat.cmappercresi = 'coolwarm'
gdat.scalpara.cntpdata = 'logt'
gdat.cmappara.cntpdata = 'Greys'
gdat.scalpara.cntpmodl = 'logt'
gdat.cmappara.cntpmodl = 'Greys'
gdat.scalpara.cntpresi = 'asnh'
gdat.cmappara.cntpresi = make_cmapdivg('Red', 'Orange')
gdat.minmconv = 1e-2
gdat.maxmconv = 10.
gdat.scalconv = 'logt'
gdat.cmapconv = 'Purples'
gdat.minmconvelem = 1e-4
gdat.maxmconvelem = 1e-1
gdat.scalconvelem = 'logt'
gdat.cmapconvelem = 'Purples'
gdat.minms2nr = 0.
gdat.maxms2nr = 10.
gmod.scals2nr = 'asnh'
gdat.cmaps2nr = 'magma'
gdat.minmmagn = -1e2
gdat.maxmmagn = 1e2
gmod.scalmagn = 'asnh'
gdat.cmapmagn = 'BrBG'
gdat.minmdeflresiperc = -100.
gdat.maxmdeflresiperc = 100.
gmod.scaldeflresiperc = 'self'
gdat.cmapdeflresiperc = 'Oranges'
gdat.minmconvelemresi = -0.1
gdat.maxmconvelemresi = 0.1
gmod.scalconvelemresi = 'self'
gdat.cmapconvelemresi = 'PiYG'
gdat.minmconvelemresiperc = -100.
gdat.maxmconvelemresiperc = 100.
gmod.scalconvelemresiperc = 'self'
gdat.cmapconvelemresiperc = 'PiYG'
gdat.minmmagnresi = -10.
gdat.maxmmagnresi = 10.
gmod.scalmagnresi = 'self'
gdat.cmapmagnresi = 'PRGn'
gdat.minmmagnresiperc = -100.
gdat.maxmmagnresiperc = 100.
gmod.scalmagnresiperc = 'self'
gdat.cmapmagnresiperc = 'PRGn'
gdat.lgalgrid = gdat.lgalgrid[gdat.indxpixlrofi]
gdat.bgalgrid = gdat.bgalgrid[gdat.indxpixlrofi]
if gdat.boolcorrexpo:
if np.amax(gdat.expo) <= 0.:
raise Exception('Bad exposure.')
# temp
#gdat.expo[np.where(gdat.expo < 1e-50)] = 1e-50
# exclude voxels with vanishing exposure
if gdat.boolcorrexpo:
for i in gdat.indxener:
for m in gdat.indxevtt:
gdat.indxpixlrofi = np.intersect1d(gdat.indxpixlrofi, np.where(gdat.expo[i, :, m] > 0.)[0])
gdat.indxcuberofi = np.meshgrid(gdat.indxener, gdat.indxpixlrofi, gdat.indxevtt, indexing='ij')
gdat.numbpixl = gdat.indxpixlrofi.size
gdat.indxpixl = np.arange(gdat.numbpixl)
gdat.numbdata = gdat.numbener * gdat.numbevtt * gdat.numbpixl
#gdat.lgalgridrofi = gdat.lgalgrid[gdat.indxpixlrofi]
#gdat.bgalgridrofi = gdat.bgalgrid[gdat.indxpixlrofi]
if gdat.typedata == 'inpt':
gdat.sbrtdata = gdat.sbrtdata[tuple(gdat.indxcuberofi)]
## exposure
if gdat.boolcorrexpo:
gdat.expofull = np.copy(gdat.expo)
gdat.expo = gdat.expo[tuple(gdat.indxcuberofi)]
gdat.minmpara.expo = np.amin(gdat.expo[np.where(gdat.expo > 1e-100)])
gdat.maxmpara.expo = np.amax(gdat.expo)
# required to convert to an index of non-zero exposure pixels
#if gdat.minmpara.expo > 0:
# gdat.indxpixlroficnvt = np.arange(gdat.numbpixlfull)
#else:
# cntr = 0
# gdat.indxpixlroficnvt = full(gdat.numbpixlfull, -1)
# for j in gdat.indxpixlfull:
# if j in gdat.indxpixlrofi:
# gdat.indxpixlroficnvt[j] = cntr
# cntr += 1
#
## backgrounds
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
if gdat.typepixl == 'heal':
sbrtbackhealfull = [[] for c in gmod.indxback]
for c in gmod.indxback:
sbrtbackhealfull[c] = np.copy(gmod.sbrtbacknorm[c])
gmod.sbrtbacknormincl = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.sbrtbacknormincl[c] = gmod.sbrtbacknorm[c][tuple(gdat.indxcuberofi)]
if gdat.boolcorrexpo:
gdat.expototl = []
gdat.expototlmean = []
gdat.expototl = np.sum(gdat.expo, axis=2)
gdat.expototlmean = np.mean(gdat.expototl, axis=1)
if 'locl' in gdat.typeelemspateval:
if gdat.typeexpr == 'gene':
gdat.maxmangl = 1.
if gdat.typeexpr == 'ferm':
gdat.maxmangl = 20. / gdat.anglfact
if gdat.typeexpr == 'tess':
gdat.maxmangl = 25. / gdat.anglfact
if gdat.typeexpr == 'chan':
gdat.maxmangl = 15. / gdat.anglfact
if gdat.typeexpr == 'hubb':
gdat.maxmangl = 1. / gdat.anglfact
else:
gdat.maxmangl = gdat.maxmgangdata * np.sqrt(2.) * 2. * 1.1
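# by default, evaluate out to the full diagonal of the ROI with a 10% margin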
gdat.listnamespatmean = ['full']
if gdat.typeexpr == 'ferm':
gdat.listnamespatmean += ['innr']
gdat.numbspatmean = len(gdat.listnamespatmean)
gdat.indxspatmean = np.arange(gdat.numbspatmean)
gdat.listindxcubespatmean = [[] for b in gdat.indxspatmean]
gdat.indxcube = np.meshgrid(gdat.indxener, gdat.indxpixl, gdat.indxevtt, indexing='ij')
for b, namespatmean in enumerate(gdat.listnamespatmean):
if namespatmean == 'full':
gdat.listindxcubespatmean[b] = gdat.indxcube
if namespatmean == 'innr':
gdat.indxpixlinnr = np.where(np.sqrt(gdat.lgalgrid**2 + gdat.bgalgrid**2) < 5. / gdat.anglfact)[0]
gdat.listindxcubespatmean[b] = np.meshgrid(gdat.indxener, gdat.indxpixlinnr, gdat.indxevtt, indexing='ij')
if gdat.numbpixl > 1:
# store pixels as unit vectors
gdat.xdatgrid, gdat.ydatgrid, gdat.zaxigrid = retr_unit(gdat.lgalgrid, gdat.bgalgrid)
# construct a lookup table for converting HealPix pixels to ROI pixels
if gdat.typepixl == 'heal':
path = gdat.pathpixlcnvt + 'pixlcnvt_%09g.p' % gdat.maxmgangdata
if os.path.isfile(path):
fobj = open(path, 'rb')
gdat.pixlcnvt = pickle.load(fobj)
fobj.close()
else:
gdat.pixlcnvt = np.zeros(gdat.numbpixlfull, dtype=int) - 1
numbpixlmarg = gdat.indxpixlrofimarg.size
for k in range(numbpixlmarg):
dist = retr_angldistunit(gdat, lgalheal[gdat.indxpixlrofimarg[k]], bgalheal[gdat.indxpixlrofimarg[k]], gdat.indxpixl)
gdat.pixlcnvt[gdat.indxpixlrofimarg[k]] = np.argmin(dist)
fobj = open(path, 'wb')
pickle.dump(gdat.pixlcnvt, fobj, protocol=pickle.HIGHEST_PROTOCOL)
fobj.close()
# dummy pixel indices for full (nonlocal) element kernel evaluation
gdat.listindxpixl = []
if gdat.typedata == 'mock':
# take the larger of the true and fitting model element budgets when mock data are generated
numb = max(np.sum(gdat.true.maxmpara.numbelem), np.sum(gdat.fitt.maxmpara.numbelem)) + 2
else:
numb = np.sum(gmod.maxmpara.numbelem) + 2
for k in range(int(numb)):
gdat.listindxpixl.append([])
for kk in range(k):
gdat.listindxpixl[k].append(gdat.indxpixl)
# spatial averaging setup
# temp
# temp -- check if 1000 is too much
gdat.numbanglelem = 1000
# turn off relevant proposal types
gdat.numbprop = 5
gdat.indxprop = np.arange(gdat.numbprop)
gdat.numbstdp = gmod.numbparagenrbase - gmod.numbpopl
cntr = 0
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
setattr(gdat.fitt.inxparagenrscalelemkind, nameparagenrelem + 'pop%d' % l, gdat.numbstdp + cntr)
cntr += 1
gdat.numbstdp += cntr
gdat.lablstdp = np.copy(np.array(gmod.labltotlpara.genrbase[gmod.numbpopl:]))
gdat.namestdp = np.copy(np.array(gmod.nameparagenrbase[gmod.numbpopl:]))
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
gdat.lablstdp = np.append(gdat.lablstdp, getattr(gdat.fitt.labltotlpara, nameparagenrelem))
gdat.namestdp = np.append(gdat.namestdp, nameparagenrelem + 'pop%d' % l)
gdat.namestdp = gdat.namestdp.astype(object)
gdat.lablstdp = list(gdat.lablstdp)
gdat.indxstdp = np.arange(gdat.numbstdp)
gdat.indxstdpprop = gdat.indxstdp
# proposal scale indices for each parameter
indxelemfull = [list(range(gmod.maxmpara.numbelem[l])) for l in gmod.indxpopl]
gdat.fitt.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, indxelemfull, 'fitt')
gdat.indxstdppara = np.zeros(gmod.numbparagenrfull, dtype=int) - 1
cntr = 0
gdat.indxstdppara[gmod.numbpopl:gmod.numbparagenrbase] = gmod.indxparagenrbase[gmod.numbpopl:] - gmod.numbpopl
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
for indx in gdat.fitt.this.indxparagenrfullelem[l][nameparagenrelem]:
gdat.indxstdppara[indx] = cntr + gmod.numbparagenrbase - gmod.numbpopl
cntr += 1
# for the fitting model, define proposal type indices
for name, valu in gmod.indxpara.__dict__.items():
if not name.startswith('numbelem') and name != 'dist':
if not isinstance(valu, int):
continue
indxstdp = gdat.indxstdppara[valu]
setattr(gdat, 'indxstdp' + name, indxstdp)
# for each parameter in the fitting model, determine if there is a corresponding parameter in the generative model
gmod.corr = tdpy.gdatstrt()
for k in gmod.indxvarbscal:
name = gmod.namepara.scal[k]
try:
temp = getattr(gdat.true.this, name)
except:
temp = None
setattr(gmod.corr, name, temp)
gmod.corrparagenrscalbase = np.empty(gmod.numbparagenrbase)
for k in gmod.indxparagenrbase:
try:
gmod.corrparagenrscalbase[k] = getattr(gdat.true, gmod.nameparagenrbase[k])
except:
gmod.corrparagenrscalbase[k] = None
for namepara in gdat.fitt.listnameparaglob:
setattr(gdat.labltotlpara, namepara, getattr(gdat.fitt.labltotlpara, namepara))
# set parameter features common between true and fitting models
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
for namepara in gmod.namepara.kind:
try:
getattr(gdat.minmpara, namepara)
getattr(gdat.maxmpara, namepara)
except:
try:
setattr(gdat.minmpara, namepara, min(getattr(gdat.fitt.minmpara, namepara), getattr(gdat.true.minmpara, namepara)))
setattr(gdat.maxmpara, namepara, max(getattr(gdat.fitt.maxmpara, namepara), getattr(gdat.true.maxmpara, namepara)))
except:
try:
setattr(gdat.minmpara, namepara, getattr(gdat.fitt.minmpara, namepara))
setattr(gdat.maxmpara, namepara, getattr(gdat.fitt.maxmpara, namepara))
except:
setattr(gdat.minmpara, namepara, getattr(gdat.true.minmpara, namepara))
setattr(gdat.maxmpara, namepara, getattr(gdat.true.maxmpara, namepara))
# set plot limits for each model if not already set (for Gaussian, log-normal distributions)
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
for namepara in gmod.namepara.kind:
minm = getattr(gmod.minmpara, namepara)
maxm = getattr(gmod.maxmpara, namepara)
limt = np.array([minm, maxm])
setattr(gmod.limtpara, namepara, limt)
# construct bins for scalar parameters
for namevarbscal in gmod.namepara.scal:
# variables with only label and scaling
if namevarbscal == 'lliktotl' or namevarbscal == 'lpripena':
continue
print('temp -- place here setp_varb for all variables')
#retr_axis(gdat, namevarbscal)
gmod = gdat.fitt
# proposal scale
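# gdat.stdp holds the random-walk proposal standard deviations, one per proposal scale index; the defaults below are tuned separately for each experiment type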
if gmod.boollens or gdat.typedata == 'mock':
gdat.stdp = 1e-4 + np.zeros(gdat.numbstdp)
if gmod.typemodltran == 'pois' and gmod.numbpopl > 0:
if gmod.maxmpara.numbelem[0] > 0:
gdat.stdp[gdat.indxstdpmeanelempop0] = 1e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sigcen00evt0]] = 3e-2
gdat.stdp[gdat.indxstdppara[gmod.indxpara.bacpback0000en00]] = 1e-1
if gmod.boollens:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.lgalsour]] = 1e-3
gdat.stdp[gdat.indxstdppara[gmod.indxpara.bgalsour]] = 1e-3
gdat.stdp[gdat.indxstdppara[gmod.indxpara.fluxsour]] = 1e-2
if gdat.numbener > 1:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sindsour]] = 1e-3
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sizesour]] = 1e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.ellpsour]] = 1e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.anglsour]] = 1e-1
if gmod.typeemishost != 'none':
gdat.stdp[gdat.indxstdppara[gmod.indxpara.lgalhostisf0]] = 3e-4
gdat.stdp[gdat.indxstdppara[gmod.indxpara.bgalhostisf0]] = 3e-4
gdat.stdp[gdat.indxstdppara[gmod.indxpara.fluxhostisf0]] = 1e-3
if gdat.numbener > 1:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sindhostisf0]] = 1e-3
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sizehostisf0]] = 3e-3
if gmod.boollens:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.beinhostisf0]] = 1e-3
if gmod.typeemishost != 'none':
gdat.stdp[gdat.indxstdppara[gmod.indxpara.ellphostisf0]] = 1e-2
gdat.stdp[gdat.indxstdppara[gmod.indxpara.anglhostisf0]] = 1e-2
gdat.stdp[gdat.indxstdppara[gmod.indxpara.serihostisf0]] = 1e-2
if gmod.boollens:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sherextr]] = 1e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sangextr]] = 3e-2
else:
if gdat.typeexpr == 'ferm':
gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)
if gmod.typemodltran == 'pois' and gmod.numbparaelem > 0:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.meanelem]] = 4e-2
for l in gmod.indxpopl:
if gmod.typeprioflux[l] == 'powr':
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sloppriofluxpop0]] = 1e-1
else:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.brekpriofluxpop0]] = 1e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sloplowrpriofluxpop0]] = 1e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.slopupprpriofluxpop0]] = 1e-1
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 5e-3
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en01')]] = 1e-2
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en02')]] = 3e-2
if 'fdfm' in gmod.listnameback:
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0001en00')]] = 8e-4
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0001en01')]] = 1e-3
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0001en02')]] = 2e-3
if 'dark' in gmod.listnameback:
gmod.indxbackdark = gmod.listnameback.index('dark')
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback%04den00' % gmod.indxbackdark)]] = 2e-2
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback%04den01' % gmod.indxbackdark)]] = 2e-2
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback%04den02' % gmod.indxbackdark)]] = 3e-2
if gmod.numbparaelem > 0:
gdat.stdp[gdat.indxstdppop0flux] = 8e-2
if gmod.spectype[0] == 'colr':
gdat.stdp[gdat.indxstdppop0sindcolr0001] = 8e-2
gdat.stdp[gdat.indxstdppop0sindcolr0002] = 2e-1
if gdat.typeexpr == 'chan':
gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)
if gmod.typemodltran == 'pois' and gmod.numbparaelem > 0:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.meanelem]] = 2e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.sloppriofluxpop0]] = 2e-1
if gmod.numbparaelem > 0 and gdat.boolbinsspat:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.psfp]] = 4e-1
if gdat.indxenerincl.size == 5 and (gdat.indxenerincl == np.arange(5)).all():
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 2e-2
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en01')]] = 3e-2
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en02')]] = 2e-2
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en03')]] = 2e-2
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en04')]] = 1e-2
elif gdat.indxenerincl.size == 1 and (gdat.indxenerincl == np.array([2])).all():
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 2e-2
if gmod.numbparaelem > 0:
if gdat.boolbinsspat:
gdat.stdp[gdat.fitt.inxparagenrscalelemkind.lgalpop0] = 2e-2
gdat.stdp[gdat.fitt.inxparagenrscalelemkind.bgalpop0] = 2e-2
if gdat.numbener > 1:
gdat.stdp[gdat.indxstdppop0sind] = 2e-1
gdat.stdp[gdat.indxstdppop0flux] = 2e-1
if gdat.typeexpr == 'gene':
gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)
if gmod.typemodltran == 'pois' and gmod.numbparaelem > 0:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.meanelem]] = 2e-1
gdat.stdp[gdat.indxstdppara[gmod.indxpara.slopprionobjpop0]] = 3e-1
try:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.gwdtsloppop0]] = 3e-1
except:
pass
if gmod.typeevalpsfn != 'none' and gdat.boolmodipsfn:
gdat.stdp[gdat.indxstdppara[gmod.indxpara.psfp]] = 4e-1
gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 2e-2
if gmod.numbparaelem > 0:
gdat.stdp[gdat.fitt.inxparagenrscalelemkind.lgalpop0] = 4e-2
gdat.stdp[gdat.fitt.inxparagenrscalelemkind.bgalpop0] = 4e-2
gdat.stdp[gdat.fitt.inxparagenrscalelemkind.nobjpop0] = 3e-1
try:
gdat.stdp[gdat.indxstdppop0gwdt] = 5e-1
except:
pass
if gdat.typeexpr == 'fire':
gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)
if gdat.boolsqzeprop:
gdat.stdp[:] = 1e-100
if gdat.boolexplprop:
gdat.stdp[:] = 1e100
if (gdat.stdp > 1e100).any():
raise Exception('')
if (gdat.stdp == 0).any():
raise Exception('')
if gdat.stdp.size != gdat.numbstdp or gdat.indxstdp.size != gdat.stdp.size:
print('gdat.stdp')
summgene(gdat.stdp)
print('gdat.numbstdp')
print(gdat.numbstdp)
print('gdat.indxstdp')
print(gdat.indxstdp)
raise Exception('')
if gdat.typeverb > 1:
# temp
for strgmodl in gdat.liststrgmodl:
print('strgmodl')
print(strgmodl)
print('Fixed dimensional parameters:')
print('%20s%25s%5s%20s%20s' % ('name', 'labltotl', 'scal', 'minm', 'maxm'))
for k in gmod.indxparagenrbase:
print('%20s%25s%5s%20.6g%20.6g' % (gmod.nameparagenrbase[k], gmod.labltotlpara.genrbase[k], gmod.scalpara.genrbase[k], \
gmod.minmpara.genrbase[k], gmod.maxmpara.genrbase[k]))
print('Element parameters')
print('%20s%20s' % ('nameparagenrelem', 'scalcomp'))
for l in gmod.indxpopl:
for nameparagenrelem, scalcomp in zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l]):
print('%20s%20s' % (nameparagenrelem, scalcomp))
print('%20s%20s' % ('strgmodu', 'pdfnmodu'))
for l in gmod.indxpopl:
for strgmodu, pdfnmodu in zip(gmod.namepara.genrelemmodu[l], gmod.liststrgpdfnmodu[l]):
print('%20s%20s' % (strgmodu, pdfnmodu))
print('%20s%20s' % ('strgfeat', 'pdfnprio'))
for l in gmod.indxpopl:
for strgfeat, pdfnprio in zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l]):
print('%20s%20s' % (strgfeat, pdfnprio))
# proposals
# terms in the log-acceptance probability
gdat.listnametermlacp = []
gdat.listlabltermlacp = []
for l in gmod.indxpopl:
if gmod.numbpopl > 1:
strgpopl = '%d,' % l
else:
strgpopl = ''
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
labl = getattr(gmod.lablrootpara, nameparagenrelem)
# assumption: register a name for each per-parameter term so the name and label lists stay aligned
gdat.listnametermlacp += [nameparagenrelem + 'pop%d' % l]
gdat.listlabltermlacp += ['$u_{%s%s}$' % (strgpopl, labl)]
gdat.listnametermlacp += ['ltrp']
gdat.listlabltermlacp += [r'$\ln P(q)$']
gdat.listnametermlacp += ['ljcb']
gdat.listlabltermlacp += [r'$\ln \alpha_j$']
gdat.numbtermlacp = len(gdat.listnametermlacp)
gdat.indxtermlacp = np.arange(gdat.numbtermlacp)
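# ltrp and ljcb are presumably the log transition-probability ratio and the log Jacobian determinant that enter the acceptance probability of trans-dimensional (split/merge) moves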
if gdat.probtran is None:
if gmod.numbparaelem > 0:
gdat.probtran = 0.4
else:
gdat.probtran = 0.
if gdat.probspmr is None:
if gmod.numbparaelem > 0:
gdat.probspmr = gdat.probtran / 2.
else:
gdat.probspmr = 0.
gdat.probbrde = 1. - gdat.probspmr
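# by default, trans-dimensional moves are proposed with probability probtran, half of them as splits or merges (probspmr); probbrde is the complement of the split/merge probability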
if gdat.probbrde < 0:
raise Exception('')
gdat.lablproptype = ['Within']
gdat.nameproptype = ['with']
if gmod.numbparaelem > 0:
gdat.lablproptype += ['Birth', 'Death', 'Split', 'Merge']
gdat.nameproptype += ['brth', 'deth', 'splt', 'merg']
gdat.numbproptype = len(gdat.lablproptype)
gdat.nameproptype = np.array(gdat.nameproptype)
cntr = tdpy.cntr()
if gmod.numbparaelem > 0:
# birth
gdat.indxproptypebrth = cntr.incr()
# death
gdat.indxproptypedeth = cntr.incr()
if gdat.probspmr > 0.:
# split
gdat.indxproptypesplt = cntr.incr()
# merge
gdat.indxproptypemerg = cntr.incr()
gdat.indxproptype = np.arange(gdat.numbproptype)
gmod.indxpara.prop = np.arange(gmod.numbparagenrbase)
gdat.numbstdpparagenrscalbase = gmod.numbparagenrbase - gmod.numbpopl
#### filter for model elements
gdat.listnamefilt = ['']
if gdat.priofactdoff != 1.:
gdat.listnamefilt += ['pars']
#### model elements inside the image
if gdat.boolelempsfnanyy:
gdat.listnamefilt += ['bndr']
#### model subhalos inside high normalized relevance region
if 'lens' in gdat.typeelem:
gdat.listnamefilt += ['nrel']
if gdat.typedata == 'inpt':
proc_cntpdata(gdat)
# interpolated prior for models
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
lpdfprio = [None for l in gmod.indxpopl]
lpdfprioobjt = [None for l in gmod.indxpopl]
lpdfpriointp = [None for l in gmod.indxpopl]
for l in gmod.indxpopl:
for strgfeat, strgpdfn in zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l]):
if strgpdfn == 'tmplgrad':
pdfnpriotemp = np.empty((gdat.numbsidecart + 1, gdat.numbsidecart + 1))
lpdfprio[l], lpdfprioobjt[l] = retr_spatprio(gdat, pdfnpriotemp)
lpdfpriointp[l] = lpdfprioobjt[l](gdat.meanpara.bgalcart, gdat.meanpara.lgalcart)
gdat.indxpoplcrin = 0
if gmod.numbparaelem > 0:
if gdat.rtagmock is not None:
path = gdat.pathoutprtagmock + 'gdatfinlpost'
gdatmock = readfile(path)
gdat.liststrgvarbhist = []
cntr = 0
for l0 in gmod.indxpopl:
for a, strgfeatfrst in enumerate(gmod.namepara.genrelem[l0]):
if strgfeatfrst == 'spec':
continue
gdat.liststrgvarbhist.append([[] for k in range(5)])
gdat.liststrgvarbhist[cntr][0] = 'hist' + strgfeatfrst + 'pop%d' % l0
gdat.liststrgvarbhist[cntr][1] = strgfeatfrst
if gdat.rtagmock is not None:
# cmpl
gdat.liststrgvarbhist[cntr][3] = [[] for qq in gdatmock.indxrefr]
# fdis
gdat.liststrgvarbhist[cntr][4] = [[] for qq in gdatmock.indxrefr]
for qq in gdatmock.indxrefr:
booltemp = True
if strgfeatfrst[-4:] in gdat.listnamerefr:
q = gdat.listnamerefr.index(strgfeatfrst[-4:])
booltemp = not strgfeatfrst in gdat.refr.namepara.elemonly[q][l0]
if booltemp:
gdat.liststrgvarbhist[cntr][3][qq] = strgfeatfrst + 'pop%dpop%d' % (l0, qq)
gdat.liststrgvarbhist[cntr][4][qq] = strgfeatfrst + 'pop%dpop%d' % (qq, l0)
cntr += 1
for b, strgfeatseco in enumerate(gmod.namepara.genrelem[l0]):
if strgfeatseco == 'spec':
continue
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
gdat.liststrgvarbhist.append([[] for k in range(5)])
gdat.liststrgvarbhist[cntr][0] = 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0
gdat.liststrgvarbhist[cntr][1] = strgfeatfrst
gdat.liststrgvarbhist[cntr][2] = strgfeatseco
gdat.liststrgvarbhist[cntr][3] = [[] for qq in gdat.indxrefr]
gdat.liststrgvarbhist[cntr][4] = [[] for qq in gdat.indxrefr]
if gdat.rtagmock is not None:
booltempfrst = True
booltempseco = True
if strgfeatfrst[-4:] in gdat.listnamerefr:
q = gdat.listnamerefr.index(strgfeatfrst[-4:])
booltempfrst = not strgfeatfrst in gdat.refr.namepara.elemonly[q][l0]
if strgfeatseco[-4:] in gdat.listnamerefr:
q = gdat.listnamerefr.index(strgfeatseco[-4:])
booltempseco = not strgfeatseco in gdat.refr.namepara.elemonly[q][l0]
for qq in gdatmock.indxrefr:
if booltempfrst and booltempseco:
gdat.liststrgvarbhist[cntr][3][qq] = strgfeatfrst + strgfeatseco + 'pop%dpop%d' % (l0, qq)
gdat.liststrgvarbhist[cntr][4][qq] = strgfeatfrst + strgfeatseco + 'pop%dpop%d' % (qq, l0)
elif booltempfrst:
gdat.liststrgvarbhist[cntr][3][qq] = strgfeatfrst + 'pop%dpop%d' % (l0, qq)
gdat.liststrgvarbhist[cntr][4][qq] = strgfeatfrst + 'pop%dpop%d' % (qq, l0)
elif booltempseco:
gdat.liststrgvarbhist[cntr][3][qq] = strgfeatseco + 'pop%dpop%d' % (l0, qq)
gdat.liststrgvarbhist[cntr][4][qq] = strgfeatseco + 'pop%dpop%d' % (qq, l0)
cntr += 1
# selection effects
if gdat.typedata == 'inpt' and gmod.numbparaelem > 0:
if gdat.numbsampboot is None:
gdat.numbsampboot = gdat.numbsamp
gdat.boolcrex = False
if gdat.rtagmock is not None:
for qq in gdatmock.indxrefr:
for q in gdat.indxrefr:
for l in gmod.indxpopl:
for strgfeatfrst in gmod.namepara.genrelem[l]:
if gdat.typeexpr == 'chan' and strgfeatfrst == 'redswo08':
crex = (1. + gdat.meanpara.redswo08)**2
else:
crex = None
setattr(gdat, 'crex' + strgfeatfrst + 'pop%dpop%dpop%d' % (q, qq, l), crex)
for strgfeatseco in gmod.namepara.genrelem[l]:
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
if gdat.typeexpr == 'chan' and (strgfeatfrst == 'redswo08' or strgfeatseco == 'redswo08'):
crex = np.empty((gdat.numbbinsplot, gdat.numbbinsplot))
if strgfeatfrst == 'redswo08':
crex[:, :] = (1. + gdat.meanpara.redswo08[:, None])**2
else:
crex[:, :] = (1. + gdat.meanpara.redswo08[None, :])**2
else:
crex = None
setattr(gdat, 'crex' + strgfeatfrst + strgfeatseco + 'pop%dpop%dpop%d' % (q, qq, l), crex)
if gdat.refr.numbelemtotl > 0:
for listtemp in gdat.liststrgvarbhist:
strgvarb = listtemp[0]
for qq in gdatmock.indxrefr:
for q in gdat.indxrefr:
nametemp = listtemp[1]
if len(listtemp[2]) > 0:
nametemp += listtemp[2]
l = int(listtemp[4][qq].split('pop')[2][0])
nametemp += 'pop%dpop%dpop%d' % (q, qq, l)
crexhist = getattr(gdat, 'crex' + nametemp)
if crexhist is not None:
gdat.boolcrex = True
## internal correction
gdat.boolcrin = gdat.typedata == 'inpt' and gdat.rtagmock is not None
if gmod.numbparaelem > 0:
# variables for which two dimensional functions will be plotted
gdat.liststrgelemtdimvarbinit = ['hist']
gdat.liststrgelemtdimvarbfram = deepcopy(gdat.liststrgelemtdimvarbinit)
if gdat.boolinforefr:
gdat.liststrgelemtdimvarbfram += ['cmpl', 'fdis']
gdat.liststrgelemtdimvarbfinl = deepcopy(gdat.liststrgelemtdimvarbfram)
if gdat.typedata == 'inpt':
if gdat.boolcrex:
gdat.liststrgelemtdimvarbfinl += ['excr']
if gdat.boolcrin:
gdat.liststrgelemtdimvarbfinl += ['incr']
gdat.liststrgelemtdimvarbanim = deepcopy(gdat.liststrgelemtdimvarbfram)
gdat.liststrgfoldinit = ['']
if gmod.numbparaelem > 0 or gdat.typedata == 'mock' and gmod.numbparaelem > 0:
gdat.liststrgfoldinit += ['histodim/', 'histtdim/', 'scattdim/', 'cmpltdim/']
gdat.liststrgfoldfram = ['']
if gmod.numbparaelem > 0:
gdat.liststrgfoldfram += ['scattdim/']
gdat.liststrgfoldfinl = ['']
if gdat.boolinforefr and gmod.numbparaelem > 0:
gdat.liststrgfoldfram += ['assc']
gdat.liststrgfoldfinl += ['assc']
gdat.liststrgfoldanim = deepcopy(gdat.liststrgfoldfram)
if gmod.numbparaelem > 0:
for strgdims in ['odim/', 'tdim/']:
for strgelemtdimvarb in gdat.liststrgelemtdimvarbfram:
gdat.liststrgfoldfram += [strgelemtdimvarb + strgdims]
for strgelemtdimvarb in gdat.liststrgelemtdimvarbfinl:
gdat.liststrgfoldfinl += [strgelemtdimvarb + strgdims]
# make folders
#gdat.pathprio = gdat.pathplotrtag + 'prio/'
#gdat.pathpost = gdat.pathplotrtag + 'post/'
make_fold(gdat)
setp_indxswepsave(gdat)
if gdat.typeopti == 'hess':
pathopti = gdat.pathoutprtag + 'opti.h5'
if os.path.exists(pathopti):
thisfile = h5py.File(pathopti, 'r')
if thisfile['stdp'][()].size == gdat.stdp.size:
print('Recovering the proposal scale from the previous run...')
gdat.stdp = thisfile['stdp'][()]
thisfile.close()
if gdat.rtagmock is not None:
if gdat.typedata == 'inpt':
path = gdat.pathoutprtagmock + 'gdatfinlpost'
booltemp = True
try:
gdatmock = readfile(path)
except:
booltemp = False
gdat.rtagmock = None
if booltemp:
numbparaelem = gdatmock.true.numbparaelem
if not np.array_equal(gdatmock.true.indxpopl, gmod.indxpopl):
raise Exception('')
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':
continue
if strgfeat[-4:] in gdat.listnamerefr:
continue
reca = getattr(gdatmock.true, 'reca' + strgfeat + 'pop%d' % l)
setattr(gdat.true, 'reca' + strgfeat + 'pop%d' % l, reca)
gmod.namepara.genrelem = gdatmock.true.namepara.genrelem
if gmod.numbparaelem > 0 and 'locl' in gmod.typeelemspateval:
gdat.numbprox = 3
gdat.indxprox = np.arange(gdat.numbprox)
minmparagenrscalelemampl = getattr(gdat.fitt.minmpara, gmod.nameparagenrelemampl[0])
maxmparagenrscalelemampl = getattr(gdat.fitt.maxmpara, gmod.nameparagenrelemampl[0])
gdat.binspara.prox = np.logspace(np.log10(minmparagenrscalelemampl), np.log10(maxmparagenrscalelemampl), gdat.numbprox + 1)
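# proximity bins partition the amplitude (e.g., flux) range of elements logarithmically; brighter elements are assigned larger evaluation radii below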
# determine the maximum angle at which the contribution of the element will be computed
if gdat.boolbinsspat:
if gdat.maxmangleval is None:
if gdat.typeexpr == 'chan':
gdat.maxmangleval = np.array([5., 6., 9.]) / gdat.anglfact
elif gdat.typeexpr == 'gene':
gdat.maxmangleval = np.array([0.1, 0.2, 0.3]) / gdat.anglfact
elif gdat.typeexpr == 'ferm':
gdat.maxmangleval = np.array([7., 9., 15.]) / gdat.anglfact
else:
gdat.maxmangleval = np.empty(gdat.numbprox)
for h in gdat.indxprox:
if gdat.specfraceval == 0:
# gdat.maxmgang is defined only further below; its value, sqrt(2) * gdat.maxmgangdata, is used directly here
gdat.maxmangleval[h] = 3. * np.sqrt(2.) * gdat.maxmgangdata
else:
frac = min(1e-2, gdat.specfraceval * gdat.binspara.prox[0] / gdat.binspara.prox[h+1])
# temp -- gmodstat must provide the evaluated PSF of the current model state
psfnwdth = retr_psfnwdth(gdat, gmodstat.psfn, frac)
gdat.indxmaxmangl = np.unravel_index(np.argmax(psfnwdth), psfnwdth.shape)
gdat.maxmangleval[h] = psfnwdth[gdat.indxmaxmangl]
if gdat.typeverb > 1:
if 'locl' in gmod.typeelemspateval:
print('maxmangleval')
print(gdat.anglfact * gdat.maxmangleval, ' [%s]' % gdat.strganglunit)
setp_varb(gdat, 'angl', minm=0., maxm=10.)
if gdat.boolelempsfnanyy and gdat.maxmpara.angl < np.amax(gdat.maxmangleval):
print('gdat.maxmpara.angl')
print(gdat.maxmpara.angl)
print('gdat.maxmangleval')
print(gdat.maxmangleval)
raise Exception('Angular axis is too short.')
# make a look-up table of nearby pixels for each pixel
path = gdat.pathprox + 'indxprox_%08d_%s_%0.4g_%0.4g_%04d.p' % (gdat.numbpixl, gdat.typepixl, 1e2 * np.amin(gdat.maxmangleval), \
1e2 * np.amax(gdat.maxmangleval), gdat.numbprox)
if gdat.typeverb > 1:
print('gdat.typepixl')
print(gdat.typepixl)
print('gdat.minmlgaldata')
print(gdat.minmlgaldata)
print('gdat.minmbgaldata')
print(gdat.minmbgaldata)
print('gdat.maxmlgaldata')
print(gdat.maxmlgaldata)
print('gdat.maxmbgaldata')
print(gdat.maxmbgaldata)
if gdat.typeverb > 0:
print('Element evaluation will be performed up to')
if gdat.boolbinsspat:
print(gdat.maxmangleval * gdat.anglfact)
if os.path.isfile(path):
if gdat.typeverb > 0:
print('Previously computed nearby pixel look-up table will be used.')
print('Reading %s...' % path)
fobj = open(path, 'rb')
gdat.indxpixlprox = pickle.load(fobj)
fobj.close()
else:
if gdat.typeverb > 0:
print('Computing the look-up table...')
gdat.indxpixlprox = [[] for h in gdat.indxprox]
cntrsave = -1.
# temp
for j in gdat.indxpixl:
dist = retr_angldistunit(gdat, gdat.lgalgrid[j], gdat.bgalgrid[j], gdat.indxpixl)
dist[j] = 0.
for h in gdat.indxprox:
indxpixlproxtemp = np.where(dist < gdat.maxmangleval[h])[0]
if indxpixlproxtemp.size < 10:
raise Exception('Pixel hash list should not have fewer than 10 pixels.')
if indxpixlproxtemp.size > 2e4:
# a sentinel of -1 stands for the full pixel list
indxpixlproxtemp = -1
if gdat.maxmangl < np.sqrt(2.) * gdat.maxmgangdata:
raise Exception('Angular axis used to interpolate the PSF should be longer.')
gdat.indxpixlprox[h].append(indxpixlproxtemp)
cntrsave = tdpy.show_prog(j, gdat.numbpixl, cntrsave)
fobj = open(path, 'wb')
pickle.dump(gdat.indxpixlprox, fobj, protocol=pickle.HIGHEST_PROTOCOL)
fobj.close()
gdat.numbpixlprox = np.zeros(gdat.numbprox)
for h in gdat.indxprox:
for j in gdat.indxpixl:
if isinstance(gdat.indxpixlprox[h][j], int):
# the -1 sentinel stands for the full pixel list
gdat.numbpixlprox[h] += gdat.numbpixl
else:
gdat.numbpixlprox[h] += len(gdat.indxpixlprox[h][j])
gdat.numbpixlprox[h] /= len(gdat.indxpixlprox[h])
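# numbpixlprox[h] is the mean length of the pixel hash lists in proximity bin h, a diagnostic of the cost of element evaluation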
if (gdat.numbpixlprox - np.mean(gdat.numbpixlprox) == 0.).all():
raise Exception('Number of pixels in the hash lists should be different.')
gdat.minmgang = 1e-3 * np.sqrt(2.) * gdat.maxmgangdata
gdat.maxmgang = np.sqrt(2.) * gdat.maxmgangdata
# try to pass true metamodel minima and maxima to common minima and maxima when that feature does not exist in the fitting metamodel
if gdat.typedata == 'mock':
for q in gdat.indxrefr:
for strgfeat in gmod.namepara.genrelem[q]:
booltemp = False
for l in gmod.indxpopl:
if strgfeat in gmod.namepara.genrelem[l]:
booltemp = True
if not booltemp:
try:
setattr(gdat.minmpara, strgfeat + gdat.listnamerefr[q], getattr(gdat.true.minmpara, strgfeat))
setattr(gdat.maxmpara, strgfeat + gdat.listnamerefr[q], getattr(gdat.true.maxmpara, strgfeat))
except:
pass
## reference spectra
if gdat.listprefsbrtlabltotl is None:
if gdat.typeexpr == 'chan' and gdat.boolbinsspat:
gdat.listprefsbrtener = [[[] for k in range(3)]]
gdat.listprefsbrtsbrt = [[[] for k in range(3)]]
gdat.listprefsbrtlabltotl = ['Moretti+(2012)']
gdat.listprefsbrttype = ['shad']
for k, strgextn in enumerate(['', '_lower', '_higher']):
path = gdat.pathinpt + 'Moretti2012%s.csv' % strgextn
enerrefrplot = np.loadtxt(path, delimiter=',')[:, 0]
sbrtrefrplot = np.loadtxt(path, delimiter=',')[:, 1] / gdat.factergskevv / enerrefrplot**2 * (180. / np.pi)**2
gdat.listprefsbrtener[0][k] = enerrefrplot
gdat.listprefsbrtsbrt[0][k] = sbrtrefrplot
# temp
if gdat.numbener > 1:
if gdat.enerpivt == 0.:
raise Exception('Pivot energy cannot be zero.')
#if gdat.typeexpr != 'fire':
# gdat.enerexpcfact = gdat.enerpivt - gdat.meanpara.ener
#if gmod.numbparaelem > 0 and gdat.numbener > 1:
# minmsinddistmeanpop0 = getattr(gmod, 'minmsinddistmeanpop0')
# factspecener = (gdat.meanpara.ener / gdat.enerpivt)**(-np.sqrt(np.amin(minmsinddistmeanpop0) * np.amax(maxmsinddistmeanpop0)))
else:
pass
#gdat.factspecener = np.array([1.])
# temp -- this assumes square ROI
if gdat.boolbinsspat:
gdat.frambndrmodl = gdat.maxmlgaldata * gdat.anglfact
if gmod.boollenshost or gdat.typedata == 'mock' and gmod.boollenshost:
if gdat.typesers == 'intp':
# construct pixel-convolved Sersic surface brightness template
gdat.factsersusam = 10
maxmlgal = 4. * np.sqrt(2.) * gdat.maxmlgal
gdat.numblgalsers = int(np.ceil(maxmlgal / gdat.sizepixl))
gdat.numblgalsersusam = (1 + gdat.numblgalsers) * gdat.factsersusam
retr_axis(gdat, 'lgalsers')
retr_axis(gdat, 'lgalsersusam')
retr_axis(gdat, 'bgalsersusam')
gdat.numbhalfsers = 20
gdat.numbindxsers = 20
retr_axis(gdat, 'halfsers')
retr_axis(gdat, 'indxsers')
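# the Sersic surface-brightness template is tabulated on a grid of half-light radius and Sersic index, evaluated on a grid oversampled by factsersusam and then averaged within each pixel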
gdat.binspara.lgalsersusammesh, gdat.binspara.bgalsersusammesh = np.meshgrid(gdat.binspara.lgalsersusam, gdat.binspara.bgalsersusam, indexing='ij')
gdat.binspara.radisersusam = np.sqrt(gdat.binspara.lgalsersusammesh**2 + gdat.binspara.bgalsersusammesh**2)
gdat.sersprofcntr = np.empty((gdat.numblgalsers + 1, gdat.numbhalfsers + 1, gdat.numbindxsers + 1))
gdat.sersprof = np.empty((gdat.numblgalsers + 1, gdat.numbhalfsers + 1, gdat.numbindxsers + 1))
for n in range(gdat.numbindxsers + 1):
for k in range(gdat.numbhalfsers + 1):
profusam = retr_sbrtsersnorm(gdat.binspara.radisersusam, gdat.binspara.halfsers[k], indxsers=gdat.binspara.indxsers[n])
## take the pixel average
indxbgallowr = gdat.factsersusam * (gdat.numblgalsers + 1) // 2
indxbgaluppr = gdat.factsersusam * (gdat.numblgalsers + 3) // 2
for a in range(gdat.numblgalsers):
indxlgallowr = gdat.factsersusam * a
indxlgaluppr = gdat.factsersusam * (a + 1) + 1
gdat.sersprofcntr[a, k, n] = profusam[(indxlgallowr + indxlgaluppr) // 2, 0]
gdat.sersprof[a, k, n] = np.mean(profusam[indxlgallowr:indxlgaluppr, :])
temp, indx = np.unique(gdat.binspara.lgalsers, return_index=True)
gdat.binspara.lgalsers = gdat.binspara.lgalsers[indx]
gdat.sersprof = gdat.sersprof[indx, :, :]
gdat.sersprofcntr = gdat.sersprofcntr[indx, :, :]
indx = np.argsort(gdat.binspara.lgalsers)
gdat.binspara.lgalsers = gdat.binspara.lgalsers[indx]
gdat.sersprof = gdat.sersprof[indx, :, :]
gdat.sersprofcntr = gdat.sersprofcntr[indx, :, :]
#for strg, valu in gmod.cmappara.__dict__.items():
# retr_ticklabl(gdat, strg)
# generate true data
if gdat.typedata == 'mock':
if gdat.typeverb > 0:
print('Generating mock data...')
if gdat.seedtype == 'rand':
np.random.seed()
else:
if gdat.typeverb > 0:
print('Setting the seed for the RNG to %d...' % gdat.seedtype)
np.random.seed(gdat.seedtype)
## unit sample vector
gdat.true.this.paragenrunitfull = np.random.rand(gdat.true.numbparagenrfull)
gdat.true.this.paragenrscalfull = np.zeros(gdat.true.numbparagenrfull)
if gdat.true.numbparaelem > 0:
gdat.true.this.numbelempopl = np.empty(gdat.true.numbpopl, dtype=int)
for l in gdat.true.indxpopl:
gdat.true.this.paragenrunitfull[gdat.true.indxpara.numbelem[l]] = getattr(gdat.true.this, 'numbelempop%d' % l)
gdat.true.this.numbelempopl[l] = getattr(gdat.true.this, 'numbelempop%d' % l)
gdat.true.this.indxelemfull = [[] for l in gdat.true.indxpopl]
for l in gdat.true.indxpopl:
gdat.true.this.indxelemfull[l] = list(range(gdat.true.numbelem[l]))
gdat.true.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.true.this.indxelemfull, 'true')
else:
gdat.true.this.indxelemfull = []
gdat.true.this.indxparagenrfullelem = None
if gdat.true.numbparaelem > 0:
if gdat.seedelem is None:
np.random.seed()
else:
np.random.seed(gdat.seedelem)
gdat.true.this.paragenrunitfull[gdat.true.numbparagenrbase:] = np.random.rand(gdat.true.numbparagenrelemtotl)
gdat.true.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'true', gdat.true.this.paragenrunitfull, gdat.true.this.indxparagenrfullelem)
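# the unit-hypercube sample is mapped to physical parameter values through the inverse CDF of the respective priors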
# impose true values (valu)
for k in gdat.true.indxparagenr:
if gdat.true.numbparaelem > 0 and (k in gdat.true.indxpara.numbelem or \
gdat.true.typemodltran == 'pois' and k in gdat.true.indxpara.meanelem):
continue
# assume the true PSF
if gdat.true.typeevalpsfn != 'none' and gdat.numbpixl > 1 and k in gdat.true.indxpara.psfp:
gdat.true.this.paragenrscalfull[k] = gdat.true.psfpexpr[k-gdat.true.indxpara.psfp[0]]
else:
## read input mock model parameters
try:
# impose user-defined true parameter
gdat.true.this.paragenrscalfull[k] = getattr(gdat.true, gdat.true.namepara.genrscalfull[k])
except:
pass
if gdat.typeverb > 0:
show_paragenrscalfull(gdat, None, strgmodl='true')
if gmod.boollenshost:
proc_samp(gdat, None, 'this', 'true', boolinit=True)
#for strgmodl in gdat.liststrgmodl:
# gmod = getattr(gdat, strgmodl)
# print('gmod.minmpara.numbelempop0')
# print(gmod.minmpara.numbelempop0)
# print('gmod.minmpara.numbelem')
# print(gmod.minmpara.numbelem)
#raise Exception('')
# construct bins for element parameters of the true model
for strgmodl in ['true']:
gmod = getattr(gdat, strgmodl)
# list of names for element parameters, concatenated across all populations
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
# temp -- does not cover the case when different populations have parameters with the same name
for strgfeat in gmod.listnameparaglob:
#for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat[:-4] == 'etag':
continue
#retr_axis(gdat, strgfeat)
#if strgfeat in gmod.namepara.elem:
# retr_axis(gdat, strgfeat + 'prio')
proc_samp(gdat, None, 'this', 'true', boolinit=True)
# transfer current state of the true model to the reference model
for strg, valu in gdat.true.this.__dict__.items():
if strg == 'dictelem':
# modify the current state of the element parameters of the true model to include uncertainty
valutemp = [[] for l in gdat.true.indxpopl]
for l in gdat.true.indxpopl:
valutemp[l] = dict()
for nameparaelem in gdat.true.this.dictelem[l]:
valutemp[l][nameparaelem] = np.zeros((3, gdat.true.this.dictelem[l][nameparaelem].size))
valutemp[l][nameparaelem][0, :] = gdat.true.this.dictelem[l][nameparaelem]
else:
valutemp = valu
setattr(gdat.refr, strg, valutemp)
if gdat.makeplot and gdat.makeplotinit:
plot_samp(gdat, None, 'this', 'true', 'init')
if gdat.typeverb > 1:
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
print('gmod.minmpara.numbelempop0')
print(gmod.minmpara.numbelempop0)
print('gmod.minmpara.numbelem')
print(gmod.minmpara.numbelem)
## initialization
gdat.fitt.this = tdpy.gdatstrt()
gdat.fitt.next = tdpy.gdatstrt()
init_stat(gdat)
# process the parameter vector
proc_samp(gdat, None, 'this', 'fitt', boolinit=True)
#liststrgcbar = ['llikmaps', 'perc', 'percresi', 'expo', 'lpdfspatpriointp', 'conv', 'magn', 'deflcomp', 'resiconvelem', 'resimagn']
#for strgcbar in liststrgcbar:
# retr_ticklabl(gdat, strgcbar)
# temp
#for strgmodl in gdat.liststrgmodl:
# for namesele in gdat.listnamesele:
# for namefeat in gdat.listnamefeatsele:
# for strglimt in gdat.liststrglimt:
# try:
# getattr(gdat, strglimt + namefeat + namesele)
# except:
# setattr(gdat, strglimt + namefeat + namesele, getattr(gdat, strglimt + namefeat))
# construct bins for element parameters of the fitting model
#for strgmodl in ['fitt']:
#
# gmod = getattr(gdat, strgmodl)
# # list of names for element parameters, concatenated across all populations
# for l in gmod.indxpopl:
# if gmod.maxmpara.numbelem[l] > 0:
# # temp -- does not cover the case when different populations have parameters with the same name
# for strgfeat in gmod.listnameparaglob:
# #for strgfeat in gmod.namepara.genrelem[l]:
# if strgfeat[:-4] == 'etag':
# continue
# #retr_axis(gdat, strgfeat)
# #if strgfeat in gmod.namepara.elem:
# # retr_axis(gdat, strgfeat + 'prio')
gdat.numbbinspdfn = 50
# scalar variable setup continued
for strgbins in ['lowr', 'higr']:
for strgecom in ['dfnc', 'dfncsubt']:
setattr(gdat, 'scalhistcntp' + strgbins + strgecom + 'en00evt0', 'self')
setattr(gdat, 'minmhistcntp' + strgbins + strgecom + 'en00evt0', 0.)
setattr(gdat, 'maxmhistcntp' + strgbins + strgecom + 'en00evt0', gdat.numbpixl)
setattr(gdat, 'facthistcntp' + strgbins + strgecom + 'en00evt0', 1.)
for i in gdat.indxener:
setattr(gdat, 'scalfracsdenmeandarkdfncsubten%02d' % i, 'self')
setattr(gdat, 'minmfracsdenmeandarkdfncsubten%02d' % i, 0.)
setattr(gdat, 'maxmfracsdenmeandarkdfncsubten%02d' % i, 1.)
setattr(gdat, 'factfracsdenmeandarkdfncsubten%02d' % i, 1.)
gmod.scalbooldfncsubt = 'self'
gdat.minmbooldfncsubt = -0.5
gdat.maxmbooldfncsubt = 1.5
gdat.factbooldfncsubt = 1.
#sys.stdout = logg(gdat)
#gdat.log.close()
# initial plots
if gdat.makeplot and gdat.makeplotinit:
plot_init(gdat)
if gdat.typeverb > 0:
sizetotl = 0.
for root, dirs, listfile in os.walk(gdat.pathoutp):
for thisfile in listfile:
sizetotl += os.path.getsize(root + '/' + thisfile) / 2**30
if sizetotl > 10.:
print('Warning: PCAT data path size is %d GB' % sizetotl)
if gdat.typedata == 'inpt':
## rotate element coordinates to the ROI center
if gdat.typepixl == 'heal' and (gdat.lgalcntr != 0. or gdat.bgalcntr != 0.):
for q in gdat.indxrefr:
for l in gmod.indxpopl:
rttr = hp.rotator.Rotator(rot=[rad2deg(gdat.lgalcntr), rad2deg(gdat.bgalcntr), 0.], deg=True, eulertype='ZYX')
gdat.refr.dictelem[q]['bgal'][0, :], gdat.refrlgal[0, :] = rttr(pi / 2. - gdat.refrbgal[0, :], gdat.refrlgal[0, :])
gdat.refr.dictelem[q]['bgal'][0, :] = pi / 2. - gdat.refrbgal[0, :]
## assign zero to nonspecified uncertainties for the reference element features
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'gang' or strgfeat == 'aang':
continue
if strgfeat == 'etag':
continue
refrfeat = getattr(gdat.refr, strgfeat)
if refrfeat[q].ndim == 1:
refrfeat[q] = np.tile(refrfeat[q], (3, 1))
# temp
#if gdat.refr.numbelem > 0:
# gdat.refrfluxbrgt, gdat.refrfluxbrgtassc = retr_fluxbrgt(gdat, gdat.refrlgal, gdat.refrbgal, gdat.refrflux[0, :])
print('gdat.liketype')
print(gdat.liketype)
print('Data settings')
print('gdat.numbener')
print(gdat.numbener)
print('gdat.numbevtt')
print(gdat.numbevtt)
print('Model settings')
print('gdat.fitt.numbpopl')
print(gdat.fitt.numbpopl)
print('gdat.fitt.numbparagenrbase')
print(gdat.fitt.numbparagenrbase)
for strgmodl in gdat.liststrgmodl:
for l in gmod.indxpopl:
for strgfeat, strgpdfn in zip(gmod.namepara.genrelemmodu[l], gmod.liststrgpdfnmodu[l]):
if strgpdfn == 'tmpl':
if gdat.lgalprio is None or gdat.bgalprio is None:
gdat.lgalprio = np.concatenate((gmod.lgal))
gdat.bgalprio = np.concatenate((gmod.bgal))
gdat.numbspatprio = gdat.lgalprio.size
# spatial template for the catalog prior
# temp -- this should move outside the if
gdat.pdfnspatpriotemp = np.zeros((gdat.numbsidecart + 1, gdat.numbsidecart + 1))
for k in range(gdat.numbspatprio):
gdat.pdfnspatpriotemp[:] += 1. / np.sqrt(2. * np.pi) / gdat.stdvspatprio * \
exp(-0.5 * (gdat.binspara.lgalcartmesh - gdat.lgalprio[k])**2 / gdat.stdvspatprio**2) * \
exp(-0.5 * (gdat.binspara.bgalcartmesh - gdat.bgalprio[k])**2 / gdat.stdvspatprio**2)
gdat.pdfnspatpriotemp /= np.amax(gdat.pdfnspatpriotemp)
if gdat.typedata == 'inpt':
# rotate reference elements to the spatial coordinate system of PCAT
# temp -- this does not rotate the uncertainties!
if gdat.typeverb > 0:
print('Rotating the reference elements...')
for q in gdat.indxrefr:
# temp -- this should depend on q
if len(gdat.listpathwcss) > 0:
listhdun = ap.io.fits.open(gdat.listpathwcss)
wcso = ap.wcs.WCS(listhdun[0].header)
skycobjt = ap.coordinates.SkyCoord("galactic", l=gdat.refr.dictelem[q]['lgal'][0, :] * 180. / pi, b=gdat.refr.dictelem[q]['bgal'][0, :] * 180. / pi, unit='deg')
rasc = skycobjt.fk5.ra.degree
decl = skycobjt.fk5.dec.degree
lgal, bgal = wcso.wcs_world2pix(rasc, decl, 0)
lgal -= gdat.numbpixllgalshft + gdat.numbsidecarthalf
bgal -= gdat.numbpixlbgalshft + gdat.numbsidecarthalf
lgal *= gdat.sizepixl
bgal *= gdat.sizepixl
gdat.refr.dictelem[q]['lgal'][0, :] = bgal
gdat.refr.dictelem[q]['bgal'][0, :] = lgal
## preprocess reference element features
for q in gdat.indxrefr:
# temp -- this should depend on q
# temp -- this does not properly calculate uncertainties
gdat.refrgang[q] = np.zeros((3, gdat.refr.dictelem[q]['lgal'].shape[1]))
gdat.refraang[q] = np.zeros((3, gdat.refr.dictelem[q]['lgal'].shape[1]))
gdat.refrgang[q][:, :] = retr_gang(gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :])[None, :]
gdat.refraang[q][:, :] = retr_aang(gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :])[None, :]
# save all reference element features
for strgfeat in gdat.refr.namepara.elemtotl:
refrfeattotl = [[] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
refrfeat = getattr(gdat.refr, strgfeat)
for l in gmod.indxpopl:
if len(refrfeat[q]) > 0:
refrfeattotl[q] = refrfeat[q]
setattr(gdat.refr, strgfeat + 'totl', refrfeattotl)
# find the reference elements inside the ROI
gdat.indxrefrpntsrofi = [[] for q in gdat.indxrefr]
for q in gdat.indxrefr:
gdat.indxrefrpntsrofi[q] = np.where((np.fabs(gdat.refr.dictelem[q]['lgal'][0, :]) < gdat.maxmgangdata) & \
(np.fabs(gdat.refr.dictelem[q]['bgal'][0, :]) < gdat.maxmgangdata))[0]
for strgfeat in gdat.refr.namepara.elemtotl:
refrfeat = getattr(gdat.refr, strgfeat)
refrfeatrofi = [[] for q in gdat.indxrefr]
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrfeatrofi[q] = refrfeat[q][..., gdat.indxrefrpntsrofi[q]]
setattr(gdat.refr, strgfeat, refrfeatrofi)
# temp -- gdat.refr.numbelem is defined twice, one before and one after the filter. The initial definition is needed for strgfeat definitions.
gdat.refr.numbelem = [[] for q in gdat.indxrefr]
gdat.refr.numbelemtotl = 0
for q in gdat.indxrefr:
gdat.refr.numbelem[q] = 0
gdat.refr.numbelem[q] = gdat.refr.dictelem[q]['lgal'].shape[1]
gdat.refr.numbelem[q] = np.sum(gdat.refr.numbelem[q])
gdat.refr.numbelemtotl += np.sum(gdat.refr.numbelem[q])
## check that all reference element features are finite
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'etag':
continue
refrfeat = getattr(gdat.refr, strgfeat)
if len(refrfeat[q]) > 0:
indxbadd = np.where(np.logical_not(np.isfinite(refrfeat[q])))
if indxbadd[0].size > 0:
refrfeat[q][indxbadd] = 0.
if gdat.typeverb > 0:
print('Warning: Provided reference element feature is not finite. Defaulting to 0...')
if refrfeat[q].size == 0:
print('Warning! A reference element feature has length zero!')
print('strgfeat')
print(strgfeat)
else:
if np.amin(refrfeat[q]) == 0. and np.amax(refrfeat[q]) == 0.:
print('Warning! A reference element feature is all zeros!')
raise Exception('')
## element feature indices ordered with respect to the amplitude variable
refrfeatsort = [[] for q in gdat.indxrefr]
if not (gdat.typedata == 'mock' and gmod.numbparaelem == 0):
for q in gdat.indxrefr:
refrparagenrscalelemampl = getattr(gdat.refr, gdat.refr.nameparagenrelemampl[q])
if len(refrparagenrscalelemampl[q]) > 0:
indxelem = np.argsort(refrparagenrscalelemampl[q][0, :])[::-1]
for strgfeat in gdat.refr.namepara.elem[q]:
refrfeat = getattr(gdat.refr, strgfeat)
if len(refrfeat[q]) > 0:
refrfeatsort[q] = refrfeat[q][..., indxelem]
setattr(gdat.refr, strgfeat, refrfeatsort)
# bin reference element features
for q in gdat.indxrefr:
for strgfeatfrst in gdat.refr.namepara.elem[q]:
if strgfeatfrst.startswith('etag'):
continue
refrfeatfrst = getattr(gdat.refr, strgfeatfrst)
if len(refrfeatfrst[q]) > 0:
binsfrst = getattr(gdat.binspara, strgfeatfrst)
hist = np.histogram(refrfeatfrst[q][0, :], binsfrst)[0]
setattr(gdat.refr, 'hist' + strgfeatfrst + 'pop%d' % q, hist)
for strgfeatseco in gdat.refr.namepara.elem[q]:
if strgfeatseco.startswith('etag'):
continue
refrfeatseco = getattr(gdat.refr, strgfeatseco)
strgfeattdim = strgfeatfrst + strgfeatseco + 'pop%d' % q
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
if len(refrfeatseco[q]) > 0:
binsseco = getattr(gdat.binspara, strgfeatseco)
hist = np.histogram2d(refrfeatfrst[q][0, :], refrfeatseco[q][0, :], bins=(binsfrst, binsseco))[0]
setattr(gdat.refr, 'hist' + strgfeattdim, hist)
if gmod.numbparaelem > 0:
# plot settings
## upper limit of histograms
if gdat.limtydathistfeat is None:
gdat.limtydathistfeat = [0.5, max(100., 10**np.ceil(np.log10(gdat.refr.numbelemtotl)))]
#gdat.limtydathistfeat = [0.5, max(100., 10**np.ceil(np.log10(gmod.maxmpara.numbelemtotl)))]
# initial plots
if gdat.makeplot and gdat.makeplotinit:
# problem-specific plots
if gdat.makeplotintr:
plot_intr(gdat)
#plot_pert()
#plot_king(gdat)
plot_lens(gdat)
#plot_3fgl_thrs(gdat)
#if gdat.typeexpr == 'ferm':
# plot_fgl3(gdat)
# find the pixels at which data count maps have local maxima
if gdat.typepixl == 'cart':
for i in gdat.indxener:
for m in gdat.indxevtt:
# temp
gdat.indxxdatmaxm, gdat.indxydatmaxm = tdpy.retr_indximagmaxm(gdat.cntpdatacart[i, :, m])
if not gdat.boolsqzeexpo and np.amax(gdat.cntpdata) < 1.:
raise Exception('Data counts per pixel is less than 1.')
# check the data
if (np.fabs(gdat.cntpdata - np.round(gdat.cntpdata)) > 1e-3).any():
raise Exception('')
if np.amin(gdat.cntpdata) < 0.:
raise Exception('')
# list of variables for which the posterior is collected at each proposal
gdat.liststrgvarbarryswep = ['memoresi', 'accpprob', 'boolpropfilt', 'boolpropaccp', 'indxproptype', 'amplpert']
for namechro in gdat.listnamechro:
gdat.liststrgvarbarryswep += ['chro' + namechro]
gdat.liststrgvarbarryswep += ['ltrp']
if gdat.probtran > 0.:
for l in gmod.indxpopl:
gdat.liststrgvarbarryswep += ['auxiparapop%d' % l]
gdat.liststrgvarbarryswep += ['ljcb']
# write the numpy RNG state to file
with open(gdat.pathoutprtag + 'stat.p', 'wb') as thisfile:
pickle.dump(np.random.get_state(), thisfile)
# process lock for simultaneous plotting
lock = mp.Manager().Lock()
if gdat.typeverb > 0:
print('Writing the global state to the disc before spawning workers...')
path = gdat.pathoutprtag + 'gdatinit'
writfile(gdat, path)
gdat.filestat = open(gdat.pathoutprtag + 'stat.txt', 'w')
gdat.filestat.write('gdatinit written.\n')
gdat.filestat.close()
# exit before running the sampler
if gdat.boolmockonly:
if gdat.typeverb > 0:
print('Mock dataset is generated. Quitting...')
return gdat.rtag
# perform an initial run, sampling from the prior
if gdat.checprio:
if gdat.typeverb > 0:
print('Sampling from the prior...')
## perform sampling
worksamp(gdat, lock, strgpdfn='prio')
## post process the samples
proc_finl(gdat=gdat, strgpdfn='prio')
if gdat.typeverb > 0:
print('Sampling from the posterior...')
# run the sampler
worksamp(gdat, lock)
# post process the samples
proc_finl(gdat=gdat)
# make animations
if gdat.makeanim and gdat.numbplotfram > 1:
proc_anim(gdat.rtag)
if gdat.typeverb > 0:
print('The output is at ' + gdat.pathoutprtag)
if gdat.makeplot:
print('The plots are at ' + gdat.pathplotrtag)
print('PCAT has run successfully. Returning to the OS...')
return gdat.rtag
def initarry( \
dictvarbvari, \
dictvarb, \
listnamecnfgextn, \
forcneww=False, \
forcprev=False, \
strgpara=False, \
# Boolean flag to execute the runs in parallel
boolexecpara=True, \
strgcnfgextnexec=None, \
listnamevarbcomp=[], \
listscalvarbcomp=[], \
listlablvarbcomp=[], \
listtypevarbcomp=[], \
listpdfnvarbcomp=[], \
listgdatvarbcomp=[], \
# parameter name, axis label, tick values and scaling of the input variable changed across PCAT runs
namexaxivari=None, \
lablxaxivari=None, \
tickxaxivari=None, \
scalxaxivari=None, \
):
print('Running PCAT in array mode...')
numbiter = len(dictvarbvari)
indxiter = np.arange(numbiter)
cntrcomp = 0
if boolexecpara:
cntrproc = 0
listrtag = []
listpridchld = []
for k, strgcnfgextn in enumerate(listnamecnfgextn):
if strgcnfgextnexec is not None:
if strgcnfgextn != strgcnfgextnexec:
continue
strgcnfg = inspect.stack()[1][3] + '_' + strgcnfgextn
dictvarbtemp = deepcopy(dictvarb)
for strgvarb, valu in dictvarbvari[strgcnfgextn].items():
dictvarbtemp[strgvarb] = valu
dictvarbtemp['strgcnfg'] = strgcnfg
listrtagprev = retr_listrtagprev(strgcnfg, gdat.pathpcat)
cntrcomp += 1
if ((not forcneww and strgcnfgextnexec is None) or (forcprev and strgcnfgextnexec is not None)) and len(listrtagprev) > 0:
print('Found at least one previous run with the configuration %s' % strgcnfg)
print('Skipping...')
listrtag.append(listrtagprev[-1])
else:
if len(listrtagprev) > 0:
print('Found at least one previous run. But, repeating the run anyway...')
else:
print('Did not find any previous run.')
if boolexecpara and strgcnfgextnexec is None:
cntrproc += 1
prid = os.fork()
if prid > 0:
listpridchld.append(prid)
else:
print('Forking a child process to run the configuration extension...')
rtag = init(**dictvarbtemp)
os._exit(0)
else:
print('Calling the main PCAT function without forking a child...')
listrtag.append(init(**dictvarbtemp))
if boolexecpara and strgcnfgextnexec is None:
for prid in listpridchld:
os.waitpid(prid, 0)
if cntrproc > 0:
print('Exiting before comparison plots because of parallel execution...')
return
if cntrcomp == 0:
print('Found no runs...')
print('Final-processing run outputs...')
for rtag in listrtag:
print(rtag)
proc_finl(rtag=rtag, strgpdfn='post')
proc_anim(rtag)
strgtimestmp = tdpy.retr_strgtimestmp()
if strgcnfgextnexec is not None or namexaxivari is None:
return
print('Making plots to compare the output of different PCAT runs...')
if 'boolmockonly' in dictvarb and dictvarb['boolmockonly']:
listgdat = retr_listgdat(listrtag, typegdat='init')
else:
listgdat = retr_listgdat(listrtag)
numbgdat = len(listgdat)
for namevarbscal in listgdat[0].listnamevarbscal:
booltemp = True
for k in range(1, numbgdat):
if not namevarbscal in listgdat[k].listnamevarbscal:
booltemp = False
if booltemp:
if namevarbscal in listnamevarbcomp:
raise Exception('')
listnamevarbcomp += [namevarbscal]
listscalvarbcomp += [getattr(listgdat[0], 'scal' + namevarbscal)]
listlablvarbcomp += [getattr(listgdat[0], 'labl' + namevarbscal + 'totl')]
listtypevarbcomp += ['pctl']
listpdfnvarbcomp += ['post']
listgdatvarbcomp += ['post']
# add others to the variable list
listnamevarbcomp += ['lliktotl', 'lliktotl', 'infopost', 'bcom', 'lliktotl', 'lliktotl', 'lliktotl', 'levipost']
listscalvarbcomp += ['self', 'self', 'self', 'self', 'self', 'self', 'self', 'self']
listlablvarbcomp += ['$\ln P(D|M_{min})$', '$\ln P(D|M_{max})$', '$D_{KL}$', '$\eta_B$', '$\sigma_{P(D|M)}$', r'$\gamma_{P(D|M)}$', \
r'$\kappa_{P(D|M)}$', '$\ln P_H(D)$']
listtypevarbcomp += ['minm', 'maxm', '', '', 'stdv', 'skew', 'kurt', '']
listpdfnvarbcomp += ['post', 'post', 'post', 'post', 'post', 'post', 'post', 'post']
listgdatvarbcomp += ['post', 'post', 'post', 'post', 'post', 'post', 'post', 'post']
arrytemp = np.array([len(listnamevarbcomp), len(listscalvarbcomp), len(listlablvarbcomp), len(listtypevarbcomp), len(listpdfnvarbcomp), len(listgdatvarbcomp)])
# consistency check: all of the comparison lists must have the same length
if (arrytemp - np.mean(arrytemp) != 0.).any():
raise Exception('')
# add log-evidence to the variable list, if prior is also sampled
booltemp = True
for k in range(numbgdat):
if not listgdat[k].checprio:
booltemp = False
if booltemp:
listgdatprio = retr_listgdat(listrtag, typegdat='finlprio')
listnamevarbcomp += ['leviprio']
listscalvarbcomp += ['self']
listlablvarbcomp += ['$\ln P_{pr}(D)$']
listtypevarbcomp += ['']
listpdfnvarbcomp += ['prio']
listgdatvarbcomp += ['prio']
# time stamp
strgtimestmp = tdpy.retr_strgtimestmp()
dictoutp = dict()
liststrgvarbtotl = []
for (typevarbcomp, pdfnvarbcomp, namevarbcomp) in zip(listtypevarbcomp, listpdfnvarbcomp, listnamevarbcomp):
strgtemp = typevarbcomp + pdfnvarbcomp + namevarbcomp
liststrgvarbtotl.append(strgtemp)
dictoutp[strgtemp] = [[] for k in range(numbiter)]
for k in indxiter:
for a, strgvarbtotl in enumerate(liststrgvarbtotl):
if listgdatvarbcomp[a] == 'prio':
gdattemp = listgdatprio[k]
else:
gdattemp = listgdat[k]
dictoutp[strgvarbtotl][k] = getattr(gdattemp, strgvarbtotl)
pathbase = '%s/imag/%s_%s/' % (gdat.pathpcat, strgtimestmp, inspect.stack()[1][3])
cmnd = 'mkdir -p %s' % pathbase
os.system(cmnd)
cmnd = 'gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=%smrgd.pdf' % pathbase
for strgvarbtotl, varboutp in dictoutp.items():
figr, axis = plt.subplots(figsize=(6, 6))
ydat = np.empty(numbiter)
yerr = np.zeros((2, numbiter))
indxlist = liststrgvarbtotl.index(strgvarbtotl)
if listscalvarbcomp is None:
scalyaxi = getattr(listgdat[0], 'scal' + listnamevarbcomp[indxlist])
else:
scalyaxi = listscalvarbcomp[indxlist]
lablyaxi = listlablvarbcomp[indxlist]
try:
if listtypevarbcomp[indxlist] == 'pctl':
trueyaxi = getattr(listgdat[0], 'true' + listnamevarbcomp[indxlist])
else:
trueyaxi = getattr(listgdat[0], 'true' + listtypevarbcomp[indxlist] + listnamevarbcomp[indxlist])
except:
trueyaxi = None
for k in indxiter:
if isinstance(varboutp[k], list) or isinstance(varboutp[k], np.ndarray) and varboutp[k].ndim > 2:
raise Exception('')
elif isinstance(varboutp[k], float):
ydat[k] = varboutp[k]
else:
if listtypevarbcomp[indxlist] != 'pctl':
yerr[:, k] = 0.
if varboutp[k].ndim == 2:
if varboutp[k].shape[1] != 1:
raise Exception('varboutp format is wrong.')
varboutp[k] = varboutp[k][:, 0]
if listtypevarbcomp[indxlist] == 'pctl':
yerr[:, k] = getattr(listgdat[k], 'errr' + listpdfnvarbcomp[indxlist] + listnamevarbcomp[indxlist])[:, 0]
else:
if listtypevarbcomp[indxlist] == 'pctl':
yerr[:, k] = getattr(listgdat[k], 'errr' + listpdfnvarbcomp[indxlist] + listnamevarbcomp[indxlist])
ydat[k] = varboutp[k][0]
axis.errorbar(indxiter+1., ydat, yerr=yerr, color='b', ls='', markersize=15, marker='o', lw=3)
indxrtagyerr = np.where((yerr[0, :] > 0.) | (yerr[1, :] > 0.))[0]
if indxrtagyerr.size > 0:
temp, listcaps, temp = axis.errorbar(indxiter[indxrtagyerr]+1., ydat[indxrtagyerr], yerr=yerr[:, indxrtagyerr], \
color='b', ls='', capsize=15, markersize=15, marker='o', lw=3)
for caps in listcaps:
caps.set_markeredgewidth(3)
if trueyaxi is not None:
axis.axhline(trueyaxi, ls='--', color='g')
if lablxaxivari is None:
lablxaxivari = getattr(listgdat[0], 'labl' + namexaxivari + 'totl')
if scalxaxivari is None:
scalxaxivari = getattr(listgdat[0], 'scal' + namexaxivari)
axis.set_xlabel(lablxaxivari)
axis.set_xticks(indxiter+1.)
axis.set_xticklabels(tickxaxivari)
axis.set_ylabel(lablyaxi)
if scalyaxi == 'logt':
axis.set_yscale('log')
plt.tight_layout()
pathfull = '%s%s_%s_%s.pdf' % (pathbase, strgtimestmp, inspect.stack()[1][3], liststrgvarbtotl[indxlist])
print('Writing to %s...' % pathfull)
plt.savefig(pathfull)
plt.close(figr)
cmnd += ' %s' % pathfull
print(cmnd)
os.system(cmnd)
print('Making animations...')
for rtag in listrtag:
print('Working on %s...' % rtag)
proc_anim(rtag=rtag)
print('Compiling run plots...')
cmnd = 'python comp_rtag.py'
for rtag in listrtag:
cmnd += ' %s' % rtag
os.system(cmnd)
return listrtag
def retr_rtag(strgcnfg, strgnumbswep):
rtag = strgcnfg + '_' + strgnumbswep
return rtag
class logg(object):
def __init__(self, gdat):
self.terminal = sys.stdout
gdat.pathstdo = gdat.pathoutprtag + 'stdo.txt'
self.log = open(gdat.pathstdo, 'a')
pathlink = gdat.pathplotrtag + 'stdo.txt'
os.system('ln -s %s %s' % (gdat.pathstdo, pathlink))
def write(self, strg):
self.terminal.write(strg)
self.log.write(strg)
def flush(self):
pass
def worktrac(pathoutprtag, lock, strgpdfn, indxprocwork):
try:
return work(pathoutprtag, lock, strgpdfn, indxprocwork)
except:
raise Exception("".join(traceback.format_exception(*sys.exc_info())))
def opti_hess(gdat, gdatmodi):
gmod = gdat.fitt
if gmod.numbparaelem > 0:
cntr = 0
for l in gmod.indxpopl:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
gdatmodi.indxparastdp[gmod.numbparagenrbase-gmod.numbpopl+cntr] = np.concatenate(gdatmodi.this.indxparagenrfullelem[nameparagenrelem])
cntr += 1
if gmod.numbparaelem > 0:
gdatmodi.next.indxelemfull = gdatmodi.this.indxelemfull
gdatmodi.next.indxparagenrfullelem = gdatmodi.this.indxparagenrfullelem
else:
gdatmodi.next.indxparagenrfullelem = None
gdatmodi.stdpmatr = np.zeros((gdat.numbstdp, gdat.numbstdp))
gdatmodi.hess = np.zeros((gdat.numbstdp, gdat.numbstdp))
deltlpos = np.zeros((3, 3))
diffpara = np.empty(gdat.numbstdp)
for k, indxparatemp in enumerate(gdatmodi.indxparastdp):
if len(indxparatemp) == 0:
diffpara[k] = 0.
else:
diffpara[k] = min(min(np.amin(gdatmodi.this.paragenrunitfull[indxparatemp]) * 0.9, np.amin(1. - gdatmodi.this.paragenrunitfull[indxparatemp]) * 0.9), 1e-5)
#gdatmodi.this.sampunitsave = np.copy(gdatmodi.this.paragenrunitfull)
#if gmod.numbparaelem > 0:
# gdatmodi.dictmodi = [[] for l in gmod.indxpopl]
# for l in gmod.indxpopl:
# gdatmodi.dictmodi[l] = dict()
# gdatmodi.dictmodi[l][gmod.nameparagenrelemampl[l] + 'indv'] = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[gmod.nameparagenrelemampl[l]][l]]
# for nameparagenrelem in gmod.namepara.genrelem[l]:
# gdatmodi.dictmodi[l]['stdv' + nameparagenrelem + 'indv'] = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]]
#if gmod.numbparaelem > 0:
# gdatmodi.this.indxparagenrfullelemconc = np.concatenate([gdatmodi.this.indxparagenrfullelem[l]['full'] for l in gmod.indxpopl])
#if gdat.boolpropcomp:
# indxsamptranprop = gdatmodi.this.indxparagenrfullelemconc
#else:
# indxsamptranprop = []
deltlpos[1, 1] = gdatmodi.this.lliktotl
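# The nested loops below estimate the curvature of the log-likelihood along each
# proposal scale with a second-order central finite difference,
# hess ~ |f(x+h) + f(x-h) - 2*f(x)| / (4*h^2), evaluated in the unit parameter space.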
for indxstdpfrst in gdat.indxstdpprop:
for indxstdpseco in gdat.indxstdpprop:
if indxstdpfrst > indxstdpseco:
continue
if indxstdpfrst == indxstdpseco:
#if gmod.numbparaelem > 0:
# if k in gdatmodi.this.indxparagenrfullelemconc:
# indxtrapmoditemp = k - gmod.indxparagenrfulleleminit
# indxpoplmoditemp = np.array([np.amin(np.where(indxtrapmoditemp // gmod.numbparagenrelemcumr == 0))])
# numbparapoplinittemp = indxtrapmoditemp - gmod.numbparagenrelemcuml[indxpoplmoditemp[0]]
# indxelemmoditemp = [numbparapoplinittemp // gmod.numbparagenrelemsing[indxpoplmoditemp[0]]]
# gmod.indxparagenrelemmoditemp = numbparapoplinittemp % gmod.numbparagenrelemsing[indxpoplmoditemp[0]]
# nameparagenrelem = gmod.namepara.genrelem[indxpoplmoditemp[0]][gmod.indxparagenrelemmoditemp]
# indxsampampltemp = k - gmod.indxparagenrelemmoditemp + gmod.indxparagenrelemampl[indxpoplmoditemp[0]]
# #amplfact = gdatmodi.this.paragenrscalfull[indxsampampltemp] / getattr(gdat, 'minm' + gmod.nameparagenrelemampl[indxpoplmoditemp[0]])
# stdv = 1. / np.sqrt(gdatmodi.hess[indxstdpfrst, indxstdpseco])
# gdatmodi.stdpmatr[indxstdpfrst, indxstdpseco] += stdv
# gdatmodi.dictmodi[indxpoplmoditemp[0]]['stdv' + nameparagenrelem + 'indv'][indxelemmoditemp[0]] = stdv
# gdatmodi.dictmodi[indxpoplmoditemp[0]][gmod.nameparagenrelemampl[indxpoplmoditemp[0]] + 'indv'][indxelemmoditemp[0]] = \
# gdatmodi.this.paragenrscalfull[indxsampampltemp]
if len(gdatmodi.indxparastdp[indxstdpseco]) == 0:
continue
for a in range(2):
gdatmodi.next.paragenrunitfull = np.copy(gdatmodi.this.paragenrunitfull)
if a == 0:
gdatmodi.next.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] -= diffpara[indxstdpseco]
if a == 1:
gdatmodi.next.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] += diffpara[indxstdpseco]
gdatmodi.next.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gdatmodi.next.paragenrunitfull, gdatmodi.next.indxparagenrfullelem)
proc_samp(gdat, gdatmodi, 'next', 'fitt')
if a == 0:
deltlpos[0, 1] = gdatmodi.next.lliktotl
if a == 1:
deltlpos[2, 1] = gdatmodi.next.lliktotl
gdatmodi.hess[indxstdpfrst, indxstdpseco] = 1. / 4. / diffpara[indxstdpseco]**2 * np.fabs(deltlpos[0, 1] + \
deltlpos[2, 1] - 2. * deltlpos[1, 1])
else:
# temp
continue
for a in range(4):
gdatmodi.this.paragenrunitfull = np.copy(gdatmodi.this.sampunitsave)
if a == 0:
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] -= diffpara
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] -= diffpara
if a == 1:
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] += diffpara
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] += diffpara
if a == 2:
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] -= diffpara
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] += diffpara
if a == 3:
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] += diffpara
gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] -= diffpara
proc_samp(gdat, gdatmodi, 'this', 'fitt')
if a == 0:
deltlpos[0, 0] = gdatmodi.this.lpostotl
if a == 1:
deltlpos[2, 2] = gdatmodi.this.lpostotl
if a == 2:
deltlpos[1, 2] = gdatmodi.this.lpostotl
if a == 3:
deltlpos[2, 1] = gdatmodi.this.lpostotl
gdatmodi.hess[indxstdpfrst, indxstdpseco] = 1. / 4. / diffpara**2 * \
(deltlpos[2, 2] + deltlpos[0, 0] - deltlpos[1, 2] - deltlpos[2, 1])
if not np.isfinite(gdatmodi.hess[indxstdpfrst, indxstdpseco]):
raise Exception('')
if gdat.booldiagmode and not np.isfinite(gdatmodi.next.paragenrscalfull).all():
raise Exception('')
if gdatmodi.hess[indxstdpfrst, indxstdpseco] == 0.:
raise Exception('')
gdatmodi.hess[np.where(gdatmodi.hess == 0)] = 10.
# temp
#gdatmodi.stdpmatr = np.sqrt(linalg.inv(gdatmodi.hess))
numbdoffefff = gmod.numbparagenrbase
if gmod.numbparaelem > 0:
numbdoffefff += gmod.numbparagenrelem * 10
gdatmodi.stdpmatr = np.sqrt(1. / gdatmodi.hess) / np.sqrt(numbdoffefff)
if (gdatmodi.stdpmatr == 0).any():
raise Exception('')
gdatmodi.stdp = gdatmodi.stdpmatr[gdat.indxstdp, gdat.indxstdp]
def worksamp(gdat, lock, strgpdfn='post'):
pathorig = gdat.pathoutprtag + 'stat.txt'
pathlink = gdat.pathplotrtag + 'stat.txt'
os.system('ln -s %s %s' % (pathorig, pathlink))
if gdat.numbproc == 1:
worktrac(gdat.pathoutprtag, lock, strgpdfn, 0)
else:
if gdat.typeverb > 0:
print('Forking the sampler...')
# process pool
pool = mp.Pool(gdat.numbproc)
# spawn the processes
workpart = functools.partial(worktrac, gdat.pathoutprtag, lock, strgpdfn)
pool.map(workpart, gdat.indxproc)
pool.close()
pool.join()
gdat.filestat = open(gdat.pathoutprtag + 'stat.txt', 'a')
gdat.filestat.write('gdatmodi%s written.\n' % strgpdfn)
gdat.filestat.close()
def work(pathoutprtag, lock, strgpdfn, indxprocwork):
print('Worker #%d' % indxprocwork)
# read the initial global object, gdatinit
path = pathoutprtag + 'gdatinit'
gdat = readfile(path)
gmod = gdat.fitt
# define time functions
timereal = time.time()
timeproc = time.process_time()
# re-seed the random number generator for this chain
if gdat.boolseedchan:
np.random.seed(indxprocwork + 1000)
# construct a global object for the walker
gdatmodi = tdpy.gdatstrt()
gdatmodi.this = tdpy.gdatstrt()
gdatmodi.next = tdpy.gdatstrt()
gdatmodi.indxprocwork = indxprocwork
gdatmodi.this = gdat.fitt.this
# path of gdatmodi
gdatmodi.pathgdatmodi = gdat.pathoutprtag + 'gdatmodi%04d' % gdatmodi.indxprocwork + gdat.strgpdfn
print('Determining the parameter indices of the fitting model with only the floating parameters...')
gdatmodi.booldone = False
gdatmodi.lock = lock
gdatmodi.indxprocwork = indxprocwork
# plotting factors for scalar variables
for name in gmod.namepara.scal:
if name in gmod.nameparagenrbase:
gmod.indxpara.temp = np.where(gmod.nameparagenrbase == name)[0]
# find the list of variables for which the posterior will be calculated
if not gdat.boolmockonly:
if gdat.typeverb > 1:
print('gdatmodi.this.paragenrunitfull')
print(gdatmodi.this.paragenrunitfull)
show_paragenrscalfull(gdat, gdatmodi)
proc_samp(gdat, gdatmodi, 'this', 'fitt')
gdat.liststrgvarbarrysamp = []
gdat.liststrgvarblistsamp = []
for strg, valu in gdatmodi.this.__dict__.items():
if not strg in gdat.liststrgvarbarryswep:
if isinstance(valu, np.ndarray) or isinstance(valu, float):
gdat.liststrgvarbarrysamp.append(strg)
elif isinstance(valu, list) and strg != 'indxparagenrfullelem' and strg != 'psfnconv' and \
strg != 'trueindxelemasscmiss' and strg != 'trueindxelemasschits':
gdat.liststrgvarblistsamp.append(strg)
if gdat.typeverb == 2:
print('gdat.liststrgvarbarrysamp')
print(gdat.liststrgvarbarrysamp)
print('gdat.liststrgvarblistsamp')
print(gdat.liststrgvarblistsamp)
gdat.liststrgvarblistswep = []
if gdat.typeverb == 2:
print('gdat.liststrgvarblistswep')
print(gdat.liststrgvarblistswep)
gdat.liststrgvarblist = gdat.liststrgvarblistsamp + gdat.liststrgvarblistswep
gdatmodi.liststrgchan = gdat.liststrgvarbarryswep + ['paragenrscalbase'] + gmod.namepara.scal
if gdat.typeverb == 2:
print('gdatmodi.liststrgchan')
print(gdatmodi.liststrgchan)
gdat.liststrgvarbarry = gdat.liststrgvarbarrysamp + gdat.liststrgvarbarryswep
## sample index
gdatmodi.cntrswep = 0
if gdat.booldiagmode:
if gdat.indxswepsave.size != gdat.numbsamp:
raise Exception('Inappropriate number of samples.')
# initialize the worker sampler
# definitions required for the initial sample
gdatmodi.this.boolpropfilt = True
gdatmodi.this.boolpropaccp = True
# dummy definitions required for logs
gdatmodi.this.indxproptype = np.zeros(1, dtype=int)
for l in gmod.indxpopl:
setattr(gdatmodi.this, 'auxiparapop%d' % l, np.zeros(gmod.numbparagenrelemsing[l]))
gdatmodi.this.lpri = np.zeros(gmod.numblpri)
gdatmodi.this.lpau = np.zeros(1)
gdatmodi.this.ltrp = np.zeros(1)
gdatmodi.this.ljcb = np.zeros(1)
gdatmodi.this.accpprob = np.zeros(1)
gdatmodi.this.memoresi = np.zeros(1)
gdatmodi.this.amplpert = np.zeros(1)
"""
Team: <NAME>, <NAME>, <NAME>, and <NAME>
To run this code:
requirements: numpy, matplotlib
At the bottom of the code, after the " if __name__ == "__main__": " statement
- Change the parameters you desire to change such as size of board
- Change the policy that you want to run
- change the parameters that affect that policy
The following parameters are available to be changed by the user:
General parameters:
relocation_policy: Options - "random", "social", "priority_location", "community", "closest", "swap"
k: number of agents of own type in neighborhood for agent j to be happy
sim_env_width: width of the board
sim_env_height: height of the board
population_dens: how much of the environment is occupied by agents
epochs: epochs to run simulation for
cells_to_check_for_relocation: max cells to check for relocation, used in random and closest_distance
Social Policy Parameters:
number_of_friends: Number of friends for the social policy
friend_neighborhood: Size of the friend neighborhood
*Execute this file using Python 3.8
Plots of the mean happiness with standard deviation over the number of simulations run are generated
"""
import numpy as np
import copy
import matplotlib.pyplot as plt
import random
class SchellingSegregationModel:
def __init__(
self,
k: int,
simulation_environment_width: int,
simulation_environment_height: int,
population_density: float,
epochs: int,
q: int = 100,
relocation_type: str = "random",
number_friends: int = 2,
p: int = 3,
max_distance: int = 10,
):
self.k = k
self.simulation_environment_width = simulation_environment_width
self.simulation_environment_height = simulation_environment_height
self.population_density = population_density
self.epochs = epochs
self.environment = []
self.q = q
self.friends = []
self.friendsReverse = []
self.number_friends = number_friends
self.p = p
self.relocation_type = relocation_type
self.max_dist = max_distance
self.priority1 = []
self.priority2 = []
def create_environment(self) -> None:
environment = np.random.choice(
[0, 1, 2],
self.simulation_environment_width * self.simulation_environment_height,
p=[1 - self.population_density, self.population_density / 2, self.population_density / 2],
)
self.environment = environment.reshape(self.simulation_environment_width, self.simulation_environment_height)
self.initial_enve = copy.deepcopy(self.environment)
def create_priorities(self, pr1, pr2) -> None:
self.priority1 = []
self.priority2 = []
for j in range(self.simulation_environment_height):
row1 = []
row2 = []
for i in range(self.simulation_environment_width):
if (i < self.simulation_environment_width // 2) and (j < self.simulation_environment_height // 2):
row1.append(pr1[0])
row2.append(pr2[0])
elif (i >= self.simulation_environment_width // 2) and (j < self.simulation_environment_height // 2):
row1.append(pr1[1])
row2.append(pr2[1])
elif (i < self.simulation_environment_width // 2) and (j >= self.simulation_environment_height // 2):
row1.append(pr1[2])
row2.append(pr2[2])
elif (i >= self.simulation_environment_width // 2) and (j >= self.simulation_environment_height // 2):
row1.append(pr1[3])
row2.append(pr2[3])
self.priority1.append(row1)
self.priority2.append(row2)
def check_happiness(self, cell_j, cell_i, cell_type):
if cell_type == 0:
return 0
# first arg in array[_, _] is row, then col
i_left = cell_i - 1
i_right = cell_i + 1
j_down = cell_j + 1
j_up = cell_j - 1
# implement wraparound environment
if cell_i == 0:
i_left = self.simulation_environment_width - 1
elif cell_i == self.simulation_environment_width - 1:
i_right = 0
if cell_j == 0:
j_up = self.simulation_environment_height - 1
elif cell_j == self.simulation_environment_height - 1:
j_down = 0
# Build array of neighbor coordinates to check
neighbors_coord = [
[i_left, cell_j],
[i_right, cell_j],
[cell_i, j_up],
[cell_i, j_down],
[i_left, j_up],
[i_right, j_up],
[i_left, j_down],
[i_right, j_down],
]
# increase happy level for each neighbor of same type
# each entry of neighbors_coord is (column, row); the environment is indexed [row, column]
happy_level = sum(int(self.environment[int(j), int(i)] == cell_type) for i, j in neighbors_coord)
return happy_level
def create_friends(self) -> None:
# set all friends to zero
self.friends = np.zeros(
[self.simulation_environment_width * self.simulation_environment_height, self.number_friends], dtype=int
)
# line up all the cells
environment_friends = self.initial_enve.reshape(
self.simulation_environment_width * self.simulation_environment_height, 1
)
for i in range(self.simulation_environment_width * self.simulation_environment_height):
self.friendsReverse.append([])
# check if the cell is occupied and that friend is different from the current cell
for i in range(self.simulation_environment_width * self.simulation_environment_height):
if environment_friends[i] != 0: # check that there is someone there
for j in range(self.number_friends): # choose a random cell number for a potential friend
friend = np.random.randint(
0, self.simulation_environment_width * self.simulation_environment_height
)
friendsAlready = False
for l in range(j): # check if this cell is already our friend
if self.friends[i][l] == friend:
friendsAlready = True
# if friend is self or empty cell or a friend already choose another one
while (friend == i) or (environment_friends[friend] == 0) or (friendsAlready == True):
friend = np.random.randint(
0, self.simulation_environment_width * self.simulation_environment_height
)
friendsAlready = False
for l in range(j):
if self.friends[i][l] == friend:
friendsAlready = True
self.friends[i][j] = friend
self.friendsReverse[friend].append([i, j])
def getClusters(self):
# print(self.environment)
height = self.simulation_environment_height
width = self.simulation_environment_width
# an array to keep track of which cells have been visited
memberOf = np.full((height, width), -1) # np.empty((height, width), dtype=int).fill(-1)
array = self.environment
# two lists to keep track of the parents of clusters and cluster membership
# parentNodes = []
clusterMembership = []
childNodes = []
# keeps track of the number of clusters
numClusters = 0
for i in range(height):
for j in range(width):
if (array[i][j]) and (memberOf[i][j] == -1):
# a stack to keep track of which cells need to be visited
toVisit = []
# add the first node to the list of parent nodes
# parentNodes.append([i, j])
childNodes.append([[i, j]])
clusterMembership.append(1)
toVisit.append([i, j])
memberOf[i][j] = numClusters
# perform a depth-first traversal of neighbors of the parent node
while toVisit:
# get coordinates of node at the top of the stack
y = toVisit[-1][0]
x = toVisit[-1][1]
# two lists to hold the coordinates in which to check for neighbors
yCoords = []
xCoords = []
# determine coordinates to check
# NOTE: could make this worse on memory but better on computing time
# by putting newCellFound outside the while loop
# to let the program know whether it needs to generate all this
# and just saving it the first time in the stack structure
if y == 0:
yCoords.append(height - 1)
yCoords.append(y)
yCoords.append(y + 1)
elif y == height - 1:
yCoords.append(0)
yCoords.append(y - 1)
yCoords.append(y)
else:
yCoords.append(y - 1)
yCoords.append(y)
yCoords.append(y + 1)
if x == 0:
xCoords.append(width - 1)
xCoords.append(x)
xCoords.append(x + 1)
elif x == width - 1:
xCoords.append(0)
xCoords.append(x - 1)
xCoords.append(x)
else:
xCoords.append(x - 1)
xCoords.append(x)
xCoords.append(x + 1)
# keeps track if a new cell has been found
newCellFound = False
for yCoord in yCoords:
for xCoord in xCoords:
# for an unvisited, occupied cell in the cluster
if array[yCoord][xCoord] and memberOf[yCoord][xCoord] == -1:
newCellFound = True
# increment the number of cells in this cluster
clusterMembership[numClusters] += 1
# add the cell to the list of child cells in the cluster
childNodes[numClusters].append([yCoord, xCoord])
# push the new cell onto the stack
toVisit.append([yCoord, xCoord])
memberOf[yCoord][xCoord] = numClusters
break
if newCellFound:
break
if not newCellFound:
toVisit.pop()
numClusters += 1
return (childNodes, memberOf)
def relocation_policy_random(self, current_agent_row, current_agent_col):
available_locations = np.where(self.environment == 0)
[available_locations_j, available_locations_i] = available_locations
# counter for the number of new locations checked (no to exceed certain number)
checked = 0
# backup list in case cell does not reach happiness level
checked_happy_levels = []
while checked < self.q:
# get random location
random_location = np.random.randint(len(available_locations_i))
rand_i = available_locations_i[random_location] # column
rand_j = available_locations_j[random_location] # row
# remove this location from lists
available_locations_i = np.delete(available_locations_i, random_location)
available_locations_j = np.delete(available_locations_j, random_location)
cell_type = self.environment[current_agent_row, current_agent_col]
happy_level = self.check_happiness(rand_j, rand_i, cell_type)
if happy_level >= self.k:
return [rand_j, rand_i]
else:
checked_happy_levels.append([rand_j, rand_i, happy_level])
checked += 1
# fall back to the checked location with the highest happiness level
best_option = int(np.argmax([entry[2] for entry in checked_happy_levels]))
return checked_happy_levels[best_option][0:2]
def relocation_policy_social(self, cell_j, cell_i):
new_location = [cell_j, cell_i]
cell_type = self.environment[cell_j, cell_i]
available_happy_places = []
D1index = self.simulation_environment_width * cell_j + cell_i # position in the linear stretched representation
for friend in self.friends[D1index]:
friend_j = friend // self.simulation_environment_width # friend's location on the lattice
friend_i = friend % self.simulation_environment_width
for n in range(-(self.p // 2), self.p // 2 + 1):
for m in range(-(self.p // 2), self.p // 2 + 1):
if (
self.check_happiness(
(friend_j + n) % self.simulation_environment_width,
(friend_i + m) % self.simulation_environment_height,
cell_type,
)
> 2
) and (
self.environment[
(friend_j + n) % self.simulation_environment_width,
(friend_i + m) % self.simulation_environment_height,
]
== 0
):
available_happy_places.append(
[
(friend_j + n) % self.simulation_environment_width,
(friend_i + m) % self.simulation_environment_height,
]
)
if available_happy_places != []:
new_location = random.choice(available_happy_places)
return new_location
def relocation_policy_closest(self, current_agent_row, current_agent_col):
# Find all available empty locations
available_locations = np.where(self.environment == 0)
[available_locations_j, available_locations_i] = available_locations
distance_to_locations = (
(available_locations_j - current_agent_row) ** 2 + (available_locations_i - current_agent_col) ** 2
) ** 0.5
locations_w_distance = np.vstack((available_locations_j, available_locations_i, distance_to_locations))
locations_sorted = locations_w_distance[:, locations_w_distance[-1].argsort()]
idx_that_exceed_dist = np.where(locations_sorted[-1] > self.max_dist)
# not to exceed maximum distance to move
try:
available_locations = locations_sorted[:, 0 : idx_that_exceed_dist[0][0]]
except IndexError:
# none exceeded max distance, then go to max cells checked
available_locations = locations_sorted[:, 0 : self.q]
[available_locations_j, available_locations_i, distances] = available_locations
# backup list in case cell does not reach happiness level
checked_happy_levels = []
while len(available_locations_i) != 0:
# get location
coord_i = available_locations_i[0] # column
coord_j = available_locations_j[0] # row
distance = distances[0]
# remove this location from lists
available_locations_i = np.delete(available_locations_i, 0)
available_locations_j = np.delete(available_locations_j, 0)
distances = np.delete(distances, 0)
cell_type = self.environment[current_agent_row, current_agent_col]
happy_level = self.check_happiness(coord_j, coord_i, cell_type)
# make sure it does not count itself as a neighbor when moving to a neighboring position!
if distance == 1 or distance == 2 ** 0.5:
happy_level -= 1
if happy_level >= self.k:
return [coord_j, coord_i, distance]
else:
checked_happy_levels.append([coord_j, coord_i, distance, happy_level])
try:
    # fall back to the checked location with the highest happiness level
    best_option = int(np.argmax([entry[3] for entry in checked_happy_levels]))
except (IndexError, ValueError):
    # no locations to move to that are close enough
    return [current_agent_row, current_agent_col, 0]
return checked_happy_levels[best_option][0:3]
def relocation_policy_priorities(self, cell_j, cell_i, priority1, priority2):
new_location = [cell_j, cell_i]
cell_type = self.environment[cell_j, cell_i]
for best_place in range(4):
if cell_type == 1:
best_location = np.where(priority1 == best_place)
available_locations = np.where((self.environment == 0) & (self.priority1 == priority1[best_location]))
elif cell_type == 2:
best_location = np.where(priority2 == best_place)
available_locations = np.where((self.environment == 0) & (self.priority2 == priority2[best_location]))
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>, National Institutes of Health / NINDS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#import os
import argparse
import time
import numpy as np
import cv2
from dpLoadh5 import dpLoadh5
from dpWriteh5 import dpWriteh5
def draw_flow(img, flow, step=16):
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
def draw_hsv(flow):
h, w = flow.shape[:2]
fx, fy = flow[:,:,0], flow[:,:,1]
ang = np.arctan2(fy, fx) + np.pi
v = np.sqrt(fx*fx+fy*fy)
hsv = np.zeros((h, w, 3), np.uint8)
hsv[...,0] = ang*(180/np.pi/2)
hsv[...,1] = 255
hsv[...,2] = np.minimum(v*20, 255)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return bgr
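# Illustrative usage (assumes prev and curr are uint8 grayscale frames of equal size):
#
#     flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0)
#     vis = draw_flow(curr, flow)
#     hsv = draw_hsv(flow)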
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow = -flow
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
return res
import numpy as np
from scipy.signal import resample, blackmanharris, triang
from scipy.fftpack import fft, ifft, fftshift
import math, copy, sys, os
from scipy.io.wavfile import write, read
from sys import platform
import subprocess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), './utilFunctions_C/'))
try:
import utilFunctions_C as UF_C
except ImportError:
print ("\n")
print ("-------------------------------------------------------------------------------")
print ("Warning:")
print ("Cython modules for some of the core functions were not imported.")
print ("Please refer to the README.md file in the 'sms-tools' directory,")
print ("for the instructions to compile the cython modules.")
print ("Exiting the code!!")
print ("-------------------------------------------------------------------------------")
print ("\n")
sys.exit(0)
winsound_imported = False
if sys.platform == "win32":
try:
import winsound
winsound_imported = True
except:
print ("You won't be able to play sounds, winsound could not be imported")
def isPower2(num):
"""
Check if num is power of two
"""
return ((num & (num - 1)) == 0) and num > 0
INT16_FAC = (2**15)-1
INT32_FAC = (2**31)-1
INT64_FAC = (2**63)-1
norm_fact = {'int16':INT16_FAC, 'int32':INT32_FAC, 'int64':INT64_FAC,'float32':1.0,'float64':1.0}
def wavread(filename):
"""
Read a sound file and convert it to a normalized floating point array
filename: name of file to read
returns fs: sampling rate of file, x: floating point array
"""
if (os.path.isfile(filename) == False): # raise error if wrong input file
raise ValueError("Input file is wrong")
fs, x = read(filename)
if (len(x.shape) !=1): # raise error if more than one channel
raise ValueError("Audio file should be mono")
if (fs !=44100):                                     # raise error if sampling rate is not 44100
raise ValueError("Sampling rate of input sound should be 44100")
#scale down and convert audio into floating point number in range of -1 to 1
x = np.float32(x)/norm_fact[x.dtype.name]
return fs, x
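# Example (hypothetical file name): read a mono 44.1 kHz wav into floats in [-1, 1]
#
#     fs, x = wavread('sound.wav')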
def wavplay(filename):
"""
Play a wav audio file from system using OS calls
filename: name of file to read
"""
if (os.path.isfile(filename) == False): # raise error if wrong input file
print("Input file does not exist. Make sure you computed the analysis/synthesis")
else:
if sys.platform == "linux" or sys.platform == "linux2":
# linux
subprocess.call(["aplay", filename])
elif sys.platform == "darwin":
# OS X
subprocess.call(["afplay", filename])
elif sys.platform == "win32":
if winsound_imported:
winsound.PlaySound(filename, winsound.SND_FILENAME)
else:
print("Cannot play sound, winsound could not be imported")
else:
print("Platform not recognized")
def wavwrite(y, fs, filename):
"""
Write a sound file from an array with the sound and the sampling rate
y: floating point array of one dimension, fs: sampling rate
filename: name of file to create
"""
x = copy.deepcopy(y) # copy array
x *= INT16_FAC # scaling floating point -1 to 1 range signal to int16 range
x = np.int16(x) # converting to int16 type
write(filename, fs, x)
def peakDetection(mX, t):
"""
Detect spectral peak locations
mX: magnitude spectrum, t: threshold
returns ploc: peak locations
"""
thresh = np.where(np.greater(mX[1:-1],t), mX[1:-1], 0); # locations above threshold
next_minor = np.where(mX[1:-1]>mX[2:], mX[1:-1], 0) # locations higher than the next one
prev_minor = np.where(mX[1:-1]>mX[:-2], mX[1:-1], 0) # locations higher than the previous one
ploc = thresh * next_minor * prev_minor # locations fulfilling the three criteria
ploc = ploc.nonzero()[0] + 1 # add 1 to compensate for previous steps
return ploc
def peakInterp(mX, pX, ploc):
"""
Interpolate peak values using parabolic interpolation
mX, pX: magnitude and phase spectrum, ploc: locations of peaks
returns iploc, ipmag, ipphase: interpolated peak location, magnitude and phase values
"""
val = mX[ploc] # magnitude of peak bin
lval = mX[ploc-1] # magnitude of bin at left
rval = mX[ploc+1] # magnitude of bin at right
iploc = ploc + 0.5*(lval-rval)/(lval-2*val+rval) # center of parabola
ipmag = val - 0.25*(lval-rval)*(iploc-ploc) # magnitude of peaks
ipphase = np.interp(iploc, np.arange(0, pX.size), pX) # phase of peaks by linear interpolation
return iploc, ipmag, ipphase
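# Typical peak-picking flow for one STFT frame (the -80 dB threshold is illustrative):
#
#     ploc = peakDetection(mX, t=-80)
#     iploc, ipmag, ipphase = peakInterp(mX, pX, ploc)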
def sinc(x, N):
"""
Generate the main lobe of a sinc function (Dirichlet kernel)
x: array of indexes to compute; N: size of FFT to simulate
returns y: samples of the main lobe of a sinc function
"""
y = np.sin(N * x/2) / np.sin(x/2) # compute the sinc function
y[np.isnan(y)] = N # avoid NaN if x == 0
return y
def genBhLobe(x):
"""
Generate the main lobe of a Blackman-Harris window
x: bin positions to compute (real values)
returns y: main lobe of the spectrum of a Blackman-Harris window
"""
N = 512 # size of fft to use
f = x*np.pi*2/N # frequency sampling
df = 2*np.pi/N
y = np.zeros(x.size) # initialize window
consts = [0.35875, 0.48829, 0.14128, 0.01168] # window constants
for m in range(0,4): # iterate over the four sincs to sum
y += consts[m]/2 * (sinc(f-df*m, N) + sinc(f+df*m, N)) # sum of scaled sinc functions
y = y/N/consts[0] # normalize
return y
def genSpecSines(ipfreq, ipmag, ipphase, N, fs):
"""
Generate a spectrum from a series of sine values, calling a C function
ipfreq, ipmag, ipphase: sine peaks frequencies, magnitudes and phases
N: size of the complex spectrum to generate; fs: sampling frequency
returns Y: generated complex spectrum of sines
"""
Y = UF_C.genSpecSines(N*ipfreq/float(fs), ipmag, ipphase, N)
return Y
def genSpecSines_p(ipfreq, ipmag, ipphase, N, fs):
"""
Generate a spectrum from a series of sine values
iploc, ipmag, ipphase: sine peaks locations, magnitudes and phases
N: size of the complex spectrum to generate; fs: sampling rate
returns Y: generated complex spectrum of sines
"""
Y = np.zeros(N, dtype = complex) # initialize output complex spectrum
hN = N//2 # size of positive freq. spectrum
for i in range(0, ipfreq.size): # generate all sine spectral lobes
loc = N * ipfreq[i] / fs # it should be in range ]0,hN-1[
if loc==0 or loc>hN-1: continue
binremainder = round(loc)-loc;
lb = np.arange(binremainder-4, binremainder+5) # main lobe (real value) bins to read
lmag = genBhLobe(lb) * 10**(ipmag[i]/20) # lobe magnitudes of the complex exponential
b = np.arange(round(loc)-4, round(loc)+5, dtype='int')
for m in range(0, 9):
if b[m] < 0: # peak lobe crosses DC bin
Y[-b[m]] += lmag[m]*np.exp(-1j*ipphase[i])
elif b[m] > hN:                                # peak lobe crosses Nyquist bin
Y[b[m]] += lmag[m]*np.exp(-1j*ipphase[i])
elif b[m] == 0 or b[m] == hN: # peak lobe in the limits of the spectrum
Y[b[m]] += lmag[m]*np.exp(1j*ipphase[i]) + lmag[m]*np.exp(-1j*ipphase[i])
else: # peak lobe in positive freq. range
Y[b[m]] += lmag[m]*np.exp(1j*ipphase[i])
Y[hN+1:] = Y[hN-1:0:-1].conjugate() # fill the negative part of the spectrum
return Y
def sinewaveSynth(freqs, amp, H, fs):
"""
Synthesis of one sinusoid with time-varying frequency
freqs, amps: array of frequencies and amplitudes of sinusoids
H: hop size, fs: sampling rate
returns y: output array sound
"""
t = np.arange(H)/float(fs) # time array
lastphase = 0 # initialize synthesis phase
lastfreq = freqs[0] # initialize synthesis frequency
y = np.array([]) # initialize output array
for l in range(freqs.size): # iterate over all frames
if (lastfreq==0) & (freqs[l]==0): # if 0 freq add zeros
A = np.zeros(H)
freq = np.zeros(H)
elif (lastfreq==0) & (freqs[l]>0): # if starting freq ramp up the amplitude
A = np.arange(0,amp, amp/H)
freq = np.ones(H)*freqs[l]
elif (lastfreq>0) & (freqs[l]>0): # if freqs in boundaries use both
A = np.ones(H)*amp
if (lastfreq==freqs[l]):
freq = np.ones(H)*lastfreq
else:
freq = np.arange(lastfreq,freqs[l], (freqs[l]-lastfreq)/H)
elif (lastfreq>0) & (freqs[l]==0): # if ending freq ramp down the amplitude
A = np.arange(amp,0,-amp/H)
freq = np.ones(H)*lastfreq
phase = 2*np.pi*freq*t+lastphase # generate phase values
yh = A * np.cos(phase) # compute sine for one frame
lastfreq = freqs[l] # save frequency for phase propagation
lastphase = np.remainder(phase[H-1], 2*np.pi) # save phase to be use for next frame
y = np.append(y, yh) # append frame to previous one
return y
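# Example (illustrative values): a constant 440 Hz tone over 100 frames with hop size 128
#
#     y = sinewaveSynth(np.ones(100)*440.0, 0.8, 128, 44100)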
def cleaningTrack(track, minTrackLength=3):
"""
Delete fragments of one single track smaller than minTrackLength
track: array of values; minTrackLength: minimum duration of tracks in number of frames
returns cleanTrack: array of clean values
"""
nFrames = track.size # number of frames
cleanTrack = np.copy(track)                         # copy array
trackBegs = np.nonzero((track[:nFrames-1] <= 0) # beginning of track contours
& (track[1:]>0))[0] + 1
if track[0]>0:
trackBegs = np.insert(trackBegs, 0, 0)
trackEnds = np.nonzero((track[:nFrames-1] > 0) & (track[1:] <=0))[0] + 1
if track[nFrames-1]>0:
trackEnds = np.append(trackEnds, nFrames-1)
trackLengths = 1 + trackEnds - trackBegs            # lengths of track contours
for i,j in zip(trackBegs, trackLengths): # delete short track contours
if j <= minTrackLength:
cleanTrack[i:i+j] = 0
return cleanTrack
def f0Twm(pfreq, pmag, ef0max, minf0, maxf0, f0t=0):
"""
Function that wraps the f0 detection function TWM, selecting the possible f0 candidates
and calling the function TWM with them
pfreq, pmag: peak frequencies and magnitudes,
ef0max: maximum error allowed, minf0, maxf0: minimum and maximum f0
f0t: f0 of previous frame if stable
returns f0: fundamental frequency in Hz
"""
if (minf0 < 0): # raise exception if minf0 is smaller than 0
raise ValueError("Minimum fundamental frequency (minf0) smaller than 0")
if (maxf0 >= 10000): # raise exception if maxf0 is bigger than 10000Hz
raise ValueError("Maximum fundamental frequency (maxf0) bigger than 10000Hz")
if (pfreq.size < 3) & (f0t == 0): # return 0 if less than 3 peaks and not previous f0
return 0
f0c = np.argwhere((pfreq>minf0) & (pfreq<maxf0))[:,0] # use only peaks within given range
if (f0c.size == 0): # return 0 if no peaks within range
return 0
f0cf = pfreq[f0c] # frequencies of peak candidates
f0cm = pmag[f0c] # magnitude of peak candidates
if f0t>0: # if stable f0 in previous frame
shortlist = np.argwhere(np.abs(f0cf-f0t)<f0t/2.0)[:,0] # use only peaks close to it
maxc = np.argmax(f0cm)
maxcfd = f0cf[maxc]%f0t
if maxcfd > f0t/2:
maxcfd = f0t - maxcfd
if (maxc not in shortlist) and (maxcfd>(f0t/4)): # or the maximum magnitude peak is not a harmonic
shortlist = np.append(maxc, shortlist)
f0cf = f0cf[shortlist] # frequencies of candidates
if (f0cf.size == 0): # return 0 if no peak candidates
return 0
f0, f0error = UF_C.twm(pfreq, pmag, f0cf) # call the TWM function with peak candidates
if (f0>0) and (f0error<ef0max): # accept and return f0 if below max error allowed
return f0
else:
return 0
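# Typical call for one analysis frame (the error and frequency bounds are illustrative):
#
#     f0 = f0Twm(ipfreq, ipmag, ef0max=5.0, minf0=100, maxf0=2000, f0t=f0stable)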
def TWM_p(pfreq, pmag, f0c):
"""
Two-way mismatch algorithm for f0 detection (by Beauchamp&Maher)
[better to use the C version of this function: UF_C.twm]
pfreq, pmag: peak frequencies in Hz and magnitudes,
f0c: frequencies of f0 candidates
returns f0, f0Error: fundamental frequency detected and its error
"""
p = 0.5 # weighting by frequency value
q = 1.4 # weighting related to magnitude of peaks
r = 0.5 # scaling related to magnitude of peaks
rho = 0.33 # weighting of MP error
Amax = max(pmag) # maximum peak magnitude
maxnpeaks = 10 # maximum number of peaks used
harmonic = np.matrix(f0c)
ErrorPM = np.zeros(harmonic.size) # initialize PM errors
MaxNPM = min(maxnpeaks, pfreq.size)
for i in range(0, MaxNPM) : # predicted to measured mismatch error
difmatrixPM = harmonic.T * np.ones(pfreq.size)
difmatrixPM = abs(difmatrixPM - np.ones((harmonic.size, 1))*pfreq)
FreqDistance = np.amin(difmatrixPM, axis=1) # minimum along rows
peakloc = np.argmin(difmatrixPM, axis=1)           # location of minima along rows
Ponddif = np.array(FreqDistance) * (np.array(harmonic.T)**(-p))
PeakMag = pmag[peakloc]
MagFactor = 10**((PeakMag-Amax)/20)
ErrorPM = ErrorPM + (Ponddif + MagFactor*(q*Ponddif-r)).T
harmonic = harmonic + f0c
ErrorMP = np.zeros(harmonic.size)                  # initialize MP errors
MaxNMP = min(maxnpeaks, pfreq.size)
for i in range(0, f0c.size):                       # measured to predicted mismatch error
    nharm = np.round(pfreq[:MaxNMP]/f0c[i])
    nharm = (nharm>=1)*nharm + (nharm<1)
    FreqDistance = abs(pfreq[:MaxNMP] - nharm*f0c[i])
    Ponddif = FreqDistance * (pfreq[:MaxNMP]**(-p))
    MagFactor = 10**((pmag[:MaxNMP]-Amax)/20)
    ErrorMP[i] = sum(MagFactor * (Ponddif + MagFactor*(q*Ponddif-r)))
Error = (ErrorPM[0]/MaxNPM) + (rho*ErrorMP/MaxNMP) # total error
f0index = np.argmin(Error)                         # get the smallest error
f0 = f0c[f0index]                                  # f0 with the smallest error
return f0, Error[f0index]
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 21:09:01 2019
@author: <NAME> (<EMAIL>)
"""
'''
Utility functions to make regression plots
'''
import os
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from scipy.stats import norm
import seaborn as sns
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
'''
Plot the regression results
'''
def predict_y(x, intercept, J_nonzero):
y = np.dot(x, J_nonzero) + intercept
return y
def cal_path(alphas, model, X_cv_train, y_cv_train, X_cv_test, y_cv_test, fit_int_flag):
'''
Calculate both RMSE and number of coefficients path for plotting purpose
'''
RMSE_path = []
coef_path = []
for j in range(len(X_cv_train)):
test_scores = np.zeros(len(alphas))
coefs_i = np.zeros(len(alphas))
for i, ai in enumerate(alphas):
estimator = model(alpha = ai, max_iter = 1e7, tol = 0.001, fit_intercept=fit_int_flag, random_state = 0)
estimator.fit(X_cv_train[j], y_cv_train[j])
# Access the errors, error per cluster
test_scores[i] = np.sqrt(mean_squared_error(y_cv_test[j], estimator.predict(X_cv_test[j]))) #RMSE
coefs_i[i] = len(np.nonzero(estimator.coef_)[0])
RMSE_path.append(test_scores)
coef_path.append(coefs_i)
RMSE_path = np.transpose(np.array(RMSE_path))
coef_path = np.transpose(np.array(coef_path))
return RMSE_path, coef_path
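# Hedged usage sketch: X_cv_train/y_cv_train/X_cv_test/y_cv_test are assumed to be
# pre-split lists of fold arrays, and alpha_opt the alpha selected by cross-validation.
#
#     from sklearn.linear_model import Lasso
#     alphas = np.logspace(-4, 1, 50)
#     RMSE_path, coef_path = cal_path(alphas, Lasso, X_cv_train, y_cv_train,
#                                     X_cv_test, y_cv_test, fit_int_flag=True)
#     plot_path(X, y, alpha_opt, alphas, RMSE_path, coef_path, Lasso, 'lasso')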
def plot_coef_path(alpha, alphas, coef_path, model_name, output_dir = os.getcwd()):
'''
#plot alphas vs the number of nonzero coefficients along the path
'''
fig = plt.figure(figsize=(6, 6))
plt.plot(-np.log10(alphas), coef_path, ':', linewidth= 0.8)
plt.plot(-np.log10(alphas), np.mean(coef_path, axis = 1),
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(alpha), linestyle='--' , color='r', linewidth=3,
label='Optimal alpha')
plt.legend(frameon=False, loc='best')
plt.xlabel(r'$-log10(\lambda)$')
plt.ylabel("Number of Nonzero Coefficients ")
plt.tight_layout()
fig.savefig(os.path.join(output_dir, model_name + '_a_vs_n.png'))
def plot_RMSE_path(alpha, alphas, RMSE_path, model_name, output_dir = os.getcwd()):
'''
#plot alphas vs RMSE along the path
'''
fig = plt.figure(figsize=(6, 6))
plt.plot(-np.log10(alphas), RMSE_path, ':', linewidth= 0.8)
plt.plot(-np.log10(alphas), np.mean(RMSE_path, axis = 1),
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(alpha), linestyle='--' , color='r', linewidth=3,
label='Optimal alpha')
plt.legend(frameon=False,loc='best')
plt.xlabel(r'$-log10(\lambda)$')
plt.ylabel("RMSE (eV)")
plt.tight_layout()
fig.savefig(os.path.join(output_dir, model_name + '_a_vs_cv.png'))
def plot_path(X, y, alpha, alphas, RMSE_path, coef_path, model, model_name, output_dir = os.getcwd()):
'''
Overall plot function for lasso/elastic net
'''
plot_coef_path(alpha, alphas, coef_path, model_name, output_dir)
plot_RMSE_path(alpha, alphas, RMSE_path, model_name, output_dir)
'''
#make performance plot - optional
'''
#plot_performance(X, y, model, model_name, output_dir)
def plot_ridge_path(alpha, alphas, RMSE_path, model_name, output_dir = os.getcwd()):
fig = plt.figure(figsize=(6, 6))
plt.plot(-np.log10(alphas), np.mean(RMSE_path, axis = 1),
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(alpha), linestyle='--', color='r', linewidth=3,
label='Optimal alpha')
import sdeint
import complex_analysis
import numpy as np
import numexpr as ne
from typing import Type, List, NoReturn, Union, Dict
RealNumber = Union[np.float16, np.float32, np.float64, float];
ComplexNumber = Union[np.complex64, np.complex128];
def vslit_zip(z:Union[ComplexNumber,Type[np.ndarray]], dt:RealNumber, u:RealNumber) -> Union[ComplexNumber,Type[np.ndarray]]:
#
# Discretization of Chordal Loewner chains.
# This function is the inverse of the solution to the Chordal Loewner Equation with driving function
# U(t) = du
# where t runs in [0,dt].
# Notice that it maps the origin to the point w = du + 2j * dt**0.5.
#
return ne.evaluate("1j * sqrt(4 * dt - (z-u) ** 2) + u")
def multiple_slits(t:Type[np.ndarray], *driving_fncs:Type[np.ndarray]) -> Type[np.ndarray]:
#
# Compute the discretized Loewner trace of a multiple SLEs with prescribed driving functions,
# which are sampled at discrete time instants.
# The trace is defined as the union z(t) of curves z_i(t) such that
# z(t) = f_1(t, u_1(t)) + ... + f_n(t, u_n(t))
# where f_i(t, w) is the inverse of g_i(t, w), which is the solution to the chordal Loewner Equation
#
# dg_i 2
# ------ = ------------------
# dt g_i(t, w) - u_i(t)
#
#
# Input:
# t : 1d ndarray of floats
# driving_fncs : n 1d ndarrays of floats
#
# Output:
# z : list of n 1d ndarray of complex
#
nsteps = len(t);
#if n driving functions are given, use them!
if driving_fncs:
nslits = len(driving_fncs);
u = np.array(driving_fncs);
#else, just use a constant one!
else :
nslits = 1;
u = np.zeros((1, nsteps)); # 2-D so that u[slit][:nsteps] below indexes a row
#initializing the trace array...
z = np.zeros([nslits,nsteps], dtype=np.complex) # zeros, not empty: the loop below accumulates with +=
for slit in range(0,nslits) :
z[slit][:nsteps] += (u[slit][:nsteps]+0.000001j);
#getting the trace...
for step in range(nsteps-1, 0, -1):
dt = (t[step] - t[step-1]) / nslits
for slit1 in range(0, nslits, 1):
for slit2 in range(0, nslits, 1):
z[slit2][step:] = vslit_zip(z[slit2][step:], dt, u[slit1][step]);
#done!
return z[:]
def single_slit(t:Type[np.ndarray], u:Type[np.ndarray]) -> Type[np.ndarray]:
#
# Compute the discretized Loewner trace of a one slit with prescribed driving function,
# which are sampled at discrete time instants.
#
# Input:
# t : 1d ndarray of floats
# u : 1d ndarrays of floats
#
# Output:
# z : 1d ndarray of complex
#
z = multiple_slits(t,u)
return z[0][:]
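def _single_slit_demo():
    # Hedged usage sketch (editor's addition): a constant (zero) driving
    # function should grow an approximately straight vertical slit from the
    # origin; plot z.real against z.imag to inspect it.
    t = np.linspace(0.0, 1.0, 400)
    u = np.zeros_like(t)
    z = single_slit(t, u)
    return z  # 1-D complex trace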
def multiple_driving_functions(x0:RealNumber, t:RealNumber, kappa:RealNumber=1.0) -> List[RealNumber]:
#number of slits: it will be the number of starting points
nslits = len(x0);
#nslits independent Brownian motions with scaling factor kappa
B = np.diag(np.zeros(nslits, dtype=np.complex));
""" part of source code from PointNetLK (https://github.com/hmgoforth/PointNetLK),
Deep Global Registration (https://github.com/chrischoy/DeepGlobalRegistration),
SECOND (https://github.com/traveller59/second.pytorch), modified. """
import os
import glob
import numpy as np
import torch
import torch.utils.data
import six
import copy
import csv
import open3d as o3d
import utils
def load_3dmatch_batch_data(p0_fi, p1_fi, voxel_ratio):
p0 = np.load(p0_fi)['pcd']
p1 = np.load(p1_fi)['pcd']
# voxelization
pcd0 = o3d.geometry.PointCloud()
pcd0.points = o3d.utility.Vector3dVector(p0)
p0_downsampled_pcd = pcd0.voxel_down_sample(voxel_size=voxel_ratio) # open3d 0.8.0.0+
p0_downsampled = np.array(p0_downsampled_pcd.points)
pcd1 = o3d.geometry.PointCloud()
pcd1.points = o3d.utility.Vector3dVector(p1)
p1_downsampled_pcd = pcd1.voxel_down_sample(voxel_size=voxel_ratio) # open3d 0.8.0.0+
p1_downsampled = np.array(p1_downsampled_pcd.points)
return p0_downsampled, p1_downsampled
def find_voxel_overlaps(p0, p1, voxel):
xmin, ymin, zmin = np.max(np.stack([np.min(p0, 0), np.min(p1, 0)]), 0)
xmax, ymax, zmax = np.min(np.stack([np.max(p0, 0), np.max(p1, 0)]), 0)
# truncate the point cloud
eps = 1e-6
p0_ = p0[np.all(p0>[xmin+eps,ymin+eps,zmin+eps], axis=1) & np.all(p0<[xmax-eps,ymax-eps,zmax-eps], axis=1)]
p1_ = p1[np.all(p1>[xmin+eps,ymin+eps,zmin+eps], axis=1) & np.all(p1<[xmax-eps,ymax-eps,zmax-eps], axis=1)]
# recalculate the constraints
xmin, ymin, zmin = np.max(np.stack([np.min(p0, 0), np.min(p1, 0)]), 0)
xmax, ymax, zmax = np.min(np.stack([np.max(p0, 0), np.max(p1, 0)]), 0)
vx = (xmax - xmin) / voxel
vy = (ymax - ymin) / voxel
vz = (zmax - zmin) / voxel
return p0_, p1_, xmin, ymin, zmin, xmax, ymax, zmax, vx, vy, vz
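def _voxel_overlap_demo():
    # Hedged usage sketch (editor's addition): two partially overlapping
    # random clouds; the helper truncates both to their shared bounding box
    # and returns the per-axis voxel edge lengths for a `voxel`-cell grid.
    rng = np.random.RandomState(0)
    p0 = rng.rand(2000, 3)
    p1 = rng.rand(2000, 3) + 0.2
    p0_, p1_, xmin, ymin, zmin, xmax, ymax, zmax, vx, vy, vz = \
        find_voxel_overlaps(p0, p1, 8)
    return p0_.shape, p1_.shape, (vx, vy, vz)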
class ThreeDMatch_Testing(torch.utils.data.Dataset):
def __init__(self, dataset_path, category, overlap_ratio, voxel_ratio, voxel, max_voxel_points, num_voxels, rigid_transform, vis):
self.dataset_path = dataset_path
self.pairs = []
with open(category, 'r') as fi:
cinfo_fi = fi.read().split() # category names
for i in range(len(cinfo_fi)):
cat_name = cinfo_fi[i]
cinfo_name = cat_name + '*%.2f.txt' % overlap_ratio
cinfo = glob.glob(os.path.join(self.dataset_path, cinfo_name))
for fi_name in cinfo:
with open(fi_name) as fi:
fi_list = [x.strip().split() for x in fi.readlines()]
for fi in fi_list:
self.pairs.append([fi[0], fi[1]])
self.voxel_ratio = voxel_ratio
self.voxel = int(voxel)
self.max_voxel_points = max_voxel_points
self.num_voxels = num_voxels
self.perturbation = load_pose(rigid_transform, len(self.pairs))
self.vis = vis
def __len__(self):
return len(self.pairs)
def do_transform(self, p0, x):
# p0: [N, 3]
# x: [1, 6], twist-params
g = utils.exp(x).to(p0) # [1, 4, 4]
p1 = utils.transform(g, p0)
igt = g.squeeze(0) # igt: p0 -> p1
return p1, igt
def __getitem__(self, index):
p0_pre, p1_pre = load_3dmatch_batch_data(os.path.join(self.dataset_path, self.pairs[index][0]), os.path.join(self.dataset_path, self.pairs[index][1]), self.voxel_ratio)
# voxelization
p0, p1, xmin, ymin, zmin, xmax, ymax, zmax, vx, vy, vz = find_voxel_overlaps(p0_pre, p1_pre, self.voxel) # constraints of P1 ^ P2, where contains roughly overlapped area
voxels_p0, coords_p0, num_points_per_voxel_p0 = points_to_voxel_second(p0, (xmin, ymin, zmin, xmax, ymax, zmax),
(vx, vy, vz), self.max_voxel_points, reverse_index=False, max_voxels=self.num_voxels)
voxels_p1, coords_p1, num_points_per_voxel_p1 = points_to_voxel_second(p1, (xmin, ymin, zmin, xmax, ymax, zmax),
(vx, vy, vz), self.max_voxel_points, reverse_index=False, max_voxels=self.num_voxels)
coords_p0_idx = coords_p0[:,1]*(int(self.voxel**2)) + coords_p0[:,0]*(int(self.voxel)) + coords_p0[:,2]
coords_p1_idx = coords_p1[:,1]*(int(self.voxel**2)) + coords_p1[:,0]*(int(self.voxel)) + coords_p1[:,2]
# calculate for the voxel medium
xm_x = np.linspace(xmin+vx/2, xmax-vx/2, int(self.voxel))
xm_y = np.linspace(ymin+vy/2, ymax-vy/2, int(self.voxel))
xm_z = np.linspace(zmin+vz/2, zmax-vz/2, int(self.voxel))
mesh3d = np.vstack(np.meshgrid(xm_x,xm_y,xm_z)).reshape(3,-1).T
voxel_coords_p0 = mesh3d[coords_p0_idx]
voxel_coords_p1 = mesh3d[coords_p1_idx]
# find voxels whose point count is at least 10% of max_voxel_points (the 0.1 factor below)
idx_conditioned_p0 = coords_p0_idx[np.where(num_points_per_voxel_p0>=0.1*self.max_voxel_points)]
idx_conditioned_p1 = coords_p1_idx[np.where(num_points_per_voxel_p1>=0.1*self.max_voxel_points)]
idx_conditioned, _, _ = np.intersect1d(idx_conditioned_p0, idx_conditioned_p1, assume_unique=True, return_indices=True)
_, _, idx_p0 = np.intersect1d(idx_conditioned, coords_p0_idx, assume_unique=True, return_indices=True)
_, _, idx_p1 = np.intersect1d(idx_conditioned, coords_p1_idx, assume_unique=True, return_indices=True)
voxel_coords_p0 = voxel_coords_p0[idx_p0]
voxel_coords_p1 = voxel_coords_p1[idx_p1]
voxels_p0 = voxels_p0[idx_p0]
voxels_p1 = voxels_p1[idx_p1]
x = torch.from_numpy(self.perturbation[index][np.newaxis,...])
voxels_p1_, igt = self.do_transform(torch.from_numpy(voxels_p1.reshape(-1,3)), x)
voxels_p1 = voxels_p1_.reshape(voxels_p1.shape)
voxel_coords_p1, _ = self.do_transform(torch.from_numpy(voxel_coords_p1).double(), x)
p1, _ = self.do_transform(torch.from_numpy(p1), x)
if self.vis:
return voxels_p0, voxel_coords_p0, voxels_p1, voxel_coords_p1, igt, p0, p1
else:
return voxels_p0, voxel_coords_p0, voxels_p1, voxel_coords_p1, igt
class ToyExampleData(torch.utils.data.Dataset):
def __init__(self, p0, p1, voxel_ratio, voxel, max_voxel_points, num_voxels, rigid_transform, vis):
self.voxel_ratio = voxel_ratio
self.voxel = int(voxel)
self.max_voxel_points = max_voxel_points
self.num_voxels = num_voxels
self.perturbation = rigid_transform
self.p0 = p0
self.p1 = p1
self.vis = vis
def __len__(self):
return len(self.p0)
def do_transform(self, p0, x):
# p0: [N, 3]
# x: [1, 6], twist-params
g = utils.exp(x).to(p0) # [1, 4, 4]
p1 = utils.transform(g, p0)
igt = g.squeeze(0) # igt: p0 -> p1
return p1, igt
def __getitem__(self, index):
p0_pre = self.p0[index]
p1_pre = self.p1[index]
# voxelization
p0, p1, xmin, ymin, zmin, xmax, ymax, zmax, vx, vy, vz = find_voxel_overlaps(p0_pre, p1_pre, self.voxel) # constraints of P1 ^ P2, where contains roughly overlapped area
voxels_p0, coords_p0, num_points_per_voxel_p0 = points_to_voxel_second(p0, (xmin, ymin, zmin, xmax, ymax, zmax),
(vx, vy, vz), self.max_voxel_points, reverse_index=False, max_voxels=self.num_voxels)
voxels_p1, coords_p1, num_points_per_voxel_p1 = points_to_voxel_second(p1, (xmin, ymin, zmin, xmax, ymax, zmax),
(vx, vy, vz), self.max_voxel_points, reverse_index=False, max_voxels=self.num_voxels)
coords_p0_idx = coords_p0[:,1]*(int(self.voxel**2)) + coords_p0[:,0]*(int(self.voxel)) + coords_p0[:,2]
coords_p1_idx = coords_p1[:,1]*(int(self.voxel**2)) + coords_p1[:,0]*(int(self.voxel)) + coords_p1[:,2]
# calculate for the voxel medium
xm_x = np.linspace(xmin+vx/2, xmax-vx/2, int(self.voxel))
xm_y = np.linspace(ymin+vy/2, ymax-vy/2, int(self.voxel))
xm_z = np.linspace(zmin+vz/2, zmax-vz/2, int(self.voxel))
mesh3d = np.vstack(np.meshgrid(xm_x,xm_y,xm_z)).reshape(3,-1).T
voxel_coords_p0 = mesh3d[coords_p0_idx]
voxel_coords_p1 = mesh3d[coords_p1_idx]
# find voxels whose point count is at least 10% of max_voxel_points (the 0.1 factor below)
idx_conditioned_p0 = coords_p0_idx[np.where(num_points_per_voxel_p0>=0.1*self.max_voxel_points)]
idx_conditioned_p1 = coords_p1_idx[np.where(num_points_per_voxel_p1>=0.1*self.max_voxel_points)]
idx_conditioned, _, _ = np.intersect1d(idx_conditioned_p0, idx_conditioned_p1, assume_unique=True, return_indices=True)
_, _, idx_p0 = np.intersect1d(idx_conditioned, coords_p0_idx, assume_unique=True, return_indices=True)
_, _, idx_p1 = np.intersect1d(idx_conditioned, coords_p1_idx, assume_unique=True, return_indices=True)
voxel_coords_p0 = voxel_coords_p0[idx_p0]
voxel_coords_p1 = voxel_coords_p1[idx_p1]
voxels_p0 = voxels_p0[idx_p0]
voxels_p1 = voxels_p1[idx_p1]
x = torch.from_numpy(self.perturbation[index][np.newaxis,...])
voxels_p1_, igt = self.do_transform(torch.from_numpy(voxels_p1.reshape(-1,3)), x)
voxels_p1 = voxels_p1_.reshape(voxels_p1.shape)
voxel_coords_p1, _ = self.do_transform(torch.from_numpy(voxel_coords_p1).double(), x)
p1, _ = self.do_transform(torch.from_numpy(p1), x)
if self.vis:
return voxels_p0, voxel_coords_p0, voxels_p1, voxel_coords_p1, igt, p0, p1
else:
return voxels_p0, voxel_coords_p0, voxels_p1, voxel_coords_p1, igt
class RandomTransformSE3:
""" randomly generate rigid transformations """
def __init__(self, mag=1, mag_randomly=True):
self.mag = mag
self.randomly = mag_randomly
self.gt = None
self.igt = None
def generate_transform(self):
amp = self.mag
if self.randomly:
amp = torch.rand(1, 1) * self.mag
x = torch.randn(1, 6)
x = x / x.norm(p=2, dim=1, keepdim=True) * amp
return x
def apply_transform(self, p0, x):
# p0: [N, 3]
# x: [1, 6], twist params
g = utils.exp(x).to(p0) # [1, 4, 4]
gt = utils.exp(-x).to(p0) # [1, 4, 4]
p1 = utils.transform(g, p0)
self.gt = gt # p1 --> p0
self.igt = g # p0 --> p1
return p1
def transform(self, tensor):
x = self.generate_transform()
return self.apply_transform(tensor, x)
def __call__(self, tensor):
return self.transform(tensor)
def add_noise(pointcloud, sigma=0.01, clip=0.05):
N, C = pointcloud.shape
pointcloud += torch.clamp(sigma * torch.randn(N, C), -1 * clip, clip)
return pointcloud
class PointRegistration(torch.utils.data.Dataset):
def __init__(self, dataset, rigid_transform, sigma=0.00, clip=0.00):
self.dataset = dataset
self.transf = rigid_transform
self.sigma = sigma
self.clip = clip
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
pm, _ = self.dataset[index] # one point cloud
p_ = add_noise(pm, sigma=self.sigma, clip=self.clip)
p1 = self.transf(p_)
igt = self.transf.igt.squeeze(0)
p0 = pm
# p0: template, p1: source, igt:transform matrix from p0 to p1
return p0, p1, igt
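def _registration_demo():
    # Hedged wiring sketch (editor's addition): wrap any (points, label)
    # dataset so each item yields a template/source pair plus the ground-truth
    # transform. Relies on this repo's utils.exp/utils.transform being importable.
    class _OneCloud(torch.utils.data.Dataset):
        def __len__(self):
            return 4
        def __getitem__(self, i):
            return torch.rand(1024, 3), 0
    ds = PointRegistration(_OneCloud(), RandomTransformSE3(mag=0.8), sigma=0.01, clip=0.05)
    p0, p1, igt = ds[0]  # p0: template, p1: noisy transformed source
    return p0, p1, igt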
class PointRegistration_fixed_perturbation(torch.utils.data.Dataset):
def __init__(self, dataset, rigid_transform, sigma=0.00, clip=0.00):
torch.manual_seed(713)
self.dataset = dataset
self.transf_ = load_pose(rigid_transform, len(self.dataset))
list_order = torch.randperm(len(self.dataset))
self.transf = self.transf_[list_order]
self.sigma = sigma
self.clip = clip
def __len__(self):
return len(self.dataset)
def transform(self, p0, x):
# p0: [N, 3]
# x: [1, 6], twist-vector (rotation and translation)
g = utils.exp(x).to(p0) # [1, 4, 4]
p1 = utils.transform(g, p0)
igt = g.squeeze(0)
return p1, igt
def __getitem__(self, index):
pm, _ = self.dataset[index] # one point cloud
p_ = add_noise(pm, sigma=self.sigma, clip=self.clip)
p0 = pm
x = torch.from_numpy(self.transf[index][np.newaxis, ...]).to(p0)
p1, igt = self.transform(p_, x)
# p0: template, p1: source, igt:transform matrix from p0 to p1
return p0, p1, igt
# adapted from SECOND: https://github.com/nutonomy/second.pytorch/blob/master/second/core/point_cloud/point_cloud_ops.py
def _points_to_voxel_kernel(points,
voxel_size,
coords_range,
num_points_per_voxel,
coor_to_voxelidx,
voxels,
coors,
max_points=35,
max_voxels=20000):
# need mutex if write in cuda, but numba.cuda don't support mutex.
# in addition, pytorch don't support cuda in dataloader(tensorflow support this).
# put all computations to one loop.
# we shouldn't create large array in main jit code, otherwise
# decrease performance
N = points.shape[0]
ndim = 3
grid_size = (coords_range[3:] - coords_range[:3]) / voxel_size
grid_size = np.around(grid_size, 0, grid_size).astype(np.int32)
coor = np.zeros(shape=(3, ), dtype=np.int32)
voxel_num = 0
failed = False
for i in range(N):
failed = False
for j in range(ndim):
c = np.floor((points[i, j] - coords_range[j]) / voxel_size[j])
if c < 0 or c >= grid_size[j]:
failed = True
break
coor[j] = c
if failed:
continue
voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]]
if voxelidx == -1:
voxelidx = voxel_num
# print(voxel_num)
if voxel_num >= max_voxels:  # arrays are sized max_voxels; avoid out-of-range voxelidx
break
voxel_num += 1
coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx
coors[voxelidx] = coor
num = num_points_per_voxel[voxelidx]
if num < max_points:
voxels[voxelidx, num] = points[i]
num_points_per_voxel[voxelidx] += 1
return voxel_num
# adapted from SECOND: https://github.com/nutonomy/second.pytorch/blob/master/second/core/point_cloud/point_cloud_ops.py
def points_to_voxel_second(points,
coords_range,
voxel_size,
max_points=100,
reverse_index=False,
max_voxels=20000):
"""convert kitti points(N, >=3) to voxels. This version calculate
everything in one loop. now it takes only 4.2ms(complete point cloud)
with jit and 3.2ghz cpu.(don't calculate other features)
Note: this function in ubuntu seems faster than windows 10.
Args:
points: [N, ndim] float tensor. points[:, :3] contain xyz points and
points[:, 3:] contain other information such as reflectivity.
voxel_size: [3] list/tuple or array, float. xyz, indicate voxel size
coords_range: [6] list/tuple or array, float. indicate voxel range.
format: xyzxyz, minmax
max_points: int. indicate maximum points contained in a voxel.
reverse_index: boolean. indicate whether return reversed coordinates.
if points has xyz format and reverse_index is True, output
coordinates will be zyx format, but points in features always
xyz format.
max_voxels: int. indicate maximum voxels this function create.
for second, 20000 is a good choice. you should shuffle points
before call this function because max_voxels may drop some points.
Returns:
voxels: [M, max_points, ndim] float tensor. only contain points.
coordinates: [M, 3] int32 tensor.
num_points_per_voxel: [M] int32 tensor.
"""
if not isinstance(voxel_size, np.ndarray):
voxel_size = np.array(voxel_size, dtype=points.dtype)
if not isinstance(coords_range, np.ndarray):
coords_range = np.array(coords_range, dtype=points.dtype)
voxelmap_shape = (coords_range[3:] - coords_range[:3]) / voxel_size
voxelmap_shape = tuple(np.around(voxelmap_shape).astype(np.int32).tolist())
if reverse_index:
voxelmap_shape = voxelmap_shape[::-1]
# don't create large array in jit(nopython=True) code.
num_points_per_voxel = np.zeros(shape=(max_voxels, ), dtype=np.int32)
coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32)
voxels = np.ones(
shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype) * np.mean(points, 0)
#!/usr/bin/python
import numpy as np
import xgboost as xgb
### load data in do training
dtrain = xgb.DMatrix('../data/agaricus.txt.train')
param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic'}
num_round = 2
print('running cross validation')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, num_round, nfold=5,
metrics={'error'}, seed=0,
callbacks=[xgb.callback.print_evaluation(show_stdv=True)])
print('running cross validation, disable standard deviation display')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value
res = xgb.cv(param, dtrain, num_boost_round=10, nfold=5,
metrics={'error'}, seed=0,
callbacks=[xgb.callback.print_evaluation(show_stdv=False),
xgb.callback.early_stop(3)])
print(res)
print('running cross validation, with preprocessing function')
# define the preprocessing function
# used to return the preprocessed training, test data, and parameter
# we can use this to do weight rescale, etc.
# as a example, we try to set scale_pos_weight
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
return (dtrain, dtest, param)
# do cross validation, for each fold
# the dtrain, dtest, param will be passed into fpreproc
# then the return value of fpreproc will be used to generate
# results of that fold
xgb.cv(param, dtrain, num_round, nfold=5,
metrics={'auc'}, seed=0, fpreproc=fpreproc)
###
# you can also do cross validation with customized loss function
# See custom_objective.py
##
print('running cross validation, with cutomsized loss function')
def logregobj(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
grad = preds - labels          # gradient of the logistic loss w.r.t. the margin
hess = preds * (1.0 - preds)   # diagonal of the hessian
return grad, hess
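# Hedged continuation sketch (editor's addition): wiring the customized
# objective into cv, following the standard xgboost custom-objective demo;
# `evalerror` below is an assumed companion metric (preds are raw margins with
# a custom objective, so the decision threshold is 0).
def evalerror(preds, dtrain):
    labels = dtrain.get_label()
    return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
xgb.cv(param, dtrain, num_round, nfold=5, seed=0, obj=logregobj, feval=evalerror)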
import gc
import glob
import os
import cv2
import numpy as np
import scipy.io as sio
from PIL import Image
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
class DataHandler:
def __init__(self):
print('data handler')
self.train_labels = None
self.test_labels = None
self.valid_labels = None
def preprocess(self, data_batch, label_batch):
meta_data = self.meta_data
rotate_degree = meta_data['random_rotate_value']
translate_std_ratio = meta_data['random_translate_ratio_value']
crop_width = meta_data['crop_width']
crop_height = meta_data['crop_height']
resize_width = meta_data['resize_width']
resize_height = meta_data['resize_height']
scale_data_value = meta_data['scale_data']
scale_label_value = meta_data['scale_label']
stream_specific_scale_label = meta_data['stream_specific_scale_label']
reshape_batch = meta_data['reshape_batch']
# cv2.imshow('original', data_batch[0])
if (resize_height, resize_width) != (None, None):  # 'is not' on a tuple literal is always True
data_batch, label_batch = self.resize(data_batch, label_batch, resize_width, resize_height)
if rotate_degree is not None:
data_batch, label_batch = self.rotate_random(data_batch, label_batch, rotate_degree)
if self.meta_data['subtract_mean'] is True:
data_batch = self.subtract_mean_image(data_batch)
if translate_std_ratio is not None:
self.translate_random(data_batch, label_batch, translate_std_ratio)
if (crop_height, crop_width) != (None, None):
data_batch, label_batch = self.crop_middle(data_batch, label_batch, crop_width, crop_height)
if scale_data_value != 1:
data_batch = self.scale_data(data_batch, scale_data_value)
if stream_specific_scale_label is not None:
label_batch = self.streamspecific_scale_label(label_batch, stream_specific_scale_label, self.current_view)
data_batch = np.asarray(data_batch)
if reshape_batch == 'tf':
data_batch = self.match_TensorFlow_shape(data_batch)
elif reshape_batch == 'caffe':
data_batch = self.match_caffe_shape(data_batch)
return (data_batch, label_batch)
def scale_data(self, data_batch, scale_data_value):
return [img / scale_data_value for img in data_batch]
def streamspecific_scale_label(self, labels, stream_label_ranges, current_stream):
temp = [l / stream_label_ranges[current_stream] for l in labels]
# fix error in labels if above 1
temp = [label if label <= 1 else 1 for label in temp]
return temp
def match_caffe_shape(self, imgs):
num_seq = self.meta_data['sequence_length']
channels = self.meta_data['channels']
if self.meta_data['sequence_length'] != 1:
raise NotImplementedError
else:
imgs = np.swapaxes(imgs, 1, 3)
imgs = np.swapaxes(imgs, 2, 3)
if channels == 1:
imgs = imgs.reshape(imgs.shape[0], # batch size
channels, # channels
imgs.shape[1], # width
imgs.shape[2], # height
)
else:
# todo: test
pass
return imgs
def match_Theano_shape(self, imgs):
raise NotImplementedError
def match_TensorFlow_shape(self, imgs):
# For 3D data, "tf" assumes (conv_dim1, conv_dim2, conv_dim3, channels)
# while "th" assumes (channels, conv_dim1, conv_dim2, conv_dim3).
num_seq = self.meta_data['sequence_length']
channels = self.meta_data['channels']
if self.meta_data['sequence_length'] != 1:
imgs = np.swapaxes(imgs, 1, 3)
imgs = np.swapaxes(imgs, 2, 3)
imgs = imgs.reshape(imgs.shape[0], # batch size
imgs.shape[1], # num_frames
imgs.shape[2], # width
imgs.shape[3], # height
channels # channels
)
else:
imgs = imgs.reshape(imgs.shape[0], # batch size
imgs.shape[1], # width
imgs.shape[2], # height
channels # channels
)
return imgs
def _read_train_valid_from_list_file(self, train_list, valid_list):
load_to_memory = self.meta_data['load_to_memory']
split_ratio = self.meta_data['split_ratio']
train_images = train_labels = valid_images = valid_labels = []
if train_list is not None and valid_list is not None:
train_images, train_labels = self.read_data_from_list_file(train_list)
valid_images, valid_labels = self.read_data_from_list_file(valid_list)
elif train_list is not None and split_ratio != 0:
images, labels = self.read_data_from_list_file(train_list)
train_images, valid_images, train_labels, valid_labels = \
train_test_split(images, labels, test_size=split_ratio)
elif train_list is not None:
train_images, train_labels = self.read_data_from_list_file(train_list)
print('No validation set is defined, and no split ratio is set.')
[valid_labels, valid_images] = [[], []]
return train_images, train_labels, valid_images, valid_labels
def _read_train_valid_from_folder(self, train_folder, valid_folder, image_format, split_ratio):
train_images = train_labels = valid_images = valid_labels = []
if train_folder is not None and valid_folder is not None:
train_images, train_labels = self._read_data_from_folder(train_folder, image_format)
valid_images, valid_labels = self._read_data_from_folder(valid_folder, image_format)
elif train_folder is not None and split_ratio != 0:
images, labels = self._read_data_from_folder(train_folder, image_format)
train_images, valid_images, train_labels, valid_labels = \
train_test_split(images, labels, test_size=split_ratio)
elif train_folder is not None:
train_images, train_labels = self._read_data_from_folder(train_folder, image_format)
print('No validation set is defined, and no split ratio is set.')
valid_labels, valid_images = [[], []]
return train_images, train_labels, valid_images, valid_labels
def _set_data_list_file(self, train_list_file, valid_list_file):
self.valid_images = []
self.train_images = []
self.valid_labels = []
self.train_labels = []
for i in range(len(train_list_file)):
train_images, train_labels, \
valid_images, valid_labels = \
self._read_train_valid_from_list_file(train_list_file[i], valid_list_file[i])
self.train_images.append(train_images)
self.train_labels.append(train_labels)
self.valid_images.append(valid_images)
self.valid_labels.append(valid_labels)
def _set_data(self, data):
self.train_images, self.train_labels, self.valid_images, self.valid_labels = \
data[0], data[1], data[2], data[3]
def _set_data_folder(self, train_folder, valid_folder):
self.train_images, self.train_labels, self.valid_images, self.valid_labels = \
self._read_train_valid_from_folder(train_folder, valid_folder,
self.meta_data['file_format'], self.meta_data['split_ratio'])
def set_test_data(self, meta_data):
self.set_meta_data(meta_data)
load_to_memory = meta_data['load_to_memory']
label_type = meta_data['label_type']
test_list_file = meta_data['test_list_file']
self.test_images = []
self.test_labels = []
for i in range(len(test_list_file)):
test_images, test_labels = self.read_data_from_list_file(test_list_file[i])
self.test_images.append(test_images)
self.test_labels.append(test_labels)
if 'main_label_index' in self.meta_data:
main_label_index = self.meta_data['main_label_index']
else:
main_label_index = 0
self.test_label_map, self.test_label_headers = self.create_label_map(self.test_labels, main_label_index)
self.test_iterator = np.zeros(self.get_num_views(), np.int)
def set_data(self, data, meta_data):
self.set_meta_data(meta_data)
load_to_memory = meta_data['load_to_memory']
label_type = meta_data['label_type']
#set data
if load_to_memory:
print('loading all data into memory: %r' % load_to_memory)
if label_type == 'single_value':
if meta_data['train_list'] is not None:
self._set_data_list_file(meta_data['train_list'], meta_data['valid_list'])
elif data is not None:
self._set_data(data)
elif label_type == 'mask_image':
# reading data from folder
if meta_data['train_folder'] is not None:
self._set_data_folder(meta_data['train_folder'], meta_data['valid_folder'])
#update meta data
self.update_meta_data({'training_size': self.get_dataset_size()[0],
'valid_size': self.get_dataset_size()[1]})
if meta_data['subtract_mean']:
print('calculating mean image...')
self.mean_train_image = self.set_train_mean()
if 'main_label_index' in self.meta_data:
main_label_index = self.meta_data['main_label_index']
else:
main_label_index = 0
self.train_label_map, self.train_label_headers = self.create_label_map(self.train_labels, main_label_index)
self.valid_label_map, self.valid_label_headers = self.create_label_map(self.valid_labels, main_label_index)
self.train_iterator = np.zeros(self.get_num_views(), np.int)
self.valid_iterator = np.zeros(self.get_num_views(), np.int)
def _read_data_from_folder(self, train_folder, image_format):
print('_read_data_from_folder not implemented yet')
return None, None
def set_meta_data(self, meta_data):
self.meta_data = meta_data
def set_meta_data_json(self, meta_data):
self.meta_data = meta_data
def update_meta_data(self, input_meta_data):
self.meta_data.update(input_meta_data)
def get_meta_data(self):
return self.meta_data
def set_train_mean(self):
load_to_memory = self.meta_data['load_to_memory']
images = self.train_images;
n = len(images)
counter = 0
if load_to_memory:
h, w = images[0].size
else:
img = Image.open(images[0])
h, w = img.size
mean_im = np.zeros((w, h), np.float)
for im in images:
counter+=1
if counter%100==0:
print('read %d/%d images to calculate mean image' % (counter, n))
if load_to_memory:
imarr = np.array(im, dtype=float)/n
else:
imarr = np.array(Image.open(im), dtype=float)/n
mean_im = mean_im + imarr
return mean_im
def subtract_mean_image(self, imgs):
for i in range(len(imgs)):
imgs[i] = Image.fromarray(np.array(imgs[i], dtype=float) - self.mean_train_image)
return imgs
def read_data_from_list_file(self, list_file):
delimiter = self.meta_data['delimiter']
load_to_memory = self.meta_data['load_to_memory']
multi_cine_per_patient = self.meta_data['multi_cine_per_patient']
file_format = self.meta_data['file_format']
num_frames = self.meta_data['sequence_length']
images = []
labels = []
counter = 0
num_lines = sum(1 for line in open(list_file))
print('opening list file: {0}'.format(list_file))
with open(list_file) as f:
for line in f:
counter+=1
if counter%100==0:
print("read %d/%d lines from file: %s" % (counter, num_lines, list_file))
str = line.rstrip().split(delimiter)
file_dir = str[0]
print(str)
label = list(map(float, str[1::]))
if os.path.isfile(file_dir):
if load_to_memory:
if file_format == 'mat':
cines = self.read_patient_from_mat(file_dir, multi_cine_per_patient=multi_cine_per_patient)
images.extend(cines)
for i in range(len(cines)):
labels.append(label)
# matfile = sio.loadmat(file_dir)
# cine = matfile['Patient']['DicomImage'][0][0] # todo: generalize this
# images.append(cine[:, :, :num_frames])
else:
images.append(Image.open(file_dir))
images[-1].load()
labels.append(label)
else:
images.append(file_dir)
labels.append(label)
elif os.path.isdir(file_dir):
for sub_file in glob.iglob(file_dir+'/*'+file_format):
images.append(Image.open(sub_file) if load_to_memory else sub_file) # .resize((400, 266), Image.BILINEAR)
if file_format == 'image':
if load_to_memory:
images[-1].load()
# img_arrray = cv2.imread(sub_file, 0)
# img_arrray = img_arrray.dtype(np.uint8)
# images.append(cv2.imread(sub_file,0 ) if flag else sub_file)
labels.append(int(label))
# print len(images), ' ', self.memory_usage_ps()-init_memory
else:
print('file ', file_dir, ' does not exist')
assert False
return images, labels
def create_label_map(self, labels, main_label_index=0):
number_of_streams = len(labels)
labels_map = []
label_headers = []
for stream in range(number_of_streams):
sub_labels = labels[stream]
sub_main_labels = []
for t in range(len(sub_labels)):
sub_main_labels.append(sub_labels[t][main_label_index])
sub_label_headers = sorted(set(sub_main_labels))
number_of_classes = len(sub_label_headers)
sub_labels_map = [[] for i in range(number_of_classes)]
for i in range(len(sub_main_labels)):
sub_labels_map[sub_label_headers.index(sub_main_labels[i])].append(i)
labels_map.append(sub_labels_map)
label_headers.append(sub_label_headers)
return labels_map, label_headers
def get_dataset_size(self):
train_size = sum(len(l) for l in self.train_labels)
valid_size = sum(len(l) for l in self.valid_labels)
return train_size, valid_size
def get_testset_size(self):
# test_size = sum(len(l) for l in self.test_labels)
test_size = max(len(l) for l in self.test_labels)
return test_size
def get_testset_size_per_view(self):
return [len(l) for l in self.test_labels]
def get_num_views(self):
if self.train_labels is not None:
return len(self.train_labels)
elif self.test_labels is not None:
return len(self.test_labels)
else:
return 0
def get_batch(self, batch_size, train_valid='train', data_traversing='random',
stream_index=0):
interclass_selection_method = self.meta_data['interclass_batch_selection']
self.current_view = stream_index
main_label_index = self.meta_data['main_label_index']
load_to_memory = self.meta_data['load_to_memory']
label_type = self.meta_data['label_type']
num_frames = self.meta_data['sequence_length']
multi_stream_flag = self.meta_data['multi_stream']
if train_valid == 'train':
images = self.train_images[stream_index]
labels = self.train_labels[stream_index]
label_map = self.train_label_map[stream_index]
iter = self.train_iterator[stream_index]
elif train_valid == 'valid':
images = self.valid_images[stream_index]
labels = self.valid_labels[stream_index]
label_map = self.valid_label_map[stream_index]
iter = self.valid_iterator[stream_index]
elif train_valid == 'test':
images = self.test_images[stream_index]
labels = self.test_labels[stream_index]
label_map = self.test_label_map[stream_index]
iter = self.test_iterator[stream_index]
if data_traversing == 'random':
if interclass_selection_method == 'random':
selected_indices = np.random.permutation(len(images))[:batch_size]
elif interclass_selection_method == 'uniform':
num_classes = len(label_map)
samples_per_class = batch_size//num_classes
selected_indices = []
for i in range(num_classes):
indices = np.random.permutation(len(label_map[i]))[:samples_per_class]
while len(indices) < samples_per_class:
indices = np.append(indices, np.random.permutation(len(label_map[i]))[:samples_per_class-len(indices)])
selected_indices.extend([label_map[i][j] for j in indices])
if batch_size % num_classes != 0:
selected_classes = np.random.permutation(num_classes)[:batch_size%num_classes]
for i in range(len(selected_classes)):
index = np.random.randint(len(label_map[selected_classes[i]]))
selected_indices.extend([label_map[selected_classes[i]][index]])
else:
assert False
elif data_traversing == 'iterative':
selected_indices = np.array(range(iter, iter + batch_size))
selected_indices[selected_indices >= len(images)] = \
selected_indices[selected_indices >= len(images)] - len(images)
iter = selected_indices[batch_size - 1] + 1 # todo: switch to view specific iter
if train_valid == 'train':
self.train_iterator[stream_index] = iter
elif train_valid == 'valid':
self.valid_iterator[stream_index] = iter
elif train_valid == 'test':
self.test_iterator[stream_index] = iter
batch_images = []
batch_labels = []
if load_to_memory:
batch_images = [images[i] for i in selected_indices]
batch_labels = [labels[i][main_label_index] for i in selected_indices]
else:
for i in selected_indices:
if self.meta_data['file_format'] == 'image':
im = cv2.imread(images[i], 0)
batch_images.append(im)
elif self.meta_data['file_format'] == 'mat':
cines = self.read_patient_from_mat(images[i], multi_cine_per_patient=False)
batch_images.extend(cines) # todo: read entire cine
# print('Number of frames = ', str(cines[0].shape[2]))
if label_type == 'single_value':
batch_labels = [labels[i][main_label_index] for i in selected_indices]
elif label_type == 'mask_image':
for i in selected_indices:
batch_labels.append(Image.open(labels[i]))
batch_labels[-1].load()
return batch_images, batch_labels
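    # Hedged worked example (editor's addition): with batch_size=5 and two
    # classes, the 'uniform' policy above draws 5//2 = 2 indices per class and
    # fills the remaining 5 % 2 = 1 slot from one randomly chosen class, so
    # every batch stays (nearly) class-balanced.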
def read_patient_from_mat(self, file, multi_cine_per_patient=False):
cine_selection_if_not_multi = self.meta_data['cine_selection_if_not_multi']
num_frames = self.meta_data['sequence_length']
matfile = sio.loadmat(file)
cine = matfile['Patient']['DicomImage'][0][0] # todo: generalize this
cines = []
if cine.shape[2] == num_frames:
cines.append(np.copy(cine))
elif cine.shape[2] > num_frames:
if multi_cine_per_patient:
# consider every num_frames frames as one patient
i = 0
while i+num_frames < cine.shape[2]:
temp_cine = cine[:, :, i:i+num_frames]
cines.append(np.copy(temp_cine))
i += num_frames
else:
if cine_selection_if_not_multi == 'random': # choose one random sequence of num_frames length
from random import randint
i = randint(0, cine.shape[2] - num_frames)
elif cine_selection_if_not_multi == 'first': # choose the first cine sequence
i = 0
cines.append(np.copy(cine[:, :, i:i + num_frames]))
elif cine.shape[2] < num_frames:
# cycle over
cine = np.concatenate((cine, cine[:, :, :num_frames-cine.shape[2]]), axis=2)
cines.append(np.copy(cine))
gc.collect()
return cines
def translate_random(self, imgs, labels, value=20):
label_type = self.meta_data['label_type']
method = self.meta_data['random_translate_method']
origh, origw, __ = imgs[0].shape
for i in range(len(imgs)):
if method == 'normal':
transX = np.random.normal(0, origw / value)
transY = np.random.normal(0, origh / value)
if np.abs(transX) > 2*origw/value:
transX = np.sign(transX)*2*origw / value
if np.abs(transY) > 2 * origh / value:
transY = np.sign(transY) * 2 * origh / value
elif method == 'uniform':
transX = np.random.uniform(-(origw / value),
(origw / value))
transY = np.random.uniform(- (origh / value),
(origh / value))
M = np.float32([[1, 0, transX], [0, 1, transY]])
##Syntax: run dssp_output_analysis.py length_of_protein dssp_output*.txt
import sys
from numpy import genfromtxt
import numpy as np
import os
from shutil import copy
phi_psi_outfile = 'output_phi_phi.txt'
tco_outfile = 'output_tco.txt'
racc_outfile = 'output_racc.txt'
hbond_outfile = 'output_hbond.txt'
hbond_total_outfile = 'output_hbondtotal.txt'
acc_total_outfile = 'output_acc_total.txt'
phi_psi_2his_outfile = 'output_phi_psi_2his.txt'
phi_psi_2his_no_GLY_outfile = 'output_phi_psi_no_GLY_2his.txt'
import_for_length = genfromtxt(sys.argv[1], delimiter='\t', dtype=float)
length = len(import_for_length)
#Creating Keys for computing relative solvent accessible surface area
#Values obtained from Wilke: Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635
aa_acc_max = { \
'A': 129.0, 'R': 274.0, 'N': 195.0, 'D': 193.0,\
'C': 167.0, 'Q': 225.0, 'E': 223.0, 'G': 104.0,\
'H': 224.0, 'I': 197.0, 'L': 201.0, 'K': 236.0,\
'M': 224.0, 'F': 240.0, 'P': 159.0, 'S': 155.0,\
'T': 172.0, 'W': 285.0, 'Y': 263.0, 'V': 174.0}
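# Hedged worked example (editor's addition): relative accessibility divides
# DSSP's absolute ACC by the residue maximum above, e.g. an alanine with
# ACC = 64.5 A^2 gives 64.5 / aa_acc_max['A'] = 64.5 / 129.0 = 0.5 (50% exposed).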
#Creating Key for linking each amino acid to a Phi-Psi matrix
ALA = []
ARG = []
ASN = []
ASP = []
CYS = []
GLN = []
GLU = []
GLY = []
HIS = []
ILE = []
LEU = []
LYS = []
MET = []
PHE = []
PRO = []
SER = []
THR = []
TRP = []
TYR = []
VAL = []
aa_phi_mat = { \
'A': ALA, 'R': ARG, 'N': ASN, 'D': ASP,\
'C': CYS, 'Q': GLN, 'E': GLU, 'G': GLY,\
'H': HIS, 'I': ILE, 'L': LEU, 'K': LYS,\
'M': MET, 'F': PHE, 'P': PRO, 'S': SER,\
'T': THR, 'W': TRP, 'Y': TYR, 'V': VAL}
ALA_2 = []
ARG_2 = []
ASN_2 = []
ASP_2 = []
CYS_2 = []
GLN_2 = []
GLU_2 = []
GLY_2 = []
HIS_2 = []
ILE_2 = []
LEU_2 = []
LYS_2 = []
MET_2 = []
PHE_2 = []
PRO_2 = []
SER_2 = []
THR_2 = []
TRP_2 = []
TYR_2 = []
VAL_2 = []
Full_phi_psi_matrix = [ALA, ALA_2, ARG, ARG_2, ASN, ASN_2, ASP, ASP_2, CYS, CYS_2, GLN, GLN_2, GLU, GLU_2, GLY, GLY_2, HIS, HIS_2, ILE, ILE_2, LEU, LEU_2, LYS, LYS_2, MET, MET_2, PHE, PHE_2, PRO, PRO_2, SER, SER_2, THR, THR_2, TRP, TRP_2, TYR, TYR_2, VAL, VAL_2]
aa_psi_mat = { \
'A': ALA_2, 'R': ARG_2, 'N': ASN_2, 'D': ASP_2,\
'C': CYS_2, 'Q': GLN_2, 'E': GLU_2, 'G': GLY_2,\
'H': HIS_2, 'I': ILE_2, 'L': LEU_2, 'K': LYS_2,\
'M': MET_2, 'F': PHE_2, 'P': PRO_2, 'S': SER_2,\
'T': THR_2, 'W': TRP_2, 'Y': TYR_2, 'V': VAL_2}
#Building Matricies for Holding/Analyzing Data
racc_matrix = np.empty([len(sys.argv), int(length)])
tco_matrix = np.empty([len(sys.argv), int(length)])
full_hbonding_matrix = np.empty([len(sys.argv), 14])
total_acc_matrix = []
total_hbond_matrix = []
percent_data_array = np.zeros([length, 3]) # Helix, Sheet, Loop
for fnu,fna in enumerate(sys.argv[2:]):
lines = open(fna).readlines()
total_acc_matrix.append(float(lines[7][1:8]))
total_hbond_matrix.append(float(lines[8][2:6]))
for idx,item in enumerate(lines[8:22]):
full_hbonding_matrix[fnu][idx] = int(item[2:6])
for idx,item in enumerate(lines[28:]):
res_num = int(item[6:10])
res_aa = item[13]
if res_aa == 'X':
res_aa = 'Y'
max_for_rel = aa_acc_max[res_aa]
res_ss = item[16]
res_acc = float(int(item[35:38]))
res_rel_acc = res_acc/max_for_rel
racc_matrix[fnu][idx] = res_rel_acc
res_tco = float(item[85:92])
#if res_tco > 0.75:
# res_ss = 'H'
#if res_tco < -0.75:
# res_ss = 'E'
if res_ss == 'E' or res_ss == 'B':
percent_data_array[idx][1] += 1
elif res_ss == 'H' or res_ss == 'G' or res_ss == 'I':
percent_data_array[idx][0] += 1
else:
percent_data_array[idx][2] += 1
tco_matrix[fnu][idx] = res_tco
res_phi = float(item[103:109])
aa_phi_mat[res_aa].append(res_phi)
res_psi = float(item[109:115])
aa_psi_mat[res_aa].append(res_psi)
#Full_phi_psi_matrix_map = map(None, *Full_phi_psi_matrix)
#pp_out = open(phi_psi_outfile, 'w')
#for i in range(len(Full_phi_psi_matrix_map)):
# for j in range(len(Full_phi_psi_matrix_map[0])):
# pp_out.write("%s\t" % Full_phi_psi_matrix_map[i][j])
# pp_out.write("\n")
#pp_out.close()
phi_lists = [ALA, ARG, ASN, ASP, CYS, GLN, GLU, GLY, HIS, ILE,
             LEU, LYS, MET, PHE, PRO, SER, THR, TRP, TYR, VAL]
psi_lists = [ALA_2, ARG_2, ASN_2, ASP_2, CYS_2, GLN_2, GLU_2, GLY_2, HIS_2, ILE_2,
             LEU_2, LYS_2, MET_2, PHE_2, PRO_2, SER_2, THR_2, TRP_2, TYR_2, VAL_2]
full_phi_list = np.concatenate([np.asarray(l, dtype=float) for l in phi_lists])
full_psi_list = np.concatenate([np.asarray(l, dtype=float) for l in psi_lists])
# same lists excluding glycine, whose lack of a side chain distorts the map
full_phi_list_no_GLY = np.concatenate([np.asarray(l, dtype=float) for l in phi_lists if l is not GLY])
full_psi_list_no_GLY = np.concatenate([np.asarray(l, dtype=float) for l in psi_lists if l is not GLY_2])
phi_psi_2his_1, phi_psi_2his_2, phi_psi_2his_3 = np.histogram2d(full_phi_list, full_psi_list, bins=121, range=[[-180,180], [-180,180]])
phi_psi_2his_no_GLY_1, phi_psi_2his_no_GLY_2, phi_psi_2his_no_GLY_3 = np.histogram2d(full_phi_list_no_GLY, full_psi_list_no_GLY, bins=121, range=[[-180,0], [-180,180]])
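# Hedged plotting sketch (editor's addition): the arrays above are Ramachandran
# densities (counts, phi bin edges, psi bin edges); one assumed way to render them:
def _plot_ramachandran(counts, phi_edges, psi_edges, fname='rama_2his.png'):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.pcolormesh(phi_edges, psi_edges, counts.T)  # transpose: rows index psi
    ax.set_xlabel('phi (degrees)')
    ax.set_ylabel('psi (degrees)')
    fig.savefig(fname)
# _plot_ramachandran(phi_psi_2his_1, phi_psi_2his_2, phi_psi_2his_3)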
tam_out = open(acc_total_outfile, 'w')
for i in range(len(total_acc_matrix)):
tam_out.write("%s\n" % total_acc_matrix[i])
tam_out.close()
thm_out = open(hbond_total_outfile, 'w')
for i in range(len(total_hbond_matrix)):
thm_out.write("%s\n" % total_hbond_matrix[i])
thm_out.close()
#percent_helix = percent_helix/len(sys.argv[2:])
#percent_sheet = percent_sheet/len(sys.argv[2:])
#percent_loop = percent_loop/len(sys.argv[2:])
#percent_array = [('% Helix --> ', percent_helix), ('% Sheet --> ', percent_sheet), ('% Loop --> ', percent_loop)]
percent_data_array = percent_data_array/len(sys.argv[2:])
np.savetxt('Percent_HEL.txt', percent_data_array, fmt='%s', delimiter=' ', newline='\n')
avg_hbonding_matrix = np.average(full_hbonding_matrix, axis=0)
avg_tco_matrix = np.average(tco_matrix, axis=0)
avg_racc_matrix = np.average(racc_matrix, axis=0)
std_hbonding_matrix = np.std(full_hbonding_matrix, axis=0)
std_tco_matrix = np.std(tco_matrix, axis=0)
std_racc_matrix = np.std(racc_matrix, axis=0)
comb_tco_matrix = np.column_stack((avg_tco_matrix, std_tco_matrix))
import attr
from firedrake import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from slepc4py import SLEPc
import pandas as pd
from tqdm import tqdm
import os
matplotlib.use('Agg')
@attr.s
class ConditionNumberResult(object):
form = attr.ib()
assembled_form = attr.ib()
condition_number = attr.ib()
sparse_operator = attr.ib()
number_of_dofs = attr.ib()
nnz = attr.ib()
is_operator_symmetric = attr.ib()
bcs = attr.ib(default=list())
def plot_matrix(assembled_form, **kwargs):
"""Provides a plot of a matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
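def _plot_matrix_demo():
    # Hedged usage sketch (editor's addition): assemble a small CG1 Poisson
    # operator and plot its pattern; mesh size and colormap are arbitrary choices.
    mesh = UnitSquareMesh(4, 4)
    V = FunctionSpace(mesh, "CG", 1)
    u, v = TrialFunction(V), TestFunction(V)
    a = inner(grad(u), grad(v)) * dx
    plot_matrix(assemble(a, mat_type="aij"), cmap="viridis")
    plt.savefig("poisson_pattern.png")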
def plot_matrix_mixed(assembled_form, **kwargs):
"""Provides a plot of a mixed matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
f1_size = assembled_form.M[1, 1].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k")
return plot
def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs):
"""Provides a plot of a condensed hybrid-mixed matrix for single scale problems."""
fig, ax = plt.subplots(1, 1)
_A = Tensor(a_form)
A = _A.blocks
idx = trace_index
S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
#!/usr/bin/env python
import numpy as np
from astropy.io import fits
from astropy.table import Table
import smart
import sys, os, os.path, time
import copy
def GetModel(wavelow, wavehigh, method='pwv', wave=False, **kwargs):
"""
Get a telluric spectrum using the atmosphere models in Moehler et al. (2014).
Parameters
----------
wavelow : int
lower bound of the wavelength range
wavehigh : int
upper bound of the wavelength range
Optional Parameters
-------------------
airmass : str
airmass of the telluric model, either 1.0 or 1.5
alpha : float
the power alpha parameter of the telluric model
method : str
'pwv' or 'season'
The defulat method is 'pwv', with airmasses 1.0, 1.5, 2.0, 2.5, 3.0,
and PWV (in mm) of 0.5, 1.0, 1.5, 2.5, 3.5, 5.0, 7.5, 10.0, and 20.0
Another method is 'season', with airmasses 1.0, 1.5, 2.0, 2.5, 3.0,
and bi-monthly average PWV values (1 = December/January ...6 = October/November)
Returns
-------
telluric: model object
a telluric model with wavelength and flux
Examples
--------
>>> import smart
>>> telluric = smart.getTelluric(wavelow=22900, wavehigh=23250)
"""
FULL_PATH = os.path.realpath(__file__)
BASE, NAME = os.path.split(FULL_PATH)
airmass = kwargs.get('airmass', 1.5)
alpha = kwargs.get('alpha', 1.0)
# keyword argument for pwv
pwv = kwargs.get('pwv', 0.5)
# keyword argument for season
season = kwargs.get('season', 0)
airmass_str = str(int(10*airmass))
pwv_str = str(int(10*pwv)).zfill(3)
if method == 'pwv':
tfile = BASE + '/../libraries/telluric/pwv_R300k_airmass{}/LBL_A{}_s0_w{}_R0300000_T.fits'.format(airmass,
airmass_str, pwv_str)
#elif method == 'season':
# tfile = '/../libraries/telluric/season_R300k_airmass{}/LBL_A{}_s{}_R0300000_T.fits'.format(airmass,
# airmass_str, season_str)
tellurics = fits.open(tfile)
telluric = smart.Model()
telluric.wave = np.array(tellurics[1].data['lam'] * 10000) # convert to Angstrom
telluric.flux = np.array(tellurics[1].data['trans'])**(alpha)
# select the wavelength range
criteria = (telluric.wave > wavelow) & (telluric.wave < wavehigh)
telluric.wave = telluric.wave[criteria]
telluric.flux = telluric.flux[criteria]
if wave:
return telluric.wave
else:
return telluric.flux
def InterpTelluricModel(wavelow, wavehigh, airmass, pwv):
FULL_PATH = os.path.realpath(__file__)
BASE, NAME = os.path.split(FULL_PATH)
Gridfile = BASE + '/../libraries/telluric/pwv_R300k_gridparams.csv'
T1 = Table.read(Gridfile)
# Check if the model already exists (grid point)
if (airmass, pwv) in zip(T1['airmass'], T1['pwv']):
flux2 = GetModel(wavelow=wavelow, wavehigh=wavehigh, airmass=T1['airmass'][np.where( (T1['airmass'] == airmass) & (T1['pwv'] == pwv))][0], pwv=T1['pwv'][np.where((T1['airmass'] == airmass) & (T1['pwv'] == pwv))][0])
waves2 = GetModel(wavelow=wavelow, wavehigh=wavehigh, airmass=T1['airmass'][np.where( (T1['airmass'] == airmass) & (T1['pwv'] == pwv))][0], pwv=T1['pwv'][np.where((T1['airmass'] == airmass) & (T1['pwv'] == pwv))][0], wave=True)
return waves2, flux2
# Get the nearest models to the gridpoint (airmass)
x1 = np.max(T1['airmass'][np.where(T1['airmass'] <= airmass)])
x2 = np.min(T1['airmass'][np.where(T1['airmass'] >= airmass)])
y1 = np.max(list(set(T1['pwv'][np.where( ( (T1['airmass'] == x1) & (T1['pwv'] <= pwv) ) )]) & set(T1['pwv'][np.where( ( (T1['airmass'] == x2) & (T1['pwv'] <= pwv) ) )])))
y2 = np.min(list(set(T1['pwv'][np.where( ( (T1['airmass'] == x1) & (T1['pwv'] >= pwv) ) )]) & set(T1['pwv'][np.where( ( (T1['airmass'] == x2) & (T1['pwv'] >= pwv) ) )])))
# Check if the gridpoint exists within the model ranges
for x in [x1, x2]:
for y in [y1, y2]:
if (x, y) not in zip(T1['airmass'], T1['pwv']):
print('No Model', x, y)
return 1
# Get the four points
Points = [
[T1['airmass'][np.where( (T1['airmass'] == x1) & (T1['pwv'] == y1))], T1['pwv'][np.where((T1['airmass'] == x1) & (T1['pwv'] == y1))], np.log10(GetModel(wavelow=wavelow, wavehigh=wavehigh, airmass=T1['airmass'][np.where( (T1['airmass'] == x1) & (T1['pwv'] == y1))][0], pwv=T1['pwv'][np.where((T1['airmass'] == x1) & (T1['pwv'] == y1))][0]))],
[T1['airmass'][np.where( (T1['airmass'] == x1) & (T1['pwv'] == y2))], T1['pwv'][np.where((T1['airmass'] == x1) & (T1['pwv'] == y2))], np.log10(GetModel(wavelow=wavelow, wavehigh=wavehigh, airmass=T1['airmass'][np.where( (T1['airmass'] == x1) & (T1['pwv'] == y2))][0], pwv=T1['pwv'][np.where((T1['airmass'] == x1) & (T1['pwv'] == y2))][0]))],
[T1['airmass'][np.where( (T1['airmass'] == x2) & (T1['pwv'] == y1))], T1['pwv'][np.where((T1['airmass'] == x2) & (T1['pwv'] == y1))], np.log10(GetModel(wavelow=wavelow, wavehigh=wavehigh, airmass=T1['airmass'][np.where( (T1['airmass'] == x2) & (T1['pwv'] == y1))][0], pwv=T1['pwv'][np.where((T1['airmass'] == x2) & (T1['pwv'] == y1))][0]))],
[T1['airmass'][np.where( (T1['airmass'] == x2) & (T1['pwv'] == y2))], T1['pwv'][np.where((T1['airmass'] == x2) & (T1['pwv'] == y2))], np.log10(GetModel(wavelow=wavelow, wavehigh=wavehigh, airmass=T1['airmass'][np.where( (T1['airmass'] == x2) & (T1['pwv'] == y2))][0], pwv=T1['pwv'][np.where((T1['airmass'] == x2) & (T1['pwv'] == y2))][0]))],
]
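    # Hedged continuation sketch (editor's addition): the rest of this routine
    # is cut off in this dump. A standard completion bilinearly interpolates
    # the four log-flux grids in (airmass, pwv) and undoes the log10, roughly:
    #     w11 = (x2 - airmass) * (y2 - pwv); w12 = (x2 - airmass) * (pwv - y1)
    #     w21 = (airmass - x1) * (y2 - pwv); w22 = (airmass - x1) * (pwv - y1)
    #     logflux = (w11*Points[0][2] + w12*Points[1][2]
    #                + w21*Points[2][2] + w22*Points[3][2]) / ((x2-x1)*(y2-y1))
    #     return GetModel(wavelow, wavehigh, airmass=x1, pwv=y1, wave=True), 10**logflux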
import cv2
from scipy.ndimage.filters import maximum_filter1d
from skimage import draw
import numpy as np
import mxnet as mx
import mxnet.ndarray as nd
from mxnet import gpu, cpu
import time
from numba import njit, float32, int32
import os, pickle
def use_gpu(gpu_number=0):
try:
_ = mx.nd.array([1, 2, 3], ctx=mx.gpu(gpu_number))
return True
except mx.MXNetError:
return False
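def pick_device(gpu_number=0):
    # small usage sketch (not in the original): choose the MXNet context once
    # and pass it to the run_* helpers below via their device parameter
    return mx.gpu(gpu_number) if use_gpu(gpu_number) else mx.cpu()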
def taper_mask(bsize=224, sig=7.5):
xm = np.arange(bsize)
xm = np.abs(xm - xm.mean())
mask = 1/(1 + np.exp((xm - (bsize/2-20)) / sig))
mask = mask * mask[:, np.newaxis]
return mask
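def blend_tiles(tiles, ysub, xsub, Ly, Lx):
    # hedged sketch (not in the original) of how taper_mask is meant to be used:
    # a weighted average of overlapping tiles, normalized by the accumulated mask
    # weight so tile seams fade smoothly (run_tile below inlines an equivalent
    # mask with sig=10)
    bsize = tiles[0].shape[-1]
    mask = taper_mask(bsize=bsize)
    out = np.zeros((Ly, Lx))
    navg = np.zeros((Ly, Lx))
    for t, ys, xs in zip(tiles, ysub, xsub):
        out[ys[0]:ys[1], xs[0]:xs[1]] += t * mask
        navg[ys[0]:ys[1], xs[0]:xs[1]] += mask
    return out / navg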
def diameters(masks):
unique, counts = np.unique(np.int32(masks), return_counts=True)
counts = counts[1:]
md = np.median(counts**0.5)
if np.isnan(md):
md = 0
return md, counts**0.5
def radius_distribution(masks, bins):
unique, counts = np.unique(masks, return_counts=True)
counts = counts[unique!=0]
nb, _ = np.histogram((counts**0.5)*0.5, bins)
nb = nb.astype(np.float32)
if nb.sum() > 0:
nb = nb / nb.sum()
md = np.median(counts**0.5)*0.5
if np.isnan(md):
md = 0
return nb, md, (counts**0.5)/2
def X2zoom(img, X2=1):
ny,nx = img.shape[:2]
img = cv2.resize(img, (int(nx * (2**X2)), int(ny * (2**X2))))
return img
def image_resizer(img, resize=512, to_uint8=False):
ny,nx = img.shape[:2]
if to_uint8:
if img.max()<=255 and img.min()>=0 and img.max()>1:
img = img.astype(np.uint8)
else:
img = img.astype(np.float32)
img -= img.min()
img /= img.max()
img *= 255
img = img.astype(np.uint8)
if np.array(img.shape).max() > resize:
if ny>nx:
nx = int(nx/ny * resize)
ny = resize
else:
ny = int(ny/nx * resize)
nx = resize
shape = (nx,ny)
img = cv2.resize(img, shape)
img = img.astype(np.uint8)
return img
def normalize99(img):
X = img.copy()
X = (X - np.percentile(X, 1)) / (np.percentile(X, 99) - np.percentile(X, 1))
return X
def gabors(npix):
''' npix - size of gabor patch (should be ODD)'''
y,x=np.meshgrid(np.arange(npix),np.arange(npix))
sigma = 1
f = 0.1
theta = np.linspace(0, 2*np.pi, 33)[:-1]
theta = theta[:,np.newaxis,np.newaxis]
ycent,xcent = y.mean(), x.mean()
yc = y - ycent
xc = x - xcent
ph = np.pi/2
xc = xc[np.newaxis,:,:]
yc = yc[np.newaxis,:,:]
G = np.exp(-(xc**2 + yc**2) / (2*sigma**2)) * np.cos(ph + f * (yc*np.cos(theta) + xc*np.sin(theta)))
return G
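def gabor_responses(img, npix=21):
    # usage sketch (not in the original): correlate an image with every
    # orientation in the Gabor bank; npix should be odd per the docstring
    G = gabors(npix)
    img = np.float32(img)
    return np.stack([cv2.filter2D(img, -1, np.float32(g)) for g in G])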
def format_data(X,Y):
nimg = len(Y)
vf = []
t0 = time.time()
    for j in range(nimg):
        img = np.float32(X[j])
        #img = (img - img.mean())/np.std(img)
        labels = np.int32(Y[j])
        Ly, Lx = img.shape
img = (img - np.percentile(img, 1)) / (np.percentile(img, 99) - np.percentile(img, 1))
V = np.zeros((4,Ly,Lx), 'float32')
V[0] = img
#V[1], V[2], maskE = compute_flow(Y[j])
#V[3] = np.float32(labels>.5) + np.float32(maskE>.5)
V[1], V[2] = new_flow(Y[j])
V[3] = np.float32(labels>.5)
vf.append(V)
if j%20==1:
print(j, time.time()-t0)
return vf
def extendROI(ypix, xpix, Ly, Lx,niter=1):
for k in range(niter):
yx = ((ypix, ypix, ypix, ypix-1, ypix+1), (xpix, xpix+1,xpix-1,xpix,xpix))
yx = np.array(yx)
yx = yx.reshape((2,-1))
yu = np.unique(yx, axis=1)
ix = np.all((yu[0]>=0, yu[0]<Ly, yu[1]>=0 , yu[1]<Lx), axis = 0)
ypix,xpix = yu[:, ix]
return ypix,xpix
def get_mask(y, rpad=20, nmax=20):
xp = y[1,:,:].flatten().astype('int32')
yp = y[0,:,:].flatten().astype('int32')
_, Ly, Lx = y.shape
xm, ym = np.meshgrid(np.arange(Lx), np.arange(Ly))
xedges = np.arange(-.5-rpad, xm.shape[1]+.5+rpad, 1)
yedges = np.arange(-.5-rpad, xm.shape[0]+.5+rpad, 1)
#xp = (xm-dx).flatten().astype('int32')
#yp = (ym-dy).flatten().astype('int32')
h,_,_ = np.histogram2d(xp, yp, bins=[xedges, yedges])
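    # h counts how many pixels' predicted flow endpoints land in each histogram
    # cell; local maxima of this density (found below) are candidate cell centers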
hmax = maximum_filter1d(h, 5, axis=0)
hmax = maximum_filter1d(hmax, 5, axis=1)
yo, xo = np.nonzero(np.logical_and(h-hmax>-1e-6, h>10))
Nmax = h[yo, xo]
isort = np.argsort(Nmax)[::-1]
yo, xo = yo[isort], xo[isort]
pix = []
for t in range(len(yo)):
pix.append([yo[t],xo[t]])
for iter in range(5):
for k in range(len(pix)):
ye, xe = extendROI(pix[k][0], pix[k][1], h.shape[0], h.shape[1], 1)
igood = h[ye, xe]>2
ye, xe = ye[igood], xe[igood]
pix[k][0] = ye
pix[k][1] = xe
ibad = np.ones(len(pix), 'bool')
for k in range(len(pix)):
#print(pix[k][0].size)
if pix[k][0].size<nmax:
ibad[k] = 0
#pix = [pix[k] for k in ibad.nonzero()[0]]
M = np.zeros(h.shape)
for k in range(len(pix)):
M[pix[k][0], pix[k][1]] = 1+k
M0 = M[rpad + xp, rpad + yp]
M0 = np.reshape(M0, xm.shape)
return M0, pix
def pad_image_CS0(img0, div=16):
Lpad = int(div * np.ceil(img0.shape[-2]/div) - img0.shape[-2])
xpad1 = Lpad//2
xpad2 = Lpad - xpad1
Lpad = int(div * np.ceil(img0.shape[-1]/div) - img0.shape[-1])
ypad1 = Lpad//2
ypad2 = Lpad - ypad1
if img0.ndim>3:
pads = np.array([[0,0], [0,0], [xpad1,xpad2], [ypad1, ypad2]])
else:
pads = np.array([[0,0], [xpad1,xpad2], [ypad1, ypad2]])
I = np.pad(img0,pads, mode='constant')
return I, pads
def pad_image_CS(img0, div=16, extra = 1):
Lpad = int(div * np.ceil(img0.shape[-2]/div) - img0.shape[-2])
xpad1 = extra*div//2 + Lpad//2
xpad2 = extra*div//2 + Lpad - Lpad//2
Lpad = int(div * np.ceil(img0.shape[-1]/div) - img0.shape[-1])
ypad1 = extra*div//2 + Lpad//2
ypad2 = extra*div//2+Lpad - Lpad//2
if img0.ndim>3:
pads = np.array([[0,0], [0,0], [xpad1,xpad2], [ypad1, ypad2]])
else:
pads = np.array([[0,0], [xpad1,xpad2], [ypad1, ypad2]])
I = np.pad(img0,pads, mode='constant')
return I,pads
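# run_resize_tile and run_resize below call pad_image, which is not defined in
# this file. A minimal sketch under the assumption that it wraps pad_image_CS and
# also returns the row/column indices of the original image inside the padded
# array, so callers can crop the network output back via np.ix_.
def pad_image(img0, div=16, extra=1):
    I, pads = pad_image_CS(img0, div=div, extra=extra)
    ysub = np.arange(pads[-2][0], pads[-2][0] + img0.shape[-2])
    xsub = np.arange(pads[-1][0], pads[-1][0] + img0.shape[-1])
    return I, ysub, xsub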
def run_tile(net, imgi, bsize=224, device=mx.cpu()):
nchan, Ly0, Lx0 = imgi.shape[-3:]
if Ly0<bsize:
imgi = np.concatenate((imgi, np.zeros((nchan,bsize-Ly0, Lx0))), axis=1)
Ly0 = bsize
if Lx0<bsize:
imgi = np.concatenate((imgi, np.zeros((nchan,Ly0, bsize-Lx0))), axis=2)
Ly, Lx = imgi.shape[-2:]
ystart = np.arange(0, Ly-bsize//2, int(bsize//2))
xstart = np.arange(0, Lx-bsize//2, int(bsize//2))
ystart = np.maximum(0, np.minimum(Ly-bsize, ystart))
xstart = np.maximum(0, np.minimum(Lx-bsize, xstart))
ysub = []
xsub = []
IMG = np.zeros((len(ystart), len(xstart), nchan, bsize,bsize))
k = 0
for j in range(len(ystart)):
for i in range(len(xstart)):
ysub.append([ystart[j], ystart[j]+bsize])
xsub.append([xstart[i], xstart[i]+bsize])
IMG[j,i,:,:,:] = imgi[:, ysub[-1][0]:ysub[-1][1], xsub[-1][0]:xsub[-1][1]]
IMG = np.reshape(IMG, (-1, nchan, bsize,bsize))
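    # test-time augmentation: flip every 2nd/3rd/4th tile vertically,
    # horizontally, or both before inference; the flips (and flow signs)
    # are undone after the network pass below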
if True:
for k in range(IMG.shape[0]):
if k%4==1:
IMG[k, :,:, :] = IMG[k, :,::-1, :]
if k%4==2:
IMG[k, :,:, :] = IMG[k, :,:, ::-1]
if k%4==3:
IMG[k, :,:, :] = IMG[k,:, ::-1, ::-1]
X = nd.array(IMG, ctx=device)
nbatch = 8
niter = int(np.ceil(IMG.shape[0]/nbatch))
nout = 3
y = np.zeros((IMG.shape[0], nout, bsize,bsize))
for k in range(niter):
irange = np.arange(nbatch*k, min(IMG.shape[0], nbatch*k+nbatch))
y0, style = net(X[irange])
y[irange] = y0[:,:,:,:].asnumpy()
if k==0:
styles = np.zeros(style.shape[1], np.float32)
styles += style.asnumpy().sum(axis=0)
styles /= IMG.shape[0]
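    # undo the tile flips and negate the matching flow component(s) so the
    # predicted y/x flows point the right way in the original orientation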
if True:
for k in range(y.shape[0]):
if k%4==1:
y[k, :,:, :] = y[k, :,::-1, :]
y[k,0,:,:] *= -1
if k%4==2:
y[k, :,:, :] = y[k, :,:, ::-1]
y[k,1,:,:] *= -1
if k%4==3:
y[k, :,:, :] = y[k, :,::-1, ::-1]
y[k,0,:,:] *= -1
y[k,1,:,:] *= -1
Navg = np.zeros((Ly,Lx))
ytiled = np.zeros((nout, Ly, Lx), 'float32')
xm = np.arange(bsize)
xm = np.abs(xm - xm.mean())
sig = 10.
mask = 1/(1 + np.exp((xm - (bsize/2-20.)) / sig))
mask = mask * mask[:, np.newaxis]
for j in range(len(ysub)):
ytiled[:, ysub[j][0]:ysub[j][1], xsub[j][0]:xsub[j][1]] += y[j] * mask
Navg[ysub[j][0]:ysub[j][1], xsub[j][0]:xsub[j][1]] += mask
    ytiled /= Navg
ytiled = ytiled[:,:Ly0, :Lx0]
return ytiled, styles
def run_resize_tile(net, img, rsz, bsize=224, device=mx.cpu()):
Ly = int(img.shape[0] * rsz)
Lx = int(img.shape[1] * rsz)
IMG = cv2.resize(img, (Lx, Ly))
if IMG.ndim<3:
IMG = np.expand_dims(IMG, axis=-1)
imgi = np.transpose(IMG, (2,0,1))
imgi, ysub, xsub = pad_image(imgi)
y,style = run_tile(net, imgi, bsize, device=device)
yup = np.transpose(y, (1,2,0))
yup = yup[np.ix_(ysub, xsub, np.arange(yup.shape[-1]))]
yup = cv2.resize(yup, (img.shape[1], img.shape[0]))
return yup, style
def run_resize(net, img, rsz, device=mx.cpu()):
Ly = int(img.shape[0] * rsz)
Lx = int(img.shape[1] * rsz)
IMG = cv2.resize(img, (Lx, Ly))
if IMG.ndim<3:
IMG = np.expand_dims(IMG, axis=-1)
imgi, ysub, xsub = pad_image(np.transpose(IMG, (2,0,1)))
imgi = np.expand_dims(imgi, 0)
X = nd.array(imgi, ctx=device)
y, style = net(X)
y = y.asnumpy()
style = style.asnumpy()
    yup = np.transpose(y[0], (1,2,0))
    # crop off the padding and resize back to the input size (mirrors
    # run_resize_tile; the trailing lines were truncated in the original)
    yup = yup[np.ix_(ysub, xsub, np.arange(yup.shape[-1]))]
    yup = cv2.resize(yup, (img.shape[1], img.shape[0]))
    return yup, style
"""Module containing the integration tests for the `tVGP` class."""
import gpflow
import numpy as np
import pytest
import tensorflow as tf
from gpflow.likelihoods import Bernoulli, Gaussian
from gpflow.optimizers import NaturalGradient
from src.models.tvgp import t_VGP
LENGTH_SCALE = 2.0
VARIANCE = 2.25
NUM_DATA = 8
NOISE_VARIANCE = 0.3
rng = np.random.RandomState(123)
tf.random.set_seed(42)
@pytest.fixture(name="tvgp_gpr_optim_setup")
def _tvgp_gpr_optim_setup():
"""Creates a GPR model and a matched tVGP model (via natural gradient descent - single step)"""
time_points, observations, kernel, noise_variance = _setup()
input_data = (tf.constant(time_points), tf.constant(observations))
gpr = gpflow.models.GPR(
data=input_data,
kernel=kernel,
mean_function=None,
noise_variance=noise_variance,
)
likelihood = Gaussian(variance=noise_variance)
tvgp = t_VGP(
data=(time_points, observations),
kernel=kernel,
likelihood=likelihood,
)
tvgp.update_variational_parameters(beta=1.0)
return tvgp, gpr
@pytest.fixture(name="tvgp_qvgp_optim_setup")
def _tvgp_qvgp_optim_setup():
"""Creates a VGP model and a matched tVGP model"""
time_points, observations, kernel, noise_variance = _setup()
input_data = (
tf.constant(time_points),
tf.constant((observations > 0.5).astype(float)),
)
likelihood = Bernoulli()
tvgp = t_VGP(
data=input_data,
kernel=kernel,
likelihood=likelihood,
)
qvgp = gpflow.models.VGP(
data=input_data,
kernel=kernel,
mean_function=None,
likelihood=likelihood,
)
natgrad_rep = 20
# one step of natgrads for tVGP
[tvgp.update_variational_parameters(beta=1.0) for _ in range(natgrad_rep)]
# one step of natgrads for VGP
natgrad_opt = NaturalGradient(gamma=1.0)
variational_params = [(qvgp.q_mu, qvgp.q_sqrt)]
training_loss = qvgp.training_loss_closure()
[natgrad_opt.minimize(training_loss, var_list=variational_params) for _ in range(natgrad_rep)]
return tvgp, qvgp
def _setup():
"""Data, kernel and likelihood setup"""
def func(x):
return np.sin(x * 3 * 3.14) + 0.3 * np.cos(x * 9 * 3.14) + 0.5 * np.sin(x * 7 * 3.14)
input_points = rng.rand(NUM_DATA, 1) * 2 - 1 # X values
observations = func(input_points) + 0.2 * rng.randn(NUM_DATA, 1)
kernel = gpflow.kernels.SquaredExponential(lengthscales=LENGTH_SCALE, variance=VARIANCE)
variance = tf.constant(NOISE_VARIANCE, dtype=tf.float64)
return input_points, observations, kernel, variance
def test_tvgp_elbo_optimal(tvgp_gpr_optim_setup):
"""Test that the value of the ELBO at the optimum is the same as the GPR Log Likelihood."""
tvgp, gpr = tvgp_gpr_optim_setup
np.testing.assert_almost_equal(tvgp.elbo(), gpr.log_marginal_likelihood(), decimal=4)
def test_tvgp_unchanged_at_optimum(tvgp_gpr_optim_setup):
"""Test that the update does not change sites at the optimum"""
tvgp, _ = tvgp_gpr_optim_setup
# ELBO at optimum
optim_elbo = tvgp.elbo()
# site update step
tvgp.update_variational_parameters(beta=1.0)
# ELBO after step
new_elbo = tvgp.elbo()
np.testing.assert_almost_equal(optim_elbo, new_elbo, decimal=4)
def test_optimal_sites(tvgp_gpr_optim_setup):
"""Test that the optimal value of the exact sites match the true sites"""
tvgp, gpr = tvgp_gpr_optim_setup
tvgp_nat1 = tvgp.lambda_1.numpy()
tvgp_nat2 = tvgp.lambda_2.numpy()
# manually compute the optimal sites
s2 = gpr.likelihood.variance.numpy()
_, Y = gpr.data
gpr_nat1 = Y / s2
gpr_nat2 = 1.0 / s2 * np.ones_like(tvgp_nat2)
    np.testing.assert_allclose(tvgp_nat1, gpr_nat1)
    np.testing.assert_allclose(tvgp_nat2, gpr_nat2)
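def test_tvgp_vgp_elbo_match(tvgp_qvgp_optim_setup):
    """Sketch, not in the original file: the tvgp_qvgp_optim_setup fixture is
    never exercised above. Assuming both models have converged under matched
    natural-gradient updates, their ELBOs should roughly agree."""
    tvgp, qvgp = tvgp_qvgp_optim_setup
    np.testing.assert_almost_equal(tvgp.elbo(), qvgp.elbo(), decimal=2)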
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module tests the HRSModel class
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.testing import assert_array_equal
from astropy.tests.helper import pytest
from astropy import units as u
from ..hrsorder import HRSOrder
def create_hrsorder():
h = HRSOrder(order=16)
y = np.arange(25)
y = y.reshape(5, 5)
h.set_order_from_array(y)
h.set_flux_from_array(y, flux_unit=u.electron)
def f(x, y):
return 2 * x + y
h.set_wavelength_from_model(f, h.region, wavelength_unit=u.nm)
return h
def test_hrsorder_empty():
with pytest.raises(TypeError):
h = HRSOrder()
def test_hrsorder():
h = HRSOrder(order=67)
assert h.order == 67
# test setting it with an order
def test_hrsorder_bad():
with pytest.raises(TypeError):
h = HRSOrder(order=37.5)
# test order type
def test_hrsorder_order_type_object():
h = HRSOrder(order=37, order_type='object')
assert h.order_type == 'object'
def test_hrsorder_order_type_sky():
h = HRSOrder(order=37, order_type='sky')
assert h.order_type == 'sky'
def test_hrsorder_order_type_None():
h = HRSOrder(order=37, order_type='sky')
h.order_type = None
assert h.order_type is None
def test_hrsorder_order_type_bad():
with pytest.raises(TypeError):
h = HRSOrder(order=37, order_type='badtype')
# test defining a region
def test_hrsorder_region():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
h = HRSOrder(order=37, region=r)
assert h.region == r
def test_hrsorder_region_length_bad():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5)]
with pytest.raises(TypeError):
h = HRSOrder(order=37, region=r)
def test_hrsorder_region_pixels_bad():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2)]
with pytest.raises(TypeError):
h = HRSOrder(order=37, region=r)
# test setting the flux
def test_hrsorder_flux():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
f = np.arange(len(r[0]))
h = HRSOrder(order=37, region=r, flux=f)
assert_array_equal(h.flux, f)
def test_hrsorder_flux_length():
with pytest.raises(TypeError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
        f = np.arange(len(r[0]) - 1)
h = HRSOrder(order=37, region=r, flux=f)
def test_hrsorder_flux_noregion():
with pytest.raises(ValueError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
f = np.arange(len(r[0]))
h = HRSOrder(order=37, flux=f)
# test setting the flux unit
def test_hrsorder_flux_unit():
h = HRSOrder(order=37, flux_unit=u.electron)
assert h.flux_unit == u.electron
# test setting the flux unit
def test_hrsorder_wavelength_unit():
h = HRSOrder(order=37, wavelength_unit=u.nm)
assert h.wavelength_unit == u.nm
# test setting the wavelength
def test_hrsorder_wavelength():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
w = np.arange(len(r[0]))
h = HRSOrder(order=37, region=r, wavelength=w)
assert_array_equal(h.wavelength, w)
def test_hrsorder_wavelength_length():
with pytest.raises(TypeError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
        w = np.arange(len(r[0]) - 1)
h = HRSOrder(order=37, region=r, wavelength=w)
def test_hrsorder_wavelength_noregion():
with pytest.raises(ValueError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
w = np.arange(len(r[0]))
h = HRSOrder(order=37, wavelength=w)
# test setting the array from the data
def test_hrsorder_set_order_from_array():
h = HRSOrder(order=16)
y = np.arange(25)
y = y.reshape(5, 5)
h.set_order_from_array(y)
assert h.region == (np.array([3]), np.array([1]))
assert y[h.region] == [16]
def test_hrsorder_set_order_from_array_baddata():
h = HRSOrder(order=16)
y = np.arange(25)
y = y.reshape(5, 5)
with pytest.raises(TypeError):
h.set_order_from_array(5)
def test_hrsorder_set_order_from_array_baddata_shape():
h = HRSOrder(order=16)
    y = np.arange(25)
    # assumed completion (the original is truncated here): a wrongly shaped
    # 1-D array should be rejected by set_order_from_array
    with pytest.raises(TypeError):
        h.set_order_from_array(y)
from __future__ import division
from itertools import combinations
import numpy as np
import time
import os
import datetime
import logging
from scipy.spatial.distance import cdist
import scipy.io as sio
from sklearn.model_selection import KFold, GridSearchCV
import joblib  # assumed dependency: restores the cached-model branch in tgm_do_2v2_train_test_noun_adj
from regressor import VectorRegressor
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
    def __exit__(self, exc_type, exc_value, traceback):
if self.name:
print('[%s]' % self.name, end=' ')
print('Elapsed: %s' % (time.time() - self.tstart))
class Evaluator:
def __init__(self, t_vec, subj, adjnoun, params, args):
self.t_vec = t_vec
self.subj = subj
self.adjnoun = adjnoun
self.params = params
self.args = args
def leave_two_out(self, X, y, a, foldZscore, Xp=None, yp=None, Xl=None, yl=None):
all_ests = np.zeros((a.shape[0], 2, y.shape[1]))
all_targs = np.zeros((a.shape[0], 2, y.shape[1]))
p_all_ests = np.zeros((a.shape[0], 2, y.shape[1]))
p_all_targs = np.zeros((a.shape[0], 2, y.shape[1]))
l_all_ests = np.zeros((a.shape[0], 2, y.shape[1]))
l_all_targs = np.zeros((a.shape[0], 2, y.shape[1]))
for j in range(a.shape[0]):
folds = np.zeros(y.shape[0])
folds[a[j, :]] = 1
reg = VectorRegressor(fZscore=1, folds=folds)
## Train
reg.fit(X, y)
# print(reg.clf.alpha_)
## predict
all_ests[j, :, :], all_targs[j, :, :] = reg.predict(X, y)
if Xp is not None:
_, _, scaler = reg.transform(Xp, yp)
p_all_ests[j, :, :], p_all_targs[j, :, :] = reg.predict(Xp, yp, scaler)
if Xl is not None:
_, _, scaler = reg.transform(Xl, yl)
l_all_ests[j, :, :], l_all_targs[j, :, :] = reg.predict(Xl, yl, scaler)
return all_ests, all_targs, p_all_ests, p_all_targs, l_all_ests, l_all_targs
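    def make_results(self, all_ests, all_targs, dist_metric):
        # Sketch: make_results is referenced throughout but its definition is
        # lost to truncation; this is the standard 2v2 test it is assumed to
        # implement. A pair counts as correct when matching each estimate to its
        # own target is closer than the swapped assignment.
        all_res = np.zeros(all_ests.shape[0])
        for j in range(all_ests.shape[0]):
            d = cdist(all_ests[j], all_targs[j], metric=dist_metric)
            all_res[j] = float(d[0, 0] + d[1, 1] < d[0, 1] + d[1, 0])
        return all_res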
def do_2v2(self, taskindList):
# IMPORTANT: assumed the desired channels have been already selected in adjnoun
# IMPORTANT: labels must be 0-indexed
# IMPORTANT: taskindList is 1-indexed
all_2v2 = np.zeros((self.t_vec.shape[0], 5, 2))
for timeind, _ in enumerate(self.t_vec):
for taskind in taskindList: # taskind is chosen based on .mat file and is 1-indexed
if taskind == 5 and self.subj == 'A0056': # bad data for this task
continue
for wordind in range(0, 2): # label index (first word spoken, second word spoken)
if taskind > 3 and wordind > 0:
break
logging.info(
'{} Task {} word {} t {:.3f}\t'.format(self.subj, taskind, wordind, self.t_vec[timeind]))
### selecting data
data = self.data_select(taskind, timeind)
### average data
a, avrg_data, avrg_labels = self.avg_data(wordind, taskind, data)
### training
word_vecs = self.params.word_vecs[avrg_labels.astype(int), :]
word_vecs = word_vecs[:, self.params.word_dims]
all_ests, all_targs, _, _, _, _ = self.leave_two_out(avrg_data, word_vecs, a, 1)
all_res = self.make_results(all_ests, all_targs, self.params.dist_metric)
all_2v2[timeind, taskind - 1, wordind] = 100 * np.mean(all_res)
logging.critical('2v2 Acc {:.4f}\t\n{}\n\n'.format(100 * np.mean(all_res),
str(datetime.datetime.now())))
return all_2v2
def do_2v2_phrasal(self, taskindList):
# IMPORTANT: assumed the desired channels have been already selected in adjnoun
# IMPORTANT: labels must be 0-indexed
# IMPORTANT: taskindList is 1-indexed
all_2v2 = np.zeros((self.t_vec.shape[0], 5))
        all_2v2_res = np.zeros((self.t_vec.shape[0], 5, 300))
for taskind in taskindList: # taskind is chosen based on .mat file and is 1-indexed
for timeind, _ in enumerate(self.t_vec):
if taskind > 3: # this function only works for two word utterance
break
logging.info(
'{} Task {} t {:.3f}\t'.format(self.subj, taskind, self.t_vec[timeind]))
### selecting data
data = self.data_select(taskind, timeind)
### average data
a, avrg_data, word_vecs = self.avg_data(0, taskind, data, self.params)
### training
all_ests, all_targs, _, _, _, _ = self.leave_two_out(avrg_data, word_vecs, a, 1)
all_res = self.make_results(all_ests, all_targs, self.params.dist_metric)
all_2v2[timeind, taskind - 1] = 100 * np.mean(all_res)
                all_2v2_res[timeind, taskind - 1, :] = all_res
logging.critical('2v2 Acc {:.4f}\t\n{}\n\n'.format(100 * np.mean(all_res),
str(datetime.datetime.now())))
return all_2v2, all_2v2_res
def do_2v2_train_test(self, trainTasklList, wordList): # testTaskList
# IMPORTANT: assumed the desired channels have been already selected in adjnoun
# IMPORTANT: labels must be 0-indexed
# IMPORTANT: trainTaskList and testTaskList are 1-indexed
all_2v2 = np.zeros((self.t_vec.shape[0], 5, 2, 2)) # Note: time, task, word, train,test
for timeind, _ in enumerate(self.t_vec):
for taskind in trainTasklList: # taskind is chosen based on .mat file and is 1-indexed
if taskind == 5 and self.subj == 'A0056': # bad data for this task
continue
for wordind in wordList: # label index (first word spoken, second word spoken)
if taskind > 3 and wordind > 0:
break
taskind_test = 5 - taskind
logging.critical(
'train:{}, test:{}, time:{}, wordind:{}'.format(taskind, taskind_test, self.t_vec[timeind],
wordind))
# logging.info('{} trainTask {} word {} t {:.3f}\t'.format(self.subj, taskind, wordind, self.t_vec[timeind]))
### selecting data
data_train = self.data_select(taskind, timeind)
data_test = self.data_select(taskind_test, timeind)
### average data
# check labels_train va labels_test yeki bashe
a, avrg_data_train, avrg_labels_train = self.avg_data(wordind, taskind, data_train)
a, avrg_data_test, avrg_labels_test = self.avg_data(wordind, taskind_test, data_test)
### training
word_vecs = self.params.word_vecs[avrg_labels_train.astype(int), :]
word_vecs = word_vecs[:, self.params.word_dims]
all_ests, all_targs, all_ests_test, all_targs_test, _, _ = \
self.leave_two_out(avrg_data_train, word_vecs, a, 1, Xp=avrg_data_test, yp=word_vecs)
all_res = self.make_results(all_ests, all_targs, self.params.dist_metric)
all_res_test = self.make_results(all_ests_test, all_targs_test, self.params.dist_metric)
all_2v2[timeind, taskind - 1, wordind, 0] = 100 * np.mean(all_res)
all_2v2[timeind, taskind_test - 1, wordind, 1] = 100 * np.mean(all_res_test)
logging.critical('2v2 Acc {:.4f}\t 2v2 Acc test {:.4f}\t\n{}\n\n'.format(100 * np.mean(all_res),
100 * np.mean(
all_res_test),
str(
datetime.datetime.now())))
return all_2v2
def do_2v2_train_test_noun_adj(self): # testTaskList
# TODO: when averaging based on utterance, there's different number of instances
# TODO: between 1-word and 2word conditions
# IMPORTANT: assumed the desired channels have been already selected in adjnoun
# IMPORTANT: labels must be 0-indexed
# IMPORTANT: trainTaskList and testTaskList are 1-indexed
        all_2v2 = np.zeros((self.t_vec.shape[0], 5, 2))  # Note: time, task, word
        # fileName is used below but was never defined in this method; assumed
        # output path, mirroring tgm_do_2v2_train_test_noun_adj without the tgm_ prefix
        fileName = '{}/{}/{}_classify_{}_on_{}_with_{}_averaging.mat'.format(
            self.params.my_out_dir, self.params.subjs, self.subj, self.args.whcond, self.args.traind, self.args.avg)
for wordind in range(0, 2): # label index (first word spoken, second word spoken)
if self.args.whcond == 'noun_train':
taskind = 5
elif self.args.whcond == 'adj_train':
taskind = 4
else:
return
### noun begin
taskind_test = [2, 3] # , taskind] # taskind is chosen based on .mat file and is 1-indexed
if taskind == 5 and self.subj == 'A0056': # bad data for this task
break
if taskind == 5 and wordind == 0: # training on noun only but wordind is 0
continue
if taskind == 4 and wordind == 1:
continue
data_train = self.data_select(taskind)
data_test_0 = self.data_select(taskind_test[0])
data_test_1 = self.data_select(taskind_test[1])
### average data
# check labels_train va labels_test yeki bashe
a, avrg_alltime_train, avrg_labels_train = self.avg_data(wordind, taskind, data_train)
a, avrg_alltime_test_0, avrg_labels_test_0 = self.avg_data(wordind, taskind_test[0], data_test_0)
a, avrg_alltime_test_1, avrg_labels_test_1 = self.avg_data(wordind, taskind_test[1], data_test_1)
for timeind, _ in enumerate(self.t_vec):
if all_2v2[timeind, taskind - 1, wordind] != 0 and all_2v2[timeind, taskind_test[0] - 1, wordind] != 0\
and all_2v2[timeind, taskind_test[1] - 1, wordind]:
continue
logging.info('{} trainTask {} testTask {}, word {} t {:.3f}\t'.format(self.subj, taskind,
taskind_test, wordind,
self.t_vec[timeind]))
### selecting data
### average data
# check labels_train va labels_test yeki bashe
avrg_data_train = avrg_alltime_train[:, :,
np.logical_and(np.squeeze(self.adjnoun['time'] > self.t_vec[timeind]),
np.squeeze(self.adjnoun['time'] <= (
self.t_vec[timeind] + self.params.time_window)))]
avrg_data_test_0 = avrg_alltime_test_0[:, :,
np.logical_and(np.squeeze(self.adjnoun['time'] > self.t_vec[timeind]),
np.squeeze(self.adjnoun['time'] <= (
self.t_vec[timeind] + self.params.time_window)))]
avrg_data_test_1 = avrg_alltime_test_1[:, :,
np.logical_and(np.squeeze(self.adjnoun['time'] > self.t_vec[timeind]),
np.squeeze(self.adjnoun['time'] <= (
self.t_vec[timeind] + self.params.time_window)))]
### training
word_vecs = self.params.word_vecs[avrg_labels_train.astype(int), :]
word_vecs = word_vecs[:, self.params.word_dims]
all_ests, all_targs, all_ests_test_0, all_targs_test_0, all_ests_test_1, all_targs_test_1 = \
self.leave_two_out(avrg_data_train, word_vecs, a, 1, avrg_data_test_0, word_vecs, avrg_data_test_1,
word_vecs)
all_res = self.make_results(all_ests, all_targs, self.params.dist_metric)
all_res_test_0 = self.make_results(all_ests_test_0, all_targs_test_0, self.params.dist_metric)
all_res_test_1 = self.make_results(all_ests_test_1, all_targs_test_1, self.params.dist_metric)
all_2v2[timeind, taskind - 1, wordind] = 100 * np.mean(all_res)
all_2v2[timeind, taskind_test[0] - 1, wordind] = 100 * np.mean(all_res_test_0)
all_2v2[timeind, taskind_test[1] - 1, wordind] = 100 * np.mean(all_res_test_1)
logging.critical('2v2 Acc {:.4f}\t 2v2 Acc test {:.4f}\t 2v2 Acc test {:.4f}\t\n{}\n\n'.format(
100 * np.mean(all_res),
100 * np.mean(all_res_test_0),
100 * np.mean(all_res_test_1),
str(datetime.datetime.now())))
if timeind % 5 == 0:
sio.savemat(fileName, {'all_2v2': all_2v2, 't_vec': self.t_vec, 'params': self.params,
'parser': self.args})
sio.savemat(fileName, {'all_2v2': all_2v2, 't_vec': self.t_vec, 'params': self.params,
'parser': self.args})
return all_2v2
def tgm_do_2v2_train_test_noun_adj(self): # testTaskList
# TODO: when averaging based on utterance, there's different number of instances
# TODO: between 1-word and 2word conditions
# IMPORTANT: assumed the desired channels have been already selected in adjnoun
# IMPORTANT: labels must be 0-indexed
# IMPORTANT: trainTaskList and testTaskList are 1-indexed
weightfolder = '{}/weights/{}'.format(self.params.my_out_dir, self.params.subjs)
if not os.path.exists(weightfolder):
os.makedirs(weightfolder)
fileName = '{}/{}/tgm_{}_classify_{}_on_{}_with_{}_averaging.mat'.format(
self.params.my_out_dir, self.params.subjs, self.subj, self.args.whcond, self.args.traind, self.args.avg)
if os.path.exists(fileName):
loaded = sio.loadmat(fileName)
all_2v2 = loaded['all_2v2']
else:
all_2v2 = np.zeros(
(self.t_vec.shape[0], self.t_vec.shape[0], 5, 5, 2)) # Note: time, task, word, train,test
if self.args.whcond == 'noun_train':
all_tasks = [5, 3, 2]
wordind = 1
if self.subj == 'A0056':
return
elif self.args.whcond == 'adj_train':
all_tasks = [4, 3, 2]
wordind = 0
else:
return
for taskind in all_tasks: # label index (first word spoken, second word spoken)
taskind_test = all_tasks.copy()
taskind_test.remove(taskind)
if taskind < 4:
taskind_test.remove(5 - taskind)
            if len(taskind_test) == 1:
                # assumed fix: when training on a two-word task only one valid test
                # task remains, and the original would raise IndexError on
                # taskind_test[1]; duplicate it so the two-test-set bookkeeping holds
                taskind_test.append(taskind_test[0])
            data_train = self.data_select(taskind)
            data_test_0 = self.data_select(taskind_test[0])
            data_test_1 = self.data_select(taskind_test[1])
### average data
# check labels_train va labels_test yeki bashe
a, avrg_alltime_train, avrg_labels_train = self.avg_data(wordind, taskind, data_train)
a, avrg_alltime_test_0, avrg_labels_test_0 = self.avg_data(wordind, taskind_test[0], data_test_0)
a, avrg_alltime_test_1, avrg_labels_test_1 = self.avg_data(wordind, taskind_test[1], data_test_1)
for traintimeind, _ in enumerate(self.t_vec):
logging.info('{} trainTask {} testTask {}, word {} t {:.3f}\t'.format(self.subj, taskind,
taskind_test, wordind,
self.t_vec[traintimeind]))
train_range = range(np.argmax(self.adjnoun['time'] > self.t_vec[traintimeind]),
np.argmax(self.adjnoun['time'] > self.t_vec[traintimeind]) + 99)
avg_cond_r_time_r = avrg_alltime_train[:, :, train_range]
for testtimeind, _ in enumerate(self.t_vec):
test_range = range(np.argmax(self.adjnoun['time'] > self.t_vec[testtimeind]),
np.argmax(self.adjnoun['time'] > self.t_vec[testtimeind]) + 99)
avg_cond_r_time_t = avrg_alltime_train[:, :, test_range]
avg_cond_t_time_t_0 = avrg_alltime_test_0[:, :, test_range]
avg_cond_t_time_t_1 = avrg_alltime_test_1[:, :, test_range]
### training
word_vecs = self.params.word_vecs[avrg_labels_train.astype(int), :]
word_vecs = word_vecs[:, self.params.word_dims]
all_ests = np.zeros((a.shape[0], 2, word_vecs.shape[1]))
all_targs = np.zeros((a.shape[0], 2, word_vecs.shape[1]))
p_all_ests = np.zeros((a.shape[0], 2, word_vecs.shape[1]))
p_all_targs = np.zeros((a.shape[0], 2, word_vecs.shape[1]))
l_all_ests = np.zeros((a.shape[0], 2, word_vecs.shape[1]))
l_all_targs = np.zeros((a.shape[0], 2, word_vecs.shape[1]))
for j in range(a.shape[0]):
folds = np.zeros(word_vecs.shape[0])
folds[a[j, :]] = 1
modelfile = '{}/wordind_{}_ttaskind_{}_ttimeind_{}_a_{}.joblib'.format(
weightfolder, wordind, taskind, traintimeind, j)
                        if os.path.exists(modelfile):
                            # loading was commented out in the original, which leaves
                            # reg undefined on this branch; restored so the cached-model
                            # path works (assumes the joblib import added above)
                            reg = joblib.load(modelfile)
                        else:
reg = VectorRegressor(fZscore=1, folds=folds)
## Train
reg.fit(avg_cond_r_time_r, word_vecs)
# joblib.dump(reg, modelfile)
## predict
_, _, scaler = reg.transform(avg_cond_r_time_t, word_vecs)
all_ests[j, :, :], all_targs[j, :, :] = reg.predict(avg_cond_r_time_t, word_vecs, scaler)
_, _, scaler = reg.transform(avg_cond_t_time_t_0, word_vecs)
p_all_ests[j, :, :], p_all_targs[j, :, :] = reg.predict(avg_cond_t_time_t_0, word_vecs, scaler)
_, _, scaler = reg.transform(avg_cond_t_time_t_1, word_vecs)
l_all_ests[j, :, :], l_all_targs[j, :, :] = reg.predict(avg_cond_t_time_t_1, word_vecs, scaler)
all_res = self.make_results(all_ests, all_targs, self.params.dist_metric)
all_res_test_0 = self.make_results(p_all_ests, p_all_targs, self.params.dist_metric)
all_res_test_1 = self.make_results(l_all_ests, l_all_targs, self.params.dist_metric)
all_2v2[traintimeind, testtimeind, taskind - 1, taskind - 1, wordind] = 100 * np.mean(all_res)
all_2v2[traintimeind, testtimeind, taskind - 1, taskind_test[0] - 1, wordind] = 100 * np.mean(
all_res_test_0)
all_2v2[traintimeind, testtimeind, taskind - 1, taskind_test[1] - 1, wordind] = 100 * np.mean(
all_res_test_1)
logging.critical(
'2v2 Acc {:.4f}\t 2v2 Acc test {:.4f}\t 2v2 Acc test {:.4f}\t\n | traintime {} , testtime {}\n\n'.format(
100 * np.mean(all_res),
100 * np.mean(all_res_test_0),
100 * np.mean(all_res_test_1),
self.t_vec[traintimeind],
self.t_vec[testtimeind]))
if traintimeind % 5 == 0:
sio.savemat(fileName, {'all_2v2': all_2v2, 't_vec': self.t_vec, 'params': self.params,
'parser': self.args})
sio.savemat(fileName, {'all_2v2': all_2v2, 't_vec': self.t_vec, 'params': self.params,
'parser': self.args})
return all_2v2
def data_select(self, taskind, timeind=None):
if timeind is None:
data = self.adjnoun['data'][np.squeeze(self.adjnoun['task'] == taskind), :, :]
data = data * 10 ** 12
logging.info('\n\t#trials: {}\n'.format(data.shape[0]))
return data
data = self.adjnoun['data'][np.squeeze(self.adjnoun['task'] == taskind), :, :]
data = data[:, :,
np.logical_and(np.squeeze(self.adjnoun['time'] > self.t_vec[timeind]),
np.squeeze(self.adjnoun['time'] <= (
self.t_vec[timeind] + self.params.time_window)))] # *10 ^ 12
data = data * 10 ** 12
logging.info('\n\t#trials: {}\n'.format(data.shape[0]))
return data
def avg_data(self, wordind, taskind, data, params=[]):
### averaging trials together
# forall task == cur_task choose 1st / 2nd word cods
labels = self.adjnoun['labels'][wordind, np.squeeze(self.adjnoun['task'] == taskind)]
if labels[0] == 255: # when we have only 1 word and it might be stored in 0 or 1 index
labels = self.adjnoun['labels'][:, np.squeeze(self.adjnoun['task'] == taskind)]
labels += 1
labels = np.sum(labels, axis=0)
labels -= 1
# for every unique word have exactly 4(num_per_inst) instance
total_num_inst = np.unique(labels).size * self.params.num_per_inst
# average trials together so that we have say 20 ecpochs in total (total_num_inst) 5 * 4 = 20
avrg_data = np.zeros((total_num_inst, data.shape[1], data.shape[2]))
avrg_labels = np.zeros(total_num_inst)
if self.params.avg == 'random':
# TODO: needs random, cv and the averaging
np.random.seed(9876)
avrg_counter = 0
labs = np.unique(labels)
for i in range(0, labs.size):
cur_data = data[np.squeeze(labels == labs[i]), :, :]
            f = KFold(n_splits=self.params.num_per_inst, shuffle=True, random_state=np.random.randint(1000))
for _, fold_ind in f.split(cur_data):
avrg_data[avrg_counter, :, :] = np.mean(cur_data[fold_ind, :, :], 0)
avrg_labels[avrg_counter] = labs[i]
avrg_counter = avrg_counter + 1
a = self.__make_distinct(avrg_labels, total_num_inst)
elif self.params.avg == 'utterance': # TODO: rethink this else
avrg_counter = 0
labs = np.unique(labels)
all_labels = self.adjnoun['labels'][:, np.squeeze(self.adjnoun['task'] == taskind)]
for i in range(0, labs.size):
                cur_data = data[np.squeeze(labels == labs[i]), :, :]