prompt (stringlengths 15–655k) | completion (stringlengths 3–32.4k) | api (stringlengths 8–52) |
---|---|---|
import os
import dlib
import numpy as np
import scipy.ndimage
from PIL import Image
from utils.common import make_transform
def get_landmark(filepath, detector, predictor):
"""get landmark with dlib
:return: np.array shape=(68, 2)
"""
img = dlib.load_rgb_image(filepath)
dets = detector(img, 1)
for k, d in enumerate(dets):
shape = predictor(img, d)
t = list(shape.parts())
a = []
for tt in t:
a.append([tt.x, tt.y])
lm = np.array(a)
return lm
def get_eyes_coors(landmark):
lm_eye_left = landmark[36: 42] # left-clockwise
lm_eye_right = landmark[42: 48] # left-clockwise
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
return eye_left, eye_right
def get_rotation_from_eyes(left_eye_unaligned, right_eye_unaligned, left_eye_aligned, right_eye_aligned):
eye_to_eye1 = right_eye_unaligned - left_eye_unaligned
eye_to_eye_normalized1 = eye_to_eye1 / np.linalg.norm(eye_to_eye1)
eye_to_eye2 = right_eye_aligned - left_eye_aligned
eye_to_eye_normalized2 = eye_to_eye2 / np.linalg.norm(eye_to_eye2)
cos_r = np.inner(eye_to_eye_normalized1, eye_to_eye_normalized2)
r_rad = np.arccos(cos_r)
r = np.degrees(r_rad)
if right_eye_unaligned[1] > left_eye_unaligned[1]:
r = 360 - r
return r
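# Illustrative sketch (not part of the original script): the rotation returned above is the angle
# between the unaligned and aligned eye-to-eye vectors, recovered via arccos of the inner product
# of their unit vectors. With hypothetical coordinates:
#   left_eye_unaligned  = np.array([100.0, 120.0])
#   right_eye_unaligned = np.array([160.0, 100.0])
#   left_eye_aligned    = np.array([100.0, 110.0])
#   right_eye_aligned   = np.array([160.0, 110.0])
# the unaligned vector (60, -20) makes about 18.4 degrees with the aligned vector (60, 0);
# since right_eye_unaligned[1] (100) is not greater than left_eye_unaligned[1] (120),
# get_rotation_from_eyes(...) returns ~18.4 rather than 360 - 18.4.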
def get_alignment_positions(filepath: str, detector, predictor, eyes_distance_only: bool = True):
lm = get_landmark(filepath, detector, predictor)
lm_mouth_outer = lm[48: 60] # left-clockwise
# Calculate auxiliary vectors.
eye_left, eye_right = get_eyes_coors(lm)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
if eyes_distance_only:
x *= np.hypot(*eye_to_eye) * 2.0
else:
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1
return c, x, y
def get_alignment_transformation(c: np.ndarray, x: np.ndarray, y: np.ndarray):
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
return quad, qsize
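# Illustrative sketch (not part of the original script): how the oriented crop quad is built from a
# hypothetical center `c` and axes `x`, `y` like those returned by get_alignment_positions:
#   c = np.array([256.0, 300.0])      # eye/mouth-weighted face center
#   x = np.array([120.0, 10.0])       # half-width axis roughly along the eye line
#   y = np.flipud(x) * [-1, 1]        # perpendicular half-height axis, here (-10, 120)
#   quad, qsize = get_alignment_transformation(c, x, y)
# `quad` holds the four corners c-x-y, c-x+y, c+x+y, c+x-y of a rotated square,
# and qsize = np.hypot(*x) * 2 (~241 here) is its nominal side length.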
def get_fixed_cropping_transformation(c, x):
d = np.hypot(x[0], x[1])
d_hor = np.array([d, 0])
d_ver = np.array([0, d])
quad = np.stack([c - d_hor - d_ver, c - d_hor + d_ver, c + d_hor + d_ver, c + d_hor - d_ver])
qsize = np.hypot(*x) * 2
return quad, qsize
def crop_face_by_transform(filepath: str, quad: np.ndarray, qsize: int, output_size: int = 1024,
transform_size: int = 1024, enable_padding: bool = True):
# read image
img = Image.open(filepath)
# Shrink.
shrink = int(np.floor(qsize / output_size * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
quad /= shrink
qsize /= shrink
# Crop.
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Pad.
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 4:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
img = Image.fromarray(np.uint8(np.clip(
|
np.rint(img)
|
numpy.rint
|
"""Implementations of IAC and COMA, selected via use_Q and use_V.
Same as alg_baseline.py, except that Checkers global state has more components.
"""
import numpy as np
import tensorflow.compat.v1 as tf
import sys
import networks
class Alg(object):
def __init__(self, experiment, dimensions, stage=1, n_agents=1,
tau=0.01, lr_V=0.001, lr_Q=0.001,
lr_actor=0.0001, gamma=0.99, alpha=0.5,
use_Q=1, use_V=1, nn={}, IAC=False):
"""Same as alg_baseline.py. Checkers state has more components.
Inputs:
experiment: string
dimensions: dictionary containing tensor dimensions
(h,w,c) for tensor
l for 1D vector
stage: curriculum stage (always 2 for IAC and COMA)
n_agents: int
tau: target variable update rate
lr_V, lr_Q, lr_actor: learning rates for optimizer
gamma: discount factor
alpha: weighting of local vs. global gradient
use_Q: set to 1 and set use_V=0 and IAC=False to run COMA
use_V: if 1, activates V network
nn: dictionary that specifies neural net architecture
IAC: set to True and set use_V to 1 to run IAC
"""
self.experiment = experiment
if self.experiment == "checkers":
# Global state
self.rows_state = dimensions['rows_state']
self.columns_state = dimensions['columns_state']
self.channels_state = dimensions['channels_state']
self.l_state = n_agents * dimensions['l_state_one']
self.l_state_one_agent = dimensions['l_state_one']
self.l_state_other_agents = (n_agents-1) * dimensions['l_state_one']
# Agent observations
self.l_obs_others = dimensions['l_obs_others']
self.l_obs_self = dimensions['l_obs_self']
# Dimensions for image input
self.rows_obs = dimensions['rows_obs']
self.columns_obs = dimensions['columns_obs']
self.channels_obs = dimensions['channels_obs']
self.l_action = dimensions['l_action']
self.l_goal = dimensions['l_goal']
self.n_agents = n_agents
self.tau = tau
self.lr_V = lr_V
self.lr_Q = lr_Q
self.lr_actor = lr_actor
self.gamma = gamma
self.alpha = alpha
self.use_Q = use_Q
self.use_V = use_V
self.nn = nn
self.IAC = IAC
self.agent_labels = np.eye(self.n_agents)
self.actions = np.eye(self.l_action)
# Initialize computational graph
self.create_networks(stage)
self.list_initialize_target_ops, self.list_update_target_ops = self.get_assign_target_ops(tf.trainable_variables())
if self.use_V:
self.create_local_critic_train_op()
if self.n_agents > 1 and self.use_Q:
self.create_global_critic_train_op()
self.create_policy_gradient_op()
# TF summaries
self.create_summary()
def create_networks(self, stage):
# Placeholders
self.state_env = tf.placeholder(tf.float32, [None, self.rows_state, self.columns_state, self.channels_state], 'state_env')
self.v_state = tf.placeholder(tf.float32, [None, self.l_state], 'v_state')
self.v_state_one_agent = tf.placeholder(tf.float32, [None, self.l_state_one_agent], 'v_state_one_agent')
self.v_state_other_agents = tf.placeholder(tf.float32, [None, self.l_state_other_agents], 'v_state_other_agents')
self.v_goal = tf.placeholder(tf.float32, [None, self.l_goal], 'v_goal')
self.v_goal_others = tf.placeholder(tf.float32, [None, (self.n_agents-1)*self.l_goal], 'v_goal_others')
self.v_labels = tf.placeholder(tf.float32, [None, self.n_agents])
self.action_others = tf.placeholder(tf.float32, [None, self.n_agents-1, self.l_action], 'action_others')
if self.experiment == "checkers":
self.obs_self_t = tf.placeholder(tf.float32, [None, self.rows_obs, self.columns_obs, self.channels_obs], 'obs_self_t')
self.obs_self_v = tf.placeholder(tf.float32, [None, self.l_obs_self], 'obs_self_v')
self.obs_others = tf.placeholder(tf.float32, [None, self.l_obs_others], 'obs_others')
self.actions_prev = tf.placeholder(tf.float32, [None, self.l_action], 'action_prev')
# Actor network
self.epsilon = tf.placeholder(tf.float32, None, 'epsilon')
with tf.variable_scope("Policy_main"):
if self.experiment == 'checkers':
probs = networks.actor_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action, stage=stage)
# probs is normalized
self.probs = (1-self.epsilon) * probs + self.epsilon/float(self.l_action)
self.action_samples = tf.multinomial(tf.log(self.probs), 1)
with tf.variable_scope("Policy_target"):
if self.experiment == 'checkers':
probs_target = networks.actor_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action, stage=stage)
self.action_samples_target = tf.multinomial(tf.log( (1-self.epsilon)*probs_target + self.epsilon/float(self.l_action) ), 1)
# V(s,g^n)
if self.use_V:
with tf.variable_scope("V_main"):
if self.experiment == 'checkers':
if self.IAC:
self.V = networks.V_checkers_local(self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['V_conv_f'], k1=self.nn['V_conv_k'], n_h1_1=self.nn['V_n_h1_1'], n_h1_2=self.nn['V_n_h1_2'], n_h2=self.nn['V_n_h2'], stage=stage)
else:
self.V = networks.V_checkers_global(self.state_env, self.v_state_one_agent, self.v_goal, self.v_state_other_agents, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'], n_h1_1=self.nn['Q_n_h1_1'], n_h1_2=self.nn['Q_n_h1_2'], n_h2=self.nn['Q_n_h2'], stage=stage)
with tf.variable_scope("V_target"):
if self.experiment == 'checkers':
if self.IAC:
self.V_target = networks.V_checkers_local(self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['V_conv_f'], k1=self.nn['V_conv_k'], n_h1_1=self.nn['V_n_h1_1'], n_h1_2=self.nn['V_n_h1_2'], n_h2=self.nn['V_n_h2'], stage=stage)
else:
self.V_target = networks.V_checkers_global(self.state_env, self.v_state_one_agent, self.v_goal, self.v_state_other_agents, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'], n_h1_1=self.nn['Q_n_h1_1'], n_h1_2=self.nn['Q_n_h1_2'], n_h2=self.nn['Q_n_h2'], stage=stage)
# Q(s, a^{-n}, g^n, g^{-n}, n, o^n)
if self.n_agents > 1 and self.use_Q:
with tf.variable_scope("Q_main"):
if self.experiment == 'checkers':
self.Q = networks.Q_coma_checkers(self.state_env, self.v_state, self.action_others, self.v_goal, self.v_goal_others, self.v_labels, self.obs_self_t, self.obs_self_v, n_actions=self.l_action)
with tf.variable_scope("Q_target"):
if self.experiment == 'checkers':
self.Q_target = networks.Q_coma_checkers(self.state_env, self.v_state, self.action_others, self.v_goal, self.v_goal_others, self.v_labels, self.obs_self_t, self.obs_self_v, n_actions=self.l_action)
def get_assign_target_ops(self, list_vars):
# ops for equating main and target
list_initial_ops = []
# ops for slow update of target toward main
list_update_ops = []
if self.use_V:
list_V_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'V_main')
map_name_V_main = {v.name.split('main')[1] : v for v in list_V_main}
list_V_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'V_target')
map_name_V_target = {v.name.split('target')[1] : v for v in list_V_target}
if len(list_V_main) != len(list_V_target):
raise ValueError("get_initialize_target_ops : lengths of V_main and V_target do not match")
for name, var in map_name_V_main.items():
# create op that assigns value of main variable to
# target variable of the same name
list_initial_ops.append( map_name_V_target[name].assign(var) )
for name, var in map_name_V_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_V_target[name].assign( self.tau*var + (1-self.tau)*map_name_V_target[name] ) )
# For policy
list_P_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Policy_main')
map_name_P_main = {v.name.split('main')[1] : v for v in list_P_main}
list_P_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Policy_target')
map_name_P_target = {v.name.split('target')[1] : v for v in list_P_target}
if len(list_P_main) != len(list_P_target):
raise ValueError("get_initialize_target_ops : lengths of P_main and P_target do not match")
# ops for equating main and target
for name, var in map_name_P_main.items():
list_initial_ops.append( map_name_P_target[name].assign(var) )
# ops for slow update of target toward main
for name, var in map_name_P_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_P_target[name].assign( self.tau*var + (1-self.tau)*map_name_P_target[name] ) )
# Repeat for Q if needed
if self.n_agents > 1 and self.use_Q:
list_Q_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_main')
map_name_Q_main = {v.name.split('main')[1] : v for v in list_Q_main}
list_Q_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_target')
map_name_Q_target = {v.name.split('target')[1] : v for v in list_Q_target}
if len(list_Q_main) != len(list_Q_target):
raise ValueError("get_initialize_target_ops : lengths of Q_main and Q_target do not match")
# ops for equating main and target
for name, var in map_name_Q_main.items():
# create op that assigns value of main variable to
# target variable of the same name
list_initial_ops.append( map_name_Q_target[name].assign(var) )
# ops for slow update of target toward main
for name, var in map_name_Q_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_Q_target[name].assign( self.tau*var + (1-self.tau)*map_name_Q_target[name] ) )
return list_initial_ops, list_update_ops
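# Illustrative sketch (not part of the original file): the "slow update of target toward main" ops
# above implement a Polyak average, theta_target <- tau*theta_main + (1 - tau)*theta_target.
# The same arithmetic in plain numpy, with made-up values:
#   import numpy as np
#   tau = 0.01
#   theta_main   = np.array([1.0, 2.0])
#   theta_target = np.array([0.0, 0.0])
#   theta_target = tau * theta_main + (1 - tau) * theta_target   # -> [0.01, 0.02]
# Applied once per training step, theta_target slowly tracks theta_main.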
def run_actor(self, actions_prev, obs_others, obs_self_t, obs_self_v, goals, epsilon, sess):
"""Get actions for all agents as a batch.
Args:
actions_prev - list of integers
obs_others - list of 1D vector describing other agents
obs_self_t: list of image part of observations
obs_self_v: list of 1D vector part of observations
goals - [n_agents, n_lanes]
epsilon: float in [0,1]
sess: TF session
Returns:
np.array of actions for all agents
"""
# convert to batch
obs_others = np.array(obs_others)
obs_self_t = np.array(obs_self_t)
obs_self_v = np.array(obs_self_v)
actions_prev_1hot = np.zeros([self.n_agents, self.l_action])
actions_prev_1hot[
|
np.arange(self.n_agents)
|
numpy.arange
|
"""Markov Chain Monte Carlo for trDesign."""
# native
from datetime import datetime
from pathlib import Path  # needed for the DEFAULT_*_PATH class attributes defined below
import time
# lib
import numpy as np
import torch
# pkg
from losses import * # pylint: disable=wildcard-import, unused-wildcard-import
from tr_Rosetta_model import trRosettaEnsemble, prep_seq
from utils import aa2idx, distogram_distribution_to_distogram, idx2aa, plot_progress
# NOTE: the device helper `d()` used below (e.g. `.to(d())`) is assumed to be provided by utils as well.
import config as cfg
def v(torch_value):
"""Return a detached value, if possible."""
try:
return torch_value.cpu().detach().item()
except Exception:
return torch_value
class MCMC_Optimizer(torch.nn.Module):
"""Markov Chain Monte Carlo optimizer."""
# We don't define `.forward()`, but it's nn-based.
# pylint: disable=too-many-instance-attributes, abstract-method
DEFAULT_ROOT_PATH = Path(__file__).parent.parent
DEFAULT_MODEL_PATH = DEFAULT_ROOT_PATH / "models" / "trRosetta_models"
DEFAULT_BGND_PATH = DEFAULT_ROOT_PATH / "backgrounds"
DEFAULT_RESULTS_PATH = DEFAULT_ROOT_PATH / "results"
def __init__(
self,
L,
aa_weight,
MCMC,
native_frequencies,
experiment_name,
aa_valid,
max_aa_index=20,
sequence_constraint=None,
target_motif_path=None,
trRosetta_model_dir="models/trRosetta_models",
background_distribution_dir="backgrounds",
):
"""Construct the optimizer."""
super().__init__()
self.results_dir = self.setup_results_dir(experiment_name)
self.bkg_dir = background_distribution_dir
self.structure_models = trRosettaEnsemble(
trRosetta_model_dir
) # .share_memory()
print(
f"{self.structure_models.n_models} structure prediction models loaded to {d()}"
)
# General params:
self.eps = 1e-7
self.seq_L = L
# Setup MCMC params:
self.beta, self.N, self.coef, self.M = (
MCMC["BETA_START"],
MCMC["N_STEPS"],
MCMC["COEF"],
MCMC["M"],
)
self.aa_weight = aa_weight
# Setup sequence constraints:
self.aa_valid = aa_valid
self.native_frequencies = native_frequencies
self.seq_constraint = sequence_constraint
if self.seq_constraint is not None:
assert len(self.seq_constraint) == self.seq_L, \
"Constraint length (%d) must == Seq_L (%d)" %(len(self.seq_constraint), self.seq_L)
self.seq_constraint = (
aa2idx(self.seq_constraint).copy().reshape([1, self.seq_L])
)
self.seq_constraint_indices = np.where(
self.seq_constraint != max_aa_index, 1, 0
)
self.target_motif_path = target_motif_path
self.setup_losses()
# stats
self.bad_accepts = []
self.n_accepted_mutations = 0
self.n_accepted_bad_mutations = 0
self.best_metrics = {}
self.best_step = 0
self.best_sequence = None
self.best_E = None
self.step = 0
def setup_results_dir(self, experiment_name):
"""Create the directories for the results."""
results_dir = (
self.DEFAULT_RESULTS_PATH
/ experiment_name
/ datetime.now().strftime("%Y-%m-%d_%H%M%S")
)
results_dir.mkdir(parents=True, exist_ok=True)
(results_dir / "distogram_evolution").mkdir(parents=True, exist_ok=True)
print(f"Writing results to {results_dir}")
return results_dir
def setup_losses(self):
"""Prepare the loss functions."""
# Initialize protein background distributions:
self.bkg_loss = Structural_Background_Loss(self.seq_L, self.bkg_dir)
self.aa_bkgr_distribution = torch.from_numpy(self.native_frequencies).to(d())
# Motif-Loss:
self.motif_weight = 1.00
self.motif_mask = np.zeros((self.seq_L, self.seq_L))
self.motif_mask = torch.from_numpy(self.motif_mask).long().to(d())
if self.target_motif_path is not None:
self.motif_mask = np.ones((self.seq_L, self.seq_L))
self.motif_mask = torch.from_numpy(self.motif_mask).long().to(d())
self.motif_mask.fill_diagonal_(0)
self.motif_sat_loss = Motif_Satisfaction(
self.target_motif_path, mask=self.motif_mask, save_dir=self.results_dir
)
# Apply the background KL-loss only under the hallucination_mask == 1 region
self.hallucination_mask = 1 - self.motif_mask
self.hallucination_mask.fill_diagonal_(0)
def loss(self, sequence, structure_predictions, msa1hot, track=False):
"""Compute the loss function."""
# Top-prob:
TM_score_proxy = top_prob(structure_predictions['dist'], verbose=False)
TM_score_proxy = TM_score_proxy[0] # We're running with batch_size = 1
# Background KL-loss:
background_loss = self.bkg_loss(
structure_predictions, hallucination_mask=self.hallucination_mask
)
# aa composition loss
aa_samp = (
msa1hot[0, :, :20].sum(axis=0) / self.seq_L + self.eps
) # Get relative frequency for each AA
aa_samp = (
aa_samp / aa_samp.sum()
) # Normalize to turn into distributions (possibly redundant)
loss_aa = (
aa_samp
* torch.log(aa_samp / (self.aa_bkgr_distribution + self.eps) + self.eps)
).sum()
# Motif Loss:
if self.target_motif_path is not None:
motif_loss = self.motif_sat_loss(structure_predictions)
else:
motif_loss = 0
# total loss
loss_v = (
background_loss + self.aa_weight * loss_aa + self.motif_weight * motif_loss
)
metrics = {}
if track:
metrics["aa_weight"] = self.aa_weight
metrics["background_loss"] = background_loss
metrics["total_loss"] = loss_v
metrics["TM_score_proxy"] = TM_score_proxy
if self.target_motif_path is not None:
metrics["motif_loss"] = motif_loss
return loss_v, metrics
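# Illustrative sketch (not part of the original file): the amino-acid composition term above is a
# KL divergence sum_i p_i * log(p_i / q_i) between the sampled AA frequencies p (aa_samp) and the
# native background q (aa_bkgr_distribution). Plain-numpy version with a made-up 3-letter alphabet:
#   import numpy as np
#   p = np.array([0.5, 0.3, 0.2])          # frequencies in the current sequence
#   q = np.array([0.4, 0.4, 0.2])          # native background frequencies
#   loss_aa = np.sum(p * np.log(p / q))    # ~0.025; exactly 0 when p == q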
def metropolis(self, seq, seq_curr, E_curr, E):
"""Compute the Metropolis criterion."""
# Metropolis criterion
if E_curr < E: # Lower energy, replace!
seq = np.copy(seq_curr)
E = E_curr
self.n_accepted_mutations += 1
else: # Higher energy, maybe replace..
if torch.exp((E - E_curr) * self.beta) > np.random.uniform():
seq = np.copy(seq_curr)
E = E_curr
self.bad_accepts.append(1)
self.n_accepted_bad_mutations += 1
self.n_accepted_mutations += 1
else:
self.bad_accepts.append(0)
# Update the best sequence:
if E_curr < self.best_E:
self.best_E = E_curr
self.best_sequence = idx2aa(seq_curr[0])
self.best_step = self.step
return seq, E
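# Illustrative sketch (not part of the original file): numeric walk-through of the Metropolis rule
# above, with made-up energies and inverse temperature beta.
#   beta = 10.0
#   E, E_curr = -1.00, -0.95                  # proposed mutation is worse by 0.05
#   p_accept = np.exp((E - E_curr) * beta)    # exp(-0.5) ~ 0.61
# The worse sequence is still accepted with probability ~0.61; as beta grows over the run
# (annealing), such "bad accepts" become increasingly rare.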
def mutate(self, seq):
"""Return a mutated version of the sequence."""
seq_curr = np.copy(seq)
# Introduce a random mutation using the allowed aa_types:
idx = np.random.randint(self.seq_L)
seq_curr[0, idx] = np.random.choice(self.aa_valid)
if self.seq_constraint is not None: # Fix the constraint:
seq_curr = np.where(
self.seq_constraint_indices, self.seq_constraint, seq_curr
)
if
|
np.equal(seq_curr, seq)
|
numpy.equal
|
from __future__ import print_function, division, absolute_import
from ..basis import spin_basis_1d as _default_basis
from ..basis import isbasis as _isbasis
from ..tools.evolution import evolve
from ..tools.matvec import _matvec
from ..tools.matvec import _get_matvec_function
# from ._oputils import matvec as _matvec
# from ._oputils import _get_matvec_function
# from .exp_op_core import isexp_op,exp_op
from ._make_hamiltonian import make_static
from ._make_hamiltonian import make_dynamic
from ._make_hamiltonian import test_function
from ._make_hamiltonian import _check_almost_zero
from ._functions import function
# need linear algebra packages
import scipy
import scipy.sparse.linalg as _sla
import scipy.linalg as _la
import scipy.sparse as _sp
import numpy as _np
from operator import mul
import functools
from six import iteritems,itervalues,viewkeys
try:
from itertools import izip as zip
except ImportError:
pass
try:
from functools import reduce as reduce
except ImportError:
pass
import warnings
__all__ = ["commutator","anti_commutator","hamiltonian","ishamiltonian"]
def commutator(H1,H2):
""" Calculates the commutator of two Hamiltonians :math:`H_1` and :math:`H_2`.
.. math::
[H_1,H_2] = H_1 H_2 - H_2 H_1
Examples
--------
The following script shows how to compute the commutator of two `hamiltonian` objects.
.. literalinclude:: ../../doc_examples/commutator-example.py
:linenos:
:language: python
:lines: 7-
Parameters
-----------
H1 : obj
`numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
H2 : obj
`numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
Returns
--------
obj
Commutator: :math:`[H_1,H_2] = H_1 H_2 - H_2 H_1`
"""
if ishamiltonian(H1) or ishamiltonian(H2):
return H1*H2 - H2*H1
else:
return H1.dot(H2) - H2.dot(H1)
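# Illustrative sketch (not part of the library source): for plain numpy arrays the branch above
# reduces to H1.dot(H2) - H2.dot(H1). For example, the Pauli matrices satisfy [sx, sy] = 2i*sz:
#   >>> import numpy as np
#   >>> sx = np.array([[0, 1], [1, 0]], dtype=complex)
#   >>> sy = np.array([[0, -1j], [1j, 0]])
#   >>> commutator(sx, sy)
#   array([[0.+2.j, 0.+0.j],
#          [0.+0.j, 0.-2.j]])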
def anti_commutator(H1,H2):
""" Calculates the anticommutator of two Hamiltonians :math:`H_1` and :math:`H_2`.
.. math::
\\{H_1,H_2\\}_+ = H_1 H_2 + H_2 H_1
Examples
--------
The following script shows how to compute the anticommutator of two `hamiltonian` objects.
.. literalinclude:: ../../doc_examples/anti_commutator-example.py
:linenos:
:language: python
:lines: 7-
Parameters
-----------
H1 : obj
`numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
H2 : obj
`numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
Returns
--------
obj
Anticommutator: :math:`\\{H_1,H_2\\}_+ = H_1 H_2 + H_2 H_1`
"""
if ishamiltonian(H1) or ishamiltonian(H2):
return H1*H2 + H2*H1
else:
return H1.dot(H2) + H2.dot(H1)
class HamiltonianEfficiencyWarning(Warning):
pass
#global names:
supported_dtypes=tuple([_np.float32, _np.float64, _np.complex64, _np.complex128])
def _check_static(sub_list):
"""Checks format of static list. """
if (type(sub_list) in [list,tuple]) and (len(sub_list) == 2):
if type(sub_list[0]) is not str: raise TypeError('expecting string type for opstr')
if type(sub_list[1]) in [list,tuple]:
for sub_sub_list in sub_list[1]:
if (type(sub_sub_list) in [list,tuple]) and (len(sub_sub_list) > 0):
for element in sub_sub_list:
if not _np.isscalar(element): raise TypeError('expecting scalar elements of indx')
else: raise TypeError('expecting list for indx')
else: raise TypeError('expecting a list of one or more indx')
return True
else:
return False
def _check_dynamic(sub_list):
"""Checks format of dynamic list. """
if (type(sub_list) in [list,tuple]):
if (len(sub_list) == 4):
if type(sub_list[0]) is not str: raise TypeError('expecting string type for opstr')
if type(sub_list[1]) in [list,tuple]:
for sub_sub_list in sub_list[1]:
if (type(sub_sub_list) in [list,tuple]) and (len(sub_sub_list) > 0):
for element in sub_sub_list:
if not _np.isscalar(element): raise TypeError('expecting scalar elements of indx')
else: raise TypeError('expecting list for indx')
else: raise TypeError('expecting a list of one or more indx')
if not hasattr(sub_list[2],"__call__"): raise TypeError('expecting callable object for driving function')
if type(sub_list[3]) not in [list,tuple]: raise TypeError('expecting list for function arguments')
return True
elif (len(sub_list) == 3):
if not hasattr(sub_list[1],"__call__"): raise TypeError('expecting callable object for driving function')
if type(sub_list[2]) not in [list,tuple]: raise TypeError('expecting list for function arguments')
return False
elif (len(sub_list) == 2):
if not hasattr(sub_list[1],"__call__"): raise TypeError('expecting callable object for driving function')
return False
else:
raise TypeError('expecting list with object, driving function, and function arguments')
def _hamiltonian_dot(hamiltonian,time,v):
"""Used to create linear operator of a hamiltonian."""
return hamiltonian.dot(v,time=time,check=False)
class hamiltonian(object):
"""Constructs time-dependent (hermitian and nonhermitian) operators.
The `hamiltonian` class wraps most of the functionality of the QuSpin package. This object allows the user to construct
lattice Hamiltonians and operators, solve the time-dependent Schroedinger equation, do full/Lanczos
diagonalization, etc.
The user can create both static and time-dependent, hermitian and non-hermitian operators for any particle
type (boson, spin, fermion) specified by the basis constructor.
Notes
-----
One can instantiate the class either by passing a set of symmetries, or an instance of `basis`. Note that
instantiation with a `basis` will automatically ignore all symmetry inputs.
Examples
---------
Here is an example of how to employ a `basis` object to construct the periodically driven XXZ Hamiltonian
.. math::
H(t) = \\sum_{j=0}^{L-1} \\left( JS^z_{j+1}S^z_j + hS^z_j + g\\cos(\\Omega t)S^x_j \\right)
in the zero-momentum sector (`kblock=0`) of positive parity (`pblock=1`). We use periodic boundary conditions.
The code snippet below initiates the class, and is required to run the example codes for the function methods.
.. literalinclude:: ../../doc_examples/hamiltonian-example.py
:linenos:
:language: python
:lines: 7-
"""
def __init__(self,static_list,dynamic_list,N=None,basis=None,shape=None,dtype=_np.complex128,static_fmt=None,dynamic_fmt=None,copy=True,check_symm=True,check_herm=True,check_pcon=True,**basis_kwargs):
"""Intializes the `hamtilonian` object (any quantum operator).
Parameters
-----------
static_list : list
Contains list of objects to calculate the static part of a `hamiltonian` operator. The format goes like:
>>> static_list=[[opstr_1,[indx_11,...,indx_1m]],matrix_2,...]
dynamic_list : list
Contains list of objects to calculate the dynamic (time-dependent) part of a `hamiltonian` operator.
The format goes like:
>>> dynamic_list=[[opstr_1,[indx_11,...,indx_1n],fun_1,fun_1_args],[matrix_2,fun_2,fun_2_args],...]
* `fun`: function object which multiplies the matrix or operator given in the same list.
* `fun_args`: tuple of the extra arguments which go into the function to evaluate it like:
>>> f_val = fun(t,*fun_args)
If the operator is time-INdependent, one must pass an empty list: `dynamic_list = []`.
N : int, optional
Number of lattice sites for the `hamiltonian` object.
dtype : numpy.datatype, optional
Data type (e.g. numpy.float64) to construct the operator with.
static_fmt : str {"csr","csc","dia","dense"}, optional
Specifies format of static part of Hamiltonian.
dynamic_fmt: str {"csr","csc","dia","dense"} or dict, keys: (func,func_args), values: str {"csr","csc","dia","dense"}
Specifies the format of the dynamic parts of the hamiltonian. To specify a particular dynamic part of the hamiltonian use a tuple (func,func_args) which matches a function+argument pair
used in the construction of the hamiltonian as a key in the dictionary.
shape : tuple, optional
Shape to create the `hamiltonian` object with. Default is `shape = None`.
copy: bool, optional
If set to `True`, this option creates a copy of the input array.
check_symm : bool, optional
Enable/Disable symmetry check on `static_list` and `dynamic_list`.
check_herm : bool, optional
Enable/Disable hermiticity check on `static_list` and `dynamic_list`.
check_pcon : bool, optional
Enable/Disable particle conservation check on `static_list` and `dynamic_list`.
basis_kwargs : dict
Optional additional arguments to pass to the `basis` class, if not already using a `basis` object
to create the operator.
"""
self._is_dense = False
self._ndim = 2
self._basis = basis
if not (dtype in supported_dtypes):
raise TypeError('hamiltonian does not support type: '+str(dtype))
else:
self._dtype=dtype
if type(static_list) in [list,tuple]:
static_opstr_list=[]
static_other_list=[]
for ele in static_list:
if _check_static(ele):
static_opstr_list.append(ele)
else:
static_other_list.append(ele)
else:
raise TypeError('expecting list/tuple of lists/tuples containing opstr and list of indx')
if type(dynamic_list) in [list,tuple]:
dynamic_opstr_list=[]
dynamic_other_list=[]
for ele in dynamic_list:
if _check_dynamic(ele):
dynamic_opstr_list.append(ele)
else:
dynamic_other_list.append(ele)
else:
raise TypeError('expecting list/tuple of lists/tuples containing opstr and list of indx, functions, and function args')
# need for check_symm
self._static_opstr_list = static_opstr_list
self._dynamic_opstr_list = dynamic_opstr_list
# if any operator strings present must get basis.
if static_opstr_list or dynamic_opstr_list:
if self._basis is not None:
if len(basis_kwargs) > 0:
wrong_keys = set(basis_kwargs.keys())
temp = ", ".join(["{}" for key in wrong_keys])
raise ValueError(("unexpected optional argument(s): "+temp).format(*wrong_keys))
# if not
if self._basis is None:
if N is None: # if L is missing
raise Exception('if opstrs in use, argument N needed for basis class')
if type(N) is not int: # if L is not int
raise TypeError('argument N must be integer')
self._basis=_default_basis(N,**basis_kwargs)
elif not _isbasis(self._basis):
raise TypeError('expecting instance of basis class for argument: basis')
if check_herm:
self._basis.check_hermitian(static_opstr_list, dynamic_opstr_list)
if check_symm:
self._basis.check_symm(static_opstr_list,dynamic_opstr_list)
if check_pcon:
self._basis.check_pcon(static_opstr_list,dynamic_opstr_list)
self._static=make_static(self._basis,static_opstr_list,dtype)
self._dynamic=make_dynamic(self._basis,dynamic_opstr_list,dtype)
self._shape = self._static.shape
if static_other_list or dynamic_other_list:
if not hasattr(self,"_shape"):
found = False
if shape is None: # if no shape argument found, search to see if the inputs have shapes.
for i,O in enumerate(static_other_list):
try: # take the first shape found
shape = O.shape
found = True
break
except AttributeError:
continue
if not found:
for tup in dynamic_other_list:
if len(tup) == 2:
O,_ = tup
else:
O,_,_ = tup
try:
shape = O.shape
found = True
break
except AttributeError:
continue
else:
found = True
if not found:
raise ValueError('missing argument shape')
if shape[0] != shape[1]:
raise ValueError('hamiltonian must be square matrix')
self._shape=shape
self._static = _sp.dia_matrix(self._shape,dtype=self._dtype)
self._dynamic = {}
for O in static_other_list:
if _sp.issparse(O):
self._mat_checks(O)
if self._static is None:
self._static = O.astype(self._dtype,copy=copy)
continue
try:
self._static += O.astype(self._dtype)
except NotImplementedError:
self._static = self._static + O.astype(self._dtype)
else:
O = _np.asarray(O,dtype=self._dtype)
self._mat_checks(O)
self._is_dense=True
if self._static is None:
self._static = O.astype(self._dtype,copy=copy)
continue
try:
self._static += O
except NotImplementedError:
self._static = self._static + O.astype(self._dtype)
if not _sp.issparse(self._static):
self._static = _np.asarray(self._static)
try:
self._static.sum_duplicates()
self._static.eliminate_zeros()
except: pass
for tup in dynamic_other_list:
if len(tup) == 2:
O,func = tup
else:
O,f,f_args = tup
test_function(f,f_args,self._dtype)
func = function(f,tuple(f_args))
if _sp.issparse(O):
self._mat_checks(O)
O = O.astype(self._dtype,copy=copy)
else:
O = _np.array(O,copy=copy,dtype=self._dtype)
self._mat_checks(O)
self._is_dense=True
if func in self._dynamic:
try:
self._dynamic[func] += O
except:
self._dynamic[func] = self._dynamic[func] + O
else:
self._dynamic[func] = O
else:
if not hasattr(self,"_shape"):
if shape is None:
# if not
if self._basis is None:
if N is None: # if N is missing
raise Exception("argument N or shape needed to create empty hamiltonian")
if type(N) is not int: # if L is not int
raise TypeError('argument N must be integer')
self._basis=_default_basis(N,**basis_kwargs)
elif not _isbasis(self._basis):
raise TypeError('expecting instance of basis class for argument: basis')
shape = (self._basis.Ns,self._basis.Ns)
else:
self._basis=basis_kwargs.get('basis')
if not basis is None:
raise ValueError("empty hamiltonian only accepts basis or shape, not both")
if len(shape) != 2:
raise ValueError('expecting ndim = 2')
if shape[0] != shape[1]:
raise ValueError('hamiltonian must be square matrix')
self._shape=shape
self._static = _sp.dia_matrix(self._shape,dtype=self._dtype)
self._dynamic = {}
self.update_matrix_formats(static_fmt,dynamic_fmt)
self._Ns = self._shape[0]
@property
def basis(self):
""":obj:`basis`: basis used to build the `hamiltonian` object.
Defaults to `None` if operator has no basis (i.e. was created externally and passed as a precalculated array).
"""
if self._basis is not None:
return self._basis
else:
raise AttributeError("object has no attribute 'basis'")
@property
def ndim(self):
"""int: number of dimensions, always equal to 2. """
return self._ndim
@property
def Ns(self):
"""int: number of states in the (symmetry-reduced) Hilbert space spanned by `basis`."""
return self._Ns
@property
def get_shape(self):
"""tuple: shape of the `hamiltonian` object, always equal to `(Ns,Ns)`."""
return self._shape
@property
def is_dense(self):
"""bool: checks sparsity of operator matrix.
`True` if the operator contains a dense matrix as a component of either
the static or dynamic lists.
"""
return self._is_dense
@property
def dtype(self):
"""type: data type of `hamiltonian` object."""
return _np.dtype(self._dtype).name
@property
def static(self):
"""scipy.sparse.csr: static part of the operator."""
return self._static
@property
def dynamic(self):
"""dict: contains dynamic parts of the operator as `{func: Hdyn}`.
Here `func` is the memory address of the time-dependent function which can be called as `func(time)`.
The function arguments are hard-coded, and are not passed. `Hdyn` is the sparse matrix to which
the drive couples.
"""
return self._dynamic
@property
def T(self):
""":obj:`hamiltonian`: transposes the operator matrix: :math:`H_{ij}\\mapsto H_{ji}`."""
return self.transpose()
@property
def H(self):
""":obj:`hamiltonian`: transposes and conjugates the operator matrix: :math:`H_{ij}\\mapsto H_{ji}^*`."""
return self.getH()
@property
def nbytes(self):
nbytes = 0
if _sp.issparse(self._static):
nbytes += self._static.data.nbytes
nbytes += self._static.indices.nbytes
nbytes += self._static.indptr.nbytes
else:
nbytes += self._static.nbytes
for Hd in itervalues(self._dynamic):
if _sp.issparse(Hd):
nbytes += Hd.data.nbytes
nbytes += Hd.indices.nbytes
nbytes += Hd.indptr.nbytes
else:
nbytes += Hd.nbytes
return nbytes
def check_is_dense(self):
""" updates attribute `_.is_dense`."""
is_sparse = _sp.issparse(self._static)
for Hd in itervalues(self._dynamic):
is_sparse *= _sp.issparse(Hd)
self._is_dense = not is_sparse
def _get_matvecs(self):
self._static_matvec = _get_matvec_function(self._static)
self._dynamic_matvec = {}
for func,Hd in iteritems(self._dynamic):
self._dynamic_matvec[func] = _get_matvec_function(Hd)
### state manipulation/observable routines
def dot(self,V,time=0,check=True,out=None,overwrite_out=True,a=1.0):
"""Matrix-vector multiplication of `hamiltonian` operator at time `time`, with state `V`.
.. math::
aH(t=\\texttt{time})|V\\rangle
Notes
-----
* this function does the matrix multiplication with the state(s) and Hamiltonian as is, see Example 17 (Lindblad dynamics / Optical Bloch Equations)
* for right-multiplication of quantum operators, see function `rdot()`.
Parameters
-----------
V : {numpy.ndarray, scipy.spmatrix}
Array containing the quantum state to multiply the `hamiltonian` operator with.
time : obj, optional
Can be either one of the following:
* float: time to evaluate the time-dependent part of the operator at (if operator has time dependence).
Default is `time = 0`.
* (N,) array_like: if `V.shape[-1] == N`, the `hamiltonian` operator is evaluated at the i-th time
and dotted into `V[...,i]` to get the i-th slice of the output array. Here V must be either
2- or 3-d array, where 2-d would be for pure states and 3-d would be for mixed states.
check : bool, optional
Whether or not to do checks for shape compatibility.
out : array_like, optional
specify the output array for the result. This is not supported if `V` is a sparse matrix or if `times` is an array.
overwrite_out : bool, optional
flag used to toggle between two different ways to treat `out`. If set to `True` all values in `out` will be overwritten with the result.
If `False` the result of the dot product will be added to the values of `out`.
a : scalar, optional
scalar to multiply the final product with: :math:`B = aHV`.
Returns
--------
numpy.ndarray
Vector corresponding to the `hamiltonian` operator applied on the state `V`.
Examples
---------
>>> B = H.dot(A,time=0,check=True)
corresponds to :math:`B = HA`.
"""
from .exp_op_core import isexp_op
if ishamiltonian(V):
return a*(self * V)
elif isexp_op(V):
raise ValueError("This is an ambiguous operation. Use the .rdot() method of the `exp_op` class instead.")
times = _np.array(time)
if check:
try:
shape = V.shape
except AttributeError:
V =_np.asanyarray(V)
shape = V.shape
if shape[0] != self._shape[1]:
raise ValueError("matrix dimension mismatch with shapes: {0} and {1}.".format(V.shape,self._shape))
if V.ndim > 3:
raise ValueError("Expecting V.ndim < 4.")
result_dtype = _np.result_type(V.dtype,self._dtype)
if result_dtype not in supported_dtypes:
raise TypeError("resulting dtype is not supported.")
if times.ndim > 0:
if times.ndim > 1:
raise ValueError("Expecting time to be one dimensional array-like.")
if V.shape[-1] != times.shape[0]:
raise ValueError("For non-scalar times V.shape[-1] must be equal to len(time).")
if _sp.issparse(V):
V = V.tocsc()
return _sp.vstack([a*self.dot(V.getcol(i),time=t,check=check) for i,t in enumerate(time)])
else:
if V.ndim == 3 and V.shape[0] != V.shape[1]:
raise ValueError("Density matrices must be square!")
# allocate C-contiguous array to output results in.
out = _np.zeros(V.shape[-1:]+V.shape[:-1],dtype=result_dtype)
for i,t in enumerate(time):
v = _np.ascontiguousarray(V[...,i],dtype=result_dtype)
self._static_matvec(self._static,v,overwrite_out=True,out=out[i,...],a=a)
for func,Hd in iteritems(self._dynamic):
self._dynamic_matvec[func](Hd,v,overwrite_out=False,a=a*func(t),out=out[i,...])
# transpose, leave non-contiguous results which can be handled by numpy.
if out.ndim == 2:
out = out.transpose()
else:
out = out.transpose((1,2,0))
return out
else:
if isinstance(V,_np.ndarray):
V = V.astype(result_dtype,copy=False,order="C")
if out is None:
out = self._static_matvec(self._static,V,a=a)
else:
try:
if out.dtype != result_dtype:
raise TypeError("'out' must be array with correct dtype and dimensions for output array.")
if out.shape != V.shape:
raise ValueError("'out' must be array with correct dtype and dimensions for output array.")
if not out.flags["B"]:
raise ValueError("'out' must be array with correct dtype and dimensions for output array.")
except AttributeError:
raise TypeError("'out' must be array with correct dtype and dimensions for output array.")
self._static_matvec(self._static,V,out=out,overwrite_out=overwrite_out,a=a)
for func,Hd in iteritems(self._dynamic):
self._dynamic_matvec[func](Hd,V,overwrite_out=False,a=a*func(time),out=out)
elif _sp.issparse(V):
if out is not None:
raise TypeError("'out' option does not apply for sparse inputs.")
out = self._static * V
for func,Hd in iteritems(self._dynamic):
out = out + func(time)*(Hd.dot(V))
out = a*out
else:
# should we raise an error here?
pass
return out
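# Illustrative sketch (not part of the library source): the array-valued `time` branch above
# evaluates the operator at several times in one call. Assuming `H` is a `hamiltonian` with Ns
# states and `psi` is a single pure state of shape (Ns,):
#   >>> times = np.linspace(0.0, 1.0, 5)
#   >>> V = np.repeat(psi[:, np.newaxis], len(times), axis=1)   # shape (Ns, 5)
#   >>> B = H.dot(V, time=times)                                # B[:, i] = H(times[i]) @ V[:, i]
# For scalar `time` the call is simply B = H.dot(psi, time=0.3).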
def rdot(self,V,time=0,check=True,out=None,overwrite_out=True,a=1.0):
"""Vector-Matrix multiplication of `hamiltonian` operator at time `time`, with state `V`.
.. math::
a\\langle V|H(t=\\texttt{time})
Notes
-----
* this function does the matrix multiplication with the state(s) and Hamiltonian as is, see Example 17 (Lindblad dynamics / Optical Bloch Equations).
Parameters
-----------
V : numpy.ndarray
Vector (quantum state) to multiply the `hamiltonian` operator with on the left.
time : obj, optional
Can be either one of the following:
* float: time to evaluate the time-dependent part of the operator at (if existent).
Default is `time = 0`.
* (N,) array_like: if `V.shape[-1] == N`, the `hamiltonian` operator is evaluated at the i-th time
and the matrix multiplication on the right is calculated with respect to `V[...,i]`. Here V must be either
2- or 3-d array, where 2-d would be for pure states and 3-d would be for mixed states.
check : bool, optional
Whether or not to do checks for shape compatibility.
out : array_like, optional
specify the output array for the result. This is not supported if `V` is a sparse matrix or if `times` is an array.
overwrite_out : bool, optional
flag used to toggle between two different ways to treat `out`. If set to `True` all values in `out` will be overwritten with the result.
If `False` the result of the dot product will be added to the values of `out`.
a : scalar, optional
scalar to multiply the final product with: :math:`B = aVH`.
Returns
--------
numpy.ndarray
Vector corresponding to the `hamiltonian` operator applied on the state `V`.
Examples
---------
>>> B = H.rdot(A,time=0,check=True)
corresponds to :math:`B = AH`.
"""
times = _np.array(time)
try:
ndim = V.ndim
except AttributeError:
V = _np.asanyarray(V)
ndim = V.ndim
if ndim not in [1,2,3]:
raise ValueError("expecting V.ndim < 4.")
out_T = out.T if out is not None else None  # guard: `out` defaults to None and has no .T
if ndim == 1:
return self.transpose().dot(V,time=times,check=check,out=out_T,overwrite_out=overwrite_out,a=a)
elif ndim == 2:
if _np.array(times).ndim>0:
return self.transpose().dot(V,time=times,check=check,out=out_T,overwrite_out=overwrite_out,a=a)
else:
return self.transpose().dot(V.transpose(),time=times,check=check,out=out_T,overwrite_out=overwrite_out,a=a).transpose()
else:
V_transpose = V.transpose((1,0,2))
return self.transpose().dot(V_transpose,time=times,check=check,out=out_T,overwrite_out=overwrite_out,a=a).transpose((1,0,2))
def quant_fluct(self,V,time=0,check=True,enforce_pure=False):
"""Calculates the quantum fluctuations (variance) of `hamiltonian` operator at time `time`, in state `V`.
.. math::
\\langle V|H^2(t=\\texttt{time})|V\\rangle - \\langle V|H(t=\\texttt{time})|V\\rangle^2
Parameters
-----------
V : numpy.ndarray
Depending on the shape, can be a single state or a collection of pure or mixed states
[see `enforce_pure`].
time : obj, optional
Can be either one of the following:
* float: time to evaluate the time-dependent part of the operator at (if existent).
Default is `time = 0`.
* (N,) array_like: if `V.shape[-1] == N`, the `hamiltonian` operator is evaluated at the i-th time
and the fluctuations are calculated with respect to `V[...,i]`. Here V must be either
2- or 3-d array, where 2-d would be for pure states and 3-d would be for mixed states.
enforce_pure : bool, optional
Flag to enforce a pure expectation value when `V` is a square matrix containing multiple pure states
in the columns.
check : bool, optional
Whether or not to do checks for shape compatibility.
Returns
--------
float
Quantum fluctuations of `hamiltonian` operator in state `V`.
Examples
---------
>>> H_fluct = H.quant_fluct(V,time=0,check=True)
corresponds to :math:`\\Delta H = \\sqrt{ \\langle V|H^2(t=\\texttt{time})|V\\rangle - \\langle V|H(t=\\texttt{time})|V\\rangle^2 }`.
"""
from .exp_op_core import isexp_op
if self.Ns <= 0:
return _np.asarray([])
if ishamiltonian(V):
raise TypeError("Can't take expectation value of hamiltonian")
if isexp_op(V):
raise TypeError("Can't take expectation value of exp_op")
# fluctuations = expctH2 - expctH^2
kwargs = dict(time=time,enforce_pure=enforce_pure)
V_dot=self.dot(V,time=time,check=check)
expt_value_sq = self._expt_value_core(V,V_dot,**kwargs)**2
if len(V.shape) > 1 and V.shape[0] != V.shape[1] or enforce_pure:
sq_expt_value = self._expt_value_core(V_dot,V_dot,**kwargs)
else:
V_dot=self.dot(V_dot,time=time,check=check)
sq_expt_value = self._expt_value_core(V,V_dot,**kwargs)
return sq_expt_value - expt_value_sq
def expt_value(self,V,time=0,check=True,enforce_pure=False):
"""Calculates expectation value of `hamiltonian` operator at time `time`, in state `V`.
.. math::
\\langle V|H(t=\\texttt{time})|V\\rangle
Parameters
-----------
V : numpy.ndarray
Depending on the shape, can be a single state or a collection of pure or mixed states
[see `enforce_pure` argument of `basis.ent_entropy`].
time : obj, optional
Can be either one of the following:
* float: time to evaluate the time-dependent part of the operator at (if existent).
Default is `time = 0`.
* (N,) array_like: if `V.shape[-1] == N`, the `hamiltonian` operator is evaluated at the i-th time
and the expectation value is calculated with respect to `V[...,i]`. Here V must be either
2- or 3-d array, where 2-d would be for pure states and 3-d would be for mixed states.
enforce_pure : bool, optional
Flag to enforce a pure expectation value when `V` is a square matrix containing multiple pure states
in the columns.
check : bool, optional
Whether or not to do checks for shape compatibility.
Returns
--------
float
Expectation value of `hamiltonian` operator in state `V`.
Examples
---------
>>> H_expt = H.expt_value(V,time=0,check=True)
corresponds to :math:`H_{expt} = \\langle V|H(t=0)|V\\rangle`.
"""
from .exp_op_core import isexp_op
if self.Ns <= 0:
return _np.asarray([])
if ishamiltonian(V):
raise TypeError("Can't take expectation value of hamiltonian")
if isexp_op(V):
raise TypeError("Can't take expectation value of exp_op")
V_dot = self.dot(V,time=time,check=check)
return self._expt_value_core(V,V_dot,time=time,enforce_pure=enforce_pure)
def _expt_value_core(self,V_left,V_right,time=0,enforce_pure=False):
if _np.array(time).ndim > 0: # multiple time point expectation values
if _sp.issparse(V_right): # multiple pure states multiple time points
return (V_left.H.dot(V_right)).diagonal()
else:
V_left = _np.asarray(V_left)
if V_left.ndim == 2: # multiple pure states multiple time points
return _np.einsum("ij,ij->j",V_left.conj(),V_right)
elif V_left.ndim == 3: # multiple mixed states multiple time points
return _np.einsum("iij->j",V_right)
else:
if _sp.issparse(V_right):
if V_left.shape[0] != V_left.shape[1] or enforce_pure: # pure states
return (V_left.H.dot(V_right)).toarray().item() # np.asscalar was removed from numpy; .item() is the equivalent
else: # density matrix
return V_right.diagonal().sum()
else:
V_right = _np.asarray(V_right).squeeze()
if V_right.ndim == 1: # pure state
return _np.vdot(V_left,V_right)
elif V_left.shape[0] != V_left.shape[1] or enforce_pure: # multiple pure states
return _np.einsum("ij,ij->j",V_left.conj(),V_right)
else: # density matrix
return V_right.trace()
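# Illustrative sketch (not part of the library source): the einsum "ij,ij->j" used above returns
# one expectation value per column, i.e. <V_j| (H V)_j > for a batch of pure states stored column-wise:
#   >>> V_left  = np.random.rand(4, 3) + 0j        # 3 pure states of dimension 4
#   >>> V_right = np.random.rand(4, 3) + 0j        # e.g. the result of H.dot(V_left, time=t)
#   >>> np.einsum("ij,ij->j", V_left.conj(), V_right).shape
#   (3,)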
def matrix_ele(self,Vl,Vr,time=0,diagonal=False,check=True):
"""Calculates matrix element of `hamiltonian` operator at time `time` in states `Vl` and `Vr`.
.. math::
\\langle V_l|H(t=\\texttt{time})|V_r\\rangle
Notes
-----
Taking the conjugate or transpose of the state `Vl` is done automatically.
Parameters
-----------
Vl : {numpy.ndarray, scipy.spmatrix}
Vector(s)/state(s) to multiply with on the left side.
Vr : {numpy.ndarray, scipy.spmatrix}
Vector(s)/state(s) to multiply with on the right side.
time : obj, optional
Can be either one of the following:
* float: time to evaluate the time-dependent part of the operator at (if existent).
Default is `time = 0`.
* (N,) array_like: if `V.shape[1] == N`, the `hamiltonian` operator is evaluated at the i-th time
and the fluctuations are calculated with respect to `V[:,i]`. Here V must be a 2-d array
containing pure states in the columns of the array.
diagonal : bool, optional
When set to `True`, returns only the diagonal part of the expectation value. Default is `diagonal = False`.
check : bool, optional
Whether or not to do checks for shape compatibility.
Returns
--------
float
Matrix element of `hamiltonian` operator between the states `Vl` and `Vr`.
Examples
---------
>>> H_lr = H.matrix_ele(Vl,Vr,time=0,diagonal=False,check=True)
corresponds to :math:`H_{lr} = \\langle V_l|H(t=0)|V_r\\rangle`.
"""
Vr=self.dot(Vr,time=time,check=check)
if check:
try:
shape = Vl.shape
except AttributeError:
Vl = _np.asarray(Vl)
shape = Vl.shape
if Vl.shape[0] != self._shape[1]:
raise ValueError("matrix dimension mismatch with shapes: {0} and {1}.".format(Vl.shape,self._shape))
if diagonal:
if Vl.shape[1] != Vr.shape[1]:
raise ValueError("number of vectors must be equal for diagonal=True.")
if Vr.ndim > 2:
raise ValueError('Expecting Vr to have ndim < 3')
if _sp.issparse(Vl):
if diagonal:
return Vl.H.dot(Vr).diagonal()
else:
return Vl.H.dot(Vr)
else:
if diagonal:
return _np.einsum("ij,ij->j",Vl.conj(),Vr)
else:
return Vl.T.conj().dot(Vr)
### transformation routines
def project_to(self,proj):
"""Projects/Transforms `hamiltonian` operator with projector/operator `proj`.
Let us call the projector/transformation :math:`V`. Then, the function computes
.. math::
V^\\dagger H V
Notes
-----
The `proj` argument can be a square array, in which case the function just transforms the
`hamiltonian` operator :math:`H`. Or it can be a projector which then projects :math:`H` onto
a smaller Hilbert space.
Projectors onto bases with symmetries other than `H.basis` can be conveniently obtained using the
`basis.get_proj()` method of the basis constructor class.
Parameters
-----------
proj : obj
Can be either one of the following:
* `hamiltonian` object
* `exp_op` object
* `numpy.ndarray`
* `scipy.sparse` array
The shape of `proj` need not be square, but has to comply with the matrix multiplication requirements
in the definition above.
Returns
--------
obj
Projected/Transformed `hamiltonian` operator. The output object type depends on the object
type of `proj`.
Examples
---------
>>> H_new = H.project_to(V)
corresponds to :math:`V^\\dagger H V`.
"""
from .exp_op_core import isexp_op
if ishamiltonian(proj):
new = self._rmul_hamiltonian(proj.getH())
return new._imul_hamiltonian(proj)
elif isexp_op(proj):
return proj.sandwich(self)
elif _sp.issparse(proj):
if self._shape[1] != proj.shape[0]:
raise ValueError("matrix dimension mismatch with shapes: {0} and {1}.".format(proj.shape,self._shape))
new = self._rmul_sparse(proj.getH())
new._shape = (proj.shape[1],proj.shape[1])
return new._imul_sparse(proj)
elif _np.isscalar(proj):
raise NotImplementedError
elif proj.__class__ == _np.ndarray:
if self._shape[1] != proj.shape[0]:
raise ValueError("matrix dimension mismatch with shapes: {0} and {1}.".format(proj.shape,self._shape))
new = self._rmul_dense(proj.T.conj())
new._shape = (proj.shape[1],proj.shape[1])
return new._imul_dense(proj)
elif proj.__class__ == _np.matrix:
if self._shape[1] != proj.shape[0]:
raise ValueError("matrix dimension mismatch with shapes: {0} and {1}.".format(proj.shape,self._shape))
new = self._rmul_dense(proj.H)
new._shape = (proj.shape[1],proj.shape[1])
return new._imul_dense(proj)
else:
proj = _np.asanyarray(proj)
if self._shape[1] != proj.shape[0]:
raise ValueError("matrix dimension mismatch with shapes: {0} and {1}.".format(proj.shape,self._shape))
new = self._rmul_dense(proj.T.conj())
new._shape = (proj.shape[1],proj.shape[1])
return new._imul_dense(proj)
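# Illustrative sketch (not part of the library source): a typical use of project_to with a
# projector obtained from a symmetry-reduced basis, assuming `basis_sym` is such a basis and
# `H` lives in the full (symmetry-free) Hilbert space:
#   >>> P = basis_sym.get_proj(np.complex128)   # projector from the reduced to the full basis
#   >>> H_sym = H.project_to(P)                 # V^dagger H V restricted to the symmetry sector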
def rotate_by(self, other, generator=False,**exp_op_kwargs):
"""Rotates/Transforms `hamiltonian` object by an operator `other`.
Let us denote the transformation by :math:`V`. With `generator=False`, `other` corresponds to the
transformation :math:`V`, and this function implements
.. math::
V^\\dagger H V
while for `generator=True`, `other` corresponds to a generator :math:`K`, and the function implements
.. math::
\\exp(a^*K^\\dagger) H \\exp(a K)
Notes
-----
If `generator = False`, this function calls `project_to`.
Parameters
-----------
other : obj
Can be either one of the following:
* `hamiltonian` object
* `exp_op` object
* `numpy.ndarray`
* `scipy.sparse` array
generator : bool, optional
If set to `True`, this flag renders `other` a generator, and implements the calculation of
.. math::
\\exp(a^*K^\\dagger) H \\exp(a K)
If set to `False`, the function implements
.. math::
V^\\dagger H V
Default is `generator = False`.
All other optional arguments are the same as for the `exp_op` class.
Returns
--------
obj
Transformed `hamiltonian` operator. The output object type depends on the object type of `other`.
Examples
---------
>>> H_new = H.rotate_by(V,generator=False)
corresponds to :math:`V^\\dagger H V`.
>>> H_new = H.rotate_by(K,generator=True,**exp_op_kwargs)
corresponds to :math:`\\exp(K^\\dagger) H \\exp(K)`.
"""
from .exp_op_core import exp_op
if generator:
return exp_op(other,**exp_op_kwargs).sandwich(self)
else:
return self.project_to(other)
### Diagonalisation routines
def eigsh(self,time=0.0,**eigsh_args):
"""Computes SOME eigenvalues and eigenvectors of hermitian `hamiltonian` operator using SPARSE hermitian methods.
This function method solves for eigenvalues and eigenvectors, but can only solve for a few of them accurately.
It calls `scipy.sparse.linalg.eigsh <https://docs.scipy.org/doc/scipy/reference/generated/generated/scipy.sparse.linalg.eigsh.html>`_, which is a wrapper for ARPACK.
Notes
-----
Assumes the operator is hermitian! If the flag `check_hermiticity = False` is used, we advise the user
to reassure themselves of the hermiticity properties before use.
Parameters
-----------
time : float
Time to evaluate the `hamiltonian` operator at (if time dependent). Default is `time = 0.0`.
eigsh_args :
For all additional arguments see documentation of `scipy.sparse.linalg.eigsh <https://docs.scipy.org/doc/scipy/reference/generated/generated/scipy.sparse.linalg.eigsh.html>`_.
Returns
--------
tuple
Tuple containing the `(eigenvalues, eigenvectors)` of the `hamiltonian` operator.
Examples
---------
>>> eigenvalues,eigenvectors = H.eigsh(time=time,**eigsh_args)
"""
if self.Ns <= 0:
try:
return_eigenvectors = eigsh_args["return_eigenvectors"]
except KeyError:
return_eigenvectors = True
if return_eigenvectors:
return _np.array([],dtype=self._dtype).real, _np.array([[]],dtype=self._dtype)
else:
return _np.array([],dtype=self._dtype).real
return _sla.eigsh(self.tocsr(time=time),**eigsh_args)
def eigh(self,time=0,**eigh_args):
"""Computes COMPLETE eigensystem of hermitian `hamiltonian` operator using DENSE hermitian methods.
This function method solves for all eigenvalues and eigenvectors. It calls
`numpy.linalg.eigh <https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.eigh.html>`_,
and uses wrapped LAPACK functions which are contained in the module py_lapack.
Notes
-----
Assumes the operator is hermitian! If the flag `check_hermiticity = False` is used, we advise the user
to reassure themselves of the hermiticity properties before use.
Parameters
-----------
time : float
Time to evaluate the `hamiltonian` operator at (if time dependent). Default is `time = 0.0`.
eigh_args :
For all additional arguments see documentation of `numpy.linalg.eigh <https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.eigh.html>`_.
Returns
--------
tuple
Tuple containing the `(eigenvalues, eigenvectors)` of the `hamiltonian` operator.
Examples
---------
>>> eigenvalues,eigenvectors = H.eigh(time=time,**eigh_args)
"""
if self.Ns <= 0:
return _np.array([],dtype=self._dtype).real,_np.array([[]],dtype=self._dtype)
eigh_args["overwrite_a"] = True
# fill dense array with hamiltonian
H_dense = self.todense(time=time)
# calculate eigh
return _la.eigh(H_dense,**eigh_args)
def eigvalsh(self,time=0,**eigvalsh_args):
"""Computes ALL eigenvalues of hermitian `hamiltonian` operator using DENSE hermitian methods.
This function method solves for all eigenvalues. It calls
`numpy.linalg.eigvalsh <https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.eigvalsh.html#numpy.linalg.eigvalsh>`_,
and uses wrapped LAPACK functions which are contained in the module py_lapack.
Notes
-----
Assumes the operator is hermitian! If the flag `check_hermiticity = False` is used, we advise the user
to reassure themselves of the hermiticity properties before use.
Parameters
-----------
time : float
Time to evaluate the `hamiltonian` operator at (if time dependent). Default is `time = 0.0`.
eigvalsh_args :
For all additional arguments see documentation of `numpy.linalg.eigvalsh <https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.eigvalsh.html#numpy.linalg.eigvalsh>`_.
Returns
--------
numpy.ndarray
Eigenvalues of the `hamiltonian` operator.
Examples
---------
>>> eigenvalues = H.eigvalsh(time=time,**eigvalsh_args)
"""
if self.Ns <= 0:
return _np.array([],dtype=self._dtype).real
H_dense = self.todense(time=time)
eigvalsh_args["overwrite_a"] = True
return _la.eigvalsh(H_dense,**eigvalsh_args)
### Schroedinger evolution routines
def __LO(self,time,rho,rho_out):
"""
args:
rho, flattened density matrix to multiply with
time, the time to evaluate the drive at.
description:
This function is what gets passed into the ode solver. This is the real time Liouville operator.
"""
rho = rho.reshape((self.Ns,self.Ns))
self._static_matvec(self._static ,rho ,out=rho_out ,a=+1.0,overwrite_out=True) # rho_out = self._static.dot(rho)
self._static_matvec(self._static.T,rho.T,out=rho_out.T,a=-1.0,overwrite_out=False) # rho_out -= (self._static.T.dot(rho.T)).T
for func,Hd in iteritems(self._dynamic):
ft = func(time)
self._dynamic_matvec[func](Hd ,rho ,out=rho_out ,a=+ft,overwrite_out=False) # rho_out += ft*Hd.dot(rho)
self._dynamic_matvec[func](Hd.T,rho.T,out=rho_out.T,a=-ft,overwrite_out=False) # rho_out -= ft*(Hd.T.dot(rho.T)).T
rho_out *= -1j
return rho_out.ravel()
def __ISO(self,time,V,V_out):
"""
args:
V, the vector to multiply with
V_out, the vector to use with output.
time, the time to evaluate the drive at.
description:
This function is what gets passed into the ode solver. This is the Imaginary time Schrodinger operator -H(t)*|V >
"""
V = V.reshape(V_out.shape)
self._static_matvec(self._static,V,out=V_out,overwrite_out=True)
for func,Hd in iteritems(self._dynamic):
self._dynamic_matvec[func](Hd,V,a=func(time),out=V_out,overwrite_out=False)
V_out *= -1.0
return V_out.ravel()
def __SO_real(self,time,V,V_out):
"""
args:
V, the vector to multiply with
V_out, the vector to use with output.
time, the time to evaluate the drive at.
description:
This function is what gets passed into the ode solver. This is the real time Schrodinger operator -i*H(t)*|V >
This function is designed for real hamiltonians and increases the speed of integration compared to __SO
writing |V > = u + i*v with u,v real: u_dot + i*v_dot = -i*H*(u + i*v), i.e.
u_dot = Hv
v_dot = -Hu
"""
V = V.reshape(V_out.shape)
self._static_matvec(self._static,V[self._Ns:],out=V_out[:self._Ns],a=+1,overwrite_out=True) # V_dot[:self._Ns] = self._static.dot(V[self._Ns:])
self._static_matvec(self._static,V[:self._Ns],out=V_out[self._Ns:],a=-1,overwrite_out=True) # V_dot[self._Ns:] = -self._static.dot(V[:self._Ns])
for func,Hd in iteritems(self._dynamic):
ft=func(time)
self._dynamic_matvec[func](Hd,V[self._Ns:],out=V_out[:self._Ns],a=+ft,overwrite_out=False) # V_dot[:self._Ns] += func(time)*Hd.dot(V[self._Ns:])
self._dynamic_matvec[func](Hd,V[:self._Ns],out=V_out[self._Ns:],a=-ft,overwrite_out=False) # V_dot[self._Ns:] += -func(time)*Hd.dot(V[:self._Ns])
return V_out
def __SO(self,time,V,V_out):
"""
args:
V, the vector to multiply with
V_out, the vector to use with output.
time, the time to evaluate the drive at.
description:
This function is what gets passed into the ode solver. This is the real time Schrodinger operator -i*H(t)*|V >
"""
V = V.reshape(V_out.shape)
self._static_matvec(self._static,V,out=V_out,overwrite_out=True)
for func,Hd in iteritems(self._dynamic):
self._dynamic_matvec[func](Hd,V,a=func(time),out=V_out,overwrite_out=False)
V_out *= -1j
return V_out.ravel()
def evolve(self,v0,t0,times,eom="SE",solver_name="dop853",stack_state=False,verbose=False,iterate=False,imag_time=False,**solver_args):
"""Implements (imaginary) time evolution generated by the `hamiltonian` object.
The functions handles evolution generated by both time-dependent and time-independent Hamiltonians.
Currently the following three built-in routines are supported (see parameter `eom`):
i) real-time Schroedinger equation: :math:`\\partial_t|v(t)\\rangle=-iH(t)|v(t)\\rangle`.
ii) imaginary-time Schroedinger equation: :math:`\\partial_t|v(t)\\rangle=-H(t)|v(t)\\rangle`.
iii) Liouvillian dynamics: :math:`\\partial_t\\rho(t)=-i[H,\\rho(t)]`.
Notes
-----
Supports evolution of multiple states simultaneously (`eom="SE"`) and evolution of mixed
and pure density matrices (`eom="LvNE"`). For a user-defined custom ODE solver which can handle non-linear equations, check out the
`measurements.evolve()` routine, which has a similar functionality but allows for a complete freedom
over the differential equation to be solved.
Parameters
-----------
v0 : numpy.ndarray
Initial state :math:`|v(t)\\rangle` or density matrix (pure and mixed) :math:`\\rho(t)`.
t0 : float
Initial time.
times : numpy.ndarray
Vector of times to compute the time-evolved state at.
eom : str, optional
Specifies the ODE type. Can be either one of
* "SE", real and imaginary-time Schroedinger equation.
* "LvNE", real-time Liouville equation.
Default is "eom = SE" (Schroedinger evolution).
iterate : bool, optional
If set to `True`, creates a generator object for the time-evolved state. Default is `False`.
solver_name : str, optional
Scipy solver integrator name. Default is `dop853`.
See `scipy integrator (solver) <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.integrate.ode.html>`_ for other options.
solver_args : dict, optional
Dictionary with additional arguments for the `scipy integrator (solver) <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.integrate.ode.html>`_.
stack_state : bool, optional
Flag to determine if `f` is real or complex-valued. Default is `False` (i.e. complex-valued).
imag_time : bool, optional
Must be set to `True` when `f` defines imaginary-time evolution, in order to normalise the state
at each time in `times`. Default is `False`.
verbose : bool, optional
If set to `True`, prints normalisation of state at each time in `times`.
Returns
--------
obj
Can be either one of the following:
* numpy.ndarray containing evolved state against time.
* generator object for time-evolved state (requires `iterate = True`).
Note that for Liouvillian dynamics the output is a square complex `numpy.ndarray`.
Examples
---------
>>> v_t = H.evolve(v0,t0,times,eom="SE",solver_name="dop853",verbose=False,iterate=False,imag_time=False,**solver_args)
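For example, assuming a (hypothetical) initial state `psi0` and a vector of times `times`, the evolved states can be generated lazily:
>>> psi_t = H.evolve(psi0,times[0],times,iterate=True)
>>> for psi in psi_t:
...     pass # compute observables of psi here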
"""
try:
shape0 = v0.shape
except AttributeError:
v0 =_np.asanyarray(v0)
shape0 = v0.shape
if _np.iscomplexobj(times):
raise ValueError("times must be real number(s).")
evolve_args = (v0,t0,times)
evolve_kwargs = solver_args
evolve_kwargs["solver_name"]=solver_name
evolve_kwargs["stack_state"]=stack_state
evolve_kwargs["verbose"]=verbose
evolve_kwargs["iterate"]=iterate
evolve_kwargs["imag_time"]=imag_time
if eom == "SE":
if v0.ndim > 2:
raise ValueError("v0 must have ndim <= 2")
if v0.shape[0] != self.Ns:
raise ValueError("v0 must have {0} elements".format(self.Ns))
if imag_time:
if stack_state:
raise NotImplementedError("stack state is not compatible with imaginary time evolution.")
evolve_args = evolve_args + (self.__ISO,)
result_dtype = _np.result_type(v0.dtype,self.dtype,_np.float64)
v0 = _np.array(v0,dtype=result_dtype,copy=True,order="C")
evolve_kwargs["f_params"]=(v0,)
evolve_kwargs["real"] = not _np.iscomplexobj(v0)
else:
evolve_kwargs["real"]=False
if stack_state:
if _np.iscomplexobj(_np.array(1,dtype=self.dtype)): # no idea how to do this in python :D
raise ValueError('stack_state option cannot be used with complex-valued Hamiltonians')
shape = (v0.shape[0]*2,)+v0.shape[1:]
v0 = _np.zeros(shape,dtype=_np.float64,order="C")
evolve_kwargs["f_params"]=(v0,)
evolve_args = evolve_args + (self.__SO_real,)
else:
v0 = _np.array(v0,dtype=_np.complex128,copy=True,order="C")
evolve_kwargs["f_params"]=(v0,)
evolve_args = evolve_args + (self.__SO,)
elif eom == "LvNE":
n = 1.0
if v0.ndim != 2:
raise ValueError("v0 must have ndim = 2")
if v0.shape != self._shape:
raise ValueError("v0 must be same shape as Hamiltonian")
if imag_time:
raise NotImplementedError("imaginary time not implemented for Liouville-von Neumann dynamics")
else:
if stack_state:
raise NotImplementedError("stack_state not implemented for Liouville-von Neumann dynamics")
else:
v0 = _np.array(v0,dtype=_np.complex128,copy=True,order="C")
evolve_kwargs["f_params"]=(v0,)
evolve_args = evolve_args + (self.__LO,)
else:
raise ValueError("'{} equation' not recognized, must be 'SE' or 'LvNE'".format(eom))
return evolve(*evolve_args,**evolve_kwargs)
### routines to change object type
def aslinearoperator(self,time=0.0):
"""Returns copy of a `hamiltonian` object at time `time` as a `scipy.sparse.linalg.LinearOperator`.
Casts the `hamiltonian` object as a
`scipy.sparse.linalg.LinearOperator <https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.sparse.linalg.LinearOperator.html>`_
object.
Parameters
-----------
time : float, optional
Time to evaluate the time-dependent part of the operator at (if existent). Default is `time = 0.0`.
Returns
--------
:obj:`scipy.sparse.linalg.LinearOperator`
Examples
---------
>>> H_aslinop=H.aslinearoperator(time=time)
"""
time = _np.array(time)
if time.ndim > 0:
raise TypeError('expecting scalar argument for time')
matvec = functools.partial(_hamiltonian_dot,self,time)
rmatvec = functools.partial(_hamiltonian_dot,self.H,time)
return _sla.LinearOperator(self.get_shape,matvec,rmatvec=rmatvec,matmat=matvec,dtype=self._dtype)
def tocsr(self,time=0):
"""Returns copy of a `hamiltonian` object at time `time` as a `scipy.sparse.csr_matrix`.
Casts the `hamiltonian` object as a
`scipy.sparse.csr_matrix <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html>`_
object.
Parameters
-----------
time : float, optional
Time to evaluate the time-dependent part of the operator at (if existent). Default is `time = 0.0`.
Returns
--------
:obj:`scipy.sparse.csr_matrix`
Examples
---------
>>> H_csr=H.tocsr(time=time)
"""
if _np.array(time).ndim > 0:
raise TypeError('expecting scalar argument for time')
H = _sp.csr_matrix(self._static)
for func,Hd in iteritems(self._dynamic):
Hd = _sp.csr_matrix(Hd)
try:
H += Hd * func(time)
except:
H = H + Hd * func(time)
return H
def tocsc(self,time=0):
"""Returns copy of a `hamiltonian` object at time `time` as a `scipy.sparse.csc_matrix`.
Casts the `hamiltonian` object as a
`scipy.sparse.csc_matrix <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.html>`_
object.
Parameters
-----------
time : float, optional
Time to evaluate the time-dependent part of the operator at (if existent). Default is `time = 0.0`.
Returns
--------
:obj:`scipy.sparse.csc_matrix`
Examples
---------
>>> H_csc=H.tocsc(time=time)
"""
if _np.array(time).ndim > 0:
raise TypeError('expecting scalar argument for time')
H = _sp.csc_matrix(self._static)
for func,Hd in iteritems(self._dynamic):
Hd = _sp.csc_matrix(Hd)
try:
H += Hd * func(time)
except:
H = H + Hd * func(time)
return H
def todense(self,time=0,order=None, out=None):
"""Returns copy of a `hamiltonian` object at time `time` as a dense array.
This function can overflow memory if not used carefully!
Notes
-----
If the array dimension is too large, scipy may choose to cast the `hamiltonian` operator as a
`numpy.matrix` instead of a `numpy.ndarray`. In such a case, one can use the `hamiltonian.toarray()`
method.
Parameters
-----------
time : float, optional
Time to evaluate the time-dependent part of the operator at (if existent). Default is `time = 0.0`.
order : str, optional
Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory.
Default is `order = None`, indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out` argument.
out : numpy.ndarray
Array to fill in with the output.
Returns
--------
obj
Depending of size of array, can be either one of
* `numpy.ndarray`.
* `numpy.matrix`.
Examples
---------
>>> H_dense=H.todense(time=time)
"""
if _np.array(time).ndim > 0:
raise TypeError('expecting scalar argument for time')
if out is None:
out = _np.zeros(self._shape,dtype=self.dtype)
out = _np.asmatrix(out)
if _sp.issparse(self._static):
self._static.todense(order=order,out=out)
else:
out[:] = self._static[:]
for func,Hd in iteritems(self._dynamic):
out += Hd * func(time)
return out
def toarray(self,time=0,order=None, out=None):
"""Returns copy of a `hamiltonian` object at time `time` as a dense array.
This function can overflow memory if not used carefully!
Parameters
-----------
time : float, optional
Time to evaluate the time-dependent part of the operator at (if existent). Default is `time = 0.0`.
order : str, optional
Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory.
Default is `order = None`, indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out` argument.
out : numpy.ndarray
Array to fill in with the output.
Returns
--------
numpy.ndarray
Dense array.
Examples
---------
>>> H_dense=H.toarray(time=time)
"""
if _np.array(time).ndim > 0:
raise TypeError('expecting scalar argument for time')
if out is None:
out = _np.zeros(self._shape,dtype=self.dtype)
if _sp.issparse(self._static):
self._static.toarray(order=order,out=out)
else:
out[:] = self._static[:]
for func,Hd in iteritems(self._dynamic):
out += Hd * func(time)
return out
def update_matrix_formats(self,static_fmt,dynamic_fmt):
"""Change the internal structure of the matrices in-place.
Parameters
-----------
static_fmt : str {"csr","csc","dia","dense"}
Specifies format of static part of Hamiltonian.
dynamic_fmt: str {"csr","csc","dia","dense"} or dict, keys: (func,func_args), values: str {"csr","csc","dia","dense"}
Specifies the format of the dynamic parts of the hamiltonian. To specify a particular dynamic part of the hamiltonian use a tuple (func,func_args) which matches a function+argument pair
used in the construction of the hamiltonian as a key in the dictionary.
copy : bool,optional
Whether to return a deep copy of the original object. Default is `copy = False`.
Examples
---------
make the dynamic part of the `hamiltonian` object to be DIA matrix format and have the static part be CSR matrix format:
>>> H.update_matrix_formats(static_fmt="csr",dynamic_fmt={(func,func_args):"dia"})
"""
if static_fmt is not None:
if type(static_fmt) is not str:
raise ValueError("Expecting string for 'sparse_fmt'")
if static_fmt not in ["csr","csc","dia","dense"]:
raise ValueError("'{0}' is not a valid sparse format for Hamiltonian class.".format(static_fmt))
if static_fmt == "dense":
if _sp.issparse(self._static):
self._static = self._static.toarray()
else:
self._static = _np.ascontiguousarray(self._static)
else:
sparse_constuctor = getattr(_sp,static_fmt+"_matrix")
self._static = sparse_constuctor(self._static)
if dynamic_fmt is not None:
if type(dynamic_fmt) is str:
if dynamic_fmt not in ["csr","csc","dia","dense"]:
raise ValueError("'{0}' is not a valid sparse format for Hamiltonian class.".format(dynamic_fmt))
if dynamic_fmt == "dense":
updates = {func:sparse_constuctor(Hd) for func,Hd in iteritems(self._dynamic) if _sp.issparse(Hd)}
updates.update({func:_np.ascontiguousarray(Hd) for func,Hd in iteritems(self._dynamic) if not _sp.issparse(Hd)})
else:
updates = {func:sparse_constuctor(Hd) for func,Hd in iteritems(self._dynamic)}
self._dynamic.update(updates)
elif type(dynamic_fmt) in [list,tuple]:
for fmt,(f,f_args) in dynamic_fmt:
func = function(f,tuple(f_args))
if fmt not in ["csr","csc","dia","dense"]:
raise ValueError("'{0}' is not a valid sparse format for Hamiltonian class.".format(fmt))
try:
if fmt == "dense":
if _sp.issparse(self._static):
self._dynamic[func] = self._dynamic[func].toarray()
else:
self._dynamic[func] = _np.ascontiguousarray(self._dynamic[func])
else:
sparse_constuctor = getattr(_sp,fmt+"_matrix")
self._dynamic[func] = sparse_constuctor(self._dynamic[func])
except KeyError:
raise ValueError("({},{}) is not found in dynamic list.".format(f,f_args))
self._get_matvecs()
def as_dense_format(self,copy=False):
"""Casts `hamiltonian` operator to DENSE format.
Parameters
-----------
copy : bool,optional
Whether to return a deep copy of the original object. Default is `copy = False`.
Returns
--------
obj
Either one of the following:
* Shallow copy, if `copy = False`.
* Deep copy, if `copy = True`.
Examples
---------
>>> H_dense=H.as_dense_format()
"""
if _sp.issparse(self._static):
new_static = self._static.toarray()
else:
new_static = _np.asarray(self._static,copy=copy)
dynamic = [([M.toarray(),func] if _sp.issparse(M) else [M,func])
for func,M in iteritems(self.dynamic)]
return hamiltonian([new_static],dynamic,basis=self._basis,dtype=self._dtype,copy=copy)
def as_sparse_format(self,static_fmt="csr",dynamic_fmt={},copy=False):
"""Casts `hamiltonian` operator to SPARSE format(s).
Parameters
-----------
static_fmt : str {"csr","csc","dia","dense"}
Specifies format of static part of Hamiltonian.
dynamic_fmt: str {"csr","csc","dia","dense"} or dict, keys: (func,func_args), values: str {"csr","csc","dia","dense"}
Specifies the format of the dynamic parts of the hamiltonian. To specify a particular dynamic part of the hamiltonian use a tuple (func,func_args) which matches a function+argument pair
used in the construction of the hamiltonian as a key in the dictionary.
copy : bool,optional
Whether to return a deep copy of the original object. Default is `copy = False`.
Returns
--------
obj
Either one of the following:
* whenever possible do not copy data, if `copy = False`.
* explicitly copy all possible data, if `copy = True`.
Examples
---------
>>> H_dia=H.as_sparse_format(static_fmt="csr",dynamic_fmt={(func,func_args):"dia"})
"""
dynamic = [[M,func] for func,M in iteritems(self.dynamic)]
return hamiltonian([self.static],dynamic,basis=self._basis,dtype=self._dtype,
static_fmt=static_fmt,dynamic_fmt=dynamic_fmt,copy=copy)
### algebra operations
def transpose(self,copy=False):
"""Transposes `hamiltonian` operator.
Notes
-----
This function does NOT conjugate the operator.
Returns
--------
:obj:`hamiltonian`
:math:`H_{ij}\\mapsto H_{ji}`
Examples
---------
>>> H_tran = H.transpose()
"""
dynamic = [[M.T,func] for func,M in iteritems(self.dynamic)]
return hamiltonian([self.static.T],dynamic,
basis=self._basis,dtype=self._dtype,copy=copy)
def conjugate(self):
"""Conjugates `hamiltonian` operator.
Notes
-----
This function does NOT transpose the operator.
Returns
--------
:obj:`hamiltonian`
:math:`H_{ij}\\mapsto H_{ij}^*`
Examples
---------
>>> H_conj = H.conjugate()
"""
dynamic = [[M.conj(),func.conj()] for func,M in iteritems(self.dynamic)]
return hamiltonian([self.static.conj()],dynamic,
basis=self._basis,dtype=self._dtype)
def conj(self):
"""Same functionality as :func:`conjugate`."""
return self.conjugate()
def getH(self,copy=False):
"""Calculates hermitian conjugate of `hamiltonian` operator.
Parameters
-----------
copy : bool, optional
Whether to return a deep copy of the original object. Default is `copy = False`.
Returns
--------
:obj:`hamiltonian`
:math:`H_{ij}\\mapsto H_{ji}^*`
Examples
---------
>>> H_herm = H.getH()
"""
return self.conj().transpose(copy=copy)
### lin-alg operations
def diagonal(self,time=0):
"""Calculates diagonal of `hamiltonian` operator at time `time`.
Parameters
-----------
time : float, optional
Time to evaluate the time-dependent part of the operator at (if existent). Default is `time = 0.0`.
Returns
--------
numpy.ndarray
Diagonal part of operator :math:`H(t=\\texttt{time})`.
Examples
---------
>>> H_diag = H.diagonal(time=0.0)
"""
if self.Ns <= 0:
return 0
if _np.array(time).ndim > 0:
raise TypeError('expecting scalar argument for time')
diagonal = self._static.diagonal()
for func,Hd in iteritems(self._dynamic):
diagonal += Hd.diagonal() * func(time)
return diagonal
def trace(self,time=0):
"""Calculates trace of `hamiltonian` operator at time `time`.
Parameters
-----------
time : float, optional
Time to evaluate the time-dependent part of the operator at (if existent). Default is `time = 0.0`.
Returns
--------
float
Trace of operator :math:`\\sum_{j=1}^{Ns} H_{jj}(t=\\texttt{time})`.
Examples
---------
>>> H_tr = H.trace(time=0.0)
"""
if self.Ns <= 0:
return 0
if _np.array(time).ndim > 0:
raise TypeError('expecting scalar argument for time')
trace = self._static.diagonal().sum()
for func,Hd in iteritems(self._dynamic):
trace += Hd.diagonal().sum() * func(time)
return trace
def astype(self,dtype,copy=False,casting="unsafe"):
"""Changes data type of `hamiltonian` object.
Parameters
-----------
dtype : 'type'
The data type (e.g. numpy.float64) to cast the Hamiltonian with.
Returns
--------
`hamiltonian`
Operator with altered data type.
Examples
---------
>>> H_cpx=H.astype(np.complex128)
"""
if dtype not in supported_dtypes:
raise TypeError('hamiltonian does not support type: '+str(dtype))
static = self.static.astype(dtype,copy=copy,casting=casting)
dynamic = [[M.astype(dtype,copy=copy,casting=casting),func] for func,M in iteritems(self.dynamic)]
return hamiltonian([static],dynamic,basis=self._basis,dtype=dtype,copy=False)
def copy(self):
"""Returns a copy of `hamiltonian` object."""
dynamic = [[M,func] for func,M in iteritems(self.dynamic)]
return hamiltonian([self.static],dynamic,
basis=self._basis,dtype=self._dtype,copy=True)
###################
# special methods #
###################
def __getitem__(self,key):
if len(key) != 3:
raise IndexError("invalid number of indices, hamiltonian must be indexed with three indices [time,row,col].")
try:
times = iter(key[0])
iterate=True
except TypeError:
time = key[0]
iterate=False
key = tuple(key[1:])
if iterate:
ME = []
if self.is_dense:
for t in times:
ME.append(self.todense(time=t)[key])
else:
for t in times:
ME.append(self.tocsr(time=t)[key])
ME = tuple(ME)
else:
ME = self.tocsr(time=time)[key]
return ME
def __str__(self):
string = "static mat: \n{0}\n\n\ndynamic:\n".format(self._static.__str__())
for i,(func,Hd) in enumerate(iteritems(self._dynamic)):
h_str = Hd.__str__()
func_str = func.__str__()
string += ("{0}) func: {2}, mat: \n{1} \n".format(i,h_str,func_str))
return string
def __repr__(self):
string = "<quspin.operators.hamiltonian:\nstatic mat: {0}\ndynamic:".format(self._static.__repr__())
for i,(func,Hd) in enumerate(iteritems(self._dynamic)):
h_str = Hd.__repr__()
func_str = func.__str__()
string += ("\n {0}) func: {2}, mat: {1} ".format(i,h_str,func_str))
string = string + ">"
return string
def __neg__(self): # -self
dynamic = [[-M,func] for func,M in iteritems(self.dynamic)]
return hamiltonian([-self.static],dynamic,basis=self._basis,dtype=self._dtype)
def __call__(self,time=0): # self(time)
"""Return hamiltonian as a sparse or dense matrix at specific time
Parameters
-----------
time: float
time to evaluate dynamic parts at.
Returns
--------
if `is_dense` is True:
`numpy.ndarray`
else
`scipy.csr_matrix`
Examples
---------
>>> H_t = H(time)
"""
if self.is_dense:
return self.toarray(time)
else:
return self.tocsr(time)
##################################
# symbolic arithmetic operations #
# currently only have +,-,* like #
# operators implemented. #
##################################
def __pow__(self,power): # self ** power
if type(power) is not int:
raise TypeError("hamiltonian can only be raised to integer power.")
return reduce(mul,(self for i in range(power)))
def __mul__(self,other): # self * other
if ishamiltonian(other):
return self._mul_hamiltonian(other)
elif _sp.issparse(other):
self._mat_checks(other,casting="unsafe")
return self._mul_sparse(other)
elif _np.isscalar(other):
return self._mul_scalar(other)
elif other.__class__ == _np.ndarray:
self._mat_checks(other,casting="unsafe")
return self._mul_dense(other)
elif other.__class__ == _np.matrix:
self._mat_checks(other,casting="unsafe")
return self._mul_dense(other)
else:
other = _np.asanyarray(other)
self._mat_checks(other,casting="unsafe")
return self._mul_dense(other)
def __rmul__(self,other): # other * self
if ishamiltonian(other):
self._mat_checks(other,casting="unsafe")
return self._rmul_hamiltonian(other)
elif _sp.issparse(other):
self._mat_checks(other,casting="unsafe")
return self._rmul_sparse(other)
elif _np.isscalar(other):
return self._mul_scalar(other)
elif other.__class__ == _np.ndarray:
self._mat_checks(other,casting="unsafe")
return self._rmul_dense(other)
elif other.__class__ == _np.matrix:
self._mat_checks(other,casting="unsafe")
return self._rmul_dense(other)
else:
other = _np.asanyarray(other)
self._mat_checks(other,casting="unsafe")
return self._rmul_dense(other)
def __imul__(self,other): # self *= other
if ishamiltonian(other):
self._mat_checks(other)
return self._imul_hamiltonian(other)
elif _sp.issparse(other):
self._mat_checks(other)
return self._imul_sparse(other)
elif _np.isscalar(other):
return self._imul_scalar(other)
elif other.__class__ == _np.ndarray:
self._mat_checks(other)
return self._imul_dense(other)
elif other.__class__ == _np.matrix:
self._mat_checks(other)
return self._imul_dense(other)
else:
other = _np.asanyarray(other)
self._mat_checks(other)
return self._imul_dense(other)
def __truediv__(self,other):
return self.__div__(other)
def __div__(self,other): # self / other
if ishamiltonian(other):
return NotImplemented
elif _sp.issparse(other):
return NotImplemented
elif _np.isscalar(other):
return self._mul_scalar(1.0/other)
elif other.__class__ == _np.ndarray:
return NotImplemented
elif other.__class__ == _np.matrix:
return NotImplemented
else:
return NotImplemented
def __rdiv__(self,other): # other / self
return NotImplemented
def __idiv__(self,other): # self /= other
if ishamiltonian(other):
return NotImplemented
elif _sp.issparse(other):
return NotImplemented
elif _np.isscalar(other):
return self._imul_scalar(1.0/other)
elif other.__class__ == _np.ndarray:
return NotImplemented
elif other.__class__ == _np.matrix:
return NotImplemented
else:
return NotImplemented
def __add__(self,other): # self + other
if ishamiltonian(other):
self._mat_checks(other,casting="unsafe")
return self._add_hamiltonian(other)
elif _sp.issparse(other):
self._mat_checks(other,casting="unsafe")
return self._add_sparse(other)
elif _np.isscalar(other):
if other==0.0:
return self
else:
raise NotImplementedError('hamiltonian does not support addition by nonzero scalar')
elif other.__class__ == _np.ndarray:
self._mat_checks(other,casting="unsafe")
return self._add_dense(other)
elif other.__class__ == _np.matrix:
self._mat_checks(other,casting="unsafe")
return self._add_dense(other)
else:
other = _np.asanyarray(other)
self._mat_checks(other,casting="unsafe")
return self._add_dense(other)
def __radd__(self,other): # other + self
return self.__add__(other)
def __iadd__(self,other): # self += other
if ishamiltonian(other):
self._mat_checks(other)
return self._iadd_hamiltonian(other)
elif _sp.issparse(other):
self._mat_checks(other)
return self._iadd_sparse(other)
elif _np.isscalar(other):
if other==0.0:
return self
else:
raise NotImplementedError('hamiltonian does not support addition by nonzero scalar')
elif other.__class__ == _np.ndarray:
self._mat_checks(other)
return self._iadd_dense(other)
else:
other = _np.asanyarray(other)
self._mat_checks(other)
return self._iadd_dense(other)
def __sub__(self,other): # self - other
if ishamiltonian(other):
self._mat_checks(other,casting="unsafe")
return self._sub_hamiltonian(other)
elif _sp.issparse(other):
self._mat_checks(other,casting="unsafe")
return self._sub_sparse(other)
elif _np.isscalar(other):
if other==0.0:
return self
else:
raise NotImplementedError('hamiltonian does not support subtraction by nonzero scalar')
elif other.__class__ == _np.ndarray:
self._mat_checks(other,casting="unsafe")
return self._sub_dense(other)
else:
other = _np.asanyarray(other)
self._mat_checks(other,casting="unsafe")
return self._sub_dense(other)
def __rsub__(self,other): # other - self
# NOTE: because we use signed types this is possible
return self.__sub__(other).__neg__()
def __isub__(self,other): # self -= other
if ishamiltonian(other):
self._mat_checks(other)
return self._isub_hamiltonian(other)
elif _sp.issparse(other):
self._mat_checks(other)
return self._isub_sparse(other)
elif _np.isscalar(other):
if other==0.0:
return self
else:
raise NotImplementedError('hamiltonian does not support subtraction by nonzero scalar')
elif other.__class__ == _np.ndarray:
self._mat_checks(other)
return self._isub_dense(other)
else:
other = _np.asanyarray(other)
self._mat_checks(other)
return self._isub_dense(other)
##########################################################################################
##########################################################################################
# below all of the arithmetic functions are implemented for various combinations of types #
##########################################################################################
##########################################################################################
# checks
def _mat_checks(self,other,casting="same_kind"):
try:
if other.shape != self._shape: # only accepts square matrices
raise ValueError('shapes do not match')
if not _np.can_cast(other.dtype,self._dtype,casting=casting):
raise ValueError('cannot cast types')
except AttributeError:
if other._shape != self._shape: # only accepts square matrices
raise ValueError('shapes do not match')
if not _np.can_cast(other.dtype,self._dtype,casting=casting):
raise ValueError('cannot cast types')
##########################
# hamiltonian operations #
##########################
def _add_hamiltonian(self,other):
result_dtype = _np.result_type(self._dtype, other.dtype)
new=self.astype(result_dtype,copy=True)
new._is_dense = new._is_dense or other._is_dense
try:
new._static += other._static
except NotImplementedError:
new._static = new._static + other._static
if _check_almost_zero(new._static):
new._static = _sp.dia_matrix(new._shape,dtype=new._dtype)
for func,Hd in iteritems(other._dynamic):
if func in new._dynamic:
try:
new._dynamic[func] += Hd
except NotImplementedError:
new._dynamic[func] = new._dynamic[func] + Hd
if _check_almost_zero(new._dynamic[func]):
new._dynamic.pop(func)
else:
new._dynamic[func] = Hd
new.check_is_dense()
new._get_matvecs()
return new
def _iadd_hamiltonian(self,other):
self._is_dense = self._is_dense or other._is_dense
try:
self._static += other._static
except NotImplementedError:
self._static = self._static + other._static
if _check_almost_zero(self._static):
self._static = _sp.dia_matrix(self._shape,dtype=self._dtype)
for func,Hd in iteritems(other._dynamic):
if func in self._dynamic:
try:
self._dynamic[func] += Hd
except NotImplementedError:
self._dynamic[func] = self._dynamic[func] + Hd
try:
self._dynamic[func].sum_duplicates()
self._dynamic[func].eliminate_zeros()
except: pass
if _check_almost_zero(self._dynamic[func]):
self._dynamic.pop(func)
else:
self._dynamic[func] = Hd
self.check_is_dense()
self._get_matvecs()
return self
def _sub_hamiltonian(self,other):
result_dtype = _np.result_type(self._dtype, other.dtype)
new=self.astype(result_dtype,copy=True)
new._is_dense = new._is_dense or other._is_dense
try:
new._static -= other._static
except NotImplementedError:
new._static = new._static - other._static
if _check_almost_zero(new._static):
new._static = _sp.dia_matrix(new._shape,dtype=new._dtype)
for func,Hd in iteritems(other._dynamic):
if func in new._dynamic:
try:
new._dynamic[func] -= Hd
except NotImplementedError:
new._dynamic[func] = new._dynamic[func] - Hd
if _check_almost_zero(new._dynamic[func]):
new._dynamic.pop(func)
else:
new._dynamic[func] = -Hd
new.check_is_dense()
new._get_matvecs()
return new
def _isub_hamiltonian(self,other):
self._is_dense = self._is_dense or other._is_dense
try:
self._static -= other._static
except NotImplementedError:
self._static = self._static - other._static
if _check_almost_zero(self._static):
self._static = _sp.dia_matrix(self._shape,dtype=self._dtype)
for func,Hd in iteritems(other._dynamic):
if func in self._dynamic:
try:
self._dynamic[func] -= Hd
except NotImplementedError:
self._dynamic[func] = self._dynamic[func] - Hd
if _check_almost_zero(self._dynamic[func]):
self._dynamic.pop(func)
else:
self._dynamic[func] = -Hd
self.check_is_dense()
self._get_matvecs()
return self
def _mul_hamiltonian(self,other):
if self.dynamic and other.dynamic:
new = self.astype(self._dtype)
return new._imul_hamiltonian(other)
elif self.dynamic:
return self._mul_sparse(other.static)
elif other.dynamic:
return other._rmul_sparse(self.static)
else:
return self._mul_sparse(other.static)
def _rmul_hamiltonian(self,other):
if self.dynamic and other.dynamic:
new = other.astype(self._dtype)
return (new.T._imul_hamiltonian(self.T)).T #lazy implementation
elif self.dynamic:
return self._rmul_sparse(other.static)
elif other.dynamic:
return other._mul_sparse(self.static)
else:
return self._rmul_sparse(other.static)
def _imul_hamiltonian(self,other):
if self.dynamic and other.dynamic:
self._is_dense = self._is_dense or other._is_dense
new_dynamic_ops = {}
# create new dynamic operators coming from
# self.static * other.static
if _sp.issparse(self.static):
new_static_op = self.static.dot(other._static)
elif _sp.issparse(other._static):
new_static_op = self.static * other._static
else:
new_static_op =
|
_np.matmul(self.static,other._static)
|
numpy.matmul
|
import numpy as np
from imblearn.over_sampling import SMOTE, ADASYN
from utils import norm_feats
def oversample(method,pose_feats, d_list, labels):
"""Normalize data"""
pose_feats_n, d_list_n = norm_feats(pose_feats, d_list)
"""Extract class indecies and equalize"""
idx0 = np.flatnonzero(labels == 0)
idx1 = np.flatnonzero(labels == 1)
idx2 = np.flatnonzero(labels == 2)
dom = np.min([len(idx0), len(idx1), len(idx2)])
n_idx0 = idx0[0:dom-2]
n_idx1 = idx1[0:dom-2]
n_idx2 = idx2[0:dom-2]
n_pose_feats0 = pose_feats_n[n_idx0]
n_pose_feats1 = pose_feats_n[n_idx1]
n_pose_feats2 = pose_feats_n[n_idx2]
pose_feats_n = np.concatenate([n_pose_feats0, n_pose_feats1, n_pose_feats2])
d_list_n = np.concatenate([d_list_n[n_idx0], d_list_n[n_idx1], d_list_n[n_idx2]])
labels_n =
|
np.concatenate([labels[n_idx0], labels[n_idx1], labels[n_idx2]])
|
numpy.concatenate
|
import numbers
import os
import random
import string
import sys
import warnings
import tempfile
from collections.abc import Iterable
import numpy as np
import scipy
from astropy.time import Time, TimeDelta
import astropy.units as u
from astropy.units import Quantity
from numpy import histogram as histogram_np
from numpy import histogram2d as histogram2d_np
# If numba is installed, import jit. Otherwise, define an empty decorator with
# the same name.
HAS_NUMBA = False
try:
from numba import jit
HAS_NUMBA = True
from numba import njit, prange, vectorize, float32, float64, int32, int64
except ImportError:
warnings.warn("Numba not installed. Faking it")
class jit(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, func):
def wrapped_f(*args, **kwargs):
return func(*args, **kwargs)
return wrapped_f
class njit(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, func):
def wrapped_f(*args, **kwargs):
return func(*args, **kwargs)
return wrapped_f
class vectorize(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, func):
wrapped_f = np.vectorize(func)
return wrapped_f
def generic(x, y=None):
return None
float32 = float64 = int32 = int64 = generic
def prange(x):
return range(x)
try:
from tqdm import tqdm as show_progress
except ImportError:
def show_progress(a):
return a
try:
from statsmodels.robust import mad as mad # pylint: disable=unused-import
except ImportError:
def mad(data, c=0.6745, axis=None):
"""
Mean Absolute Deviation (MAD) along an axis.
Straight from statsmodels's source code, adapted
Parameters
----------
data : iterable
The data along which to calculate the MAD
c : float, optional
The normalization constant. Defined as
``scipy.stats.norm.ppf(3/4.)``, which is approximately ``.6745``.
axis : int, optional, default ``None``
Axis along which to calculate ``mad``. Default is ``None``, can also
be an integer
"""
data = np.asarray(data)
if axis is not None:
center = np.apply_over_axes(np.median, data, axis)
else:
center = np.median(data)
return np.median((np.fabs(data - center)) / c, axis=axis)
__all__ = ['simon', 'rebin_data', 'rebin_data_log', 'look_for_array_in_array',
'is_string', 'is_iterable', 'order_list_of_arrays',
'optimal_bin_time', 'contiguous_regions', 'is_int',
'get_random_state', 'baseline_als', 'excess_variance',
'create_window', 'poisson_symmetrical_errors', 'standard_error',
'nearest_power_of_two', 'find_nearest', 'genDataPath']
def _root_squared_mean(array):
array = np.asarray(array)
return np.sqrt(np.sum(array ** 2)) / array.size
def simon(message, **kwargs):
"""The Statistical Interpretation MONitor.
A warning system designed to always remind the user that Simon
is watching him/her.
Parameters
----------
message : string
The message that is thrown
kwargs : dict
The rest of the arguments that are passed to ``warnings.warn``
"""
warnings.warn("SIMON says: {0}".format(message), **kwargs)
def rebin_data(x, y, dx_new, yerr=None, method='sum', dx=None):
"""Rebin some data to an arbitrary new data resolution. Either sum
the data points in the new bins or average them.
Parameters
----------
x: iterable
The independent variable with some resolution, which can vary throughout
the time series.
y: iterable
The dependent variable to be binned
dx_new: float
The new resolution of the independent variable ``x``
Other parameters
----------------
yerr: iterable, optional
The uncertainties of ``y``, to be propagated during binning.
method: {``sum`` | ``average`` | ``mean``}, optional, default ``sum``
The method to be used in binning. Either sum the samples ``y`` in
each new bin of ``x``, or take the arithmetic mean.
dx: float
The old resolution (otherwise, calculated from difference between
time bins)
Returns
-------
xbin: numpy.ndarray
The midpoints of the new bins in ``x``
ybin: numpy.ndarray
The binned quantity ``y``
ybin_err: numpy.ndarray
The uncertainties of the binned values of ``y``.
step_size: float
The size of the binning step
Examples
--------
>>> x = np.arange(0, 100, 0.01)
>>> y = np.ones(x.size)
>>> yerr = np.ones(x.size)
>>> xbin, ybin, ybinerr, step_size = rebin_data(
... x, y, 4, yerr=yerr, method='sum', dx=0.01)
>>> np.allclose(ybin, 400)
True
>>> np.allclose(ybinerr, 20)
True
>>> xbin, ybin, ybinerr, step_size = rebin_data(
... x, y, 4, yerr=yerr, method='mean')
>>> np.allclose(ybin, 1)
True
>>> np.allclose(ybinerr, 0.05)
True
"""
y = np.asarray(y)
if yerr is None:
yerr = np.zeros_like(y)
else:
yerr = np.asarray(yerr)
if not dx:
dx_old = np.diff(x)
elif np.size(dx) == 1:
dx_old = np.array([dx])
else:
dx_old = dx
if np.any(dx_new < dx_old):
raise ValueError("New frequency resolution must be larger than "
"old frequency resolution.")
# left and right bin edges
# assumes that the points given in `x` correspond to
# the left bin edges
xedges = np.hstack([x, x[-1]+dx_old[-1]])
# new regularly binned resolution
xbin = np.arange(xedges[0], xedges[-1]+dx_new, dx_new)
output = np.zeros(xbin.shape[0] - 1, dtype=type(y[0]))
outputerr = np.zeros(xbin.shape[0] - 1, dtype=type(y[0]))
step_size = np.zeros(xbin.shape[0] - 1)
all_x = np.searchsorted(xedges, xbin)
min_inds = all_x[:-1]
max_inds = all_x[1:]
xmins = xbin[:-1]
xmaxs = xbin[1:]
for i, (xmin, xmax, min_ind, max_ind) in enumerate(zip(xmins, xmaxs, min_inds, max_inds)):
filtered_y = y[min_ind:max_ind-1]
filtered_yerr = yerr[min_ind:max_ind-1]
output[i] = np.sum(filtered_y)
outputerr[i] = np.sum(filtered_yerr)
step_size[i] = max_ind - 1 - min_ind
prev_dx = xedges[min_ind] - xedges[min_ind-1]
prev_frac = (xedges[min_ind] - xmin)/prev_dx
output[i] += y[min_ind-1]*prev_frac
outputerr[i] += yerr[min_ind-1]*prev_frac
step_size[i] += prev_frac
if not max_ind == xedges.size:
dx_post = xedges[max_ind] - xedges[max_ind-1]
post_frac = (xmax-xedges[max_ind-1])/dx_post
output[i] += y[max_ind-1]*post_frac
outputerr[i] += yerr[max_ind-1]*post_frac
step_size[i] += post_frac
if method in ['mean', 'avg', 'average', 'arithmetic mean']:
ybin = output / step_size
ybinerr = np.sqrt(outputerr) / step_size
elif method == "sum":
ybin = output
ybinerr = np.sqrt(outputerr)
else:
raise ValueError("Method for summing or averaging not recognized. "
"Please enter either 'sum' or 'mean'.")
tseg = x[-1] - x[0] + dx_old[-1]
if (tseg / dx_new % 1) > 0:
ybin = ybin[:-1]
ybinerr = ybinerr[:-1]
step_size = step_size[:-1]
dx_var = np.var(dx_old) / np.mean(dx_old)
if np.size(dx_old) == 1 or dx_var < 1e-6:
step_size = step_size[0]
new_x0 = (x[0] - (0.5 * dx_old[0])) + (0.5 * dx_new)
xbin = np.arange(ybin.shape[0]) * dx_new + new_x0
return xbin, ybin, ybinerr, step_size
def rebin_data_log(x, y, f, y_err=None, dx=None):
"""Logarithmic re-bin of some data. Particularly useful for the power
spectrum.
The width of each new frequency bin depends on the previous bin width,
modified by a factor f:
.. math::
d\\nu_j = d\\nu_{j-1} (1+f)
Parameters
----------
x: iterable
The independent variable with some resolution ``dx_old = x[1]-x[0]``
y: iterable
The dependent variable to be binned
f: float
The factor of increase of each bin wrt the previous one.
Other Parameters
----------------
y_err: iterable, optional
The uncertainties of ``y`` to be propagated during binning.
method: {``sum`` | ``average`` | ``mean``}, optional, default ``sum``
The method to be used in binning. Either sum the samples ``y`` in
each new bin of ``x`` or take the arithmetic mean.
dx: float, optional
The binning step of the initial ``x``
Returns
-------
xbin: numpy.ndarray
The midpoints of the new bins in ``x``
ybin: numpy.ndarray
The binned quantity ``y``
ybin_err: numpy.ndarray
The uncertainties of the binned values of ``y``
step_size: float
The size of the binning step
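Examples
--------
A minimal sketch with hypothetical inputs (a flat spectrum rebinned with ``f=0.1``):
>>> freq = np.arange(1, 100) * 0.1  # doctest: +SKIP
>>> power = np.ones_like(freq)  # doctest: +SKIP
>>> fbin, pbin, perr, nsamples = rebin_data_log(freq, power, 0.1)  # doctest: +SKIP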
"""
dx_init = apply_function_if_none(dx, np.diff(x), np.median)
x = np.asarray(x)
y = np.asarray(y)
y_err = np.asarray(apply_function_if_none(y_err, y, np.zeros_like))
if x.shape[0] != y.shape[0]:
raise ValueError("x and y must be of the same length!")
if y.shape[0] != y_err.shape[0]:
raise ValueError("y and y_err must be of the same length!")
minx = x[0] * 0.5 # frequency to start from
maxx = x[-1] # maximum frequency to end
binx = [minx, minx + dx_init] # first
dx = dx_init # the frequency resolution of the first bin
# until we reach the maximum frequency, increase the width of each
# frequency bin by f
while binx[-1] <= maxx:
binx.append(binx[-1] + dx * (1.0 + f))
dx = binx[-1] - binx[-2]
binx = np.asarray(binx)
real = y.real
real_err = y_err.real
# compute the mean of the ys that fall into each new frequency bin.
# we cast to np.double due to scipy's bad handling of longdoubles
biny, bin_edges, binno = scipy.stats.binned_statistic(
x.astype(np.double), real.astype(np.double),
statistic="mean", bins=binx)
biny_err, bin_edges, binno = scipy.stats.binned_statistic(
x.astype(np.double), real_err.astype(np.double),
statistic=_root_squared_mean, bins=binx)
if np.iscomplexobj(y):
imag = y.imag
biny_imag, bin_edges, binno = scipy.stats.binned_statistic(
x.astype(np.double), imag.astype(np.double),
statistic="mean", bins=binx)
biny = biny + 1j * biny_imag
if np.iscomplexobj(y_err):
imag_err = y_err.imag
biny_err_imag, bin_edges, binno = scipy.stats.binned_statistic(
x.astype(np.double), imag_err.astype(np.double),
statistic=_root_squared_mean, bins=binx)
biny_err = biny_err + 1j * biny_err_imag
# compute the number of powers in each frequency bin
nsamples = np.array([len(binno[np.where(binno == i)[0]])
for i in range(1, np.max(binno) + 1, 1)])
return binx, biny, biny_err, nsamples
def apply_function_if_none(variable, value, func):
"""
Assign a function value to a variable if that variable has value ``None`` on input.
Parameters
----------
variable : object
A variable with either some assigned value, or ``None``
value : object
A variable to go into the function
func : function
Function to apply to ``value``. Result is assigned to ``variable``
Returns
-------
new_value : object
The new value of ``variable``
Examples
--------
>>> var = 4
>>> value = np.zeros(10)
>>> apply_function_if_none(var, value, np.mean)
4
>>> var = None
>>> apply_function_if_none(var, value, lambda y: np.mean(y))
0.0
"""
if variable is None:
return func(value)
else:
return variable
def assign_value_if_none(value, default):
"""
Assign a value to a variable if that variable has value ``None`` on input.
Parameters
----------
value : object
A variable with either some assigned value, or ``None``
default : object
The value to assign to the variable ``value`` if ``value is None``
returns ``True``
Returns
-------
new_value : object
The new value of ``value``
"""
return default if value is None else value
def look_for_array_in_array(array1, array2):
"""
Find a subset of values in an array.
Parameters
----------
array1 : iterable
An array with values to be searched
array2 : iterable
A second array which potentially contains a subset of values
also contained in ``array1``
Returns
-------
array3 : object
The first value in ``array1`` that is also contained in ``array2``, or ``None`` if there is no common value
"""
return next((i for i in array1 if i in array2), None)
def is_string(s):
"""
Portable function to answer whether a variable is a string.
Parameters
----------
s : object
An object that is potentially a string
Returns
-------
isstring : bool
A boolean decision on whether ``s`` is a string or not
"""
return isinstance(s, str)
def is_iterable(var):
"""Test if a variable is an iterable.
Parameters
----------
var : object
The variable to be tested for iterably-ness
Returns
-------
is_iter : bool
Returns ``True`` if ``var`` is an ``Iterable``, ``False`` otherwise
"""
return isinstance(var, Iterable)
def order_list_of_arrays(data, order):
"""Sort an array according to the specified order.
Parameters
----------
data : iterable
Returns
-------
data : list or dict
"""
if hasattr(data, 'items'):
data = dict([(key, value[order]) for key, value in data.items()])
elif is_iterable(data):
data = [i[order] for i in data]
else:
data = None
return data
def optimal_bin_time(fftlen, tbin):
"""Vary slightly the bin time to have a power of two number of bins.
Given an FFT length and a proposed bin time, return a bin time
slightly shorter than the original, that will produce a power-of-two number
of FFT bins.
Parameters
----------
fftlen : int
Number of positive frequencies in a proposed Fourier spectrum
tbin : float
The proposed time resolution of a light curve
Returns
-------
res : float
A time resolution that will produce a Fourier spectrum with ``fftlen`` frequencies and
a number of FFT bins that are a power of two
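Examples
--------
For example, with ``fftlen=512`` a proposed bin time of 1.1 is shortened to 1.0:
>>> float(optimal_bin_time(512, 1.1))
1.0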
"""
return fftlen / (2 ** np.ceil(np.log2(fftlen / tbin)))
def contiguous_regions(condition):
"""Find contiguous ``True`` regions of the boolean array ``condition``.
Return a 2D array where the first column is the start index of the region
and the second column is the end index, found on [so-contiguous]_.
Parameters
----------
condition : bool array
Returns
-------
idx : ``[[i0_0, i0_1], [i1_0, i1_1], ...]``
A list of integer couples, with the start and end of each ``True`` blocks
in the original array
Notes
-----
.. [so-contiguous] http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array
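Examples
--------
For example, a single ``True`` block spanning indices 1 and 2 is reported as the half-open interval ``[1, 3]``:
>>> contiguous_regions(np.array([False, True, True, False]))
array([[1, 3]])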
"""
# Find the indices of changes in "condition"
diff = np.logical_xor(condition[1:], condition[:-1])
idx, = diff.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size]
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
def is_int(obj):
"""Test if object is an integer."""
return isinstance(obj, (numbers.Integral, np.integer))
def get_random_state(random_state=None):
"""Return a Mersenne Twister pseudo-random number generator.
Parameters
----------
random_state : integer or ``numpy.random.RandomState``, optional, default ``None``
Returns
-------
random_state : mtrand.RandomState object
"""
if not random_state:
random_state = np.random.mtrand._rand
else:
if is_int(random_state):
random_state = np.random.RandomState(random_state)
elif not isinstance(random_state, np.random.RandomState):
raise ValueError(
"{value} can't be used to generate a numpy.random.RandomState".format(
value=random_state
))
return random_state
def _offset(x, off):
"""An offset."""
return off
def offset_fit(x, y, offset_start=0):
"""Fit a constant offset to the data.
Parameters
----------
x : array-like
y : array-like
offset_start : float
Constant offset, initial value
Returns
-------
offset : float
Fitted offset
"""
from scipy.optimize import curve_fit
par, _ = curve_fit(_offset, x, y, [offset_start],
maxfev=6000)
return par[0]
def _als(y, lam, p, niter=10):
"""Baseline Correction with Asymmetric Least Squares Smoothing.
Modifications to the routine from Eilers & Boelens 2005 [eilers-2005]_.
The Python translation is partly from [so-als]_.
Parameters
----------
y : array-like
the data series corresponding to ``x``
lam : float
the lambda parameter of the ALS method. This controls how much the
baseline can adapt to local changes. A higher value corresponds to a
stiffer baseline
p : float
the asymmetry parameter of the ALS method. This controls the overall
slope tolerated for the baseline. A higher value corresponds to a
higher possible slope
Other parameters
----------------
niter : int
The number of iterations to perform
Returns
-------
z : array-like, same size as ``y``
Fitted baseline.
References
----------
.. [eilers-2005] https://www.researchgate.net/publication/228961729_Technical_Report_Baseline_Correction_with_Asymmetric_Least_Squares_Smoothing
.. [so-als] http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"""
from scipy import sparse
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for _ in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w * y)
w = p * (y > z) + (1 - p) * (y < z)
return z
def baseline_als(x, y, lam=None, p=None, niter=10, return_baseline=False,
offset_correction=False):
"""Baseline Correction with Asymmetric Least Squares Smoothing.
Parameters
----------
x : array-like
the sample time/number/position
y : array-like
the data series corresponding to ``x``
lam : float
the lambda parameter of the ALS method. This controls how much the
baseline can adapt to local changes. A higher value corresponds to a
stiffer baseline
p : float
the asymmetry parameter of the ALS method. This controls the overall
slope tolerated for the baseline. A higher value corresponds to a
higher possible slope
Other Parameters
----------------
niter : int
The number of iterations to perform
return_baseline : bool
return the baseline?
offset_correction : bool
also correct for an offset to align with the running mean of the scan
Returns
-------
y_subtracted : array-like, same size as ``y``
The initial time series, with the fitted trend subtracted
baseline : array-like, same size as ``y``
Fitted baseline. Only returned if return_baseline is ``True``
Examples
--------
>>> x = np.arange(0, 10, 0.01)
>>> y = np.zeros_like(x) + 10
>>> ysub = baseline_als(x, y)
>>> np.all(ysub < 0.001)
True
"""
if lam is None:
lam = 1e11
if p is None:
p = 0.001
z = _als(y, lam, p, niter=niter)
ysub = y - z
offset = 0
if offset_correction:
std = mad(ysub)
good = np.abs(ysub) < 10 * std
if len(x[good]) < 10:
good = np.ones(len(x), dtype=bool)
warnings.warn('Too few bins to perform baseline offset correction'
' precisely. Beware of results')
offset = offset_fit(x[good], ysub[good], 0)
if return_baseline:
return ysub - offset, z + offset
else:
return ysub - offset
def excess_variance(lc, normalization='fvar'):
"""Calculate the excess variance.
Vaughan et al. 2003, MNRAS 345, 1271 give three measurements of source
intrinsic variance: if a light curve has a total variance of :math:`S^2`,
and each point has an error bar :math:`\sigma_{err}`, the *excess variance*
is defined as
.. math:: \sigma_{XS} = S^2 - \overline{\sigma_{err}}^2;
the *normalized excess variance* is the excess variance divided by the
square of the mean intensity:
.. math:: \sigma_{NXS} = \dfrac{\sigma_{XS}}{\overline{x}^2};
the *fractional mean square variability amplitude*, or
:math:`F_{var}`, is finally defined as
.. math:: F_{var} = \sqrt{\dfrac{\sigma_{XS}}{\overline{x}^2}}
Parameters
----------
lc : a :class:`Lightcurve` object
normalization : str
if ``fvar``, return the fractional mean square variability :math:`F_{var}`.
If ``none``, return the unnormalized excess variance
:math:`\sigma_{XS}`. If ``norm_xs``, return the normalized excess variance
:math:`\sigma_{NXS}`
Returns
-------
var_xs : float
var_xs_err : float
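Examples
--------
A minimal sketch, assuming ``lc`` is a :class:`Lightcurve` with ``counts`` and ``counts_err`` attributes:
>>> fvar, fvar_err = excess_variance(lc, normalization='fvar')  # doctest: +SKIP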
"""
lc_mean_var = np.mean(lc.counts_err ** 2)
lc_actual_var = np.var(lc.counts)
var_xs = lc_actual_var - lc_mean_var
mean_lc = np.mean(lc.counts)
mean_ctvar = mean_lc ** 2
var_nxs = var_xs / mean_lc ** 2
fvar = np.sqrt(var_xs / mean_ctvar)
N = len(lc.counts)
var_nxs_err_A = np.sqrt(2 / N) * lc_mean_var / mean_lc ** 2
var_nxs_err_B = np.sqrt(lc_mean_var / N) * 2 * fvar / mean_lc
var_nxs_err = np.sqrt(var_nxs_err_A ** 2 + var_nxs_err_B ** 2)
fvar_err = var_nxs_err / (2 * fvar)
if normalization == 'fvar':
return fvar, fvar_err
elif normalization == 'norm_xs':
return var_nxs, var_nxs_err
elif normalization == 'none' or normalization is None:
return var_xs, var_nxs_err * mean_lc ** 2
def create_window(N, window_type='uniform'):
"""A method to create window functions commonly used in signal processing.
Windows supported are:
Hamming, Hanning, uniform (rectangular window), triangular window,
blackmann window among others.
Parameters
----------
N : int
Total number of data points in window. If negative, abs is taken.
window_type : {``uniform``, ``parzen``, ``hamming``, ``hanning``, ``triangular``,\
``welch``, ``blackmann``, ``flat-top``}, optional, default ``uniform``
Type of window to create.
Returns
-------
window: numpy.ndarray
Window function of length ``N``.
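Examples
--------
For example, a 16-point Hanning window:
>>> window = create_window(16, window_type='hanning')
>>> window.size
16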
"""
if not isinstance(N, int):
raise TypeError('N (window length) must be an integer')
windows = ['uniform', 'parzen', 'hamming', 'hanning', 'triangular',
'welch', 'blackmann', 'flat-top']
if not isinstance(window_type, str):
raise TypeError('type of window must be specified as string!')
window_type = window_type.lower()
if window_type not in windows:
raise ValueError(
"Wrong window type specified or window function is not available")
# Return empty array as window if N = 0
if N == 0:
return np.array([])
window = None
N = abs(N)
# Window samples index
n = np.arange(N)
# Constants
N_minus_1 = N - 1
N_by_2 = int((np.floor((N_minus_1) / 2)))
# Create Windows
if window_type == 'uniform':
window = np.ones(N)
if window_type == 'parzen':
N_parzen = int(np.ceil((N + 1) / 2))
N2_plus_1 = int(np.floor((N_parzen / 2))) + 1
window = np.zeros(N_parzen)
windlag0 = np.arange(0, N2_plus_1) / (N_parzen - 1)
windlag1 = 1 - np.arange(N2_plus_1, N_parzen) / (N_parzen - 1)
window[:N2_plus_1] = 1 - (1 - windlag0) * windlag0 * windlag0 * 6
window[N2_plus_1:] = windlag1 * windlag1 * windlag1 * 2
lagindex = np.arange(N_parzen - 1, 0, -1)
window = np.concatenate((window[lagindex], window))
window = window[:N]
if window_type == 'hamming':
window = 0.54 - 0.46 * np.cos((2 * np.pi * n) / N_minus_1)
if window_type == 'hanning':
window = 0.5 * (1 - np.cos(2 * np.pi * n / N_minus_1))
if window_type == 'triangular':
window = 1 - np.abs((n - (N_by_2)) / N)
if window_type == 'welch':
N_minus_1_by_2 = N_minus_1 / 2
window = 1 - np.square((n - N_minus_1_by_2) / N_minus_1_by_2)
if window_type == 'blackmann':
a0 = 0.42659
a1 = 0.49656
a2 = 0.076849
window = a0 - a1 * np.cos((2 * np.pi * n) / N_minus_1) + a2 * np.cos(
(4 * np.pi * n) / N_minus_1)
if window_type == 'flat-top':
a0 = 1
a1 = 1.93
a2 = 1.29
a3 = 0.388
a4 = 0.028
window = a0 - a1 * np.cos((2 * np.pi * n) / N_minus_1) + \
a2 * np.cos((4 * np.pi * n) / N_minus_1) - \
a3 * np.cos((6 * np.pi * n) / N_minus_1) + \
a4 * np.cos((8 * np.pi * n) / N_minus_1)
return window
def poisson_symmetrical_errors(counts):
"""Optimized version of frequentist symmetrical errors.
Uses a lookup table in order to limit the calls to poisson_conf_interval
Parameters
----------
counts : iterable
An array of Poisson-distributed numbers
Returns
-------
err : numpy.ndarray
An array of uncertainties associated with the Poisson counts in
``counts``
Examples
--------
>>> from astropy.stats import poisson_conf_interval
>>> counts = np.random.randint(0, 1000, 100)
>>> # ---- Do it without the lookup table ----
>>> err_low, err_high = poisson_conf_interval(np.asarray(counts),
... interval='frequentist-confidence', sigma=1)
>>> err_low -= np.asarray(counts)
>>> err_high -= np.asarray(counts)
>>> err = (np.absolute(err_low) + np.absolute(err_high))/2.0
>>> # Do it with this function
>>> err_thisfun = poisson_symmetrical_errors(counts)
>>> # Test that results are always the same
>>> assert np.allclose(err_thisfun, err)
"""
from astropy.stats import poisson_conf_interval
counts_int = np.asarray(counts, dtype=np.int64)
count_values = np.nonzero(np.bincount(counts_int))[0]
err_low, err_high = \
poisson_conf_interval(count_values,
interval='frequentist-confidence', sigma=1)
# calculate approximately symmetric uncertainties
err_low -= np.asarray(count_values)
err_high -= np.asarray(count_values)
err = (np.absolute(err_low) + np.absolute(err_high)) / 2.0
idxs = np.searchsorted(count_values, counts_int)
return err[idxs]
def standard_error(xs, mean):
"""
Return the standard error of the mean (SEM) of an array of arrays.
Parameters
----------
xs : 2-d float array
List of data point arrays.
mean : 1-d float array
Average of the data points.
Returns
-------
standard_error : 1-d float array
Standard error of the mean (SEM).
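Examples
--------
>>> # two segments of two bins each; the SEM of each bin is 1
>>> xs = np.array([[1.0, 2.0], [3.0, 4.0]])
>>> standard_error(xs, np.mean(xs, axis=0))
array([1., 1.])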
"""
n_seg = len(xs)
xs_diff_sq = np.subtract(xs, mean) ** 2
standard_deviation = np.sum(xs_diff_sq, axis=0) / (n_seg - 1)
error = np.sqrt(standard_deviation / n_seg)
return error
def nearest_power_of_two(x):
"""
Return a number which is nearest to `x` and is the integral power of two.
Parameters
----------
x : int, float
Returns
-------
x_nearest : int
Number closest to `x` and is the integral power of two.
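Examples
--------
>>> # illustrative values only
>>> nearest_power_of_two(5)
4
>>> nearest_power_of_two(9)
8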
"""
x = int(x)
x_lower = 1 if x == 0 else 2 ** (x - 2).bit_length()
x_upper = 1 if x == 0 else 2 ** (x - 1).bit_length()
x_nearest = x_lower if (x - x_lower) < (x_upper - x) else x_upper
return x_nearest
def find_nearest(array, value):
"""
Return the array value that is closest to the input value (<NAME>:
Thanks StackOverflow!)
Parameters
----------
array : np.array of ints or floats
1-D array of numbers to search through. Should already be sorted
from low values to high values.
value : int or float
The value you want to find the closest to in the array.
Returns
-------
array[idx] : int or float
The array value that is closest to the input value.
idx : int
The index of the array of the closest value.
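Examples
--------
>>> # illustrative sorted array
>>> val, idx = find_nearest(np.array([1.0, 2.0, 4.0, 8.0]), 3.1)
>>> float(val), int(idx)
(4.0, 2)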
"""
idx = np.searchsorted(array, value, side="left")
if idx == len(array) or np.fabs(value - array[idx - 1]) < \
np.fabs(value - array[idx]):
return array[idx - 1], idx - 1
else:
return array[idx], idx
def genDataPath(dir_path):
"""Generates data path to chunks.
Parameters
----------
dir_path: string
Path to zarr datastore + Top level directory name for data
Returns
-------
list
List of path's to datastore
Raises
------
IOError
If directory does not exist
"""
path_list = []
if os.path.isdir(dir_path):
if not (os.path.isdir(os.path.join(dir_path, 'main_data/'))
and os.path.isdir(os.path.join(dir_path, 'meta_data/'))):
raise IOError(("Directory does not exist."))
else:
path_list.append(os.path.join(dir_path, 'main_data/'))
path_list.append(os.path.join(dir_path, 'meta_data/'))
return path_list
else:
raise IOError(("Directory does not exist."))
def interpret_times(time, mjdref=0):
"""Get time interval in seconds from an astropy Time object
Examples
--------
>>> time = Time(57483, format='mjd')
>>> newt, mjdref = interpret_times(time)
>>> newt == 0
True
>>> mjdref == 57483
True
>>> time = Time([57483], format='mjd')
>>> newt, mjdref = interpret_times(time)
>>> np.allclose(newt, 0)
True
>>> mjdref == 57483
True
>>> time = TimeDelta([3, 4, 5] * u.s)
>>> newt, mjdref = interpret_times(time)
>>> np.allclose(newt, [3, 4, 5])
True
>>> time = np.array([3, 4, 5])
>>> newt, mjdref = interpret_times(time, mjdref=45000)
>>> np.allclose(newt, [3, 4, 5])
True
>>> mjdref == 45000
True
>>> time = np.array([3, 4, 5] * u.s)
>>> newt, mjdref = interpret_times(time, mjdref=45000)
>>> np.allclose(newt, [3, 4, 5])
True
>>> mjdref == 45000
True
>>> newt, mjdref = interpret_times(1, mjdref=45000)
>>> newt == 1
True
>>> newt, mjdref = interpret_times(list, mjdref=45000)
Traceback (most recent call last):
...
ValueError: Unknown time format: ...
>>> newt, mjdref = interpret_times("guadfkljfd", mjdref=45000)
Traceback (most recent call last):
...
ValueError: Unknown time format: ...
"""
if isinstance(time, TimeDelta):
out_times = time.to('s').value
return out_times, mjdref
if isinstance(time, Time):
mjds = time.mjd
if mjdref == 0:
if np.all(mjds > 10000):
if isinstance(mjds, Iterable):
mjdref = mjds[0]
else:
mjdref = mjds
out_times = (mjds - mjdref) * 86400
return out_times, mjdref
if isinstance(time, Quantity):
out_times = time.to('s').value
return out_times, mjdref
if isinstance(time, (tuple, list, np.ndarray)):
return time, mjdref
if not isinstance(time, Iterable):
try:
float(time)
return time, mjdref
except (ValueError, TypeError):
pass
raise ValueError(f"Unknown time format: {type(time)}")
def check_iterables_close(iter0, iter1, **kwargs):
"""Check that the values produced by iterables are equal.
Uses `np.isclose` if the iterables produce single values per iteration,
`np.allclose` otherwise.
Additional keyword arguments are passed to `np.allclose`
and `np.isclose`.
Parameters
----------
iter0 : iterable
iter1 : iterable
Examples
--------
>>> iter0 = [0, 1]
>>> iter1 = [0, 2]
>>> check_iterables_close(iter0, iter1)
False
>>> iter0 = [(0, 0), (0, 1)]
>>> iter1 = [(0, 0.), (0, 1.)]
>>> check_iterables_close(iter0, iter1)
True
>>> iter1 = [(0, 0.), (0, 3.)]
>>> check_iterables_close(iter0, iter1)
False
"""
for i0, i1 in zip(iter0, iter1):
if isinstance(i0, Iterable):
if not np.allclose(i0, i1, **kwargs):
return False
elif not np.isclose(i0, i1, **kwargs):
return False
return True
import jax.numpy as jnp
from jax import grad, vmap, hessian, jit
from jax.config import config
config.update("jax_enable_x64", True)
# numpy
import numpy as onp
from numpy import random
import argparse
import logging
import datetime
from time import time
import os
# solving -grad(a*grad u) + alpha u^m = f on unit ball
# a = a(x,theta)
def get_parser():
parser = argparse.ArgumentParser(description='Parametric Elliptic equation GP solver')
parser.add_argument("--alpha", type=float, default = 0.0)
parser.add_argument("--m", type = int, default = 3)
parser.add_argument("--dim_x", type = int, default = 5)
parser.add_argument("--dim_theta", type = int, default = 5)
parser.add_argument("--kernel", type=str, default="anisotropic_Gaussian")
parser.add_argument("--sigma-scale_x", type = float, default = 0.25)
parser.add_argument("--sigma-scale_theta", type = float, default = 0.2)
# sigma_x = args.sigma_scale_x * sqrt(dim)
# sigma_theta = args.sigma_scale_theta * sqrt(dim)
parser.add_argument("--N_domain", type = int, default = 4000)
parser.add_argument("--N_boundary", type = int, default = 800)
parser.add_argument("--N_test", type = int, default = 2000)
parser.add_argument("--nugget", type = float, default = 1e-6)
parser.add_argument("--GNsteps", type = int, default = 1)
parser.add_argument("--logroot", type=str, default='./logs/')
parser.add_argument("--randomseed", type=int, default=1)
parser.add_argument("--num_exp", type=int, default=1)
args = parser.parse_args()
return args
@jit # tx is short hand of theta_x
def get_GNkernel_train(x,tx,y,ty,wx0,wx1,wxg,wy0,wy1,wyg,sigma):
# wx0 * delta_x + wxg * nabla_x delta_x + wx1 * Delta_x delta_x
return wx0*wy0*kappa(x,tx,y,ty,sigma) + wx0*wy1*Delta_y1_kappa(x,tx,y,ty,sigma) + wy0*wx1*Delta_x1_kappa(x,tx,y,ty,sigma) + wx1*wy1*Delta_x1_Delta_y1_kappa(x,tx,y,ty,sigma) + wx0*D_wy1_kappa(x,tx,y,ty,sigma,wyg) + wy0*D_wx1_kappa(x,tx,y,ty,sigma,wxg) + wx1*Delta_x1_D_wy1_kappa(x,tx,y,ty,sigma,wyg) + wy1*D_wx1_Delta_y1_kappa(x,tx,y,ty,sigma,wxg) + D_wx1_D_wy1_kappa(x,tx,y,ty,sigma,wxg,wyg)
@jit
def get_GNkernel_train_boundary(x,tx,y,ty,wy0,wy1,wyg,sigma):
return wy0*kappa(x,tx,y,ty,sigma) + wy1*Delta_y1_kappa(x,tx,y,ty,sigma) + D_wy1_kappa(x,tx,y,ty,sigma,wyg)
@jit
def get_GNkernel_val_predict(x,tx,y,ty,wy0,wy1,wyg,sigma):
return wy0*kappa(x,tx,y,ty,sigma) + wy1*Delta_y1_kappa(x,tx,y,ty,sigma) + D_wy1_kappa(x,tx,y,ty,sigma,wyg)
def assembly_Theta(X_domain, X_boundary, w0, w1, wg, sigma):
# X_domain, dim: N_domain*d;
# w0 col vec: coefs of Diracs, dim: N_domain;
# w1 coefs of Laplacians, dim: N_domain
N_domain,d = onp.shape(X_domain)
N_boundary,_ = onp.shape(X_boundary)
Theta = onp.zeros((N_domain+N_boundary,N_domain+N_boundary))
XdXd0 = onp.reshape(onp.tile(X_domain,(1,N_domain)),(-1,d))
XdXd1 = onp.tile(X_domain,(N_domain,1))
XbXd0 = onp.reshape(onp.tile(X_boundary,(1,N_domain)),(-1,d))
XbXd1 = onp.tile(X_domain,(N_boundary,1))
XbXb0 = onp.reshape(onp.tile(X_boundary,(1,N_boundary)),(-1,d))
XbXb1 = onp.tile(X_boundary,(N_boundary,1))
arr_wx0 = onp.reshape(onp.tile(w0,(1,N_domain)),(-1,1))
arr_wx1 = onp.reshape(onp.tile(w1,(1,N_domain)),(-1,1))
arr_wxg = onp.reshape(onp.tile(wg,(1,N_domain)),(-1,d_x))
arr_wy0 = onp.tile(w0,(N_domain,1))
arr_wy1 = onp.tile(w1,(N_domain,1))
arr_wyg = onp.tile(wg,(N_domain,1))
arr_wy0_bd = onp.tile(w0,(N_boundary,1))
arr_wy1_bd = onp.tile(w1,(N_boundary,1))
arr_wyg_bd = onp.tile(wg,(N_boundary,1))
val = vmap(lambda x,tx,y,ty,wx0,wx1,wxg,wy0,wy1,wyg: get_GNkernel_train(x,tx,y,ty,wx0,wx1,wxg,wy0,wy1,wyg,sigma))(XdXd0[:,:d_x],XdXd0[:,d_x:],XdXd1[:,:d_x],XdXd1[:,d_x:],arr_wx0,arr_wx1,arr_wxg,arr_wy0,arr_wy1,arr_wyg)
Theta[:N_domain,:N_domain] = onp.reshape(val, (N_domain,N_domain))
val = vmap(lambda x,tx,y,ty,wy0,wy1,wyg: get_GNkernel_train_boundary(x,tx,y,ty,wy0,wy1,wyg,sigma))(XbXd0[:,:d_x],XbXd0[:,d_x:],XbXd1[:,:d_x],XbXd1[:,d_x:],arr_wy0_bd,arr_wy1_bd,arr_wyg_bd)
Theta[N_domain:,:N_domain] = onp.reshape(val, (N_boundary,N_domain))
"""Model definitions for Monotonic Gaussian Process with Identity Mean Function."""
import tqdm
import numpy as np
import meta.model as model_template
import model
import util.dtype as dtype_util
import util.model as model_util
import util.kernel as kernel_util
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from tensorflow_probability import edward2 as ed
_CDF_PENALTY_MEAN_SHIFT = np.array(-5e-3).astype(dtype_util.NP_DTYPE)
_CDF_PENALTY_SCALE = np.array(1e-3).astype(dtype_util.NP_DTYPE)
_DEFAULT_CDF_LABEL_BANDWIDTH = .1
_WEIGHT_PRIOR_SDEV = np.array(1.).astype(dtype_util.NP_DTYPE)
_LOG_NOISE_PRIOR_MEAN = np.array(-1.).astype(dtype_util.NP_DTYPE)
_LOG_NOISE_PRIOR_SDEV = np.array(1.).astype(dtype_util.NP_DTYPE)
NULL_CDF_VAL = np.nan
class MonoGP(model_template.Model):
"""Class definition for CDF calibration model.
Prior:
noise ~ N(0, sigma)
F ~ GP(F_S, kern_func(F_S, X))
Model:
F_emp = F + noise
"""
def __init__(self, X, y,
X_induce, cdf_sample_induce,
log_ls,
kern_func=kernel_util.rbf,
activation=model_util.relu1):
"""Initializer.
Args:
X: (np.ndarray of float32) Training features, shape (n_obs, n_dim),
X_induce: (np.ndarray of float32) Inducing points for training features, shape (n_obs_induce, n_dim),
cdf_sample_induce: (dict of np.ndarray) A dictionary of two items:
`perc_eval`: y locations where CDF are evaluated,
shape (n_eval, ).
`quantile`: predictive CDF values for n_obs locations
in sample_dict, evaluated at perc_eval,
shape (n_eval, n_obs).
y: (np.ndarray of float32) Training labels, shape (n_obs, ).
log_ls: (float32) length scale parameter.
kern_func: (function) kernel function for the gaussian process.
Default to rbf.
"""
self.model_name = "Monotonic Gaussian Process"
self.param_names = ("gp", "log_sigma",)
self.sample_names = ("gp", "log_sigma",
"noise", "cdf")
# initiate parameter dictionaries.
super().__init__(self.param_names, self.sample_names)
# data handling
self.X = X
self.X_induce = X_induce
self.y = y
self.perc_eval = cdf_sample_induce["perc_eval"]
self.quant_val = cdf_sample_induce["quantile"]
self.ls = tf.exp(log_ls)
self.kern_func = kern_func
self.activation = activation
# record statistics
self.n_obs, self.n_dim = self.X.shape
self.n_obs_induce, n_dim_induce = self.X_induce.shape
self.n_eval = len(self.perc_eval)
self.n_cdf_obs = self.n_eval * self.n_obs_induce
self.param_dims = {"gp": (self.n_cdf_obs,),
"log_sigma": ()}
# check data
Ny = self.y.size
if self.n_obs != Ny:
raise ValueError("Sample sizes in X ({}) and "
"y ({}) not equal".format(self.n_obs, Ny))
if self.n_dim != n_dim_induce:
raise ValueError("Dimension in X ({}) and "
"X_induce ({}) not equal".format(self.n_dim, n_dim_induce))
# make model and empirical cdfs, shape (n_eval*n_obs, ...)
self.model_cdf, self.cdf_feature = (
self._make_cdf_features(self.perc_eval, self.X_induce))
self.empir_cdf = self._make_cdf_labels(bandwidth=_DEFAULT_CDF_LABEL_BANDWIDTH)
# initiate a zero-mean GP.
self.gp_model = model.GaussianProcess(X=self.cdf_feature,
y=self.empir_cdf,
log_ls=log_ls,
kern_func=kern_func)
self.outcome_obs = self.empir_cdf
def definition(self, **resid_kwargs):
"""Defines Gaussian process with identity mean function.
Args:
**resid_kwargs: Keyword arguments for GaussianProcess model
definition.
Returns:
(ed.RandomVariable) outcome random variable.
"""
# specify identity mean function
mean_func = self.model_cdf
# specify residual function
gp = self.gp_model.definition(gp_only=True,
name="gp",
**resid_kwargs)
# specify observational noise
sigma = ed.Normal(loc=_LOG_NOISE_PRIOR_MEAN,
scale=_LOG_NOISE_PRIOR_SDEV, name="log_sigma")
# specify outcome
cdf_mean = mean_func + gp
if self.activation:
cdf_mean = self.activation(cdf_mean)
cdf = ed.MultivariateNormalDiag(loc=cdf_mean,
scale_identity_multiplier=tf.exp(sigma),
name="cdf")
return cdf
def likelihood(self, outcome_rv, outcome_value,
cdf_constraint=False,
constraint_penalty=_CDF_PENALTY_SCALE):
"""Returns tensor of constrained model likelihood.
Adds Probit range constraints to Gaussian process in log likelihood.
Note:
Currently cdf_constraint will over-constrain the CDF estimate to be
away from 0 and 1. More research is needed.
Args:
outcome_rv: (ed.RandomVariable) A random variable representing model outcome.
outcome_value: (np.ndarray) Values of the training data.
cdf_constraint: (bool) Whether to constraint cdf.
constraint_penalty: (float) Penalty factor for likelihood constraints.
Returns:
(tf.Tensor) A tf.Tensor representing likelihood values to be optimized.
"""
log_penalties = 0.
if cdf_constraint:
# construct penalties
cdf_ge_zero = tfd.Normal(
loc=_CDF_PENALTY_MEAN_SHIFT,
scale=constraint_penalty).log_cdf(outcome_rv)
cdf_le_one = tfd.Normal(
loc=_CDF_PENALTY_MEAN_SHIFT,
scale=constraint_penalty).log_cdf(1 - outcome_rv)
log_penalties = [cdf_ge_zero, cdf_le_one]
log_penalties = tf.reduce_mean(log_penalties)
# define likelihood
log_likelihood = outcome_rv.distribution.log_prob(outcome_value)
return log_likelihood + log_penalties
def variational_family(self, **resid_kwargs):
"""Defines variational family and parameters.
Args:
**resid_kwargs: Keyword arguments for GaussianProcess model's
variational family.
"""
param_dict_all = dict()
for param_name, param_dim in self.param_dims.items():
if param_name == "gp":
continue
param_dict_all[param_name] = (
model_util.normal_variational_family(shape=param_dim,
name=param_name))
# compile rv and param dicts
self.model_param, self.vi_param = model_util.make_param_dict(param_dict_all)
# Add vi family for resid_gp
gp_model_param, gp_vi_param = (
self.gp_model.variational_family(**resid_kwargs,
name="gp",
return_vi_param=True, )
)
self.model_param.update(gp_model_param)
self.vi_param.update(gp_vi_param)
return self.model_param
def posterior_sample(self, rv_dict, n_sample):
"""Sample posterior distribution for training sample.
Args:
rv_dict: (dict of RandomVariable) Dictionary of RandomVariables
following same structure as self.model_param
n_sample: (int) Number of samples.
Returns:
(dict of tf.Tensor) A dictionary of tf.Tensor representing
sampled values, shape (n_sample, n_cdf_obs)
"""
post_sample_dict = dict()
# fill in parameter samples, shape (n_sample, param_dims)
for param_name in self.param_names:
post_sample_dict[param_name] = (
rv_dict[param_name].distribution.sample(n_sample))
# add mean_func, shape (n_sample, n_cdf_obs)
post_sample_dict["mean_func"] = tf.tile(
tf.expand_dims(self.model_cdf, 0),
multiples=[n_sample, 1]
)
# make noise, shape (n_sample, n_cdf_obs)
post_sample_dict["noise"] = model_util.sample_noise_using_sigma(
log_sigma_sample=post_sample_dict["log_sigma"],
n_obs=self.n_cdf_obs)
# make cdf prediction, shape (n_sample, n_cdf_obs)
post_sample_dict["y_eval"] = tf.convert_to_tensor(
self.quant_val, dtype=dtype_util.TF_DTYPE)
cdf_mean = post_sample_dict["mean_func"] + post_sample_dict["gp"]
if self.activation:
cdf_mean = self.activation(cdf_mean)
post_sample_dict["cdf"] = cdf_mean + post_sample_dict["noise"]
post_sample_dict["cdf_orig"] = tf.reshape(self.model_cdf,
shape=(self.n_eval, self.n_obs_induce))
return post_sample_dict
def predictive_sample(self,
X_new, quant_dict_new,
post_sample_dict,
reshape=True,
verbose=False,
**resid_kwargs):
"""Samples new observations.
Args:
X_new: (np.ndarray) New observations of shape (n_obs_new, n_dim).
quant_dict_new: (dict) Dictionary of cdf values at prediction locations,
contains:
`perc_eval`: Number of eval locations (n_eval_new, )
`quantile`: (n_eval_new, n_obs_new).
post_sample_dict: (dict of np.ndarray) Dictionary of sampled values.
following same format as output of posterior_sample.
**resid_kwargs: Keyword arguments to pass to
GaussianProcess.predictive_sample
Returns:
(dict of tf.Tensor) Dictionary of predictive cdf samples,
with keys containing those in self.sample_names.
Specifically, pred_sample_dict["cdf"] is of shape
(n_sample, n_eval_new, n_obs_new, )
"""
# prepare predictive features, shape (n_cdf_obs_new, 1 + n_dim)
# where n_cdf_obs_new = n_eval_new * n_obs_new
cdf_val_new = quant_dict_new["perc_eval"]
y_eval_new = quant_dict_new["quantile"]
n_sample, n_cdf_obs = post_sample_dict["cdf"].shape
n_eval_new, n_obs_new = y_eval_new.shape
n_obs_X_new, n_dim_X_new = X_new.shape
n_cdf_obs_new = n_eval_new * n_obs_new
if n_dim_X_new != self.n_dim:
raise ValueError(
"Feature dimension in X_new ({}) and "
"model dimension ({}) not equal!".format(n_dim_X_new, self.n_dim))
if n_obs_X_new != n_obs_new:
raise ValueError(
"Sample size in X_new ({}) and "
"quant_dict_new ({}) not equal!".format(n_obs_X_new, n_obs_new))
model_cdf_new, cdf_feature_new = (
self._make_cdf_features(cdf_val_new, X_new))
# prepare prediction dictionary, all shape (n_sample, n_cdf_obs_new)
pred_sample_dict = dict()
# make gp prediction (looping over chunks to avoid large matrix computation)
gp_pred_list = []
gp_feature_iter = np.split(cdf_feature_new, n_eval_new)
if verbose:
gp_feature_iter = tqdm.tqdm(gp_feature_iter)
for cdf_feature_chunk in gp_feature_iter:
pred_sample_chunk = (
self.gp_model.predictive_sample(
cdf_feature_chunk,
f_sample=post_sample_dict["gp"],
return_dict=False,
**resid_kwargs))
gp_pred_list.append(pred_sample_chunk)
pred_sample_dict["gp"] = tf.concat(gp_pred_list, axis=-1)
# make mean_func prediction
pred_sample_dict["mean_func"] = tf.tile(
tf.expand_dims(model_cdf_new, 0),
multiples=[n_sample, 1]
)
# make noise prediction.
pred_sample_dict["log_sigma"] = (
tf.convert_to_tensor(post_sample_dict["log_sigma"],
dtype=dtype_util.TF_DTYPE))
pred_sample_dict["noise"] = model_util.sample_noise_using_sigma(
log_sigma_sample=post_sample_dict["log_sigma"],
n_obs=n_cdf_obs_new)
# make cdf prediction, shape (n_sample, n_cdf_obs_new)
pred_sample_dict["y_eval"] = tf.convert_to_tensor(
y_eval_new, dtype=dtype_util.TF_DTYPE)
cdf_mean = pred_sample_dict["mean_func"] + pred_sample_dict["gp"]
if self.activation:
cdf_mean = self.activation(cdf_mean)
pred_sample_dict["cdf"] = cdf_mean + pred_sample_dict["noise"]
if reshape:
# reshape cdf to (n_sample, n_eval_new, n_obs_new)
pred_sample_dict["cdf"] = tf.reshape(
pred_sample_dict["cdf"],
shape=(n_sample, n_eval_new, n_obs_new))
# make original cdf prediction, shape (n_eval_new, n_obs_new)
pred_sample_dict["cdf_orig"] = tf.reshape(model_cdf_new,
shape=(n_eval_new, n_obs_new))
return pred_sample_dict
@staticmethod
def _make_cdf_features(cdf_val, X, flatten=True):
"""Produces CDF features [F(y|X), X].
Outputs an array [F(y|x), x] of shape (n_eval * n_obs, 1 + n_dim).
Args:
flatten: (bool) Whether to flatten output.
Returns:
cdf_feature (np.ndarray) CDF feature only, shape (n_eval * n_obs, )
feature_all (np.ndarray) CDF and original input features of shape (n_eval * n_obs, 1 + n_dim).
"""
return _join_cdf_and_feature(cdf_array=cdf_val,
feature_array=X,
flatten=flatten)
def _make_cdf_labels(self, flatten=True, bandwidth=0.1):
"""Makes empirical cdf I(y < perc_eval).
Args:
flatten: (bool) Whether to flatten final array.
Returns:
(n_eval, n_obs) Evaluated empirical cdf.
"""
return _make_empirical_cdf(y_eval=self.quant_val,
y_obs=self.y,
X_obs=self.X,
X_induce=self.X_induce,
flatten=flatten,
bandwidth=bandwidth)
def _join_cdf_and_feature(cdf_array, feature_array, flatten=True):
"""Produces CDF features [F(y|feature_array), feature_array].
Outputs an array [F(y|x), x] of shape (n_eval * n_obs, 1 + n_dim).
Args:
cdf_array: (np.ndarray) CDF values of shape (n_eval, ).
feature_array: (np.ndarray) Features of shape (n_obs, n_dim).
flatten: (bool) Whether to flatten output.
Returns:
cdf_feature (np.ndarray) CDF feature only, shape (n_eval * n_obs, )
feature_all (np.ndarray) CDF and original input features of shape (n_eval * n_obs, 1 + n_dim).
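Example (illustrative shapes only):
>>> cdf_feat, feat_all = _join_cdf_and_feature(np.array([0.25, 0.75]), np.zeros((3, 2)))
>>> feat_all.shape
(6, 3)
>>> cdf_feat.shape
(6,)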
"""
n_eval, = cdf_array.shape
n_obs, n_dim = feature_array.shape
# repeat
cdf_feature = np.tile(np.reshape(cdf_array, [cdf_array.size, 1, 1]),
(1, n_obs, 1)) # shape (n_eval, n_obs, 1)
X_feature = np.tile(np.expand_dims(feature_array, 0),
(n_eval, 1, 1)) # shape (n_eval, n_obs, n_dim)
# assemble features to wide format, shape (n_eval, n_obs, 1 + n_dim)
feature_all = np.concatenate([cdf_feature, X_feature], axis=-1)
# convert features to long format, shape (n_eval * n_obs, 1 + n_dim)
if flatten:
feature_all = feature_all.reshape(n_eval * n_obs, 1 + n_dim)
feature_cdf = feature_all[..., 0]
return feature_cdf, feature_all
def _make_empirical_cdf(y_eval, y_obs, X_obs,
X_induce=None,
flatten=True, bandwidth=0.1):
"""Makes empirical cdf I(y < perc_eval).
Args:
y_eval: (np.ndarray of float32) y locations where CDF
are evaluated, shape (n_eval, n_obs_induce).
y_obs: (np.ndarray of float32) Training labels, shape (n_obs, ).
X_obs: (np.ndarray of float32) Training features, shape (n_obs, n_dim).
X_induce: (np.ndarray of float32) Inducing features, shape (n_obs_induce, n_dim).
Returns:
(np.ndarray) Evaluated empirical cdf,
shape (n_eval, n_obs_induce) if flatten = False,
or (n_eval * n_obs_induce, ) if flatten = True
"""
# TODO(jereliu): check this
if X_induce is None:
X_induce = X_obs
# reshape input for broadcasting
n_eval, n_obs_induce = y_eval.shape
n_obs, = y_obs.shape
n_obs_X, n_dim = X_obs.shape
n_obs_induce_X, n_dim_induce = X_induce.shape
if n_obs_induce != n_obs_induce_X:
raise ValueError("Different sample size in y_eval ({}) "
"and X_induce ({})".format(n_obs_induce, n_obs_induce_X))
if n_obs != n_obs_X:
raise ValueError("Different sample size in y_obs ({}) "
"and X_obs ({})".format(n_obs, n_obs_X))
if n_dim != n_dim_induce:
raise ValueError("Different dimension in X_obs ({}) "
"and X_induce ({})".format(n_dim, n_dim_induce))
# make comparison matrix, shape (n_eval, n_obs_induce, n_obs)
y_obs = y_obs[None, None, :] # shape (1, 1, n_obs)
y_eval = y_eval[:, :, None] # shape (n_eval, n_obs_induce, 1)
comp_array = (y_obs < y_eval).astype(dtype_util.NP_DTYPE) # shape (n_eval, n_obs_induce, n_obs)
# make mask, shape (n_eval, n_obs_induce, n_obs)
thres_mask = model_util.make_distance_mask(X_induce, X_obs,
threshold=bandwidth)
thres_mask = np.tile(np.expand_dims(thres_mask, 0), (n_eval, 1, 1))  # tile reps inferred from the target shape noted above
"""
SANS reduction steps
====================
Set of reduction steps for SANS reduction.
"""
from __future__ import print_function
from posixpath import basename, join
from copy import copy, deepcopy
from io import BytesIO
from collections import OrderedDict
import numpy as np
from dataflow.lib.uncertainty import Uncertainty
from dataflow.lib import uncertainty
from .sansdata import RawSANSData, SansData, Sans1dData, SansIQData, Parameters
from .sans_vaxformat import readNCNRSensitivity
from vsansred.steps import _s, _b
ALL_ACTIONS = []
IGNORE_CORNER_PIXELS = True
def cache(action):
"""
Decorator which adds the *cached* attribute to the function.
Use *@cache* to force caching to always occur (for example, when
the function references remote resources, vastly reduces memory, or is
expensive to compute). Use *@nocache* when debugging a function
so that it will be recomputed each time regardless of whether or not it
is seen again.
"""
action.cached = True
return action
def nocache(action):
"""
Decorator which adds the *cached* attribute to the function.
Use *@cache* to force caching to always occur (for example, when
the function references remote resources, vastly reduces memory, or is
expensive to compute). Use *@nocache* when debugging a function
so that it will be recomputed each time regardless of whether or not it
is seen again.
"""
action.cached = False
return action
def module(action):
"""
Decorator which records the action in *ALL_ACTIONS*.
This just collects the action, it does not otherwise modify it.
"""
ALL_ACTIONS.append(action)
# This is a decorator, so return the original function
return action
#################
# Loader stuff
#################
@cache
@module
def LoadDIV(filelist=None, variance=0.0001):
"""
loads a DIV file (VAX format) into a SansData obj and returns that.
**Inputs**
filelist (fileinfo[]): Files to open.
variance (float): Target variance of DIV measurement (default 0.0001, i.e. 1% error)
**Returns**
output (sans2d[]): all the entries loaded.
2018-04-21 <NAME>
"""
from dataflow.fetch import url_get
from .sans_vaxformat import readNCNRSensitivity
output = []
if filelist is not None:
for fileinfo in filelist:
path, mtime, entries = fileinfo['path'], fileinfo.get('mtime', None), fileinfo.get('entries', None)
name = basename(path)
fid = BytesIO(url_get(fileinfo, mtime_check=False))
sens_raw = readNCNRSensitivity(fid)
sens = SansData(Uncertainty(sens_raw, sens_raw * variance))
sens.metadata = OrderedDict([
("run.filename", name),
("analysis.groupid", -1),
("analysis.intent", "DIV"),
("analysis.filepurpose", "Sensitivity"),
("run.experimentScanID", name),
("sample.description", "PLEX"),
("entry", "entry"),
("sample.labl", "PLEX"),
("run.configuration", "DIV"),
])
output.append(sens)
return output
@cache
@module
def LoadRawSANS(filelist=None, check_timestamps=True):
"""
loads a data file into a RawSansData obj and returns that.
**Inputs**
filelist (fileinfo[]): Files to open.
check_timestamps (bool): verify that timestamps on file match request
**Returns**
output (raw[]): all the entries loaded.
2018-04-23 <NAME>
"""
from dataflow.fetch import url_get
from .loader import readSANSNexuz
if filelist is None:
filelist = []
data = []
for fileinfo in filelist:
path, mtime, entries = fileinfo['path'], fileinfo.get('mtime', None), fileinfo.get('entries', None)
name = basename(path)
fid = BytesIO(url_get(fileinfo, mtime_check=check_timestamps))
if name.upper().endswith(".DIV"):
sens_raw = readNCNRSensitivity(fid)
detectors = [{"detector": {"data": {"value": Uncertainty(sens_raw, sens_raw * 0.0001)}}}]
metadata = OrderedDict([
("run.filename", name),
("analysis.groupid", -1),
("analysis.intent", "DIV"),
("analysis.filepurpose", "Sensitivity"),
("run.experimentScanID", name),
("sample.description", "PLEX"),
("entry", "entry"),
("sample.labl", "PLEX"),
("run.configuration", "DIV"),
])
sens = RawSANSData(metadata=metadata, detectors=detectors)
entries = [sens]
else:
entries = readSANSNexuz(name, fid)
data.extend(entries)
return data
@cache
@module
def patch(data, patches=None):
"""
applies metadata patches (keyed by run.filename) to the given datafiles and returns them.
**Inputs**
data (raw[]): datafiles with metadata to patch
patches (patch_metadata[]:run.filename): patches to be applied, with run.filename used as unique key
**Returns**
patched (raw[]): datafiles with patched metadata
2019-07-26 <NAME>
"""
if patches is None:
return data
from jsonpatch import JsonPatch
# make a master dict of metadata from provided key:
key="run.filename"
master = OrderedDict([(_s(d.metadata[key]), d.metadata) for d in data])
to_apply = JsonPatch(patches)
to_apply.apply(master, in_place=True)
return data
@cache
@module
def autosort(rawdata, subsort="det.des_dis", add_scattering=True):
"""
redirects a batch of files to different outputs based on metadata in the files
**Inputs**
rawdata (raw[]): datafiles with metadata to allow sorting
subsort (str): key on which to order subitems within output lists
add_scattering {Add sample scatterings together} (bool): Add sample scatterings, within
group defined by subsort key
**Returns**
sample_scatt (sans2d[]): Sample Scattering
blocked_beam (sans2d[]): Blocked Beam
empty_scatt (sans2d[]): Empty Cell Scattering
sample_trans (sans2d[]): Sample Transmission
empty_trans (sans2d[]): Empty Cell Transmission
2019-07-24 <NAME>
"""
sample_scatt = []
blocked_beam = []
empty_scatt = []
sample_trans = []
empty_trans = []
print(rawdata)
for r in rawdata:
purpose = _s(r.metadata['analysis.filepurpose'])
intent = _s(r.metadata['analysis.intent'])
if intent.lower().strip().startswith('blo'):
blocked_beam.extend(to_sansdata(r))
elif purpose.lower() == 'scattering' and intent.lower() == 'sample':
sample_scatt.extend(to_sansdata(r))
elif purpose.lower() == 'scattering' and intent.lower().startswith('empty'):
empty_scatt.extend(to_sansdata(r))
elif purpose.lower() == 'transmission' and intent.lower() == 'sample':
sample_trans.extend(to_sansdata(r))
elif purpose.lower() == 'transmission' and intent.lower().startswith('empty'):
empty_trans.extend(to_sansdata(r))
def keyFunc(l):
return l.metadata.get(subsort, 0)
for output in [sample_scatt, blocked_beam, empty_scatt, sample_trans, empty_trans]:
output.sort(key=keyFunc)
if add_scattering:
added_samples = OrderedDict()
for s in sample_scatt:
key = keyFunc(s)
added_samples.setdefault(key, [])
added_samples[key].append(s)
for key in added_samples:
added_samples[key] = addSimple(added_samples[key])
sample_scatt = list(added_samples.values())
return sample_scatt, blocked_beam, empty_scatt, sample_trans, empty_trans
@cache
@module
def LoadSANS(filelist=None, flip=False, transpose=False, check_timestamps=True):
"""
loads a data file into a SansData obj and returns that.
Checks to see if data being loaded is 2D; if not, quits
**Inputs**
filelist (fileinfo[]): Files to open.
flip (bool): flip the data up and down
transpose (bool): transpose the data
check_timestamps (bool): verify that timestamps on file match request
**Returns**
output (raw[]): all the entries loaded.
| 2019-07-26 <NAME>
| 2019-08-09 <NAME> adding new stripped sample description
"""
rawdata = LoadRawSANS(filelist, check_timestamps=check_timestamps)
sansdata = []
for r in rawdata:
sansdata.extend(to_sansdata(r, flip=flip, transpose=transpose))
return sansdata
def to_sansdata(rawdata, flip=False, transpose=False):
areaDetector = rawdata.detectors['detector']['data']['value']
shape = areaDetector.shape
if len(shape) < 2 or len(shape) > 3:
raise ValueError("areaDetector data must have dimension 2 or 3")
if len(shape) == 2:
# add another dimension at the front
shape = (1,) + shape
areaDetector = areaDetector.reshape(shape)
datasets = []
for i in range(shape[0]):
subset = areaDetector[i].copy()
if flip:
subset = np.fliplr(subset)
if transpose:
subset = subset.T
datasets.append(SansData(data=subset, metadata=rawdata.metadata))
return datasets
"""
Variable vz_1 = 3.956e5 //velocity [cm/s] of 1 A neutron
Variable g = 981.0 //gravity acceleration [cm/s^2]
Variable m_h = 252.8 // m/h [=] s/cm^2
//// //
Variable yg_d,acc,sdd,ssd,lambda0,DL_L,sig_l
Variable var_qlx,var_qly,var_ql,qx,qy,sig_perp,sig_para, sig_para_new
G = 981. //! ACCELERATION OF GRAVITY, CM/SEC^2
acc = vz_1 // 3.956E5 //! CONVERT WAVELENGTH TO VELOCITY CM/SEC
SDD = L2 //1317
SSD = L1 //1627 //cm
lambda0 = lambda // 15
DL_L = lambdaWidth //0.236
SIG_L = DL_L/sqrt(6)
YG_d = -0.5*G*SDD*(SSD+SDD)*(LAMBDA0/acc)^2
///// Print "DISTANCE BEAM FALLS DUE TO GRAVITY (CM) = ",YG
// Print "Gravity q* = ",-2*pi/lambda0*2*yg_d/sdd
sig_perp = kap*kap/12 * (3*(S1/L1)^2 + 3*(S2/LP)^2 + (proj_DDet/L2)^2)
sig_perp = sqrt(sig_perp)
FindQxQy(inQ,phi,qx,qy)
// missing a factor of 2 here, and the form is different than the paper, so re-write
// VAR_QLY = SIG_L^2 * (QY+4*PI*YG_d/(2*SDD*LAMBDA0))^2
// VAR_QLX = (SIG_L*QX)^2
// VAR_QL = VAR_QLY + VAR_QLX //! WAVELENGTH CONTRIBUTION TO VARIANCE
// sig_para = (sig_perp^2 + VAR_QL)^0.5
// r_dist is passed in, [=]cm
// from the paper
a_val = 0.5*G*SDD*(SSD+SDD)*m_h^2 * 1e-16 //units now are cm /(A^2)
r_dist = sqrt( (pixSize*((p+1)-xctr))^2 + (pixSize*((q+1)-yctr)+(2)*yg_d)^2 ) //radial distance from ctr to pt
var_QL = 1/6*(kap/SDD)^2*(DL_L)^2*(r_dist^2 - 4*r_dist*a_val*lambda0^2*sin(phi) + 4*a_val^2*lambda0^4)
sig_para_new = (sig_perp^2 + VAR_QL)^0.5
///// return values PBR
SigmaQX = sig_para_new
SigmaQy = sig_perp
////
results = "success"
Return results
End
"""
#@module
def draw_ellipse(data, ellipse=[0,0,0.01,0.01]):
"""
draw an ellipse on the data
**Inputs**
data (sans2d): data in
ellipse (range:ellipse): ellipse to draw
**Returns**
output (sans2d): the same data
2019-07-31 <NAME>
"""
return data
#@cache
#@module
def calculateDQ(data):
"""
Add the dQ column to the data, based on slit apertures and gravity
r_dist is the real-space distance from ctr of detector to QxQy pixel location
From `NCNR_Utils.ipf` (<NAME>) in which the math is in turn from:
| <NAME>, <NAME> & <NAME>. Appl. Cryst. (2011). 44, 1127-1129.
| *The effect of gravity on the resolution of small-angle neutron diffraction peaks*
| [ doi:10.1107/S0021889811033322 ]
**Inputs**
data (sans2d): data in
**Returns**
output (sans2d): data in with dQ column filled in
2017-06-16 <NAME>
"""
G = 981. #! ACCELERATION OF GRAVITY, CM/SEC^2
acc = vz_1 = 3.956e5 # velocity [cm/s] of 1 A neutron
m_h = 252.8 # m/h [=] s/cm^2
# the detector pixel is square, so correct for phi
DDetX = data.metadata["det.pixelsizex"]
DDetY = data.metadata["det.pixelsizey"]
xctr = data.metadata["det.beamx"]
yctr = data.metadata["det.beamy"]
shape = data.data.x.shape
x, y = np.indices(shape) + 1.0 # detector indexing starts at 1...
X = DDetX * (x-xctr)
Y = DDetY * (y-yctr)
sampleOff = data.metadata["sample.position"]
apOff = data.metadata["resolution.ap2Off"]
S1 = data.metadata["resolution.ap1"] / 2.0 # use radius
S2 = data.metadata["resolution.ap2"] / 2.0 # use radius
L1 = data.metadata["resolution.ap12dis"]
L2 = data.metadata["det.dis"] + sampleOff + apOff
LP = 1.0/( 1.0/L1 + 1.0/L2)
SDD = L2
SSD = L1
lambda0 = data.metadata["resolution.lmda"] # 15
DL_L = data.metadata["resolution.dlmda"] # 0.236
YG_d = -0.5*G*SDD*(SSD+SDD)*(lambda0/acc)**2
kap = 2.0*np.pi/lambda0
phi = np.mod(np.arctan2(Y + 2.0*YG_d, X), 2.0*np.pi) # from x-axis, from 0 to 2PI
proj_DDet = np.abs(DDetX*np.cos(phi)) + np.abs(DDetY*np.sin(phi))
r_dist = np.sqrt(X**2 + (Y + 2.0*YG_d)**2) #radial distance from ctr to pt
sig_perp = kap*kap/12.0 * (3.0*(S1/L1)**2 + 3.0*(S2/LP)**2 + (proj_DDet/L2)**2)
sig_perp = np.sqrt(sig_perp)
a_val = 0.5*G*SDD*(SSD+SDD)*m_h**2 * 1e-16 # units now are cm /(A^2)
var_QL = 1.0/6.0*((kap/SDD)**2)*(DL_L**2)*(r_dist**2 - 4.0*r_dist*a_val*(lambda0**2)*np.sin(phi) + 4.0*(a_val**2)*(lambda0**4))
sig_para_new = np.sqrt(sig_perp**2 + var_QL)
data.dq_perp = sig_perp
data.dq_para = sig_para_new
return data
def calculateMeanQ(data):
""" calculate the overlap of the beamstop with the pixel """
from scipy.special import erf
BS = data.metadata['det.bstop'] / 2.0 # diameter to radius, already in cm
DDetX = data.metadata["det.pixelsizex"]
DDetY = data.metadata["det.pixelsizey"]
sampleOff = data.metadata["sample.position"]
apOff = data.metadata["resolution.ap2Off"]
wavelength = data.metadata['resolution.lmda']
L1 = data.metadata["resolution.ap12dis"]
L2 = data.metadata["det.dis"] + sampleOff + apOff
LB = 20.1 + 1.61*BS # empirical formula from NCNR_Utils.ipf, line 123 in "getResolution"
BS_prime = BS + (BS * LB / (L2 - LB)) # adding triangular shadow from LB to L2
r0 = data.r
r0_mean = r0.copy()
# width of the resolution function, on average
# could be corrected for phi if taking into account non-square pixels...
v_d = ((DDetX + DDetY) / (2.0 * np.sqrt(np.log(256.0))))**2
# cutoff_weight ~ integral[-inf, r0-BS_prime]1/sqrt(2*pi*dq**2) * exp(-r**2 / (2*dq**2))
# = 0.5 * (1.0 + erf((r0 - BS_prime)/(2.0 * dq)))
shadow_factor = 0.5 * (1.0 + erf((r0 - BS_prime) / np.sqrt(2.0 * v_d)))
shadow_factor[shadow_factor<1e-16] = 1e-16
#inside_mask = (r0 <= BS_prime)
#outside_mask = np.logical_not(inside_mask)
# inside the beamstop, the center of mass of the distribution is displaced by
# the center of the cutoff tail (relative to r0) on the high side, approx.
# cutoff_weighted_integral ~ integral[BS_prime - r0, inf] 1/sqrt(2*pi*dq**2) * r * exp(-r**2 / (2*dq**2))
# = 1.0/sqrt(2*pi*dq**2) * 1.0/dq**2 * exp(-(BS_prime - r0)**2 / (2 * dq**2))
#
# then new_center = r0 + cutoff_weighted_integral / cutoff_integral
# but the cutoff_integral = shadow_factor, so :
#
# cutoff_weighted_integral_inside = 1.0/(np.sqrt(2.0 * np.pi) * dq[inside_mask]**3) * np.exp(-(BS_prime - r0[inside_mask])**2 / (2 * dq[inside_mask]**2))
# cutoff_center_inside = cutoff_weighted_integral_inside / shadow_factor[inside_mask]
# r0_mean[inside_mask] += cutoff_center_inside
# outside the beamstop, the center of mass of the distribution is displaced by the center
# of what is left after subtracting the cutoff tail, but the weighted sum of
# cutoff_center * cutoff_integral + remainder_center * remainder_integral == 0!
# (equivalent to saying cutoff_weighted_integral + remainder_weighted_integral = 0)
# and also we know that cutoff_integral + remainder_integral = 1 (normalized gaussian)
# cutoff_weighted_integral ~ integral[-inf, r0-BS_prime] 1/sqrt(2*pi*dq**2) r exp(-r**2 / (2*dq**2))
# = -1.0/sqrt(2*pi*dq**2) * 1.0/dq**2 * exp(-(r0 - BS_prime)**2 / (2 * dq**2))
# remainder_weighted_integral = -(cutoff_weighted_integral)
#
# remainder_center = remainder_weighted_integral / remainder_integral
# = remainder_weighted_integral / (1 - cutoff_integral)
# = -cutoff_weighted_integral / (1 - cutoff_integral)
# then new_center * = r0 - cutoff_weighted_integral / shadow_factor
# but the cutoff_weight = shadow_factor and total_weight = 1.0, so:
#
## cutoff_weighted_integral_outside = -1.0/(np.sqrt(2.0 * np.pi) * dq[outside_mask]**3) * np.exp(-(r0[outside_mask] - BS_prime)**2 / (2 * dq[outside_mask]**2))
# but noticing that the expression for cutoff_weighted_integral_inside is the same numerically
# (swapping positions of r0 and BS_prime has no effect) then this gets easier:
cutoff_weighted_integral = np.sqrt(v_d / (2.0 * np.pi)) * np.exp(-(r0 - BS_prime)**2 / (2 * v_d))
r0_mean += cutoff_weighted_integral / shadow_factor
meanTheta = np.arctan2(r0_mean, L2)/2.0 #remember to convert L2 to cm from meters
data.meanQ = (4*np.pi/wavelength)*np.sin(meanTheta)
# TODO: shadow factor is calculated, but shouldn't the normalization to solid angle
# include the reduction from the shadow factor? This will greatly increase the intensity
# of pixels near or below the beam stop!
data.shadow_factor = shadow_factor
return data
def calculateDQ_IGOR(data, inQ, del_r=None):
"""
Add the dQ column to the data, based on slit apertures and gravity
r_dist is the real-space distance from ctr of detector to QxQy pixel location
From `NCNR_Utils.ipf` (<NAME>) in which the math is in turn from:
| <NAME>, <NAME> & <NAME> J. Appl. Cryst. (2011). 44, 1127-1129.
| *The effect of gravity on the resolution of small-angle neutron diffraction peaks*
| [ doi:10.1107/S0021889811033322 ]
| J. Appl. Cryst. (1995). 28, 105-114
| https://doi.org/10.1107/S0021889894010095 (Cited by 90)
| Instrumental Smearing Effects in Radially Symmetric Small-Angle Neutron Scattering by Numerical and Analytical Methods
| <NAME> and <NAME>
**Inputs**
data (sans2d): data in
del_r (float): width of circular slice in realspace (cm)
**Returns**
output (sans2d): data in with dQ column filled in
2017-06-16 <NAME>
"""
from scipy.special import gammaln, gammainc, erf
G = 981. #! ACCELERATION OF GRAVITY, CM/SEC^2
vz_1 = 3.956e5 # velocity [cm/s] of 1 A neutron
# the detector pixel is square, so correct for phi
DDet = data.metadata["det.pixelsizex"]
if del_r is None:
del_r = DDet
apOff = data.metadata["resolution.ap2Off"]
sampleOff = data.metadata["sample.position"]
S1 = data.metadata["resolution.ap1"] * 0.5 # convert to radius, already cm
S2 = data.metadata["resolution.ap2"] * 0.5 # to radius
# no need to subtract apOff below - this is done in device model
# but for comparison with IGOR, leave it in:
L1 = data.metadata["resolution.ap12dis"] - apOff
L2 = data.metadata["det.dis"] + sampleOff + apOff
LP = 1.0/( 1.0/L1 + 1.0/L2)
BS = data.metadata['det.bstop'] / 2.0 # diameter to radius, already in cm
LB = 20.1 + 1.61*BS # empirical formula from NCNR_Utils.ipf, line 123 in "getResolution"
BS_prime = BS + (BS * LB / (L2 - LB)) # adding triangular shadow from LB to L2
lambda0 = data.metadata["resolution.lmda"] # 15
lambdaWidth = data.metadata["resolution.dlmda"] # 0.236
# these are defined in the IGOR code, but never get used therein...
##a2 = S1*L2/L1 + S2*(L1+L2)/L1
##q_small = 2.0*np.pi*(BS_prime-a2)*(1.0-lambdaWidth)/(lambda*L2)
LP = 1.0/( 1.0/L1 + 1.0/L2)
v_lambda = lambdaWidth**2/6.0
if 'LENS' in _s(data.metadata['run.guide'].upper()):
# NOTE: this might need adjustment. Ticket #677 filed in trac to change to:
# v_b = 0.25*(S1*L2/L1)**2 +0.25*(2/3)*(lambdaWidth)**2*(S2*L2/LP)**2
v_b = 0.25*(S1*L2/L1)**2 +0.25*(2/3)*(lambdaWidth/lambda0)**2*(S2*L2/LP)**2 # correction to 2nd term
else:
v_b = 0.25*(S1*L2/L1)**2 +0.25*(S2*L2/LP)**2 # original form
v_d = (DDet/2.3548)**2 + del_r**2/12.0 # the 2.3548 is a conversion from FWHM->Gauss, see https://mathworld.wolfram.com/GaussianFunction.html
vz = vz_1 / lambda0
yg = 0.5*G*L2*(L1+L2)/vz**2
v_g = 2.0*(2.0*yg**2*v_lambda) # factor of 2 correction, <NAME>, 2007
r0 = L2*np.tan(2.0*np.arcsin(lambda0*inQ/(4.0*np.pi) ))
delta = 0.5*(BS_prime - r0)**2/v_d
#if (r0 < BS_prime):
# inc_gamma=np.exp(gammaln(1.5))*(1-gammainc(1.5,delta))
#else:
# inc_gamma=np.exp(gammaln(1.5))*(1+gammainc(1.5,delta))
inc_gamma = np.ones_like(r0)
r0_less = (r0 < BS_prime)
r0_more = (r0 >= BS_prime)
inc_gamma[r0_less] = np.exp(gammaln(1.5))*(1-gammainc(1.5,delta[r0_less]))
inc_gamma[r0_more] = np.exp(gammaln(1.5))*(1+gammainc(1.5,delta[r0_more]))
fSubS = 0.5*(1.0+erf( (r0-BS_prime)/np.sqrt(2.0*v_d) ) )
#if (fSubS <= 0.0):
# fSubS = 1.e-10
fSubS[fSubS <= 0.0] = 1.e-10
fr = 1.0 + np.sqrt(v_d)*np.exp(-1.0*delta) /(r0*fSubS*np.sqrt(2.0*np.pi))
fv = inc_gamma/(fSubS*np.sqrt(np.pi)) - r0**2*(fr-1.0)**2/v_d
rmd = fr*r0
v_r1 = v_b + fv*v_d +v_g
rm = rmd + 0.5*v_r1/rmd
v_r = v_r1 - 0.5*(v_r1/rmd)**2
#if (v_r < 0.0):
# v_r = 0.0
v_r[v_r < 0.0] = 0.0
QBar = (4.0*np.pi/lambda0)*np.sin(0.5*np.arctan(rm/L2))
SigmaQ = QBar*np.sqrt(v_r/rmd**2 + v_lambda)
return QBar, SigmaQ
def _calculate_Q(X, Y, Z, q0):
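"""Convert real-space detector coordinates (X, Y, Z) into radial distance, Bragg angle theta,
momentum transfer q = q0*sin(theta), azimuth phi and the (qx, qy, qz) components; q0 = 4*pi/lambda is supplied by the caller."""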
r = np.sqrt(X**2+Y**2)
theta = np.arctan2(r, Z)/2 #remember to convert Z to cm from meters
q = q0*np.sin(theta)
phi = np.arctan2(Y, X)
qx = q*np.cos(theta)*np.cos(phi)
qy = q*np.cos(theta)*np.sin(phi)
qz = q*np.sin(theta)
return r, theta, q, phi, qx, qy, qz
def FX(xx,sx3,xcenter,sx):
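"""Map a pixel index ``xx`` to a real-space offset from ``xcenter`` using the same tangent (detector-curvature) correction applied in PixelsToQ."""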
return sx3*np.tan((xx-xcenter)*sx/sx3)
@cache
@module
def PixelsToQ(data, beam_center=[None,None], correct_solid_angle=True):
"""
generate a q_map for sansdata. Each pixel will have 4 values: (qx, qy, q, theta)
**Inputs**
data (sans2d): data in
beam_center {Beam Center Override} (coordinate?): If not blank, will override the beamx and beamy from the datafile.
correct_solid_angle {Correct solid angle} (bool): Apply correction for mapping
curved Ewald sphere to flat detector
**Returns**
output (sans2d): converted to I vs. Qx, Qy
2016-04-17 <NAME>
"""
sampleOffset = data.metadata["sample.position"]
Z = data.metadata["det.dis"] + sampleOffset
beamx_override, beamy_override = beam_center
x0 = beamx_override if beamx_override is not None else data.metadata['det.beamx'] #should be close to 64
y0 = beamy_override if beamy_override is not None else data.metadata['det.beamy'] #should be close to 64
wavelength = data.metadata['resolution.lmda']
q0 = (4*np.pi/wavelength)
shape = data.data.x.shape
x, y = np.indices(shape) + 1.0 # center of first pixel is 1, 1 (Detector indexing)
xcenter, ycenter = [(dd + 1.0)/2.0 for dd in shape] # = 64.5 for 128x128 array
sx = data.metadata['det.pixelsizex'] # cm
sy = data.metadata['det.pixelsizey']
sx3 = 1000.0 # constant, = 10000(mm) = 1000 cm; not in the nexus file for some reason.
sy3 = 1000.0 # (cm) also not in the nexus file
# centers of pixels:
dxbm = sx3*np.tan((x0-xcenter)*sx/sx3)
dybm = sy3*np.tan((y0-ycenter)*sy/sy3)
X = sx3*np.tan((x-xcenter)*sx/sx3) - dxbm # in mm in nexus, but converted by loader
Y = sy3*np.tan((y-ycenter)*sy/sy3) - dybm
r, theta, q, phi, qx, qy, qz = _calculate_Q(X, Y, Z, q0)
if correct_solid_angle:
"""
rad = sqrt(dtdis2 + xd^2 + yd^2)
domega = rad/dtdist
ratio = domega^3
xy = xx[ii]*yy[jj]
data[ii][jj] *= xy*ratio
"""
xx = (np.cos((x-xcenter)*sx/sx3))**2
yy = (np.cos((y-ycenter)*sy/sy3))**2
#data.data.x = data.data.x / (np.cos(theta)**3)
data.data.x = data.data.x * xx * yy / (np.cos(2*theta)**3)
# bin corners:
X_low = sx3*np.tan((x - 0.5 - xcenter)*sx/sx3) - dxbm # in mm in nexus, but converted by loader
X_high = sx3*np.tan((x + 0.5 - xcenter)*sx/sx3) - dxbm # in mm in nexus, but converted by loader
Y_low = sy3*np.tan((y - 0.5 - ycenter)*sy/sy3) - dybm
Y_high = sy3*np.tan((y + 0.5 - ycenter)*sy/sy3) - dybm
r_low, theta_low, q_low, phi_low, qx_low, qy_low, qz_low = _calculate_Q(X_low, Y_low, Z, q0)
r_high, theta_high, q_high, phi_high, qx_high, qy_high, qz_high = _calculate_Q(X_high, Y_high, Z, q0)
res = data.copy()
#Adding res.q
res.q = q
res.qx = qx
res.qy = qy
res.qz = qz
# bin edges:
res.qx_low = qx_low
res.qy_low = qy_low
res.qx_high = qx_high
res.qy_high = qy_high
res.X = X
res.Y = Y
res.Z = Z
res.r = r
res.metadata['det.beamx'] = x0
res.metadata['det.beamy'] = y0
res.qx_min = q0/2.0 * data.metadata['det.pixelsizex']*(0.5 - x0)/ Z
res.qy_min = q0/2.0 * data.metadata['det.pixelsizex']*(0.5 - y0)/ Z
res.qx_max = q0/2.0 * data.metadata['det.pixelsizex']*(128.5 - x0)/ Z
res.qy_max = q0/2.0 * data.metadata['det.pixelsizex']*(128.5 - y0)/ Z
res.xlabel = "Qx (inv. Angstroms)"
res.ylabel = "Qy (inv. Angstroms)"
res.theta = theta
calculateDQ(res)
calculateMeanQ(res)
return res
@cache
@module
def circular_av(data):
"""
Using a circular average, it converts data to 1D (Q vs. I)
**Inputs**
data (sans2d): data in
**Returns**
nominal_output (sans1d): converted to I vs. nominal Q
mean_output (sans1d): converted to I vs. mean Q within integrated region
2016-04-13 <NAME>
"""
from .draw_annulus_aa import annular_mask_antialiased
#annular_mask_antialiased(shape, center, inner_radius, outer_radius,
# background_value=0.0, mask_value=1.0, oversampling=8)
# calculate the change in q that corresponds to a change in pixel of 1
if data.qx is None:
raise ValueError("Q is not defined - convert pixels to Q first")
q_per_pixel = data.qx[1, 0]-data.qx[0, 0] / 1.0
# for now, we'll make the q-bins have the same width as a single pixel
step = q_per_pixel
shape1 = data.data.x.shape
x0 = data.metadata['det.beamx'] # should be close to 64
y0 = data.metadata['det.beamy'] # should be close to 64
sampleOffset = data.metadata["sample.position"]
L2 = data.metadata["det.dis"] + sampleOffset
wavelength = data.metadata['resolution.lmda']
center = (x0, y0)
Qmax = data.q.max()
Q = np.arange(step, Qmax, step) # start at first pixel out.
Q_edges = np.zeros((Q.shape[0] + 1,), dtype="float")
Q_edges[1:] = Q
Q_edges += step/2.0 # get a range from step/2.0 to (Qmax + step/2.0)
r_edges = L2 * np.tan(2.0*np.arcsin(Q_edges * wavelength/(4*np.pi))) / data.metadata['det.pixelsizex']
Q_mean = []
Q_mean_error = []
I = []
I_error = []
dx = np.zeros_like(Q, dtype="float")
for i, qq in enumerate(Q):
# inner radius is the q we're at right now, converted to pixel dimensions:
inner_r = r_edges[i]
# outer radius is the q of the next bin, also converted to pixel dimensions:
outer_r = r_edges[i+1]
#print(i, qq, inner_r, outer_r)
mask = annular_mask_antialiased(shape1, center, inner_r, outer_r)
if IGNORE_CORNER_PIXELS:
mask[0, 0] = mask[-1, 0] = mask[-1, -1] = mask[0, -1] = 0.0
#print("Mask: ", mask)
integrated_q = uncertainty.sum(data.q*mask.T)
integrated_intensity = uncertainty.sum(data.data*mask.T)
#error = getPoissonUncertainty(integrated_intensity)
#error = np.sqrt(integrated_intensity)
mask_sum = np.sum(mask)
if mask_sum > 0.0:
norm_integrated_intensity = integrated_intensity / mask_sum
norm_integrated_q = integrated_q / mask_sum
#error /= mask_sum
else:
norm_integrated_intensity = integrated_intensity
norm_integrated_q = integrated_q
I.append(norm_integrated_intensity.x) # not multiplying by step anymore
I_error.append(norm_integrated_intensity.variance)
Q_mean.append(norm_integrated_q)
Q_mean_error.append(0.0)
I = np.array(I, dtype="float")
I_error = np.array(I_error, dtype="float")
Q_mean = np.array(Q_mean, dtype="float")
Q_mean_error = np.array(Q_mean_error, dtype="float")
nominal_output = Sans1dData(Q, I, dx=dx, dv=I_error, xlabel="Q", vlabel="I",
xunits="inv. A", vunits="neutrons")
nominal_output.metadata = deepcopy(data.metadata)
nominal_output.metadata['extra_label'] = "_circ"
mean_output = Sans1dData(Q_mean, I, dx=Q_mean_error, dv=I_error, xlabel="Q", vlabel="I",
xunits="inv. A", vunits="neutrons")
mean_output.metadata = deepcopy(data.metadata)
mean_output.metadata['extra_label'] = "_circ"
return nominal_output, mean_output
def oversample_2d(input_array, oversampling):
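"""Nearest-neighbour upsampling: repeat each pixel ``oversampling`` times along both axes."""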
return np.repeat(np.repeat(input_array, oversampling, 0), oversampling, 1)
@nocache
@module
def circular_av_new(data, q_min=None, q_max=None, q_step=None, mask_width=3, dQ_method='none'):
"""
Using a circular average, it converts data to 1D (Q vs. I)
**Inputs**
data (sans2d): data in
q_min (float): minimum Q value for binning (defaults to q_step)
q_max (float): maxiumum Q value for binning (defaults to max of q values in data)
q_step (float): step size for Q bins (defaults to minimum qx step)
mask_width (int): number of pixels to mask around the perimeter of the detector
dQ_method (opt:none|IGOR|statistical) : method for calculating dQ
**Returns**
nominal_output (sans1d): converted to I vs. nominal Q
mean_output (sans1d): converted to I vs. mean Q within integrated region
output (sansIQ): canonical I vs Q output for sans data.
| 2019-01-01 <NAME>
| 2019-09-05 Adding mask_width as a temporary way to handle basic masking
| 2019-12-11 <NAME> adding dQ_method opts
"""
# adding simple width-based mask around the perimeter:
mask = np.zeros_like(data.q, dtype=bool)
mask_width = abs(mask_width)
if (mask_width > 0):
mask[mask_width:-mask_width, mask_width:-mask_width] = True
else:
mask[:] = True
# calculate the change in q that corresponds to a change in pixel of 1
if data.qx is None:
raise ValueError("Q is not defined - convert pixels to Q first")
if q_step is None:
q_step = data.qx[1, 0]-data.qx[0, 0] / 1.0
if q_min is None:
q_min = q_step
if q_max is None:
q_max = data.q[mask].max()
q_bins = np.arange(q_min, q_max+q_step, q_step)
Q = (q_bins[:-1] + q_bins[1:])/2.0
oversampling = 3
o_mask = oversample_2d(mask, oversampling)
#o_q = oversample_2d(data.q, oversampling)
o_qxi, o_qyi = np.indices(o_mask.shape)
o_qx_offsets = ((o_qxi % oversampling) + 0.5) / oversampling
o_qy_offsets = ((o_qyi % oversampling) + 0.5) / oversampling
qx_width = oversample_2d(data.qx_high - data.qx_low, oversampling)
qy_width = oversample_2d(data.qy_high - data.qy_low, oversampling)
original_lookups = (np.floor_divide(o_qxi, oversampling), np.floor_divide(o_qyi, oversampling))
o_qx = data.qx_low[original_lookups] + (qx_width * o_qx_offsets)
o_qy = data.qy_low[original_lookups] + (qy_width * o_qy_offsets)
o_qz = oversample_2d(data.qz, oversampling)
o_q = np.sqrt(o_qx**2 + o_qy**2 + o_qz**2)
o_data = oversample_2d(data.data, oversampling) # Uncertainty object...
o_meanQ = oversample_2d(data.meanQ, oversampling)
o_shadow_factor = oversample_2d(data.shadow_factor, oversampling)
o_dq_para = oversample_2d(data.dq_para, oversampling)
# dq = data.dq_para if hasattr(data, 'dqpara') else np.ones_like(data.q) * q_step
I, _bins_used = np.histogram(o_q[o_mask], bins=q_bins, weights=o_data.x[o_mask])
I_norm, _ = np.histogram(o_q[o_mask], bins=q_bins, weights=np.ones_like(o_data.x[o_mask]))
I_var, _ = np.histogram(o_q[o_mask], bins=q_bins, weights=o_data.variance[o_mask])
#Q_ave, _ = np.histogram(o_q[o_mask], bins=q_bins, weights=o_q[o_mask])
#Q_var, _ = np.histogram(data.q, bins=q_bins, weights=data.dq_para**2)
Q_mean, _ = np.histogram(o_q[o_mask], bins=q_bins, weights=o_meanQ[o_mask])
ShadowFactor, _ = np.histogram(o_q[o_mask], bins=q_bins, weights=o_shadow_factor[o_mask])
import numpy as np
def get_WMAE(real, forecasts, first_day, first_eday, last_eday):
print('\nFirst day for WMAE:\t'+ first_eday.strftime('%d.%m.%Y') + '\t(day of the week: ' + str(first_eday.weekday()) + ')' +
'\nLast day for WMAE:\t'+ last_eday.strftime('%d.%m.%Y') + '\t(day of the week: ' + str(last_eday.weekday()) + ')\n')
#returns average WMAE in %, number of weeks
first_index = (first_eday - first_day).days
last_index = (last_eday - first_day).days + 1
real = real[first_index:last_index,:]
forecasts = forecasts[first_index:last_index,:]
WMAE = np.mean(np.abs(real - forecasts), axis = 1, keepdims = True)
WMAE = np.mean(np.reshape(WMAE, (-1,7)), axis = 1, keepdims = True)
Pdash = np.mean(real, axis = 1, keepdims = True)
Pdash = np.mean(np.reshape(Pdash, (-1,7)), axis = 1, keepdims = True)
WMAE = np.mean(WMAE/Pdash)
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch import optim
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import higher
from models import GCN
from train import *
class BaseMeta(Module):
def __init__(self, nfeat, hidden_sizes, nclass, nnodes, lambda_, device):
super(BaseMeta, self).__init__()
self.hidden_sizes = hidden_sizes
self.nfeat = nfeat
self.nclass = nclass
self.lambda_ = lambda_
self.device = device
self.gcn = GCN(nfeat=nfeat,
nhid=hidden_sizes[0],
nclass=nclass).to(self.device)
self.nnodes = nnodes
self.adj_changes = Parameter(torch.FloatTensor(nnodes, nnodes)).to(self.device)
self.adj_changes.data.fill_(0)
def filter_potential_singletons(self, modified_adj):
"""
Computes a mask for entries potentially leading to singleton nodes, i.e. one of the two nodes corresponding to
the entry have degree 1 and there is an edge between the two nodes.
Returns
-------
torch.Tensor shape [N, N], float with ones everywhere except the entries of potential singleton nodes,
where the returned tensor has value 0.
"""
degrees = modified_adj.sum(0)
degree_one = (degrees == 1)
resh = degree_one.repeat(modified_adj.shape[0], 1).float()
l_and = resh * modified_adj
logical_and_symmetric = l_and + l_and.t()
flat_mask = 1 - logical_and_symmetric
return flat_mask
def train_surrogate(self, features, edges, labels, train_iters=200):
print('=== training surrogate model to predict unlabled data for self-training')
model = self.gcn
train(model, (features, edges, labels), self.device,
save_path=None, epochs=train_iters)
pre = model(features, edges)
_, pre = torch.max(pre.data, 1)
self.labels_self_training = pre.detach()
model.initialize()
def log_likelihood_constraint(self, modified_adj, ori_adj, ll_cutoff):
"""
Computes a mask for entries that, if the edge corresponding to the entry is added/removed, would lead to the
log likelihood constraint to be violated.
"""
t_d_min = torch.tensor(2.0).to(self.device)
t_possible_edges = np.array(np.triu(np.ones((self.nnodes, self.nnodes)), k=1).nonzero()).T
allowed_mask, current_ratio = likelihood_ratio_filter(t_possible_edges,
modified_adj,
ori_adj, t_d_min,
ll_cutoff)
return allowed_mask, current_ratio
class Metattack(BaseMeta):
def __init__(self, nfeat, hidden_sizes, nclass,
nnodes, lambda_, device):
super(Metattack, self).__init__(nfeat, hidden_sizes, nclass,
nnodes, lambda_, device)
def get_meta_grad(self, features, edges, labels, train_iters):
model = self.gcn
loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
with higher.innerloop_ctx(model, optimizer) as (fmodel, diffopt):
for i in range(train_iters):
pre = fmodel(features, edges)
idx = select_index(labels, -1, same=False)
pre, Y = pre[idx], labels[idx]
cost = loss(pre, Y)
diffopt.step(cost)
pre = fmodel(features, edges)
idx = select_index(labels, -1, same=False)
sudo_idx = select_index(labels, -1, same=True)
cost = 0
if self.lambda_ > 0:
cost += self.lambda_ * loss(pre[idx], labels[idx])
if (1-self.lambda_) > 0:
cost += (1-self.lambda_) * loss(pre[sudo_idx], self.labels_self_training[sudo_idx])
return torch.autograd.grad(cost, self.adj_changes, retain_graph=False)[0]
def forward(self, features, ori_adj, labels, perturbations, train_iters, ll_constraint=True, ll_cutoff=0.004):
features, ori_adj, labels = features.to(self.device), ori_adj.to(self.device), labels.to(self.device)
if (1-self.lambda_) > 0 :
self.train_surrogate(features, ori_adj, labels)
for i in tqdm(range(perturbations), desc="Perturbing graph"):
adj_changes_square = self.adj_changes - torch.diag(torch.diag(self.adj_changes, 0))
ind = np.diag_indices(self.adj_changes.shape[0])
import math
import os
import numpy as np
import pygame
from gym import spaces
from gym.utils import seeding
from scipy.spatial import distance as ssd
from .._utils import Agent
FPS = 15
class Archea(Agent):
def __init__(self, idx, radius, n_sensors, sensor_range, max_accel, speed_features=True):
self._idx = idx
self._radius = radius
self._n_sensors = n_sensors
self._sensor_range = sensor_range
self._max_accel = max_accel
# Number of observation coordinates from each sensor
self._sensor_obscoord = 5
if speed_features:
self._sensor_obscoord += 3
self._sensor_obs_coord = self._n_sensors * self._sensor_obscoord
self._obs_dim = self._sensor_obs_coord + 2 # +1 for is_colliding_evader, +1 for is_colliding_poison
self._position = None
self._velocity = None
# Generate self._n_sensors angles, evenly spaced from 0 to 2pi
# We generate 1 extra angle and remove it because linspace[0] = 0 = 2pi = linspace[-1]
angles = np.linspace(0., 2. * np.pi, self._n_sensors + 1)[:-1]
# Convert angles to x-y coordinates
sensor_vectors = np.c_[np.cos(angles), np.sin(angles)]
self._sensors = sensor_vectors
@property
def observation_space(self):
return spaces.Box(low=np.float32(-np.sqrt(2)
# -*- coding: utf-8 -*-
import os
import numpy as np
import scipy.sparse as sp
import h5py
import torch
from torch.utils.data import Dataset
import scanpy as sc
from preprocess import read_dataset, process_normalize
from read_data import pre_processing_single
from sklearn.preprocessing import scale, minmax_scale
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import time
# import umap
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sp
import copy
import random
import seaborn as sns
import os
import sys
import time
import math
import torch.nn as nn
import torch.nn.init as init
import numpy as np
import torch
#_, term_width = os.popen('stty size', 'r').read().split()
term_width = 80
last_time = time.time()
begin_time = last_time
TOTAL_BAR_LENGTH = 65.
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH * current / total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width - int(TOTAL_BAR_LENGTH) - len(msg) - 3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width - int(TOTAL_BAR_LENGTH / 2) + 2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current + 1, total))
if current < total - 1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600 / 24)
seconds = seconds - days * 3600 * 24
hours = int(seconds / 3600)
seconds = seconds - hours * 3600
minutes = int(seconds / 60)
seconds = seconds - minutes * 60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds * 1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
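# Quick reference (illustrative): format_time keeps at most the two largest non-zero units.
#   format_time(3661.5) -> '1h1m'
#   format_time(90061)  -> '1D1h'
#   format_time(0.5)    -> '500ms'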
def print_time(f):
"""Decorator of viewing function runtime.
eg:
```py
from print_time import print_time as pt
@pt
def work(...):
print('work is running')
work()
# work is running
# --> RUN TIME: <work> : 2.8371810913085938e-05
```
"""
def fi(*args, **kwargs):
s = time.time()
res = f(*args, **kwargs)
print('--> RUN TIME: <%s> : %s' % (f.__name__, time.time() - s))
return res
return fi
def load_graph(dataset, k=None, n=10, label=None):
import os
graph_path = os.getcwd()
if k:
path = graph_path + '/{}{}_graph.txt'.format(dataset, k)
else:
path =graph_path + '/{}_graph.txt'.format(dataset)
idx = np.array([i for i in range(n)], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt(path, dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(n, n), dtype=np.float32)
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
adj = adj + sp.eye(adj.shape[0])
adj = normalize(adj)
adj = sparse_mx_to_torch_sparse_tensor(adj)
import os
print("delete file: ", path)
os.remove(path)
return adj
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from lorenz import Lorenz
import pickle
sigma = 10
b = 8/3
r = 1.5
lrz = Lorenz(sigma, b, r) # initialize lorenz object with given parameters
n_samples = 1000 # set number of training samples
lrz.X, lrz.U = np.zeros((n_samples, 3))
#
# License: Academic Free License (AFL) v3.0
#
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
plt.rc('text', usetex=True)
import tables as tb
import os
# import tqdm
MEAN_HIDDEN=False
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-17+11:39/'
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-17+21:16/'
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-23+01:56/'
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-23+13:05/'
outputdir = 'output/'+'dsc_run_audio.py.2016-04-21+09:50/'
outputdir = 'output/'+'dsc_run_audio.py.d570370/'
# outputdir = 'output/'+'dsc_run_audio.py.d587969/'
ofile = outputdir+'result.h5'
print(outputdir)
fh =tb.open_file(ofile,'r')
sampling_rate=16000.
sigma_all = fh.root.sigma.read().squeeze()
pi_all = fh.root.pi.read().squeeze()
W_all = fh.root.W.read()
H = W_all.shape[-1]
D = W_all.shape[-2]
h5nodes = [f.name for f in fh.root._f_list_nodes()]
rseries=None
if 'rseries' in h5nodes:
rseries = fh.root.rseries.read()
series=None
if 'series' in h5nodes:
series = fh.root.series.read()
channel=None
if 'channel' in h5nodes:
channel = fh.root.channel.read()
overlap=0.
overlap=0.5
if 'overlap' in h5nodes:
overlap = fh.root.overlap.read()[-1]
inf_poster=None
if 'infered_posterior' in h5nodes:
inf_poster = fh.root.infered_posterior.read().squeeze()
inf_states=None
if 'infered_states' in h5nodes:
inf_states = fh.root.infered_states.read().squeeze()
ry=None
if 'ry' in h5nodes:
ry = fh.root.ry.read().squeeze()
rs=None
if 'rs' in h5nodes:
rs = fh.root.rs.read().squeeze()
gamma=None
if 'gamma' in h5nodes:
gamma = fh.root.gamma.read().squeeze()
states=np.array([-2,-1.,0.,1.,2.])
if 'states' in h5nodes:
states = fh.root.states.read().squeeze()
N = fh.root.N.read()[0]
psz = np.int(np.sqrt(D))
fh.close()
epochs = W_all.shape[0]
Eprior = (inf_poster[:,:,None]*(inf_states!=0)).sum(1).mean(0)
EpriorInds = np.argsort(Eprior)[::-1]
def findpeaks(a,inds,max=True):
d=np.diff(a.squeeze())
di=np.diff(inds.squeeze())
p=[]
for i in range(2,d.shape[0]):
if max:
if a[i-2]<a[i-1] and a[i]<a[i-1] and np.all(di[i-2:i]==1):
p.append(i-1)
else:
if a[i-2]>a[i-1] and a[i]>a[i-1] and np.all(di[i-2:i]==1):
p.append(i-1)
p = np.array(p)
if p.shape[0]==0:
return np.array([])
else:
return inds[p]
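# Illustrative sketch: findpeaks returns the entries of `inds` at strict local maxima
# (or minima with max=False) of `a`, provided the neighbouring indices are contiguous.
#   vals = np.array([0., 1., 3., 2., 4., 1.]); pos = np.arange(6)
#   findpeaks(vals, pos)             # -> array([2]), the local max 3. at position 2
#   findpeaks(vals, pos, max=False)  # -> array([3]), the local min 2. at position 3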
cscale='local'
if not os.path.exists(outputdir+'montage_images'):
os.mkdir(outputdir+'montage_images')
if not os.path.exists(outputdir+'_images'):
os.mkdir(outputdir+'_images')
if not os.path.exists(outputdir+'reconstructions'):
os.mkdir(outputdir+'reconstructions')
if not os.path.exists(outputdir+'filters'):
os.mkdir(outputdir+'filters')
for e in range(0,epochs)[::-1]:
# minwg = -np.max(np.abs(W_all[e]))
# maxwg = -minwg
minwg = np.min(W_all[e])
maxwg = np.max(W_all[e])
meanw = np.mean(W_all)
if not os.path.exists('{}filters/W_e_{:03}.eps'.format(outputdir, e)):
# if not os.path.exists('{}montage_images/W_e_{:03}.eps'.format(outputdir, e)):
fig=plt.figure(2,(20,30))
# fig = plt.figure(2,figsize=(30,10))
for h in range(H):
# if os.path.exists(outputdir+'_images/'+'W_e_{:03}_h_{:03}.eps'.format(e,h)):
# continue
this_W = W_all[e,:,h]
# this_W=this_W.reshape((psz,psz))
minwl = -np.max(np.abs(this_W)) * 1.1
maxwl = -minwl * 1.1
# minwl = np.min(this_W) * 1.1
# maxwl = np.max(this_W) * 1.1
# meanwl = np.mean(this_W) * 1.1
if cscale == 'global':
maxw, minw = maxwg, minwg
elif cscale == 'local':
maxw, minw = maxwl, minwl
ax = fig.add_subplot(13,8,h+1)
plt.locator_params(nbins=4)
ax.plot(np.linspace(0,D//10,num=D) ,this_W)#scale in kHz
ax.axis([0,D//10,minwg,maxwg],fontsize=16)
# ax.axis([0,D//10,minwl,maxwl],fontsize=16)
# ax.savefig(outputdir+'_images/'+'W_e_{:03}_h_{:03}.jpg'.format(e,h))
ax.set_title("$W_{"+str(h+1)+"}$",fontsize=20)
ax.tick_params(axis='both',labelsize=20)
ax.axis('off')
# ax.clf()
if h%30 == 0 :
print("Finished epoch {:03} basis {:03}".format(e,h))
print("\tPlot settings scale: '{}', min: {}, max: {}, mean:{}".format(cscale,minwg,maxwg,meanw))
# ax.clf()
plt.tight_layout()
fig.savefig(outputdir+'filters/W_e_{:03}.eps'.format(e), bbox_inches = 'tight',dpi=600)
plt.close(fig)
if not os.path.exists('{}montage_images/pi_broken_{:03}.jpg'.format(outputdir, e)):
yl = pi_all[e,np.argsort(pi_all[e,:])[:-1]].sum()*1.25
if yl<1e-3:
continue
f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
plt.locator_params(nbins=4)
ax.bar(states,pi_all[e,:],align='center')
plt.locator_params(nbins=4)
ax2.bar(states,pi_all[e,:],align='center')
plt.locator_params(nbins=4)
# print yl,np.argsort(pi_all[e,:])[:-1]
ax.set_ylim(1.-yl ,1.)
ax2.set_ylim(0,yl)
# hide the spines between ax and ax2
ax.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
ax.tick_params(labeltop='off') # don't put tick labels at the top
ax2.xaxis.tick_bottom()
d = 0.015 # how big to make the diagonal lines in axes coordinates
# d = pi_all[e,np.argsort(pi_all[e,:])[1]]*1.2 # how big to make the diagonal lines in axes coordinates
# arguments to pass plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
yticks = np.around(np.linspace(1.-yl,1., 3), decimals=3)
yticks2 = np.around(np.linspace(0.,yl, 3), decimals=3)
ax.set_yticks(yticks)
ax2.set_yticks(yticks2)
# We change the fontsize of minor ticks label
ax.set_xticks([])
ax.tick_params(axis='x', which='major', length=1, labelsize=20)
ax.tick_params(axis='x', which='minor', length=1, labelsize=20)
ax.tick_params(axis='both', which='major', labelsize=20)
ax.tick_params(axis='both', which='minor', labelsize=20)
ax2.set_xticks(states)
ax2.tick_params(axis='x', which='major', length=1, labelsize=20)
ax2.tick_params(axis='x', which='minor', length=1, labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.tick_params(axis='both', which='minor', labelsize=20)
f.savefig(outputdir+'montage_images/pi_broken_{:03}.jpg'.format(e))
plt.close(f)
# os.system("montage -trim {}_images/W_e_{:03}_h*.jpg {}montage_images/W_e_{:03}.jpg".format(outputdir,e,outputdir,e))
# os.system("rm {}_images/W_e_{:03}_h*.jpg ".format(outputdir, e))
# os.system("convert -delay 10 {}montage_images/* {}W_training.gif".format(outputdir,outputdir))
state_list = []
title_font_dict = {"fontsize":"12"}
marker_font_dict = {"fontsize":"16"}
title_font_dict = None # {"fontsize":"12"}
marker_font_dict = None #{"fontsize":"16"}
time_scale = 1000/sampling_rate
if series is not None and rseries is not None:
#IC = series.squeeze()[5,:].squeeze()
series = series.squeeze()#[channel,:].squeeze()
step = int((1-overlap)*D)
tmp2=series[:,:step].reshape(-1)
tmp2=np.concatenate([tmp2,series[-1,D-step:]])
series = np.array(tmp2)
#!/usr/bin/env python3
import numpy as np
from itertools import product
class SymmetryTool:
tol = 1e-5
def identify(self, tile: np.array) -> str:
"""Identify the symmetry group of a tile."""
return self.hashmap[self.truthtable(tile)]
def __init__(self):
self.hashmap = self.truthtable_symmetries()
def truthtable_symmetries(self) -> dict:
"""Derive symmetry type from propeties of simple representations."""
examples = {
'X': np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]),
'I': np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]]),
'T': np.array([[1, 1, 1], [0, 1, 0], [0, 1, 0]]),
'L': np.array([[0, 1, 0], [0, 1, 1], [0, 0, 0]]),
'\\': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])}
hmap = {self.truthtable(np.rot90(img, k=k)): sym
for k, (sym, img) in product(range(4), examples.items())}
return hmap
def truthtable(self, x: np.array) -> tuple:
"""Combine all matrix propertes of image."""
truthtable = [self.rotation(x), self.symmetric(x),
*[self.mirror(np.rot90(x, k=k)) for k in range(2)]]
return tuple(truthtable)
def mirror(self, x: np.array) -> bool:
"""Check for mirror symmetric matrix propetry."""
return np.allclose(x, np.fliplr(x), atol=self.tol)
def rotation(self, x: np.array) -> bool:
"""Check for rotational invariance of matrix for 180\deg."""
return np.allclose(x, np.rot90(x, k=2), atol=self.tol)
def symmetric(self, x: np.array) -> bool:
"""Check for a symmetric matrix property across all planes."""
if x.ndim == 2:
x = np.expand_dims(x, axis=-1)
return all(map(self.symmetric_2d, np.rollaxis(x, axis=-1)))
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 18:05:51 2019
@author: ben91
"""
from SimulationClasses import *
from TimeSteppingMethods import *
from FiniteVolumeSchemes import *
from FluxSplittingMethods import *
from InitialConditions import *
from Equations import *
from wholeNetworks import *
from LoadDataMethods import *
from keras import *
from keras.models import *
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anime
from matplotlib import style
from matplotlib import rcParams
import math
style.use('fivethirtyeight')
rcParams.update({'figure.autolayout': True})
'''
# Import modules/packages
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.close('all') # close all open figures
# Define and set custom LaTeX style
styleNHN = {
"pgf.rcfonts":False,
"pgf.texsystem": "pdflatex",
"text.usetex": False, #TODO: might need to change this to false
"font.family": "serif"
}
mpl.rcParams.update(styleNHN)
xx = np.linspace(0,1,100)
yy = xx**2
# Plotting defaults
ALW = 0.75 # AxesLineWidth
FSZ = 12 # Fontsize
LW = 2 # LineWidth
MSZ = 5 # MarkerSize
SMALL_SIZE = 8 # Tiny font size
MEDIUM_SIZE = 10 # Small font size
BIGGER_SIZE = 14 # Large font size
plt.rc('font', size=FSZ) # controls default text sizes
plt.rc('axes', titlesize=FSZ) # fontsize of the axes title
plt.rc('axes', labelsize=FSZ) # fontsize of the x and y labels
plt.rc('xtick', labelsize=FSZ) # fontsize of the x-tick labels
plt.rc('ytick', labelsize=FSZ) # fontsize of the y-tick labels
plt.rc('legend', fontsize=FSZ) # legend fontsize
plt.rc('figure', titlesize=FSZ) # fontsize of the figure title
plt.rcParams['axes.linewidth'] = ALW # sets the default axes lindewidth to ``ALW''
plt.rcParams["mathtext.fontset"] = 'cm' # Computer Modern mathtext font (applies when ``usetex=False'')
def discTrackStep(c,x,t,u,P,title, a, b, err):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
c: shock speed
x: x coordinates
t: time coordinates
u: velocity
P: periods advected for
err: plot error if True, otherwise plot solution
'''
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - c*tg
plt.figure()
if err:
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = eex-u
'''
plt.contourf(xp,tg,u)
plt.colorbar()
plt.title(title)
plt.figure()
plt.contourf(xp,tg,eex)
'''
for i in range(-2,int(P)):
plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.7,20))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
else:
for i in range(-2,int(P)+1):
plt.contourf(xp+i*L,tg,u,np.linspace(-0.2,1.2,57))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
def intError(c,x,t,u,title):
L = x[-1] - x[0] + x[1] - x[0]
dx = x[1] - x[0]
nx = np.size(x)
xg, tg = np.meshgrid(t,x)
xp = xg - c*tg
ons = np.ones_like(xp)
#eex = np.roll(np.greater(ons,xp%L),-1,axis = 0)
eex1 = xp/dx
eex1[eex1>=1] = 1
eex1[eex1<=0] = 0
eex2 = (-xp%L-L/2)/dx
eex2[eex2>=1] = 1
eex2[eex2<=0] = 0
eex3 = (-xp%L-L/2)/dx
eex3[eex3>(nx/2-1)] = -(eex3[eex3>(nx/2-1)]-nx/2)
eex3[eex3>=1] = 1
eex3[eex3<=0] = 0
er = eex3-u
ers = np.power(er,2)
ers0 = np.expand_dims(ers[0,:],axis = 0)
ers_aug = np.concatenate((ers,ers0), axis = 0)
err_int = np.trapz(ers_aug, dx = dx, axis = 0)
plt.plot(t,np.sqrt(err_int),'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('L2 Error')
#plt.ylim([0,0.02])
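# Note (illustrative): the quantity plotted above is the discrete L2 error
# sqrt( integral (u - u_exact)^2 dx ) evaluated with np.trapz on the periodically
# augmented samples; e.g. a uniform offset of 0.1 over a domain of length 2 gives
#   np.sqrt(np.trapz((0.1 * np.ones(101))**2, dx=0.02))   # ~0.1*sqrt(2) ~ 0.141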
def totalVariation(t,u,title):#plot total variation over time
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
#plt.figure()
plt.plot(t,tv,'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('Total Variation')
#plt.ylim((1.999,2.01))
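# Sanity check (illustrative): a clean periodic step profile has total variation exactly 2
# per time slice, so a non-oscillatory scheme should keep the curve above near 2.
#   u_step = np.zeros((100, 3)); u_step[50:, :] = 1.0
#   np.sum(np.abs(u_step - np.roll(u_step, 1, axis=0)), axis=0)   # -> [2., 2., 2.]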
def totalEnergy(t,u, dx, title):#plot total energy
u0 = np.expand_dims(u[0,:],axis = 0)
u_aug = np.concatenate((u,u0), axis = 0)
energy = 0.5*np.trapz(np.power(u_aug,2), dx = dx, axis = 0)
plt.figure()
plt.plot(t,energy)
plt.title(title)
plt.xlabel('Time')
plt.ylabel('1/2*integral(u^2)')
plt.ylim([0,np.max(energy)*1.1])
def mwn(FVM):
'''
plot modified wavenumber of a finite volume scheme
Inputs:
FVM: finite volume method object to test
'''
nx = 100
nt = 10
L = 2
T = 0.00001
x = np.linspace(0,L,nx,endpoint=False)
t = np.linspace(0,T,nt)
dx = x[1]-x[0]
dt = t[1]-t[0]
sigma = T/dx
EQ = adv()
FS = LaxFriedrichs(EQ, 1)
RK = SSPRK3()
NK = int((np.size(x)-1)/2)
mwn = np.zeros(NK,dtype=np.complex_)
wn = np.zeros(NK)
A = 1
for k in range(2,NK):
IC = cosu(A,k/L)
testCos = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_cos = testCos.run()
u_f_cos = u_cos[:,0]
u_l_cos = u_cos[:,-1]
IC = sinu(A,k/L)
testSin = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_sin = testSin.run()
u_f_sin = u_sin[:,0]
u_l_sin = u_sin[:,-1]
u_h0 =np.fft.fft(u_f_cos+complex(0,1)*u_f_sin)
u_h = np.fft.fft(u_l_cos+complex(0,1)*u_l_sin)
v_h0 = u_h0[k]
v_h = u_h[k]
mwn[k] = -1/(complex(0,1)*sigma)*np.log(v_h/v_h0)
wn[k] = 2*k*np.pi/nx
plt.plot(wn,np.real(mwn))
#plt.hold
plt.plot(wn,wn)
plt.xlabel('\phi')
plt.ylabel('Modified Wavenumber (real part)')
plt.figure()
plt.plot(wn,np.imag(mwn))
plt.xlabel('\phi')
plt.ylabel('Modified Wavenumber (imaginary part)')
plt.figure()
plt.semilogy(wn,abs(wn-np.real(mwn)))
return wn
def animateSim(x,t,u,pas):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
t: t coordinates
u: velocity
pas: how long to pause between frames
'''
for i in range(0,len(t)):
plt.plot(x,u[:,i])
plt.pause(pas)
plt.clf()
plt.plot(x,u[:,-1])
def specAnalysis(model, u, RKM,WENONN, NNNN, h, giveModel, makePlots):
'''
perform spectral analysis of a finite volume method when operating on a specific waveform
Finds the eigenvalues of the spatial operator, then uses them to compute the maximum amplification factor
Inputs:
Model: WENO5 neural network that will be analyzed
u: the data that is the input to the method
RKM: time stepping method object to analyze for space-time coupling
WENONN: name of layer in model that gives WENO5 coefficients
NNNN: name of layer in model that gives NN coefficients
giveModel: if True, WENONN and NNNN are already compiled models; if False, they are layer names in model
'''
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
N = np.size(u)
M = 5#just assume stencil size is 5 for now
sortedU = np.zeros((N,M)) + 1j*np.zeros((N,M))
for i in range(0,M):#assume scheme is upwind or unbiased
sortedU[:,i] = np.roll(u,math.floor(M/2)-i)
def scale(sortedU, NNNN):
min_u = np.amin(sortedU,1)
max_u = np.amax(sortedU,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(sortedU[:,2])
u_tmp[:] = sortedU[:,2]
#for i in range(0,5):
# sortedU[:,i] = (sortedU[:,i]-min_u)/(max_u-min_u)
cff = NNNN.predict(sortedU)#compute \Delta u
cff[const_n,:] = np.array([1/30,-13/60,47/60,9/20,-1/20])
#print('fl: ', fl)
return cff
if(np.sum(np.iscomplex(u))>=1):
wec = WENONN.predict(np.real(sortedU)) + WENONN.predict(np.imag(sortedU))*1j
nnc = scale(np.real(sortedU), NNNN) + scale(np.imag(sortedU), NNNN)*1j
op_WENO5 = np.zeros((N,N)) + np.zeros((N,N))*1j
op_NN = np.zeros((N,N)) + np.zeros((N,N))*1j
else:
wec = WENONN.predict(np.real(sortedU))
nnc = scale(np.real(sortedU), NNNN)
op_WENO5 = np.zeros((N,N))
op_NN = np.zeros((N,N))
for i in range(0,N):
for j in range(0,M):
op_WENO5[i,(i+j-int(M/2))%N] -= wec[i,j]
op_WENO5[i,(i+j-int(M/2)-1)%N] += wec[(i-1)%N,j]
op_NN[i,(i+j-int(M/2))%N] -= nnc[i,j]
op_NN[i,(i+j-int(M/2)-1)%N] += nnc[(i-1)%N,j]
#print(i,': ', op_WENO5[i,:])
WEeigs, WEvecs = np.linalg.eig(op_WENO5)
NNeigs, NNvecs = np.linalg.eig(op_NN)
con_nn = np.linalg.solve(NNvecs, u)
#now do some rungekutta stuff
x = np.linspace(-3,3,301)
y = np.linspace(-3,3,301)
X,Y = np.meshgrid(x,y)
Z = X + Y*1j
g = abs(1 + Z + np.power(Z,2)/2 + np.power(Z,3)/6)
g_we = abs(1 + (h*WEeigs) + np.power(h*WEeigs,2)/2 + np.power(h*WEeigs,3)/6)
g_nn = abs(1 + (h*NNeigs) + np.power(h*NNeigs,2)/2 + np.power(h*NNeigs,3)/6)
#do some processing for that plot of the contributions vs the amplification factor
c_abs = np.abs(con_nn)
ords = np.argsort(c_abs)
g_sort = g_nn[ords]
c_sort = con_nn[ords]
c_norm = c_sort/np.linalg.norm(c_sort,1)
c_abs2 = np.abs(c_norm)
#do some processing for the most unstable mode
ordsG = np.argsort(g_nn)
unstb = NNvecs[:,ordsG[-1]]
if(makePlots>=1):
plt.figure()
plt.plot(np.sort(g_we),'.')
plt.plot(np.sort(g_nn),'.')
plt.legend(('WENO5','NN'))
plt.title('CFL = '+ str(h))
plt.xlabel('index')
plt.ylabel('|1+HL+(HL^2)/2+(HL^3)/6|')
plt.ylim([0,1.2])
plt.figure()
plt.plot(np.real(WEeigs),np.imag(WEeigs),'.')
plt.plot(np.real(NNeigs),np.imag(NNeigs),'.')
plt.title('Eigenvalues')
plt.legend(('WENO5','NN'))
plt.figure()
plt.plot(g_nn,abs(con_nn),'.')
plt.xlabel('Amplification Factor')
plt.ylabel('Contribution')
print('Max WENO g: ',np.max(g_we))
print('Max NN g: ',np.max(g_nn))
if(makePlots>=2):
plt.figure()
sml = 1E-2
plt.contourf(X, Y, g, [1-sml,1+sml])
plt.figure()
plt.plot(g_sort,c_abs2,'.')
plt.xlabel('Scaled Amplification Factor')
plt.ylabel('Contribution')
return g_nn, con_nn, unstb
#return np.max(g_we), np.max(g_nn)
#plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.025,20))
def specAnalysisData(model, u, RKM,WENONN, NNNN, CFL, giveModel):
nx, nt = np.shape(u)
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
maxWe = np.zeros(nt)
maxNN = np.zeros(nt)
for i in range(0,nt):
print(i)
maxWe[i], maxNN[i] = specAnalysis(model, u[:,i], RKM, WENONN, NNNN, CFL, True, False)
plt.figure()
plt.plot(maxWe)
plt.figure()
plt.plot(maxNN)
return maxWe, maxNN
def eigenvectorProj(model, u, WENONN, NNNN):
nx = np.shape(u)
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
def evalPerf(x,t,P,u,eex):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
t: time coordinates
P: periods advected for
u: velocity
Outputs:
tvm: max total variation in solution
swm: max shock width in solution
'''
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
tvm = np.max(tv)
u = np.transpose(u)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
#plot width of discontinuity over time for neural network and WENO5
'''
us = np.roll(u, 1, axis = 0)
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
plot width of discontinuity over time for neural network and WENO5
'''
u = np.transpose(u)
u_WE = np.transpose(u_WE)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
dx = x[1]-x[0]
'''
eex = (-xp%L-L/2)/dx
eex[eex>49] = -(eex[eex>49]-50)
eex[eex>=1] = 1
eex[eex<=0] = 0
'''
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
er_we = np.abs(eex-u_WE)
wdth = np.sum(np.greater(er,0.01),axis=1)*dx/2
wdth_we = np.sum(np.greater(er_we,0.01),axis=1)*dx/2
plt.figure()
plt.plot(t,wdth)
plt.plot(t,wdth_we)
plt.legend(('Neural Network','WENO5'))
plt.xlabel('t')
plt.ylabel('Discontinuity Width')
def convStudy():
'''
Test order of accuracy of an FVM
'''
nr = 21
errNN = np.zeros(nr)
errWE = np.zeros(nr)
errEN = np.zeros(nr)
dxs = np.zeros(nr)
for i in range(0,nr):
print(i)
nx = 10*np.power(10,0.1*i)
L = 2
x = np.linspace(0,L,int(nx),endpoint=False)
dx = x[1]-x[0]
FVM1 = NNWENO5dx(dx)
FVM2 = WENO5()
FVM3 = ENO3()
u = np.sin(4*np.pi*x) + np.cos(4*np.pi*x)
du = 4*np.pi*(np.cos(4*np.pi*x)-np.sin(4*np.pi*x))
resNN = FVM1.evalF(u)
resWE = FVM2.evalF(u)
resEN = FVM3.evalF(u)
du_EN = (resNN-np.roll(resEN,1))/dx
du_NN = (resNN-np.roll(resNN,1))/dx
du_WE = (resWE-np.roll(resWE,1))/dx
errNN[i] = np.linalg.norm(du_NN-du,ord = 2)/np.sqrt(nx)
errEN[i] = np.linalg.norm(du_EN-du,ord = 2)/np.sqrt(nx)
errWE[i] = np.linalg.norm(du_WE-du,ord = 2)/np.sqrt(nx)
dxs[i] = dx
nti = 6
toRegDx = np.ones((nti,2))
toRegDx[:,1] = np.log10(dxs[-nti:])
toRegWe = np.log10(errWE[-nti:])
toRegNN = np.log10(errNN[-nti:])
toRegEN = np.log10(errEN[-nti:])
c_we, m_we = np.linalg.lstsq(toRegDx, toRegWe, rcond=None)[0]
c_nn, m_nn = np.linalg.lstsq(toRegDx, toRegNN, rcond=None)[0]
c_en, m_en = np.linalg.lstsq(toRegDx, toRegEN, rcond=None)[0]
print('WENO5 slope: ',m_we)
print('NN slope: ',m_nn)
print('ENO3 slope: ',m_en)
plt.loglog(dxs,errNN,'o')
plt.loglog(dxs,errWE,'o')
plt.loglog(dxs,errEN,'o')
plt.loglog(dxs,(10**c_we)*(dxs**m_we))
plt.loglog(dxs,(10**c_nn)*(dxs**m_nn))
plt.loglog(dxs,(10**c_en)*(dxs**m_en))
plt.legend(['WENO-NN','WENO5-JS','ENO3'])
plt.xlabel('$\Delta x$')
plt.ylabel('$E$')
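# Sketch of the order-of-accuracy fit used above (illustrative data): with err = 0.7*dx**3,
# the least-squares slope in log-log space recovers ~3.
#   dxs_demo = np.logspace(-3, -1, 6)
#   A = np.ones((6, 2)); A[:, 1] = np.log10(dxs_demo)
#   _, m_fit = np.linalg.lstsq(A, np.log10(0.7 * dxs_demo**3), rcond=None)[0]   # m_fit ~ 3.0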
def plot_visc(x,t,uv,FVM,P,NN,contours):
nx, nt = np.shape(uv)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
def scheme(u,NN):
ust = np.zeros_like(u)
ust = ust + u
min_u = np.amin(u,1)
max_u = np.amax(u,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(u[:,2])
u_tmp[:] = u[:,2]
for i in range(0,5):
u[:,i] = (u[:,i]-min_u)/(max_u-min_u)
ep = 1E-6
#compute fluxes on sub stencils (similar to derivatives I guess)
f1 = 1/3*u[:,0]-7/6*u[:,1]+11/6*u[:,2]
f2 = -1/6*u[:,1]+5/6*u[:,2]+1/3*u[:,3]
f3 = 1/3*u[:,2]+5/6*u[:,3]-1/6*u[:,4]
#compute derivatives on sub stencils
justU = 1/30*ust[:,0]-13/60*ust[:,1]+47/60*ust[:,2]+9/20*ust[:,3]-1/20*ust[:,4]
dudx = 0*ust[:,0]+1/12*ust[:,1]-5/4*ust[:,2]+5/4*ust[:,3]-1/12*ust[:,4]
dudx = (dudx - np.roll(dudx,1))
d2udx2 = -1/4*ust[:,0]+3/2*ust[:,1]-2*ust[:,2]+1/2*ust[:,3]+1/4*ust[:,4]
d2udx2 = (d2udx2 - np.roll(d2udx2,1))
d3udx3 = 0*ust[:,0]-1*ust[:,1]+3*ust[:,2]-3*ust[:,3]+1*ust[:,4]
d3udx3 = (d3udx3 - np.roll(d3udx3,1))
#compute smoothness indicators
B1 = 13/12*np.power(u[:,0]-2*u[:,1]+u[:,2],2) + 1/4*np.power(u[:,0]-4*u[:,1]+3*u[:,2],2)
B2 = 13/12*np.power(u[:,1]-2*u[:,2]+u[:,3],2) + 1/4*np.power(u[:,1]-u[:,3],2)
B3 = 13/12*np.power(u[:,2]-2*u[:,3]+u[:,4],2) + 1/4*np.power(3*u[:,2]-4*u[:,3]+u[:,4],2)
#assign linear weights
g1 = 1/10
g2 = 3/5
g3 = 3/10
#compute the unscaled nonlinear weights
wt1 = g1/np.power(ep+B1,2)
wt2 = g2/np.power(ep+B2,2)
wt3 = g3/np.power(ep+B3,2)
wts = wt1 + wt2 + wt3
#scale the nonlinear weights
w1 = wt1/wts
w2 = wt2/wts
w3 = wt3/wts
#compute the coefficients
c1 = np.transpose(np.array([1/3*w1,-7/6*w1-1/6*w2,11/6*w1+5/6*w2+1/3*w3,1/3*w2+5/6*w3,-1/6*w3]))
#fl = np.multiply(fl,(max_u-min_u))+min_u
if(NN):
A1 = np.array([[-0.94130915, -0.32270527, -0.06769955],
[-0.37087336, -0.05059665, 0.55401474],
[ 0.40815187, -0.5602299 , -0.01871526],
[ 0.56200236, -0.5348897 , -0.04091108],
[-0.6982639 , -0.49512517, 0.52821904]])
b1 = np.array([-0.04064859, 0. , 0. ])
c2 = np.maximum(np.matmul(c1,A1)+b1,0)
A2 = np.array([[ 0.07149544, 0.9637294 , 0.41981453],
[ 0.75602794, -0.0222342 , -0.95690656],
[ 0.07406807, -0.41880417, -0.4687035 ]])
b2 = np.array([-0.0836111 , -0.00330033, -0.01930024])
c3 = np.maximum(np.matmul(c2,A2)+b2,0)
A3 = np.array([[ 0.8568574 , -0.5809458 , 0.04762125],
[-0.26066098, -0.23142155, -0.6449008 ],
[ 0.7623346 , 0.81388015, -0.03217626]])
b3 = np.array([-0.0133561 , -0.05374921, 0. ])
c4 = np.maximum(np.matmul(c3,A3)+b3,0)
A4 = np.array([[-0.2891752 , -0.53783405, -0.17556567, -0.7775279 , 0.69957024],
[-0.12895434, 0.13607207, 0.12294354, 0.29842544, -0.00198237],
[ 0.5356503 , 0.09317833, 0.5135357 , -0.32794708, 0.13765627]])
b4 = np.array([ 0.00881096, 0.01138764, 0.00464343, 0.0070305 , -0.01644066])
dc = np.matmul(c4,A4)+b4
ct = c1 - dc
Ac = np.array([[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2]])
bc = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
dc2 = np.matmul(ct,Ac)+bc
C = ct + dc2
Cons = C[:,0] + C[:,1] + C[:,2] + C[:,3] + C[:,4]
C_visc = -5/2*C[:,0] - 3/2*C[:,1] - 1/2*C[:,2] + 1/2*C[:,3] + 3/2*C[:,4]
C_visc2 = 19/6*C[:,0] + 7/6*C[:,1] + 1/6*C[:,2] + 1/6*C[:,3] + 7/6*C[:,4]
C_visc3 = -65/24*C[:,0] - 5/8*C[:,1] - 1/24*C[:,2] + 1/24*C[:,3] + 5/8*C[:,4]
C_visc = C_visc.flatten()
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
else:
Cons = c1[:,0] + c1[:,1] + c1[:,2] + c1[:,3] + c1[:,4]
C_visc = (-5/2*c1[:,0] - 3/2*c1[:,1] - 1/2*c1[:,2] + 1/2*c1[:,3] + 3/2*c1[:,4])
C_visc2 = (19/6*c1[:,0] + 7/6*c1[:,1] + 1/6*c1[:,2] + 1/6*c1[:,3] + 7/6*c1[:,4])
C_visc3 = (-65/24*c1[:,0] - 5/8*c1[:,1] - 1/24*c1[:,2] + 1/24*c1[:,3] + 5/8*c1[:,4])
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
return Cons,-C_visc,-C_visc2,-C_visc3, dudx, d2udx2, d3udx3
C_ = np.zeros_like(uv)
C_i = np.zeros_like(uv)
C_ii = np.zeros_like(uv)
C_iii = np.zeros_like(uv)
d_i = np.zeros_like(uv)
d_ii = np.zeros_like(uv)
d_iii = np.zeros_like(uv)
for i in range(0,nt):
u_part = FVM.partU(uv[:,i])
C_[:,i],C_i[:,i],C_ii[:,i],C_iii[:,i],d_i[:,i],d_ii[:,i],d_iii[:,i] = scheme(u_part,NN)
dx = x[1]-x[0]
C_ = np.transpose(C_)
C_i = np.transpose(C_i)*dx
C_ii = np.transpose(C_ii)*dx**2
C_iii = np.transpose(C_iii)*dx**3
d_i = np.transpose(d_i)/(dx**2)
d_ii = np.transpose(d_ii)/(dx**3)
d_iii = np.transpose(d_iii)/(dx**4)
indFirst = 100#ignore 1st few timesteps for scaling plots due to discontinuity
if(contours):
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(12, 4))
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
first = ax1.contourf(xtp[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(-0.3,0.3,100))
#first = ax1.contourf(xtp[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
#first = ax1.contourf(xp+i*L,tg,C_i*np.abs(d_i),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
np.savetxt('firstXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('firstTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('firstVisc'+str(i)+'.csv',C_i[indsTP,:]*np.abs(d_i[indsTP,:]))
ax1.set_title('(A)')
ax1.set_xlim(x[0],x[-1])
ax1.set_xlabel('$x-ct$')
ax1.set_ylabel('$t$')
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
second = ax2.contourf(xtp[indsTP,:],tg[indsTP,:],C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]),np.linspace(-0.3,0.3,100))
#second = ax2.contourf(xp+i*L,tg,C_ii*np.abs(d_ii),np.linspace(np.min((C_ii*np.abs(d_ii))[indFirst:,:]),np.max((C_ii*np.abs(d_ii))[indFirst:,:]),100))
np.savetxt('secondXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('secondTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('secondVisc'+str(i)+'.csv',C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]))
ax2.set_title('(B)')
ax2.set_xlim(x[0],x[-1])
ax2.set_xlabel('$x-ct$')
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
third = ax3.contourf(xtp[indsTP,:],tg[indsTP,:],C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]),np.linspace(-0.3,0.3,100))
np.savetxt('thirdXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('thirdTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('thirdVisc'+str(i)+'.csv',C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]))
#third = ax3.contourf(xp+i*L,tg,C_iii*np.abs(d_iii),np.linspace(np.min((C_iii*np.abs(d_iii))[indFirst:,:]),np.max((C_iii*np.abs(d_iii))[indFirst:,:]),100))
ax3.set_title('(C)')
ax3.set_xlim(x[0],x[-1])
ax3.set_xlabel('$x-ct$')
f.subplots_adjust(right=0.8)
#cbar_ax1 = f.add_axes([.72, 0.15, 0.05, 0.7])
#cbar_ax2 = f.add_axes([.82, 0.15, 0.05, 0.7])
#cbar_ax3 = f.add_axes([.92, 0.15, 0.05, 0.7])
#f.colorbar(first, cax=cbar_ax1)
#f.colorbar(second, cax=cbar_ax2)
#f.colorbar(third, cax=cbar_ax3)
#f.colorbar(first, ax=ax1)
#f.colorbar(second, ax=ax2)
#f.colorbar(third, ax=ax3)
f.tight_layout()
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([.82, 0.15, 0.05, 0.7])
f.colorbar(third, cax=cbar_ax)
#f.tight_layout()
else:
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(15, 4))
ax1.plot(x,C_i[150,:]*np.abs(d_i[150,:]))
ax1.plot(x,C_i[1500,:]*np.abs(d_i[1500,:]))
ax1.plot(x,C_i[3750,:]*np.abs(d_i[3750,:]))
ax1.plot(x,C_i[7500,:]*np.abs(d_i[7500,:]))
ax1.set_title('(A)')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$t$')
ax2.plot(x,C_ii[150,:]*np.abs(d_ii[150,:]))
ax2.plot(x,C_ii[1500,:]*np.abs(d_ii[1500,:]))
ax2.plot(x,C_ii[3750,:]*np.abs(d_ii[3750,:]))
ax2.plot(x,C_ii[7500,:]*np.abs(d_ii[7500,:]))
ax2.set_title('(B)')
ax2.set_xlabel('$x$')
ax3.plot(x,C_iii[150,:]*np.abs(d_iii[150,:]))
ax3.plot(x,C_iii[1500,:]*np.abs(d_iii[1500,:]))
ax3.plot(x,C_iii[3750,:]*np.abs(d_iii[3750,:]))
ax3.plot(x,C_iii[7500,:]*np.abs(d_iii[7500,:]))
ax3.set_title('(C)')
ax3.set_xlabel('$x$')
ax3.legend(('$t=2$','$t=20$','$t=50$','$t=100$'))
def plot_visc_new(x,t,uv,FVM,P,NN,contours):
nx, nt = np.shape(uv)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
def scheme(u,NN):
ust = np.zeros_like(u)
ust = ust + u
min_u = np.amin(u,1)
import time
from .score import Scorer
import numpy as np
from math import radians
import datetime as dt
import scipy
import sklearn
from sklearn.neighbors import DistanceMetric
from sklearn.metrics.pairwise import euclidean_distances
def compare_backward_forward(n):
scorer = Scorer()
scorer.import_torben_flight()
latlon = np.radians(np.column_stack([scorer.lat, scorer.lon]))
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package implement graph sampling algorithm.
"""
import time
import copy
import numpy as np
import pgl
from pgl.utils.logger import log
from pgl import graph_kernel
__all__ = [
'graphsage_sample', 'node2vec_sample', 'deepwalk_sample',
'metapath_randomwalk', 'pinsage_sample', 'graph_saint_random_walk_sample'
]
def traverse(item):
"""traverse the list or numpy"""
if isinstance(item, list) or isinstance(item, np.ndarray):
for i in iter(item):
for j in traverse(i):
yield j
else:
yield item
def flat_node_and_edge(nodes, eids, weights=None):
"""flatten the sub-lists to one list"""
nodes = list(set(traverse(nodes)))
eids = list(traverse(eids))
if weights is not None:
weights = list(traverse(weights))
return nodes, eids, weights
def edge_hash(src, dst):
"""edge_hash
"""
return src * 100000007 + dst
def graphsage_sample(graph, nodes, samples, ignore_edges=[]):
"""Implement of graphsage sample.
Reference paper: https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf.
Args:
graph: A pgl graph instance
nodes: Sample starting from nodes
samples: A list, number of neighbors in each layer
ignore_edges: list of edge(src, dst) will be ignored.
Return:
A list of subgraphs
"""
start = time.time()
num_layers = len(samples)
start_nodes = nodes
nodes = list(start_nodes)
eids, edges = [], []
nodes_set = set(nodes)
layer_nodes, layer_eids, layer_edges = [], [], []
ignore_edge_set = set([edge_hash(src, dst) for src, dst in ignore_edges])
for layer_idx in reversed(range(num_layers)):
if len(start_nodes) == 0:
layer_nodes = [nodes] + layer_nodes
layer_eids = [eids] + layer_eids
layer_edges = [edges] + layer_edges
continue
batch_pred_nodes, batch_pred_eids = graph.sample_predecessor(
start_nodes, samples[layer_idx], return_eids=True)
start = time.time()
last_nodes_set = nodes_set
nodes, eids = copy.copy(nodes), copy.copy(eids)
edges = copy.copy(edges)
nodes_set, eids_set = set(nodes), set(eids)
for srcs, dst, pred_eids in zip(batch_pred_nodes, start_nodes,
batch_pred_eids):
for src, eid in zip(srcs, pred_eids):
if edge_hash(src, dst) in ignore_edge_set:
continue
if eid not in eids_set:
eids.append(eid)
edges.append([src, dst])
eids_set.add(eid)
if src not in nodes_set:
nodes.append(src)
nodes_set.add(src)
layer_edges = [edges] + layer_edges
start_nodes = list(nodes_set - last_nodes_set)
layer_nodes = [nodes] + layer_nodes
layer_eids = [eids] + layer_eids
start = time.time()
# Find new nodes
subgraphs = []
for i in range(num_layers):
subgraphs.append(
graph.subgraph(
nodes=layer_nodes[0], eid=layer_eids[i], edges=layer_edges[i]))
# only for this task
subgraphs[i].node_feat["index"] = np.array(
layer_nodes[0], dtype="int64")
return subgraphs
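# Usage sketch (illustrative; `g` and `seed_nodes` are placeholders): sample a two-layer
# neighbourhood with fan-outs 25 and 10 around a batch of seed nodes.
#   subgraphs = graphsage_sample(g, nodes=seed_nodes, samples=[25, 10])   # len(subgraphs) == 2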
def alias_sample(size, alias, events):
"""Implement of alias sample.
Args:
size: Output shape.
alias: The alias table build by `alias_sample_build_table`.
events: The events table build by `alias_sample_build_table`.
Return:
samples: The generated random samples.
"""
rand_num = np.random.uniform(0.0, len(alias), size)
idx = rand_num.astype("int64")
uni = rand_num - idx
flags = (uni >= alias[idx])
idx[flags] = events[idx][flags]
return idx
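# Illustrative sketch with a hand-built table for P(0)=0.25, P(1)=0.75 (tables are normally
# produced by graph_kernel.alias_sample_build_table): column i keeps its own index with
# probability alias[i] and otherwise returns events[i].
#   thresholds = np.array([0.5, 1.0])   # "alias" argument
#   targets = np.array([1, 1])          # "events" argument
#   draws = alias_sample(100000, thresholds, targets)
#   (draws == 1).mean()                 # ~0.75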
def graph_alias_sample_table(graph, edge_weight_name):
"""Build alias sample table for weighted deepwalk.
Args:
graph: The input graph
edge_weight_name: The name of edge weight in edge_feat.
Return:
Alias sample tables for each nodes.
"""
edge_weight = graph.edge_feat[edge_weight_name]
_, eids_array = graph.successor(return_eids=True)
alias_array, events_array = [], []
for eids in eids_array:
probs = edge_weight[eids]
probs /= np.sum(probs)
alias, events = graph_kernel.alias_sample_build_table(probs)
alias_array.append(alias), events_array.append(events)
alias_array, events_array = np.array(alias_array), np.array(events_array)
return alias_array, events_array
def deepwalk_sample(graph, nodes, max_depth, alias_name=None,
events_name=None):
"""Implement of random walk.
This function get random walks path for given nodes and depth.
Args:
nodes: Walk starting from nodes
max_depth: Max walking depth
Return:
A list of walks.
"""
walk = []
# init
for node in nodes:
walk.append([node])
cur_walk_ids = np.arange(0, len(nodes))
cur_nodes = np.array(nodes)
for l in range(max_depth):
# select the walks not end
cur_succs = graph.successor(cur_nodes)
mask = [len(succ) > 0 for succ in cur_succs]
if np.any(mask):
cur_walk_ids = cur_walk_ids[mask]
cur_nodes = cur_nodes[mask]
cur_succs = cur_succs[mask]
else:
# stop when all nodes have no successor
break
if alias_name is not None and events_name is not None:
sample_index = [
alias_sample([1], graph.node_feat[alias_name][node],
graph.node_feat[events_name][node])[0]
for node in cur_nodes
]
else:
outdegree = [len(cur_succ) for cur_succ in cur_succs]
sample_index = np.floor(
np.random.rand(cur_succs.shape[0]) * outdegree).astype("int64")
nxt_cur_nodes = []
for s, ind, walk_id in zip(cur_succs, sample_index, cur_walk_ids):
walk[walk_id].append(s[ind])
nxt_cur_nodes.append(s[ind])
cur_nodes = np.array(nxt_cur_nodes)
return walk
def node2vec_sample(graph, nodes, max_depth, p=1.0, q=1.0):
"""Implement of node2vec random walk.
Reference paper: https://cs.stanford.edu/~jure/pubs/node2vec-kdd16.pdf.
Args:
graph: A pgl graph instance
nodes: Walk starting from nodes
max_depth: Max walking depth
p: Return parameter
q: In-out parameter
Return:
A list of walks.
"""
if p == 1.0 and q == 1.0:
return deepwalk_sample(graph, nodes, max_depth)
walk = []
# init
for node in nodes:
walk.append([node])
cur_walk_ids = np.arange(0, len(nodes))
cur_nodes = np.array(nodes)
prev_nodes = np.array([-1] * len(nodes), dtype="int64")
prev_succs = np.array([[]] * len(nodes), dtype="int64")
for l in range(max_depth):
# select the walks not end
cur_succs = graph.successor(cur_nodes)
mask = [len(succ) > 0 for succ in cur_succs]
if np.any(mask):
cur_walk_ids = cur_walk_ids[mask]
cur_nodes = cur_nodes[mask]
prev_nodes = prev_nodes[mask]
prev_succs = prev_succs[mask]
cur_succs = cur_succs[mask]
else:
# stop when all nodes have no successor
break
num_nodes = cur_nodes.shape[0]
nxt_nodes = np.zeros(num_nodes, dtype="int64")
for idx, (
succ, prev_succ, walk_id, prev_node
) in enumerate(zip(cur_succs, prev_succs, cur_walk_ids, prev_nodes)):
sampled_succ = graph_kernel.node2vec_sample(succ, prev_succ,
prev_node, p, q)
walk[walk_id].append(sampled_succ)
nxt_nodes[idx] = sampled_succ
prev_nodes, prev_succs = cur_nodes, cur_succs
cur_nodes = nxt_nodes
return walk
def metapath_randomwalk(graph,
start_nodes,
metapath,
walk_length,
alias_name=None,
events_name=None):
"""Implementation of metapath random walk in heterogeneous graph.
Args:
graph: instance of pgl heterogeneous graph
start_nodes: start nodes to generate walk
metapath: meta path for sample nodes.
e.g: "c2p-p2a-a2p-p2c"
walk_length: the walk length
Return:
a list of metapath walks.
"""
edge_types = metapath.split('-')
walk = []
for node in start_nodes:
walk.append([node])
cur_walk_ids = np.arange(0, len(start_nodes))
cur_nodes = np.array(start_nodes)
mp_len = len(edge_types)
for i in range(0, walk_length - 1):
g = graph[edge_types[i % mp_len]]
cur_succs = g.successor(cur_nodes)
mask = [len(succ) > 0 for succ in cur_succs]
if np.any(mask):
cur_walk_ids = cur_walk_ids[mask]
cur_nodes = cur_nodes[mask]
cur_succs = cur_succs[mask]
else:
# stop when all nodes have no successor
break
if alias_name is not None and events_name is not None:
sample_index = [
alias_sample([1], g.node_feat[alias_name][node],
g.node_feat[events_name][node])[0]
for node in cur_nodes
]
else:
outdegree = [len(cur_succ) for cur_succ in cur_succs]
sample_index = np.floor(
np.random.rand(cur_succs.shape[0]) * outdegree).astype("int64")
import numpy as np
import time
import oracles
from scipy.special import expit
class GDClassifier:
"""
Implementation of gradient descent for an arbitrary oracle
that follows the oracle specification from the oracles.py module.
"""
def __init__(self, loss_function, step_alpha=1, step_beta=1,
tolerance=1e-5, max_iter=1000, **kwargs):
"""
loss_function - string selecting the classifier's loss function.
Possible values:
- 'binary_logistic' - binary logistic regression
- 'multinomial_logistic' - multiclass logistic regression
step_alpha - float, step-size parameter from the assignment text
step_beta - float, step-size parameter from the assignment text
tolerance - accuracy at which optimization should stop.
Use the stopping criterion based on the difference of consecutive function values:
if (f(x_{k+1}) - f(x_{k})) < tolerance: stop
max_iter - maximum number of iterations
**kwargs - arguments needed for initialization
"""
self.loss_function = loss_function
if loss_function == 'multinomial_logistic':
self.loss = oracles.MulticlassLogistic(**kwargs)
else:
self.loss = oracles.BinaryLogistic(**kwargs)
self.alpha = step_alpha
self.beta = step_beta
self.tolerance = tolerance
self.max_iter = max_iter
self.kwargs = kwargs
def fit(self, X, y, w_0='random', trace=False, *, x_test=None, y_test=None): #, predict_log_freq=5):
"""
Fit the method on the sample X with answers y
X - scipy.sparse.csr_matrix or a 2-dimensional numpy.array
y - 1-dimensional numpy array
w_0 - initial guess for the method (if w_0='zeros' it is filled with zeros,
if w_0='random', a random initial guess is used)
trace - bool
If trace = True, the method must return a dictionary history with information
about the method's behaviour. len(history) = number of iterations + 1 (the initial point)
history['time']: list of floats, time intervals between two consecutive iterations
history['func']: list of floats, function values at each iteration
(0 for the very first point)
"""
if self.loss_function == 'multinomial_logistic':
self.classes_ = np.arange(self.loss.class_number, dtype=int) # w_0.shape[0]
w_0_shape = (self.loss.class_number, X.shape[1])
else:
self.classes_ = np.array([-1, 1])
w_0_shape = (X.shape[1], )
if isinstance(w_0, str):
if w_0 == 'zeros':
w_0 = np.zeros(w_0_shape)
elif w_0 == 'random':
w_0 = np.random.randn(*w_0_shape) / 100
elif not isinstance(w_0, np.ndarray):
raise TypeError('w_0 is incorrect')
self.w = w = w_0
f_prev = self.loss.func(X, y, w)
if trace:
history = {'func': [f_prev], 'time': [0]}
time_prev = time.time()
if x_test is not None:
history['score'] = [0]
for k in range(1, self.max_iter + 1):
grad = self.loss.grad(X, y, w)
eta = self.alpha / k ** self.beta
w -= eta * grad
f_cur = self.loss.func(X, y, w)
if trace:
history['func'].append(f_cur)
history['time'].append(time.time() - time_prev)
if x_test is not None:
history['score'].append(self.score(x_test, y_test))
time_prev = time.time()
if abs(f_cur - f_prev) < self.tolerance:
break
f_prev = f_cur
self.n_iters_ = k + 1
if trace:
return history
def predict(self, X):
"""
Predict class labels for the sample X
X - scipy.sparse.csr_matrix or a 2-dimensional numpy.array
return: 1-dimensional numpy array with predictions
"""
if self.loss_function == 'multinomial_logistic':
return self.classes_[np.argmax(X.dot(self.w.T), axis=1)]
else:
return self.classes_[(X.dot(self.w) >= 0).astype(int, copy=False)]
def score(self, X, y):
return (self.predict(X) == y).mean()
def predict_proba(self, X):
"""
Get the probabilities that objects in X belong to class k
X - scipy.sparse.csr_matrix or a 2-dimensional numpy.array
return: 2-dimensional numpy array where the [i, k] entry is the probability
that the i-th object belongs to class k
"""
if self.loss_function == 'multinomial_logistic':
a = X.dot(self.w.T)
a -= a.max(axis=1, keepdims=True)
z = np.exp(a)
sumz = z.sum(axis=1, keepdims=True)
P = z / sumz
return P
else:
probas = expit(X.dot(self.w))
return np.vstack((probas, 1 - probas)).T
def get_objective(self, X, y):
"""
Get the value of the objective function on the sample X with answers y
X - scipy.sparse.csr_matrix or a 2-dimensional numpy.array
y - 1-dimensional numpy array
return: float
"""
return self.loss.func(X, y, self.w)
def get_gradient(self, X, y):
"""
Get the gradient of the objective on the sample X with answers y
X - scipy.sparse.csr_matrix or a 2-dimensional numpy.array
y - 1-dimensional numpy array
return: numpy array, its shape depends on the task
"""
return self.loss.grad(X, y, self.w)
def get_weights(self):
"""
Get the current weights of the model
"""
return self.w
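# Usage sketch (illustrative; assumes the oracle accepts whatever is passed through **kwargs):
#   clf = GDClassifier('binary_logistic', step_alpha=1, step_beta=0.5,
#                      tolerance=1e-6, max_iter=500)
#   history = clf.fit(X_train, y_train, w_0='zeros', trace=True)
#   clf.score(X_test, y_test)
# The step size decays as eta_k = step_alpha / k**step_beta,
# i.e. 1.0, 0.71, 0.58, 0.5, ... for step_alpha=1, step_beta=0.5.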
class SGDClassifier(GDClassifier):
"""
Implementation of stochastic gradient descent for an arbitrary oracle
that follows the oracle specification from the oracles.py module.
"""
def __init__(self, loss_function, batch_size=1, step_alpha=1, step_beta=1,
tolerance=1e-5, max_iter=1000, random_seed=153, **kwargs):
"""
loss_function - string selecting the classifier's loss function.
Possible values:
- 'binary_logistic' - binary logistic regression
- 'multinomial_logistic' - multiclass logistic regression
batch_size - size of the mini-batch used to compute the gradient
step_alpha - float, step-size parameter from the assignment text
step_beta - float, step-size parameter from the assignment text
tolerance - accuracy at which optimization should stop.
Use the stopping criterion based on the difference of consecutive function values:
if (f(x_{k+1}) - f(x_{k})) < tolerance: stop
max_iter - maximum number of iterations
random_seed - np.random.seed(random_seed) must be called at the start of fit.
This parameter is needed for reproducibility across machines.
**kwargs - arguments needed for initialization
"""
self.loss_function = loss_function
if loss_function == 'multinomial_logistic':
self.loss = oracles.MulticlassLogistic(**kwargs)
else:
self.loss = oracles.BinaryLogistic(**kwargs)
self.batch_size = batch_size
self.alpha = step_alpha
self.beta = step_beta
self.tolerance = tolerance
self.max_iter = max_iter
self.random_seed = random_seed
self.kwargs = kwargs
def fit(self, X, y, w_0='random', trace=False, log_freq=1, *, x_test=None, y_test=None):
"""
Fit the method on the sample X with answers y
X - scipy.sparse.csr_matrix or a 2-dimensional numpy.array
y - 1-dimensional numpy array
w_0 - initial guess for the method (if w_0='zeros' it is filled with zeros,
if w_0='random', a random initial guess is used)
If trace = True, the method must return a dictionary history with information about
the method's behaviour. If history were updated after every iteration, the method would
no longer be faster than GD, so the history is only updated after a certain number of
processed objects, depending on the approximate epoch number.
Approximate epoch number:
{number of objects processed by SGD} / {number of objects in the sample}
log_freq - float between 0 and 1 controlling the logging frequency.
An update should happen every time the difference between two consecutive approximate
epoch numbers exceeds log_freq.
history['epoch_num']: list of floats, each element records the approximate epoch number
history['time']: list of floats, time intervals between two consecutive measurements
history['func']: list of floats, function values after the current approximate epoch number
history['weights_diff']: list of floats, squared norm of the difference of the weight vectors between consecutive measurements
(0 for the very first point)
"""
np.random.seed(self.random_seed)
if self.loss_function == 'multinomial_logistic':
self.classes_ = np.arange(self.loss.class_number, dtype=int) # w_0.shape[0]
w_0_shape = (self.loss.class_number, X.shape[1])
else:
self.classes_ = np.array([-1, 1])
w_0_shape = (X.shape[1], )
if isinstance(w_0, str):
if w_0 == 'zeros':
w_0 = np.zeros(w_0_shape)
elif w_0 == 'random':
w_0 = np.random.randn(*w_0_shape) / 100
elif not isinstance(w_0, np.ndarray):
raise TypeError('w_0 is incorrect')
self.w = w = w_0
f_prev = self.loss.func(X, y, w)
if trace:
history = {'func': [f_prev], 'time': [0], 'weights_diff': [0.], 'epoch_num': [0.]}
time_prev = time.time()
prev_epoch_num = 0
if x_test is not None:
history['score'] = [0]
num_obj_watched = 0 # number of objects processed so far
N = X.shape[0]
ind = N
for k in range(1, self.max_iter + 1):
# if fewer than batch_size / 2 unseen objects remain in the sample
if ind >= N - self.batch_size / 2:
perm = np.random.permutation(N)
ind = 0
idxs = perm[ind:ind + self.batch_size]
num_obj_watched += idxs.shape[0]
ind += self.batch_size
grad = self.loss.grad(X[idxs], y[idxs], w)
eta = self.alpha / k ** self.beta
w -= eta * grad
f_cur = self.loss.func(X, y, w)
if trace:
cur_epoch_num = num_obj_watched / N
if cur_epoch_num - prev_epoch_num > log_freq:
history['func'].append(f_cur)
history['time'].append(time.time() - time_prev)
history['epoch_num'].append(cur_epoch_num)
history['weights_diff'].append(np.linalg.norm(eta * grad))
# -*- coding: utf-8 -*-
"""Test for the csdm object
1) generate csdm object.
2) split multiple dependent variables to individual objects.
3) add, sub, iadd, radd, isub, rsub, mul, imul, for scalar and ScalarQuantity.
4) rmul, truediv, itruediv, rtruediv, pow, ipow for scalar and ScalarQuantity.
5) min, max, clip, real, imag, conj, round, angle functions.
"""
import json
import numpy as np
import pytest
import csdmpy as cp
def get_test(type):
out = np.random.rand(10).astype(type)
a_test = cp.new()
a_test.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
a_test.add_dependent_variable(
{"type": "internal", "quantity_type": "scalar", "unit": "m", "components": out}
)
return out, a_test
def get_test_2d(type):
out = np.random.rand(50).astype(type).reshape(10, 5)
a_test = cp.new()
a_test.dimensions.append(cp.LinearDimension(count=5, increment="1s"))
a_test.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
a_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "m",
"components": out.ravel(),
}
)
return out, a_test
def test_csdm():
data = cp.new(description="This is a test")
assert data != "sd"
assert data.size == 1
# read_only
assert data.read_only is False
data.read_only = True
assert data.read_only is True
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.read_only = "True"
# tags
assert data.tags == []
data.tags = ["1", "2", "3"]
assert data.tags == ["1", "2", "3"]
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.tags = "23"
# version
assert data.version == cp.csdm.CSDM.__latest_CSDM_version__
# geographic_coordinate
assert data.geographic_coordinate == {}
error = "can't set attribute"
with pytest.raises(AttributeError, match=".*{0}.*".format(error)):
data.geographic_coordinate = {}
# description
assert data.description == "This is a test"
data.description = "Enough with the tests"
assert data.description == "Enough with the tests"
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.description = {}
# application
assert data.application == {}
data.application = {"csdmpy": "Some day"}
assert data.application == {"csdmpy": "Some day"}
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.application = "Some other day"
# filename
assert data.filename == ""
# data_structure
structure = {
"csdm": {
"version": "1.0",
"read_only": True,
"tags": ["1", "2", "3"],
"description": "Enough with the tests",
"application": {"csdmpy": "Some day"},
}
}
assert data.data_structure == str(
json.dumps(structure, ensure_ascii=False, sort_keys=False, indent=2)
)
assert data.dict() == structure
# equality check
dm = data.copy()
assert dm == data
assert dm.shape == ()
dm.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
assert dm != data
def test_split():
a = cp.new()
a.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
a.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 1, "quantity_type": "scalar"}
)
b = cp.new()
b.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
b.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 2, "quantity_type": "scalar"}
)
c = cp.new()
c.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
c.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 1, "quantity_type": "scalar"}
)
c.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 2, "quantity_type": "scalar"}
)
a_, b_ = c.split()
assert a_ == a
assert b_ == b
a_test = cp.new()
a_test.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
a_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "m",
"components": np.arange(10),
}
)
a1_test = cp.new()
a1_test.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
a1_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "m",
"components": np.arange(10),
}
)
b_test = cp.new()
b_test.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
b_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "km",
"components": np.arange(10, dtype=float),
}
)
b1_test = cp.new()
b1_test.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
b1_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "km",
"components": np.arange(10),
}
)
b1_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "vector_2",
"unit": "km",
"components": np.arange(20),
}
)
def test_add_sub():
# add
c = a_test + b_test
out = np.arange(10) + 1000 * np.arange(10)
assert np.allclose(c.dependent_variables[0].components, [out])
# sub
c = a_test - b_test
out = np.arange(10) - 1000 * np.arange(10)
assert np.allclose(c.dependent_variables[0].components, [out])
c = b_test - a_test
out = np.arange(10) - 1 / 1000 * np.arange(10)
assert np.allclose(c.dependent_variables[0].components, [out])
# add scalar
c = a_test + cp.ScalarQuantity("2m")
out = np.arange(10) + 2
assert np.allclose(c.dependent_variables[0].components, [out])
c = cp.ScalarQuantity("2m") + a_test
assert np.allclose(c.dependent_variables[0].components, [out])
a_t = cp.as_csdm(np.arange(10))
c = a_t + 5.12
out = np.arange(10) + 5.12
assert np.allclose(c.dependent_variables[0].components, [out])
c = 5.12 + a_t
assert np.allclose(c.dependent_variables[0].components, [out])
# add complex
c = a_t + 5.12 - 4j
out = np.arange(10) + 5.12 - 4j
assert np.allclose(c.dependent_variables[0].components, [out])
c = 5.12 - 4j + a_t
assert np.allclose(c.dependent_variables[0].components, [out])
c = a_test / cp.ScalarQuantity("m") + 3 + 4j
out = np.arange(10) + 3 + 4j
assert np.allclose(c.dependent_variables[0].components, [out])
# subtract scalar
c = a_test - cp.ScalarQuantity("2m")
out = np.arange(10) - 2
assert np.allclose(c.dependent_variables[0].components, [out])
c = cp.ScalarQuantity("2m") - a_test
assert np.allclose(c.dependent_variables[0].components, [-out])
error = r"Cannot operate on CSDM objects with different dimensions."
with pytest.raises(Exception, match=".*{0}.*".format(error)):
c = a1_test + b_test
error = r"Cannot operate on CSDM objects with differnet lengths of dependent"
with pytest.raises(Exception, match=".*{0}.*".format(error)):
c = a_test + b1_test
def test_iadd_isub():
c = a_test.astype("float32")
c += cp.ScalarQuantity("5.0cm")
out = np.arange(10) + 0.05
assert np.allclose(c.dependent_variables[0].components, [out])
c = a_test.astype("float32")
c -= cp.ScalarQuantity("5.0cm")
out = np.arange(10) - 0.05
assert np.allclose(c.dependent_variables[0].components, [out])
c = a_test.astype("float32") / cp.ScalarQuantity("cm")
c -= 0.05
out = np.arange(10) - 0.05
assert np.allclose(c.dependent_variables[0].components, [out])
out, a_test_ = get_test(float) # in units of m
a_test_ += b_test
    out += 1000 * np.arange(10)
import numpy as np
import matplotlib.pyplot as plt
from itertools import compress
from scipy import interpolate
from stl import mesh
import OpenFOAM
def create_panel(p1, p2, N, k, kp, kn, X, Y, kN, kS, S, Solid, D, end):
dx = (p2[0] - p1[0]) / N
dy = (p2[1] - p1[1]) / N
D = interpolate.interp1d([0, N], D, 'linear')
kS.append(kp(D(0)))
S.append(0)
for i in range(N):
X.append(p1[0] + i * dx)
Y.append(p1[1] + i * dy)
kN.append(k(D(i)))
if i > 0:
kS.append(k(D(i)))
S.append(Solid)
class Parachute:
def __init__(self):
self.X = []
self.Y = []
self.KN = []
self.KS = []
self.S = []
        self.LN0 = []
self.LS0 = []
self.p = []
def create_parachute(self, Dd, Dv, Hg, Hb, Ls, Ns, Nb, Ng, Nd, Nv, Ed, Er, Es, Nsus, dp):
theta = np.arcsin(Dd / 2 / Ls)
pointsX = [-Ls * np.cos(theta), 0, Hb, Hb + Hg, Hb + Hg, Hb + Hg, Hb + Hg, Hb, 0, -Ls * np.cos(theta)]
pointsY = [0, Dd / 2, Dd / 2, Dd / 2, Dv / 2, -Dv / 2, -Dd / 2, -Dd / 2, -Dd / 2, 0]
k_sus = interpolate.interp1d([Dv - 0.01, Dd + 0.01], [Ns / Ls * (Es * Nsus * 4 / np.pi / Dd), Ns / Ls * (Es * Nsus * 4 / np.pi / Dd)], 'linear')
k_c = interpolate.interp1d([Dv - 0.01, Dd + 0.01], [Nd * 2 / (Dd - Dv) * (1 / np.pi / Dd * (np.pi * Dv * Ed + Nsus * (Er + Es))), Nd * 2 / (Dd - Dv) * (1 / np.pi / Dd * (np.pi * Dd * Ed + Nsus * (Er + Es)))], 'linear')
k_v = interpolate.interp1d([Dv - 0.01, Dd + 0.01], [Nv / Dv * ((Es + Er) * Nsus * 4 / np.pi / Dd), Nv / Dv * ((Es + Er) * Nsus * 4 / np.pi / Dd)], 'linear')
k_g = interpolate.interp1d([Dv - 0.01, Dd + 0.01], [Ng / Hg * (Es * Nsus * 4 / np.pi / Dd), Ng / Hg * (Es * Nsus * 4 / np.pi / Dd)], 'linear')
k_b = interpolate.interp1d([Dv - 0.01, Dd + 0.01], [Nb * 2 / Hb * (1 / np.pi / Dd * (np.pi * Dv * Ed + Nsus * (Er + Es))), Nb * 2 / Hb * (1 / np.pi / Dd * (np.pi * Dd * Ed + Nsus * (Er + Es)))], 'linear')
k = [k_sus, k_b, k_g, k_c, k_v, k_c,
k_g, k_b, k_sus]
X = []
Y = []
LN0 = []
LS0 = []
KN = []
KS = []
S = []
create_panel([pointsX[0], pointsY[0]], [pointsX[1], pointsY[1]], Ns, k[0], k[-1], k[1], X, Y, KN, KS, S, 0, [Dd, Dd], end=False)
create_panel([pointsX[1], pointsY[1]], [pointsX[2], pointsY[2]], Nb, k[1], k[0], k[2], X, Y, KN, KS, S, 1, [Dd, Dd], end=False)
create_panel([pointsX[2], pointsY[2]], [pointsX[3], pointsY[3]], Ng, k[2], k[1], k[3], X, Y, KN, KS, S, 0, [Dd, Dd], end=False)
create_panel([pointsX[3], pointsY[3]], [pointsX[4], pointsY[4]], Nd, k[3], k[2], k[4], X, Y, KN, KS, S, 1, [Dd, Dv], end=False)
create_panel([pointsX[4], pointsY[4]], [pointsX[5], pointsY[5]], Nv, k[4], k[3], k[5], X, Y, KN, KS, S, 0, [Dd, Dd], end=False)
create_panel([pointsX[5], pointsY[5]], [pointsX[6], pointsY[6]], Nd, k[5], k[4], k[6], X, Y, KN, KS, S, 1, [Dd, Dv], end=False)
create_panel([pointsX[6], pointsY[6]], [pointsX[7], pointsY[7]], Ng, k[6], k[5], k[7], X, Y, KN, KS, S, 0, [Dd, Dd], end=False)
create_panel([pointsX[7], pointsY[7]], [pointsX[8], pointsY[8]], Nb, k[7], k[6], k[8], X, Y, KN, KS, S, 1, [Dd, Dd], end=False)
create_panel([pointsX[8], pointsY[8]], [pointsX[9], pointsY[9]], Ns, k[8], k[7], k[0], X, Y, KN, KS, S, 0, [Dd, Dd], end=True)
for ind in range(len(X)):
i = ind % (len(X))
ip = (ind + 1) % (len(X))
im = (ind - 1) % (len(X))
dxS = X[i] - X[im]
dyS = Y[i] - Y[im]
dxN = X[i] - X[ip]
dyN = Y[i] - Y[ip]
LN0.append((dxN ** 2 + dyN ** 2) ** 0.5)
LS0.append((dxS ** 2 + dyS ** 2) ** 0.5)
self.X = X
self.Y = Y
self.LN0 = LN0
self.LS0 = LS0
self.S = S
self.KN = KN
self.KS = KS
self.dp = np.ones(len(X)) * dp
def compute_elastic_force(self):
X = np.array(self.X)
Y = np.array(self.Y)
LN0 = np.array(self.LN0)
LS0 = np.array(self.LS0)
KN = np.array(self.KN)
KS = np.array(self.KS)
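        # np.roll(..., -1) pairs each node with its next ("N") neighbour and
        # np.roll(..., 1) with its previous ("S") neighbour, so the node loop is
        # treated as closed when forming the spring extension vectors below.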
LXN = X - np.roll(X, -1, 0)
LXS = X - np.roll(X, 1, 0)
LYN = Y - np.roll(Y, -1, 0)
LYS = Y - np.roll(Y, 1, 0)
LN = (LXN ** 2 + LYN ** 2) ** 0.5
LS = (LXS ** 2 + LYS ** 2) ** 0.5
dLN = LN - LN0
dLS = LS - LS0
FkNX = KN * dLN * LXN / LN
FkNY = KN * dLN * LYN / LN
FkSX = KS * dLS * LXS / LS
FkSY = KS * dLS * LYS / LS
FkN = np.array([FkNX, FkNY])
FkS = np.array([FkSX, FkSY])
return FkN, FkS
def simulate(self, kr, dp, Nt, T):
X = np.array(self.X)
Y = np.array(self.Y)
S = np.array(self.S, bool)
Vx = np.zeros(len(X))
Vy = np.zeros(len(Y))
dt = T / Nt
density = 10
C = 5
for i in range(Nt):
X1 = 0.5 * (X + np.roll(X, 1, 0))
X2 = 0.5 * (X + np.roll(X, -1, 0))
Y1 = 0.5 * (Y + np.roll(Y, 1, 0))
Y2 = 0.5 * (Y + np.roll(Y, -1, 0))
FkN, FkS = self.compute_elastic_force()
FkN[:, 0] = FkN[:, -1] = 0
FkS[:, 0] = FkS[:, -1] = 0
sumFX = np.zeros(len(X))
sumFY = np.zeros(len(X))
sumFX -= FkN[0]
sumFX -= FkS[0]
sumFY -= FkN[1]
sumFY -= FkS[1]
dX = X2 - X1
dY = Y2 - Y1
mod_n = (dX ** 2 + dY ** 2) ** 0.5
n = np.array([-dY / mod_n, dX / mod_n])
Surf = mod_n
# sumFX[S] += n[0][S] * dp * Surf[S]
# sumFY[S] += n[1][S] * dp * Surf[S]
sumFX[S] += n[0][S] * np.array(self.dp)[S] * Surf[S]
sumFY[S] += n[1][S] * np.array(self.dp)[S] * Surf[S]
sumFX -= C * Vx
sumFY -= C * Vy
sumFX[0] = sumFX[-1] = 0
sumFY[0] = sumFY[-1] = 0
Mass = density * Surf
Vx += sumFX / Mass * dt
Vy += sumFY / Mass * dt
X += Vx * dt
Y += Vy * dt
self.X = X
self.Y = Y
if i % 50000 == 0:
self.plotStrain()
Xdisp = list(X.copy())
Xdisp.append(X[0])
Ydisp = list(Y.copy())
Ydisp.append(Y[0])
notS = [not elem for elem in S]
# x = np.linspace(self.Lx, self.Rx, self.dimensions[1])
# y = np.linspace(self.Ly, self.Ry, self.dimensions[0])
# plt.imshow(np.flip(self.p(x, y).T, 0), extent=[self.Lx, self.Rx, self.Ly, self.Ry])
# plt.colorbar(orientation='horizontal')
plt.plot(Xdisp, Ydisp, linewidth=1)
plt.scatter(list(compress(X, S)), list(compress(Y, S)), marker="o", s=5)
plt.scatter(list(compress(X, notS)), list(compress(Y, notS)), marker="o", s=5)
plt.gca().set_aspect('equal')
#plt.grid()
plt.show()
print("Computing iteration", i, "!")
titles = ["X", "Y", "S"]
matrix = np.array([X, Y, S]).T
self.saveCSV("parachute", titles, matrix, True)
def saveSTL(self, thickness):
X = self.X
Y = self.Y
S = self.S
vertices = []
indices = []
for i in range(len(X)):
if S[i]:
vertices.append([X[i], Y[i], 0])
indices.append(i)
for i in range(len(X)):
if S[i]: vertices.append([X[i], Y[i], thickness])
offset = int(0.5 * (len(vertices)))
vertices = np.array(vertices)
faces = []
ind = 0
for i in range(offset - 1):
t11 = i
t12 = i + 1
t13 = i + offset
t21 = i + 1
t22 = i + offset
t23 = i + 1 + offset
if S[indices[i]] == True and S[indices[i] + 1] == True:
faces.append([t11, t12, t13])
faces.append([t21, t22, t23])
faces = np.array(faces)
parachute = mesh.Mesh(np.zeros(faces.shape[0], dtype=mesh.Mesh.dtype))
for i, f in enumerate(faces):
for j in range(3):
parachute.vectors[i][j] = vertices[f[j], :]
parachute.save('parachute.stl')
for i in range(offset - 1):
if S[indices[i]] == True and S[indices[i] + 1] == True:
faces = []
vertices_p = [vertices[i], vertices[i+1], vertices[i + offset], vertices[i + offset + 1]]
vertices_p = np.array(vertices_p)
t11 = 0
t12 = 1
t13 = 2
t21 = 1
t22 = 2
t23 = 3
faces.append([t11, t12, t13])
faces.append([t21, t22, t23])
faces = np.array(faces)
panel = mesh.Mesh(np.zeros(faces.shape[0], dtype=mesh.Mesh.dtype))
for k, f in enumerate(faces):
for j in range(3):
panel.vectors[k][j] = vertices_p[f[j], :]
panel.save('STL_Files/panel' + str(ind) + '.stl')
ind += 1
def importParachute(self, file):
data = np.genfromtxt(file, delimiter=",", skip_header=1).T
self.X = list(data[0])
self.Y = list(data[1])
self.S = list(data[2])
def saveCSV(self, name, Titles, Matrix, withTitles):
"""
This function saves arrays to a .csv file
:param name: (String) - the file name
:param Titles: (String[]) - the titles of the columns
:param Matrix: (float[][]) - the matrix of data
:param withTitles: (boolean) - True if titles should be saved
:return:
"""
if len(Titles) != len(Matrix[0]):
print("Columns don't match with titles!!")
else:
f = open(name + ".csv", 'w+')
if withTitles:
for i in range(len(Titles)):
if i < len(Titles) - 1:
f.write(Titles[i] + ',')
else:
f.write(Titles[i])
f.write('\n')
for i in range(len(Matrix)):
for j in range(len(Matrix[i])):
if j < len(Matrix[i]) - 1:
f.write(str(Matrix[i][j]) + ',')
else:
f.write(str(Matrix[i][j]) + '\n')
f.close()
def importPressure(self, file, dimensions, limits):
data = np.genfromtxt(file, skip_header=23, skip_footer=23)
p = data.reshape((dimensions[1], dimensions[0]))
Lx = limits[0][0]
Rx = limits[0][1]
Ly = limits[1][0]
Ry = limits[1][1]
x = np.linspace(Lx, Rx, dimensions[0])
y = np.linspace(Ly, Ry, dimensions[1])
distance = 0.05
self.p = interpolate.RectBivariateSpline(x, y, p.T)
        x = np.linspace(Lx, Rx, dimensions[0])
"""
Defines objective-function objects
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import itertools as _itertools
import sys as _sys
import time as _time
import pathlib as _pathlib
import numpy as _np
from pygsti import tools as _tools
from pygsti.layouts.distlayout import DistributableCOPALayout as _DistributableCOPALayout
from pygsti.tools import slicetools as _slct, mpitools as _mpit, sharedmemtools as _smt
from pygsti.circuits.circuitlist import CircuitList as _CircuitList
from pygsti.baseobjs.resourceallocation import ResourceAllocation as _ResourceAllocation
from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable
from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
def _objfn(objfn_cls, model, dataset, circuits=None,
regularization=None, penalties=None, op_label_aliases=None,
comm=None, mem_limit=None, method_names=None, array_types=None,
mdc_store=None, verbosity=0, **addl_args):
"""
A convenience function for creating an objective function.
Takes a number of common parameters and automates the creation of
intermediate objects like a :class:`ResourceAllocation` and
:class:`CircuitList`.
Parameters
----------
objfn_cls : class
The :class:`MDCObjectiveFunction`-derived class to create.
model : Model
The model.
dataset : DataSet
The data.
circuits : list, optional
The circuits.
regularization : dict, optional
A dictionary of regularization values.
penalties : dict, optional
A dictionary of penalty values.
op_label_aliases : dict, optional
An alias dictionary.
comm : mpi4py.MPI.Comm, optional
For splitting load among processors.
mem_limit : int, optional
Rough memory limit in bytes.
method_names : tuple
A tuple of the method names of the returned objective function
that will be called (used to estimate memory and setup resource division)
array_types : tuple
A tuple of array types that will be allocated, in addition to those contained in
        the returned objective function itself and within the methods given by `method_names`.
mdc_store : ModelDatasetCircuitsStore, optional
An object that bundles cached quantities along with a given model, dataset, and circuit
list. If given, `model` and `dataset` and `circuits` should be set to None.
verbosity : int or VerbosityPrinter, optional
Amount of information printed to stdout.
Returns
-------
ObjectiveFunction
"""
if mdc_store is None:
if circuits is None:
circuits = list(dataset.keys())
if op_label_aliases:
circuits = _CircuitList(circuits, op_label_aliases)
resource_alloc = _ResourceAllocation(comm, mem_limit)
ofn = objfn_cls.create_from(model, dataset, circuits, regularization, penalties,
resource_alloc, verbosity=verbosity,
method_names=method_names if (method_names is not None) else ('fn',),
array_types=array_types if (array_types is not None) else (),
**addl_args)
else:
#Create directly from store object, which contains everything else
assert(model is None and dataset is None and circuits is None and comm is None and mem_limit is None)
# Note: allow method_names and array_types to be non-None and still work with mdc_store, since
# the way this function is used in chi2fns.py and likelihoodfns.py hard-codes these values.
ofn = objfn_cls(mdc_store, regularization, penalties, verbosity=0, **addl_args)
return ofn
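# Hedged usage sketch of the convenience function above (the "my_" names are
# hypothetical stand-ins for a pyGSTi Model, DataSet and circuit list prepared
# elsewhere):
#
#     objfn = _objfn(Chi2Function, my_model, my_dataset, circuits=my_circuits,
#                    regularization={'min_prob_clip_for_weighting': 1e-4})
#     value = objfn.fn()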
#def __len__(self):
# return len(self.circuits)
class ObjectiveFunctionBuilder(_NicelySerializable):
"""
A factory class for building objective functions.
This is useful because often times the user will want to
specify some but not all of the information needed to create
an actual objective function object. Namely, regularization
and penalty values are known ahead of time, while the model,
dataset, and circuits are supplied later, internally, when
running a protocol.
Parameters
----------
cls_to_build : class
The :class:`MDCObjectiveFunction`-derived objective function class to build.
name : str, optional
A name for the built objective function (can be anything).
description : str, optional
A description for the built objective function (can be anything)
regularization : dict, optional
Regularization values (allowed keys depend on `cls_to_build`).
penalties : dict, optional
Penalty values (allowed keys depend on `cls_to_build`).
"""
@classmethod
def cast(cls, obj):
"""
Cast `obj` to an `ObjectiveFunctionBuilder` instance.
If `obj` is already an `ObjectiveFunctionBuilder` instance, it is simply returned.
Otherwise a new `ObjectiveFunctionBuilder` instance is created from `obj` if possible.
Parameters
----------
obj : None or str or dict or list or tuple or ObjectiveFunctionBuilder
Object to cast.
Returns
-------
ObjectiveFunctionBuilder
"""
if isinstance(obj, cls): return obj
elif obj is None: return cls.create_from()
elif isinstance(obj, str): return cls.create_from(objective=obj)
elif isinstance(obj, dict): return cls.create_from(**obj)
elif isinstance(obj, (list, tuple)): return cls(*obj)
else: raise ValueError("Cannot create an %s object from '%s'" % (cls.__name__, str(type(obj))))
@classmethod
def create_from(cls, objective='logl', freq_weighted_chi2=False):
"""
Creates common :class:`ObjectiveFunctionBuilder`s from a few arguments.
Parameters
----------
objective : {'logl', 'chi2'}, optional
The objective function type: log-likelihood or chi-squared.
freq_weighted_chi2 : bool, optional
Whether to use 1/frequency values as the weights in the `"chi2"` case.
Returns
-------
ObjectiveFunctionBuilder
"""
if objective == "chi2":
if freq_weighted_chi2:
builder = FreqWeightedChi2Function.builder(
name='fwchi2',
description="Freq-weighted sum of Chi^2",
regularization={'min_freq_clip_for_weighting': 1e-4})
else:
builder = Chi2Function.builder(
name='chi2',
description="Sum of Chi^2",
regularization={'min_prob_clip_for_weighting': 1e-4})
elif objective == "logl":
builder = PoissonPicDeltaLogLFunction.builder(
name='dlogl',
description="2*Delta(log(L))",
regularization={'min_prob_clip': 1e-4,
'radius': 1e-4},
penalties={'cptp_penalty_factor': 0,
'spam_penalty_factor': 0})
elif objective == "tvd":
builder = TVDFunction.builder(
name='tvd',
description="Total Variational Distance (TVD)")
else:
raise ValueError("Invalid objective: %s" % objective)
assert(isinstance(builder, cls)), "This function should always return an ObjectiveFunctionBuilder!"
return builder
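    # Illustrative (hedged) uses of the conveniences above; each call returns an
    # ObjectiveFunctionBuilder instance:
    #   ObjectiveFunctionBuilder.cast('chi2')                 # str  -> create_from(objective='chi2')
    #   ObjectiveFunctionBuilder.cast({'objective': 'logl'})  # dict -> create_from(**dict)
    #   ObjectiveFunctionBuilder.create_from('logl')          # log-likelihood builder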
def __init__(self, cls_to_build, name=None, description=None, regularization=None, penalties=None, **kwargs):
self.name = name if (name is not None) else cls_to_build.__name__
self.description = description if (description is not None) else "_objfn" # "Sum of Chi^2" OR "2*Delta(log(L))"
self.cls_to_build = cls_to_build
self.regularization = regularization
self.penalties = penalties
self.additional_args = kwargs
def _to_nice_serialization(self):
state = super()._to_nice_serialization()
state.update({'name': self.name,
'description': self.description,
'class_to_build': self.cls_to_build.__module__ + '.' + self.cls_to_build.__name__,
'regularization': self.regularization,
'penalties': self.penalties,
'additional_arguments': self.additional_args,
})
return state
@classmethod
def _from_nice_serialization(cls, state):
from pygsti.io.metadir import _class_for_name
return cls(_class_for_name(state['class_to_build']), state['name'], state['description'],
                   state['regularization'], state['penalties'], **state['additional_arguments'])
def compute_array_types(self, method_names, forwardsim):
return self.cls_to_build.compute_array_types(method_names, forwardsim)
def build(self, model, dataset, circuits, resource_alloc=None, verbosity=0):
"""
Build an objective function. This is the workhorse method of an :class:`ObjectiveFunctionBuilder`.
Arguments are the additional information needed to construct a
:class:`MDCObjectiveFunction` object, beyond what is stored in
this builder object.
Parameters
----------
model : Model
The model.
    dataset : DataSet
The data set.
circuits : list
The circuits.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for objective
function computations.
verbosity : int, optional
Level of detail to print to stdout.
Returns
-------
MDCObjectiveFunction
"""
return self.cls_to_build.create_from(model=model, dataset=dataset, circuits=circuits,
resource_alloc=resource_alloc, verbosity=verbosity,
regularization=self.regularization, penalties=self.penalties,
name=self.name, description=self.description, **self.additional_args)
def build_from_store(self, mdc_store, verbosity=0):
"""
Build an objective function. This is a workhorse method of an :class:`ObjectiveFunctionBuilder`.
Takes a single "store" argument (apart from `verbosity`) that encapsulates all the remaining
ingredients needed to build a :class:`MDCObjectiveFunction` object (beyond what is stored in
this builder object).
Parameters
----------
mdc_store : ModelDatasetCircuitsStore
The store object, which doubles as a cache for reused information.
verbosity : int, optional
Level of detail to print to stdout.
Returns
-------
MDCObjectiveFunction
"""
return self.cls_to_build(mdc_store, verbosity=verbosity,
regularization=self.regularization, penalties=self.penalties,
name=self.name, description=self.description, **self.additional_args)
class ObjectiveFunction(object):
"""
So far, this is just a base class for organizational purposes
"""
def chi2k_distributed_qty(self, objective_function_value):
"""
Convert a value of this objective function to one that is expected to be chi2_k distributed.
For instance, if the objective function is DeltaLogL then this function would
multiply `objective_function_value` by 2, whereas in the case of a chi-squared
        objective function this function just returns `objective_function_value`.
Parameters
----------
objective_function_value : float
A value of this objective function, i.e. one returned from `self.fn(...)`.
Returns
-------
float
"""
raise ValueError("This objective function does not have chi2_k distributed values!")
class RawObjectiveFunction(ObjectiveFunction):
"""
An objective function that acts on probabilities and counts directly.
Every :class:`RawObjectiveFunction` is assumed to perform a "local" function
element-wise on the vectors of probabilities, counts (usually for a single outcome),
and total-counts (usually for all the outcomes in a group), and sum the results
to arrive at the final objective function's value.
That is, the function must be of the form:
`objective_function = sum_i local_function(probability_i, counts_i, total_counts_i)`.
Each element of this sum (`local_function(probability_i, counts_i, total_counts_i)`)
    is called a *term* of the objective function. A vector containing the square-roots
of the terms is referred to as the *least-squares vector* (since least-squares
optimizers use this vector as their objective function) and is abbreviated "lsvec".
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
"""
def __init__(self, regularization=None, resource_alloc=None, name=None, description=None, verbosity=0):
"""
Create a raw objective function.
A raw objective function acts on "raw" probabilities and counts,
and is usually a statistic comparing the probabilities to count data.
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
"""
self.resource_alloc = _ResourceAllocation.cast(resource_alloc)
self.printer = _VerbosityPrinter.create_printer(verbosity, self.resource_alloc.comm)
self.name = name if (name is not None) else self.__class__.__name__
self.description = description if (description is not None) else "_objfn"
if regularization is None: regularization = {}
self.set_regularization(**regularization)
def set_regularization(self):
"""
Set regularization values.
"""
pass # no regularization parameters
def _intermediates(self, probs, counts, total_counts, freqs):
""" Intermediate values used by multiple functions (similar to a temporary cache) """
        return ()  # no intermediate values
def fn(self, probs, counts, total_counts, freqs):
"""
Evaluate the objective function.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
Returns
-------
float
"""
return _np.sum(self.terms(probs, counts, total_counts, freqs))
def jacobian(self, probs, counts, total_counts, freqs):
"""
Evaluate the derivative of the objective function with respect to the probabilities.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each argument, corresponding to
the derivative with respect to each element of `probs`.
"""
return self.dterms(probs, counts, total_counts, freqs) # same as dterms b/c only i-th term depends on p_i
def hessian(self, probs, counts, total_counts, freqs):
"""
Evaluate the Hessian of the objective function with respect to the probabilities.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each argument, corresponding to
the 2nd derivative with respect to each element of `probs`. Note that this
is not a 2D matrix because all off-diagonal elements of the Hessian are
zero (because only the i-th term depends on the i-th probability).
"""
        return self.hterms(probs, counts, total_counts, freqs)  # same as hterms b/c only i-th term depends on p_i
def terms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the terms of the objective function.
The "terms" are the per-(probability, count, total-count) values
that get summed together to result in the objective function value.
These are the "local" or "per-element" values of the objective function.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
return self.lsvec(probs, counts, total_counts, freqs, intermediates)**2
def lsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the least-squares vector of the objective function.
This is the square-root of the terms-vector returned from :method:`terms`.
This vector is the objective function value used by a least-squares
optimizer when optimizing this objective function. Note that the existence
of this quantity requires that the terms be non-negative. If this is not
the case, an error is raised.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
return _np.sqrt(self.terms(probs, counts, total_counts, freqs, intermediates))
def dterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise derivative (or, the diagonal of a jacobian matrix),
i.e. the resulting values are the derivatives of the `local_function` at
each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
return 2 * self.lsvec(probs, counts, total_counts, freqs, intermediates) \
* self.dlsvec(probs, counts, total_counts, freqs, intermediates)
def dlsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the derivatives of the least-squares vector of this objective function.
Note that because each `lsvec` element only depends on the corresponding probability,
this is just an element-wise derivative (or, the diagonal of a jacobian matrix),
i.e. the resulting values are the derivatives of the `local_function` at
each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
# lsvec = sqrt(terms)
# dlsvec = 0.5/lsvec * dterms
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
lsvec = self.lsvec(probs, counts, total_counts, freqs, intermediates)
pt5_over_lsvec = _np.where(lsvec < 1e-100, 0.0, 0.5 / _np.maximum(lsvec, 1e-100)) # lsvec=0 is *min* w/0 deriv
dterms = self.dterms(probs, counts, total_counts, freqs, intermediates)
return pt5_over_lsvec * dterms
def dlsvec_and_lsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the derivatives of the least-squares vector together with the vector itself.
This is sometimes more computationally efficient than calling :method:`dlsvec` and
:method:`lsvec` separately, as the former call may require computing the latter.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
dlsvec: numpy.ndarray
A 1D array of length equal to that of each array argument.
lsvec: numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
#Similar to above, just return lsvec too
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
lsvec = self.lsvec(probs, counts, total_counts, freqs, intermediates)
dlsvec = self.dlsvec(probs, counts, total_counts, freqs, intermediates)
return dlsvec, lsvec
def hterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the 2nd derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise 2nd derivative, i.e. the resulting values are
the 2nd-derivatives of the `local_function` at each
(probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
# terms = lsvec**2
# dterms/dp = 2*lsvec*dlsvec/dp
# d2terms/dp2 = 2*[ (dlsvec/dp)^2 + lsvec*d2lsvec/dp2 ]
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
return 2 * (self.dlsvec(probs, counts, total_counts, freqs, intermediates)**2
+ self.lsvec(probs, counts, total_counts, freqs, intermediates)
* self.hlsvec(probs, counts, total_counts, freqs, intermediates))
def hlsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the 2nd derivatives of the least-squares vector of this objective function.
Note that because each `lsvec` element only depends on the corresponding probability,
this is just an element-wise 2nd derivative, i.e. the resulting values are
the 2nd-derivatives of `sqrt(local_function)` at each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
# lsvec = sqrt(terms)
# dlsvec/dp = 0.5 * terms^(-0.5) * dterms/dp
# d2lsvec/dp2 = -0.25 * terms^(-1.5) * (dterms/dp)^2 + 0.5 * terms^(-0.5) * d2terms_dp2
# = 0.5 / sqrt(terms) * (d2terms_dp2 - 0.5 * (dterms/dp)^2 / terms)
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
terms = self.terms(probs, counts, total_counts, freqs, intermediates)
dterms = self.dterms(probs, counts, total_counts, freqs, intermediates)
hterms = self.hterms(probs, counts, total_counts, freqs, intermediates)
return 0.5 / _np.sqrt(terms) * (hterms - 0.5 * dterms**2 / terms)
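        # Hedged finite-difference sanity check of the identity above (illustrative
        # only, using a toy per-element terms function t(p) = (p - 0.3)**2 + 0.1):
        #   t, dt, ht = lambda p: (p - 0.3)**2 + 0.1, lambda p: 2 * (p - 0.3), lambda p: 2.0
        #   p, eps = 0.7, 1e-5
        #   analytic = 0.5 / _np.sqrt(t(p)) * (ht(p) - 0.5 * dt(p)**2 / t(p))
        #   numeric = (_np.sqrt(t(p + eps)) - 2 * _np.sqrt(t(p)) + _np.sqrt(t(p - eps))) / eps**2
        #   # analytic and numeric agree to roughly 1e-5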
#Required zero-term methods for omitted probs support in model-based objective functions
def zero_freq_terms(self, total_counts, probs):
"""
Evaluate objective function terms with zero frequency (where count and frequency are zero).
Such terms are treated specially because, for some objective functions,
having zero frequency is a special case and must be handled differently.
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
raise NotImplementedError("Derived classes must implement this!")
def zero_freq_dterms(self, total_counts, probs):
"""
Evaluate the derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
raise NotImplementedError("Derived classes must implement this!")
def zero_freq_hterms(self, total_counts, probs):
"""
Evaluate the 2nd derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
2nd derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
raise NotImplementedError("Derived classes must implement this!")
class ModelDatasetCircuitsStore(object):
"""
Contains all the information that we'd like to persist when performing
(multiple) evaluations of the same circuits using the same model and
    data set. For instance, the evaluation of multiple (different) objective
functions.
This class holds only quantities that do *not* depend on the contained
model's parameters. See :class:`EvaluatedObjectiveFunction` for a class (TODO??)
that holds the values of an objective function at a certain parameter-space
point.
"""
def __init__(self, model, dataset, circuits=None, resource_alloc=None, array_types=(),
precomp_layout=None, verbosity=0):
self.dataset = dataset
self.model = model
#self.opBasis = mdl.basis
self.resource_alloc = _ResourceAllocation.cast(resource_alloc)
# expand = ??? get from model based on fwdsim type?
circuit_list = circuits if (circuits is not None) else list(dataset.keys())
bulk_circuit_list = circuit_list if isinstance(
circuit_list, _CircuitList) else _CircuitList(circuit_list)
self.circuits = bulk_circuit_list
#The model's forward simulator gets to determine how the circuit outcome
# probabilities (and other results) are stored in arrays - this makes sense
# because it understands how to make this layout amenable to fast computation.
if precomp_layout is None:
self.layout = model.sim.create_layout(bulk_circuit_list, dataset, self.resource_alloc,
array_types, verbosity=verbosity) # a CircuitProbabilityArrayLayout
else:
self.layout = precomp_layout
self.array_types = array_types
        if isinstance(self.layout, _DistributableCOPALayout):  # then store global circuit list separately
self.global_circuits = self.circuits
self.circuits = _CircuitList(self.layout.circuits, self.global_circuits.op_label_aliases,
self.global_circuits.circuit_weights, name=None)
else:
self.global_circuits = self.circuits
#self.circuits = bulk_circuit_list[:]
#self.circuit_weights = bulk_circuit_list.circuit_weights
self.ds_circuits = self.circuits.apply_aliases()
# computed by add_count_vectors
self.counts = None
self.total_counts = None
self.freqs = None
# computed by add_omitted_freqs
self.firsts = None
self.indicesOfCircuitsWithOmittedData = None
self.dprobs_omitted_rowsum = None
self.time_dependent = False # indicates whether the data should be treated as time-resolved
#if not self.cache.has_evaltree():
# subcalls = self.get_evaltree_subcalls()
# evt_resource_alloc = _ResourceAllocation(self.raw_objfn.comm, evt_mlim,
# self.raw_objfn.profiler, self.raw_objfn.distribute_method)
# self.cache.add_evaltree(self.mdl, self.dataset, bulk_circuit_list, evt_resource_alloc,
# subcalls, self.raw_objfn.printer - 1)
#self.eval_tree = self.cache.eval_tree
#self.lookup = self.cache.lookup
#self.outcomes_lookup = self.cache.outcomes_lookup
#self.wrt_block_size = self.cache.wrt_block_size
#self.wrt_block_size2 = self.cache.wrt_block_size2
#convenience attributes (could make properties?)
if isinstance(self.layout, _DistributableCOPALayout):
self.global_nelements = self.layout.global_num_elements
self.global_nparams = self.layout.global_num_params
self.global_nparams2 = self.layout.global_num_params2
self.host_nelements = self.layout.host_num_elements
self.host_nparams = self.layout.host_num_params
self.host_nparams2 = self.layout.host_num_params2
self.nelements = _slct.length(self.layout.host_element_slice) # just for *this* proc
self.nparams = _slct.length(self.layout.host_param_slice) \
if self.layout.host_param_slice else self.model.num_params
self.nparams2 = _slct.length(self.layout.host_param2_slice) \
if self.layout.host_param2_slice else self.model.num_params
assert(self.global_nparams is None or self.global_nparams == self.model.num_params)
else:
self.global_nelements = self.host_nelements = self.nelements = len(self.layout)
self.global_nparams = self.host_nparams = self.nparams = self.model.num_params
self.global_nparams2 = self.host_nparams2 = self.nparams2 = self.model.num_params
@property
def opBasis(self):
return self.model.basis
def num_data_params(self):
"""
The number of degrees of freedom in the data used by this objective function.
Returns
-------
int
"""
return self.dataset.degrees_of_freedom(self.ds_circuits,
aggregate_times=not self.time_dependent)
def add_omitted_freqs(self, printer=None, force=False):
"""
        Detect omitted frequencies (assumed to be 0) so we can compute the objective fn correctly
"""
if self.firsts is None or force:
# FUTURE: add any tracked memory? self.resource_alloc.add_tracked_memory(...)
self.firsts = []; self.indicesOfCircuitsWithOmittedData = []
for i, c in enumerate(self.circuits):
indices = _slct.to_array(self.layout.indices_for_index(i))
lklen = _slct.length(self.layout.indices_for_index(i))
if 0 < lklen < self.model.compute_num_outcomes(c):
self.firsts.append(indices[0])
self.indicesOfCircuitsWithOmittedData.append(i)
if len(self.firsts) > 0:
self.firsts = _np.array(self.firsts, 'i')
self.indicesOfCircuitsWithOmittedData = _np.array(self.indicesOfCircuitsWithOmittedData, 'i')
self.dprobs_omitted_rowsum = _np.empty((len(self.firsts), self.nparams), 'd')
#if printer: printer.log("SPARSE DATA: %d of %d rows have sparse data" %
# (len(self.firsts), len(self.circuits)))
else:
self.firsts = None # no omitted probs
def add_count_vectors(self, force=False):
"""
Ensure this store contains count and total-count vectors.
"""
if self.counts is None or self.total_counts is None or force:
#Assume if an item is not None the appropriate amt of memory has already been tracked
if self.counts is None: self.resource_alloc.add_tracked_memory(self.nelements) # 'e'
if self.total_counts is None: self.resource_alloc.add_tracked_memory(self.nelements) # 'e'
if self.freqs is None: self.resource_alloc.add_tracked_memory(self.nelements) # 'e'
# Note: in distributed case self.layout only holds *local* quantities (e.g.
# the .ds_circuits are a subset of all the circuits and .nelements is the local
# number of elements).
counts = _np.empty(self.nelements, 'd')
totals = _np.empty(self.nelements, 'd')
for (i, circuit) in enumerate(self.ds_circuits):
cnts = self.dataset[circuit].counts
totals[self.layout.indices_for_index(i)] = sum(cnts.values()) # dataset[opStr].total
counts[self.layout.indices_for_index(i)] = [cnts.get(x, 0) for x in self.layout.outcomes_for_index(i)]
if self.circuits.circuit_weights is not None:
for i in range(len(self.ds_circuits)): # multiply N's by weights
counts[self.layout.indices_for_index(i)] *= self.circuits.circuit_weights[i]
totals[self.layout.indices_for_index(i)] *= self.circuits.circuit_weights[i]
self.counts = counts
self.total_counts = totals
self.freqs = counts / totals
class EvaluatedModelDatasetCircuitsStore(ModelDatasetCircuitsStore):
"""
Additionally holds quantities at a specific model-parameter-space point.
"""
def __init__(self, mdc_store, verbosity):
super().__init__(mdc_store.model, mdc_store.dataset, mdc_store.global_circuits, mdc_store.resource_alloc,
mdc_store.array_types, mdc_store.layout, verbosity)
# Memory check - see if there's enough memory to hold all the evaluated quantities
#persistent_mem = self.layout.memory_estimate()
#in_gb = 1.0 / 1024.0**3 # in gigabytes
#if self.raw_objfn.mem_limit is not None:
# in_gb = 1.0 / 1024.0**3 # in gigabytes
# cur_mem = _profiler._get_max_mem_usage(self.raw_objfn.comm) # is this what we want??
# if self.raw_objfn.mem_limit - cur_mem < persistent_mem:
# raise MemoryError("Memory limit ({}-{} GB) is < memory required to hold final results "
# "({} GB)".format(self.raw_objfn.mem_limit * in_gb, cur_mem * in_gb,
# persistent_mem * in_gb))
#
# self.gthrMem = int(0.1 * (self.raw_objfn.mem_limit - persistent_mem - cur_mem))
# evt_mlim = self.raw_objfn.mem_limit - persistent_mem - self.gthrMem - cur_mem
# self.raw_objfn.printer.log("Memory limit = %.2fGB" % (self.raw_objfn.mem_limit * in_gb))
# self.raw_objfn.printer.log("Cur, Persist, Gather = %.2f, %.2f, %.2f GB" %
# (cur_mem * in_gb, persistent_mem * in_gb, self.gthrMem * in_gb))
# assert(evt_mlim > 0), 'Not enough memory, exiting..'
#else:
# evt_mlim = None
#Note: don't add any tracked memory to self.resource_alloc, as none is used yet.
self.probs = None
self.dprobs = None
self.jac = None
self.v = None # for time dependence - rename to objfn_terms or objfn_lsvec?
class MDCObjectiveFunction(ObjectiveFunction, EvaluatedModelDatasetCircuitsStore):
"""
An objective function whose probabilities and counts are given by a Model and DataSet, respectively.
Instances of this class glue a model, dataset, and circuit list to a
"raw" objective function, resulting in an objective function that is a
function of model-parameters and contains counts based on a data set.
The model is treated as a function that computes probabilities (as a function of
the model's parameters) for each circuit outcome, and the data set as a function
that similarly computes counts (and total-counts).
Parameters
----------
raw_objfn : RawObjectiveFunction
The raw objective function - specifies how probability and count values
are turned into objective function values.
mdl : Model
The model - specifies how parameter values are turned into probabilities
for each circuit outcome.
dataset : DataSet
The data set - specifies how counts and total_counts are obtained for each
circuit outcome.
circuits : list or CircuitList
The circuit list - specifies what probabilities and counts this objective
function compares. If `None`, then the keys of `dataset` are used.
enable_hessian : bool, optional
Whether hessian calculations are allowed. If `True` then more resources are
needed. If `False`, calls to hessian-requiring function will result in an
error.
Attributes
----------
name : str
The name of this objective function.
description : str
A description of this objective function.
"""
@classmethod
def create_from(cls, raw_objfn, model, dataset, circuits, resource_alloc=None, verbosity=0, array_types=()):
mdc_store = ModelDatasetCircuitsStore(model, dataset, circuits, resource_alloc, array_types)
return cls(raw_objfn, mdc_store, verbosity)
@classmethod
def _array_types_for_method(cls, method_name, fsim):
if method_name == 'fn': return cls._array_types_for_method('terms', fsim)
if method_name == 'jacobian': return cls._array_types_for_method('dterms', fsim)
if method_name == 'terms': return cls._array_types_for_method('lsvec', fsim) + ('e',) # extra 'E' for **2
if method_name == 'dterms': return cls._array_types_for_method('dlsvec', fsim) + ('ep',)
if method_name == 'percircuit': return cls._array_types_for_method('terms', fsim) + ('c',)
if method_name == 'dpercircuit': return cls._array_types_for_method('dterms', fsim) + ('cp',)
return ()
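    # The single-letter codes above mirror the memory-tracking annotations later in
    # this class: 'e' ~ a per-element axis, 'p' ~ a per-parameter axis and
    # 'c' ~ a per-circuit axis (see the temporarily_track_memory calls in
    # terms/dterms/percircuit/dpercircuit below).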
def __init__(self, raw_objfn, mdc_store, verbosity=0):
"""
Create a new MDCObjectiveFunction.
mdc_store is thought to be a normal MDC store, but could also be an evaluated one,
in which case should we take enable_hessian from it?
"""
EvaluatedModelDatasetCircuitsStore.__init__(self, mdc_store, verbosity)
self.raw_objfn = raw_objfn
@property
def name(self):
"""
Name of this objective function.
"""
return self.raw_objfn.name
@property
def description(self):
"""
A description of this objective function.
"""
return self.raw_objfn.description
def chi2k_distributed_qty(self, objective_function_value):
"""
Convert a value of this objective function to one that is expected to be chi2_k distributed.
For instance, if the objective function is DeltaLogL then this function would
multiply `objective_function_value` by 2, whereas in the case of a chi-squared
        objective function this function just returns `objective_function_value`.
Parameters
----------
objective_function_value : float
A value of this objective function, i.e. one returned from `self.fn(...)`.
Returns
-------
float
"""
return self.raw_objfn.chi2k_distributed_qty(objective_function_value)
def lsvec(self, paramvec=None, oob_check=False):
"""
Compute the least-squares vector of the objective function.
This is the square-root of the terms-vector returned from :method:`terms`.
This vector is the objective function value used by a least-squares
optimizer when optimizing this objective function. Note that the existence
of this quantity requires that the terms be non-negative. If this is not
the case, an error is raised.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
oob_check : bool, optional
Whether the objective function should raise an error if it is being
evaluated in an "out of bounds" region.
Returns
-------
numpy.ndarray
An array of shape `(nElements,)` where `nElements` is the number
of circuit outcomes.
"""
raise NotImplementedError("Derived classes should implement this!")
def dlsvec(self, paramvec=None):
"""
The derivative (jacobian) of the least-squares vector.
Derivatives are taken with respect to model parameters.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nElements,nParams)` where `nElements` is the number
of circuit outcomes and `nParams` is the number of model parameters.
"""
raise NotImplementedError("Derived classes should implement this!")
def terms(self, paramvec=None):
"""
Compute the terms of the objective function.
The "terms" are the per-circuit-outcome values that get summed together
to result in the objective function value.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nElements,)` where `nElements` is the number
of circuit outcomes.
"""
with self.resource_alloc.temporarily_track_memory(self.nelements): # 'e'
return self.lsvec(paramvec)**2
def dterms(self, paramvec=None):
"""
Compute the jacobian of the terms of the objective function.
The "terms" are the per-circuit-outcome values that get summed together
to result in the objective function value. Differentiation is with
respect to model parameters.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nElements,nParams)` where `nElements` is the number
of circuit outcomes and `nParams` is the number of model parameters.
"""
with self.resource_alloc.temporarily_track_memory(self.nelements * self.nparams): # 'ep'
lsvec = self.lsvec(paramvec) # least-squares objective fn: v is a vector s.t. obj_fn = ||v||^2 (L2 norm)
dlsvec = self.dlsvec(paramvec) # jacobian of dim N x M where N = len(v) and M = len(pv)
assert(dlsvec.shape == (len(lsvec), self.nparams)), "dlsvec returned a Jacobian with the wrong shape!"
return 2.0 * lsvec[:, None] * dlsvec # terms = lsvec**2, so dterms = 2*lsvec*dlsvec
def percircuit(self, paramvec=None):
"""
Compute the per-circuit contributions to this objective function.
These values collect (sum) together the contributions of
the outcomes of a single circuit.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nCircuits,)` where `nCircuits` is the number
of circuits (specified when this objective function was constructed).
"""
num_circuits = len(self.circuits)
with self.resource_alloc.temporarily_track_memory(num_circuits): # 'c'
terms = self.terms(paramvec)
#Aggregate over outcomes:
# obj_per_el[iElement] contains contributions per element - now aggregate over outcomes
# percircuit[iCircuit] will contain contributions for each original circuit (aggregated over outcomes)
percircuit = _np.empty(num_circuits, 'd')
for i in range(num_circuits):
percircuit[i] = _np.sum(terms[self.layout.indices_for_index(i)], axis=0)
return percircuit
def dpercircuit(self, paramvec=None):
"""
Compute the jacobian of the per-circuit contributions of this objective function.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nCircuits, nParams)` where `nCircuits` is the number
of circuits and `nParams` is the number of model parameters (the circuits
and model were specified when this objective function was constructed).
"""
num_circuits = len(self.circuits)
with self.resource_alloc.temporarily_track_memory(num_circuits * self.nparams): # 'cp'
dterms = self.dterms(paramvec)
#Aggregate over outcomes:
# obj_per_el[iElement] contains contributions per element - now aggregate over outcomes
# percircuit[iCircuit] will contain contributions for each original circuit (aggregated over outcomes)
dpercircuit = _np.empty((num_circuits, self.nparams), 'd')
for i in range(num_circuits):
dpercircuit[i] = _np.sum(dterms[self.layout.indices_for_index(i)], axis=0)
return dpercircuit
def lsvec_percircuit(self, paramvec=None):
"""
Compute the square root of per-circuit contributions to this objective function.
These values are primarily useful for interfacing with a least-squares
optimizer.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nCircuits,)` where `nCircuits` is the number
of circuits (specified when this objective function was constructed).
"""
return _np.sqrt(self.percircuit(paramvec))
def dlsvec_percircuit(self, paramvec=None):
"""
Compute the jacobian of the sqrt(per-circuit) values given by :method:`lsvec_percircuit`.
This jacobian is primarily useful for interfacing with a least-squares optimizer.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nCircuits, nParams)` where `nCircuits` is the number
of circuits and `nParams` is the number of model parameters (the circuits
and model were specified when this objective function was constructed).
"""
denom = self.lsvec_percircuit(paramvec)
denom = _np.clip(denom, 1e-10, None)
# Note: don't need paramvec here since above call sets it
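# Chain rule (illustrative note): d/dtheta sqrt(v_c) = (1/(2*sqrt(v_c))) * dv_c/dtheta,
# which is the 0.5/denom factor below; the clip above guards against division by zero
# when a per-circuit contribution is (numerically) zero.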
return (0.5 / denom)[:, None] * self.dpercircuit()
def fn_local(self, paramvec=None):
"""
Evaluate the *local* value of this objective function.
When the objective function's layout is distributed, each processor only holds a
portion of the objective function terms, and this function returns only the
sum of these local terms. See :method:`fn` for the global objective function value.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
float
"""
return _np.sum(self.terms(paramvec))
def fn(self, paramvec=None):
"""
Evaluate the value of this objective function.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
float
"""
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
local = _np.array([self.fn_local(paramvec)], 'd')
unit_ralloc = self.layout.resource_alloc('atom-processing') # proc group that computes same els
self.resource_alloc.allreduce_sum(result, local, unit_ralloc)
global_fnval = result[0]
_smt.cleanup_shared_ndarray(result_shm)
return global_fnval
def jacobian(self, paramvec=None):
"""
Compute the Jacobian of this objective function.
Derivatives are taken with respect to model parameters.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nParams,)` where `nParams` is the number
of model parameters.
"""
return _np.sum(self.dterms(paramvec), axis=0)
def hessian(self, paramvec=None):
"""
Compute the Hessian of this objective function.
Derivatives are taken with respect to model parameters.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nParams, nParams)` where `nParams` is the number
of model parameters.
"""
raise NotImplementedError("Derived classes should implement this!")
def approximate_hessian(self, paramvec=None):
"""
Compute an approximate Hessian of this objective function.
This is typically much less expensive than :method:`hessian` and
does not require that `enable_hessian=True` was set upon initialization.
It computes an approximation to the Hessian that only utilizes the
information in the Jacobian. Derivatives are taken with respect to model
parameters.
Parameters
----------
paramvec : numpy.ndarray, optional
The vector of (model) parameters to evaluate the objective function at.
If `None`, then the model's current parameter vector is used (held internally).
Returns
-------
numpy.ndarray
An array of shape `(nParams, nParams)` where `nParams` is the number
of model parameters.
"""
raise NotImplementedError("Derived classes should implement this!")
#MOVED - but these versions have updated names
#def _persistent_memory_estimate(self, num_elements=None):
# # Estimate & check persistent memory (from allocs within objective function)
# """
# Compute the amount of memory needed to perform evaluations of this objective function.
#
# This number includes both intermediate and final results, and assumes
# that the types of evauations given by :method:`_evaltree_subcalls`
# are required.
#
# Parameters
# ----------
# num_elements : int, optional
# The number of elements (circuit outcomes) that will be computed.
#
# Returns
# -------
# int
# """
# if num_elements is None:
# nout = int(round(_np.sqrt(self.mdl.dim))) # estimate of avg number of outcomes per string
# nc = len(self.circuits)
# ne = nc * nout # estimate of the number of elements (e.g. probabilities, # LS terms, etc) to compute
# else:
# ne = num_elements
# np = self.mdl.num_params
#
# # "persistent" memory is that used to store the final results.
# obj_fn_mem = FLOATSIZE * ne
# jac_mem = FLOATSIZE * ne * np
# hess_mem = FLOATSIZE * ne * np**2
# persistent_mem = 4 * obj_fn_mem + jac_mem # 4 different objective-function sized arrays, 1 jacobian array?
# if any([nm == "bulk_fill_hprobs" for nm in self._evaltree_subcalls()]):
# persistent_mem += hess_mem # we need room for the hessian too!
# # TODO: what about "bulk_hprobs_by_block"?
#
# return persistent_mem
#
#def _evaltree_subcalls(self):
# """
# The types of calls that will be made to an evaluation tree.
#
# This information is used for memory estimation purposes.
#
# Returns
# -------
# list
# """
# calls = ["bulk_fill_probs", "bulk_fill_dprobs"]
# if self.enable_hessian: calls.append("bulk_fill_hprobs")
# return calls
#
#def num_data_params(self):
# """
# The number of degrees of freedom in the data used by this objective function.
#
# Returns
# -------
# int
# """
# return self.dataset.degrees_of_freedom(self.ds_circuits,
# aggregate_times=not self.time_dependent)
#def _precompute_omitted_freqs(self):
# """
# Detect omitted frequences (assumed to be 0) so we can compute objective fn correctly
# """
# self.firsts = []; self.indicesOfCircuitsWithOmittedData = []
# for i, c in enumerate(self.circuits):
# lklen = _slct.length(self.lookup[i])
# if 0 < lklen < self.mdl.compute_num_outcomes(c):
# self.firsts.append(_slct.to_array(self.lookup[i])[0])
# self.indicesOfCircuitsWithOmittedData.append(i)
# if len(self.firsts) > 0:
# self.firsts = _np.array(self.firsts, 'i')
# self.indicesOfCircuitsWithOmittedData = _np.array(self.indicesOfCircuitsWithOmittedData, 'i')
# self.dprobs_omitted_rowsum = _np.empty((len(self.firsts), self.nparams), 'd')
# self.raw_objfn.printer.log("SPARSE DATA: %d of %d rows have sparse data" %
# (len(self.firsts), len(self.circuits)))
# else:
# self.firsts = None # no omitted probs
#
#def _compute_count_vectors(self):
# """
# Ensure self.cache contains count and total-count vectors.
# """
# if not self.cache.has_count_vectors():
# self.cache.add_count_vectors(self.dataset, self.ds_circuits, self.circuit_weights)
# return self.cache.counts, self.cache.total_counts
def _construct_hessian(self, counts, total_counts, prob_clip_interval):
"""
Framework for constructing a hessian matrix block by block using a derived
class's `_hessian_from_block` method. This function expects that this
objective function has been set up for hessian computation, and its evaltree
may be split in order to facilitate this.
"""
#Note - we could in the future use comm to distribute over
# subtrees here. We currently don't because we parallelize
# over columns and it seems that in almost all cases of
# interest there will be more hessian columns than processors,
# so adding the additional ability to parallelize over
# subtrees would just add unnecessary complication.
#Note2: this function follows a similar pattern to DistributableForwardSimulator's
# _bulk_fill_hprobs method, which also uses the layout's param_dimension_blk_sizes
# to divide up the computation of the Hessian of each circuit outcome probability
# individually.
blk_size1, blk_size2 = self.layout.param_dimension_blk_sizes
atom_resource_alloc = self.layout.resource_alloc('atom-processing')
#param_resource_alloc = self.layout.resource_alloc('param-processing')
param2_resource_alloc = self.layout.resource_alloc('param2-processing')
layout = self.layout
global_param_slice = layout.global_param_slice
global_param2_slice = layout.global_param2_slice
my_nparams1 = _slct.length(layout.host_param_slice) # the number of params this processor is supposed to
my_nparams2 = _slct.length(layout.host_param2_slice) # compute (a subset of those its host computes)
row_parts = int(round(my_nparams1 / blk_size1)) if (blk_size1 is not None) else 1
col_parts = int(round(my_nparams2 / blk_size2)) if (blk_size2 is not None) else 1
row_parts = max(row_parts, 1) # can't have 0 parts!
col_parts = max(col_parts, 1)
blocks1 = _mpit.slice_up_range(my_nparams1, row_parts)
blocks2 = _mpit.slice_up_range(my_nparams2, col_parts)
blocks1 = [_slct.shift(s, global_param_slice.start) for s in blocks1]
blocks2 = [_slct.shift(s, global_param2_slice.start) for s in blocks2]
slicetup_list = list(_itertools.product(blocks1, blocks2)) # *global* parameter indices
#TODO: see if we can reimplement this 2x speedup - with layout-assigned portions of H the below code won't work
##cull out lower triangle blocks, which have no overlap with
## the upper triangle of the hessian
#slicetup_list = [(slc1, slc2) for slc1, slc2 in slicetup_list_all
# if slc1.start <= slc2.stop] # these are the local "blocks" for this proc
#UPDATE: use shared memory, so allocate within loop b/c need different shared memory chunks
# when different processors on the same node are given different atoms.
# Allocate memory (alloc max required & take views)
# max_nelements = max([self.layout.atoms[i].num_elements for i in my_atom_indices])
# probs_mem = _np.empty(max_nelements, 'd')
rank = self.resource_alloc.comm_rank
sub_rank = atom_resource_alloc.comm_rank
with self.resource_alloc.temporarily_track_memory(my_nparams1 * my_nparams2): # (atom_hessian)
# Each atom-processor (atom_resource_alloc) contains processors assigned to *disjoint*
# sections of the Hessian, so these processors can all act simultaneously to fill out a
# full-size `atom_hessian`. Then we'll need to add together the contributions from
# different atom processors at the end.
atom_hessian = _np.zeros((my_nparams1, my_nparams2), 'd')
tm = _time.time()
#Loop over atoms
for iAtom, atom in enumerate(layout.atoms): # iterates over *local* atoms
atom_nelements = atom.num_elements
if self.raw_objfn.printer.verbosity > 3 or (self.raw_objfn.printer.verbosity == 3 and sub_rank == 0):
print("rank %d: %gs: beginning atom %d/%d, atom-size (#circuits) = %d"
% (rank, _time.time() - tm, iAtom + 1, len(layout.atoms), atom_nelements))
_sys.stdout.flush()
# Create views into pre-allocated memory
probs = _np.empty(atom_nelements, 'd') # Note: this doesn't need to be shared as we never gather it
# Take portions of count arrays for this subtree
atom_counts = counts[atom.element_slice]
atom_total_counts = total_counts[atom.element_slice]
freqs = atom_counts / atom_total_counts
assert(len(atom_counts) == len(probs))
#compute probs separately
self.model.sim._bulk_fill_probs_atom(probs, atom, atom_resource_alloc) # need to reach into internals!
if prob_clip_interval is not None:
_np.clip(probs, prob_clip_interval[0], prob_clip_interval[1], out=probs)
k, kmax = 0, len(slicetup_list)
blk_rank = param2_resource_alloc.comm_rank
for (slice1, slice2, hprobs, dprobs12) in self.model.sim._iter_atom_hprobs_by_rectangle(
atom, slicetup_list, True, param2_resource_alloc):
local_slice1 = _slct.shift(slice1, -global_param_slice.start) # indices into atom_hessian
local_slice2 = _slct.shift(slice2, -global_param2_slice.start) # indices into atom_hessian
if self.raw_objfn.printer.verbosity > 3 or \
(self.raw_objfn.printer.verbosity == 3 and blk_rank == 0):
print("rank %d: %gs: block %d/%d, atom %d/%d, atom-size (#circuits) = %d"
% (self.resource_alloc.comm_rank, _time.time() - tm, k + 1, kmax, iAtom + 1,
len(layout.atoms), atom.num_elements))
_sys.stdout.flush(); k += 1
hessian_blk = self._hessian_from_block(hprobs, dprobs12, probs, atom_counts,
atom_total_counts, freqs, param2_resource_alloc)
#NOTE: _hessian_from_block MAY modify hprobs and dprobs12
#NOTE2: we don't account for memory within _hessian_from_block - maybe we should?
atom_hessian[local_slice1, local_slice2] += hessian_blk
#This shouldn't be necessary:
##copy upper triangle to lower triangle (we only compute upper)
#for i in range(final_hessian.shape[0]):
# for j in range(i + 1, final_hessian.shape[1]):
# final_hessian[j, i] = final_hessian[i, j]
return atom_hessian # (my_nparams1, my_nparams2)
def _hessian_from_block(self, hprobs, dprobs12, probs, counts, total_counts, freqs, resource_alloc):
raise NotImplementedError("Derived classes should implement this!")
def _gather_hessian(self, local_hessian):
nparams = self.model.num_params
interatom_ralloc = self.layout.resource_alloc('param2-interatom')
param2_ralloc = self.layout.resource_alloc('param2-processing')
my_nparams1, my_nparams2 = local_hessian.shape
global_param_slice = self.layout.global_param_slice
global_param2_slice = self.layout.global_param2_slice
# also could use self.resource_alloc.temporarily_track_memory(self.global_nelements**2): # 'PP'
with self.resource_alloc.temporarily_track_memory(nparams * nparams): # 'PP' (final_hessian)
final_hessian_blk, final_hessian_blk_shm = _smt.create_shared_ndarray(
interatom_ralloc, (my_nparams1, my_nparams2), 'd')
final_hessian, final_hessian_shm = _smt.create_shared_ndarray(
self.resource_alloc, (nparams, nparams), 'd') \
if self.resource_alloc.host_index == 0 else None
#We've computed 'local_hessian': the portion of the total hessian assigned to
# this processor's atom-proc and param-procs (param slices). Now, we sum all such atom_hessian pieces
# corresponding to the same param slices (but different atoms). This is the "param2-interatom" comm.
#Note: really we just need a reduce_sum here - getting the sum on the root procs is sufficient
interatom_ralloc.allreduce_sum(final_hessian_blk, local_hessian, unit_ralloc=param2_ralloc)
if self.resource_alloc.comm is not None:
self.resource_alloc.comm.barrier() # make sure reduce call above finishes (needed?)
#Finally, we need to gather the different pieces on each root param2-interatom proc into the final hessian
self.resource_alloc.gather(final_hessian, final_hessian_blk, (global_param_slice, global_param2_slice),
unit_ralloc=interatom_ralloc)
if self.resource_alloc.comm_rank == 0:
final_hessian_cpy = final_hessian.copy() # so we don't return shared mem...
else:
final_hessian_cpy = None
if self.resource_alloc.comm is not None:
self.resource_alloc.host_comm_barrier() # make sure we don't deallocate too early
_smt.cleanup_shared_ndarray(final_hessian_blk_shm)
_smt.cleanup_shared_ndarray(final_hessian_shm)
return final_hessian_cpy # (N,N)
#NOTE on chi^2 expressions:
#in general case: chi^2 = sum (p_i-f_i)^2/p_i (for i summed over outcomes)
#in 2-outcome case: chi^2 = (p+ - f+)^2/p+ + (p- - f-)^2/p-
# = (p - f)^2/p + (1-p - (1-f))^2/(1-p)
# = (p - f)^2 * (1/p + 1/(1-p))
# = (p - f)^2 * ( ((1-p) + p)/(p*(1-p)) )
# = 1/(p*(1-p)) * (p - f)^2
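#
#Illustrative arithmetic (not used by the code): for a single 2-outcome circuit with
# N = 100, p = 0.6 and f = 0.5, the expression above gives
#   chi^2 = N*(p-f)^2 / (p*(1-p)) = 100 * 0.01 / 0.24 ~= 4.17,
# which matches summing N*(p-f)^2/p over both outcomes: 100*0.01/0.6 + 100*0.01/0.4 = 1.67 + 2.5 = 4.17.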
class RawChi2Function(RawObjectiveFunction):
"""
The function `N(p-f)^2 / p`
Note that this equals `Nf (1-x)^2 / x` where `x := p/f`.
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
"""
def __init__(self, regularization=None, resource_alloc=None, name="chi2", description="Sum of Chi^2", verbosity=0):
super().__init__(regularization, resource_alloc, name, description, verbosity)
def chi2k_distributed_qty(self, objective_function_value):
"""
Convert a value of this objective function to one that is expected to be chi2_k distributed.
Parameters
----------
objective_function_value : float
A value of this objective function, i.e. one returned from `self.fn(...)`.
Returns
-------
float
"""
return objective_function_value
def set_regularization(self, min_prob_clip_for_weighting=1e-4):
"""
Set regularization values.
Parameters
----------
min_prob_clip_for_weighting : float, optional
Cutoff for probability `prob` in `1 / prob` weighting factor (the maximum
of `prob` and `min_prob_clip_for_weighting` is used in the denominator).
Returns
-------
None
"""
self.min_prob_clip_for_weighting = min_prob_clip_for_weighting
def lsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the least-squares vector of the objective function.
This is the square-root of the terms-vector returned from :method:`terms`.
This vector is the objective function value used by a least-squares
optimizer when optimizing this objective function. Note that the existence
of this quantity requires that the terms be non-negative. If this is not
the case, an error is raised.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
return (probs - freqs) * self._weights(probs, freqs, total_counts) # Note: ok if this is negative
def dlsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the derivatives of the least-squares vector of this objective function.
Note that because each `lsvec` element only depends on the corresponding probability,
this is just an element-wise derivative (or, the diagonal of a jacobian matrix),
i.e. the resulting values are the derivatives of the `local_function` at
each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
weights = self._weights(probs, freqs, total_counts)
return weights + (probs - freqs) * self._dweights(probs, freqs, weights)
def hlsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the 2nd derivatives of the least-squares vector of this objective function.
Note that because each `lsvec` element only depends on the corresponding probability,
this is just an element-wise 2nd derivative, i.e. the resulting values are
the 2nd-derivatives of `sqrt(local_function)` at each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
# lsvec = (p-f)*sqrt(N/cp) = (p-f)*w
# dlsvec/dp = w + (p-f)*dw/dp
# d2lsvec/dp2 = dw/dp + (p-f)*d2w/dp2 + dw/dp = 2*dw/dp + (p-f)*d2w/dp2
weights = self._weights(probs, freqs, total_counts)
return 2 * self._dweights(probs, freqs, weights) + (probs - freqs) * self._hweights(probs, freqs, weights)
def hterms_alt(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Alternate computation of the 2nd derivatives of the terms of this objective function.
This should give exactly the same results as :method:`hterms`, but may be a little faster.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
# v = N * (p-f)**2 / p => dv/dp = 2N * (p-f)/p - N * (p-f)**2 / p**2 = 2N * t - N * t**2
# => d2v/dp2 = 2N*dt - 2N*t*dt = 2N(1-t)*dt
cprobs = _np.clip(probs, self.min_prob_clip_for_weighting, None)
iclip = (cprobs == self.min_prob_clip_for_weighting)
t = ((probs - freqs) / cprobs) # should think of as (p-f)/p
dtdp = (1.0 - t) / cprobs # 1/p - (p-f)/p**2 => 1/cp - (p-f)/cp**2 = (1-t)/cp
d2v_dp2 = 2 * total_counts * (1.0 - t) * dtdp
d2v_dp2[iclip] = 2 * total_counts[iclip] / self.min_prob_clip_for_weighting
# with cp constant v = N*(p-f)**2/cp => dv/dp = 2N*(p-f)/cp => d2v/dp2 = 2N/cp
return d2v_dp2
#Required zero-term methods for omitted probs support in model-based objective functions
def zero_freq_terms(self, total_counts, probs):
"""
Evaluate objective function terms with zero frequency (where count and frequency are zero).
Such terms are treated specially because, for some objective functions,
having zero frequency is a special case and must be handled differently.
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
clipped_probs = _np.clip(probs, self.min_prob_clip_for_weighting, None)
return total_counts * probs**2 / clipped_probs
def zero_freq_dterms(self, total_counts, probs):
"""
Evaluate the derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
clipped_probs = _np.clip(probs, self.min_prob_clip_for_weighting, None)
return _np.where(probs == clipped_probs, total_counts, 2 * total_counts * probs / clipped_probs)
def zero_freq_hterms(self, total_counts, probs):
"""
Evaluate the 2nd derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
2nd derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
clipped_probs = _np.clip(probs, self.min_prob_clip_for_weighting, None)
return _np.where(probs == clipped_probs, 0.0, 2 * total_counts / clipped_probs)
#Support functions
def _weights(self, p, f, total_counts):
"""
Get the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
total_counts : numpy.ndarray
The total counts.
Returns
-------
numpy.ndarray
"""
cp = _np.clip(p, self.min_prob_clip_for_weighting, None)
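# Illustrative note: with w = sqrt(N/cp), lsvec = (p-f)*w and so
# terms = lsvec**2 = N*(p-f)**2 / cp, i.e. the chi^2 contribution with a clipped denominator.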
return _np.sqrt(total_counts / cp) # nSpamLabels x nCircuits array (K x M)
def _dweights(self, p, f, wts): # derivative of weights w.r.t. p
"""
Get the derivative of the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
wts : numpy.ndarray
The weights, as computed by :method:`_weights`.
Returns
-------
numpy.ndarray
"""
cp = _np.clip(p, self.min_prob_clip_for_weighting, None)
dw = -0.5 * wts / cp # nSpamLabels x nCircuits array (K x M)
dw[p < self.min_prob_clip_for_weighting] = 0.0
return dw
def _hweights(self, p, f, wts): # 2nd derivative of weights w.r.t. p
# wts = sqrt(N/cp), dwts = (-1/2) sqrt(N) *cp^(-3/2), hwts = (3/4) sqrt(N) cp^(-5/2)
"""
Get the 2nd derivative of the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
wts : numpy.ndarray
The weights, as computed by :method:`_weights`.
Returns
-------
numpy.ndarray
"""
cp = _np.clip(p, self.min_prob_clip_for_weighting, None)
hw = 0.75 * wts / cp**2 # nSpamLabels x nCircuits array (K x M)
hw[p < self.min_prob_clip_for_weighting] = 0.0
return hw
class RawChiAlphaFunction(RawObjectiveFunction):
"""
The function `N[x + 1/(alpha * x^alpha) - (1 + 1/alpha)]` where `x := p/f`.
This function interpolates between the log-likelihood function (alpha=>0)
and the chi2 function (alpha=1).
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
alpha : float, optional
The alpha parameter, which lies in the interval (0,1].
"""
def __init__(self, regularization=None, resource_alloc=None, name="chialpha", description="Sum of ChiAlpha",
verbosity=0, alpha=1):
super().__init__(regularization, resource_alloc, name, description, verbosity)
self.alpha = alpha
def chi2k_distributed_qty(self, objective_function_value):
"""
Convert a value of this objective function to one that is expected to be chi2_k distributed.
Parameters
----------
objective_function_value : float
A value of this objective function, i.e. one returned from `self.fn(...)`.
Returns
-------
float
"""
return objective_function_value
def set_regularization(self, pfratio_stitchpt=0.01, pfratio_derivpt=0.01, radius=None, fmin=None):
"""
Set regularization values.
Parameters
----------
pfratio_stitchpt : float, optional
The x-value (x = probability/frequency ratio) below which the function is
replaced with its second-order Taylor expansion.
pfratio_derivpt : float, optional
The x-value at which the Taylor expansion derivatives are evaluated. If
this is the same as `pfratio_stitchpt` then the function is smooth to 2nd
order at this point. However, choosing a larger value of `pfratio_derivpt`
will make the stitched part of the function less steep, which is sometimes
more helpful to an optimizer than having the stitch-point be smooth.
radius : float, optional
If `radius` is not None then a "harsh" method of regularizing the zero-frequency
terms (where the local function = `N*p`) is used. Specifically, for `p < radius`
we splice in the cubic polynomial, `-(1/3)*p^3/r^2 + p^2/r + (1/3)*r` (where `r == radius`).
This has the nice properties that 1) it matches the value, first-derivative,
and second derivative of `N*p` at `p=r` and 2) it, like `N*p` has a minimum at `p=0`
with value `0`. The `radius` dictates the amount of curvature or sharpness of this
stitching function, with smaller values making the function more pointed. We recommend
making this value smaller than the smallest expected frequencies, so as not to alter
the objective function in regions near the ML point. If `radius` is None, then
`fmin` is used to handle the zero-frequency terms.
fmin : float, optional
The minimum expected frequency. When `radius` is None a "relaxed" regularization of
the zero-frequency terms is used that stitches the quadratic `N * C * p^2` to `N*p` when
`p < 1/C`, with `C = 1/(2 fmin) * (1 + alpha) / pfratio_derivpt^(2 + alpha)`. This
matches the value of the stitch and `N*p` at `p=1/C` but *not* the derivative; it
makes up for this by being less steep - the value of `C` is chosen so that the derivative
(steepness) of the zero-frequency terms at the stitch point is similar to the regular
nonzero-frequency terms at their stitch points.
Returns
-------
None
"""
self.x0 = pfratio_stitchpt
self.x1 = pfratio_derivpt
if radius is None:
#Infer the curvature of the regularized zero-f-term functions from
# the largest curvature we use at the stitch-points of nonzero-f terms.
assert(fmin is not None), "Must specify 'fmin' when radius is None (should be the smallest allowed frequency)."
self.radius = None
self.zero_freq_terms = self._zero_freq_terms_relaxed
self.zero_freq_dterms = self._zero_freq_dterms_relaxed
self.zero_freq_hterms = None # no hessian support
self.fmin = fmin # = max(1e-7, _np.min(freqs_nozeros)) # lowest non-zero frequency
else:
#Use radius to specify the curvature/"roundness" of f == 0 terms,
# though this uses a more aggressive p^3 function to penalize negative probs.
self.radius = radius
self.zero_freq_terms = self._zero_freq_terms_harsh
self.zero_freq_dterms = self._zero_freq_dterms_harsh
self.zero_freq_hterms = None # no hessian support
self.fmin = None
def _intermediates(self, probs, counts, total_counts, freqs):
""" Intermediate values used by both terms(...) and dterms(...) """
freqs_nozeros = _np.where(counts == 0, 1.0, freqs)
x = probs / freqs_nozeros
itaylor = x < self.x0 # indices where we patch objective function with taylor series
c0 = 1. - 1. / (self.x1**(1 + self.alpha))
c1 = 0.5 * (1. + self.alpha) / self.x1**(2 + self.alpha)
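# Illustrative derivation of the Taylor-patch coefficients (per unit count, w.r.t. x):
#   f(x) = x + 1/(alpha*x**alpha) - (1 + 1/alpha)
#   f'(x) = 1 - x**-(1+alpha)          => c0 = f'(x1)
#   f''(x) = (1+alpha)*x**-(2+alpha)   => c1 = 0.5*f''(x1)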
return x, itaylor, c0, c1
def terms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the terms of the objective function.
The "terms" are the per-(probability, count, total-count) values
that get summed together to result in the objective function value.
These are the "local" or "per-element" values of the objective function.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
x0 = self.x0
x, itaylor, c0, c1 = intermediates
xt = x.copy(); xt[itaylor] = x0 # so we evaluate expression below at x0 (first taylor term) at itaylor indices
terms = counts * (xt + 1.0 / (self.alpha * xt**self.alpha) - (1.0 + 1.0 / self.alpha))
terms = _np.where(itaylor, terms + c0 * counts * (x - x0) + c1 * counts * (x - x0)**2, terms)
terms = _np.where(counts == 0, self.zero_freq_terms(total_counts, probs), terms)
return terms
def dterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise derivative (or, the diagonal of a jacobian matrix),
i.e. the resulting values are the derivatives of the `local_function` at
each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
x0 = self.x0
x, itaylor, c0, c1 = intermediates
dterms = total_counts * (1 - 1. / x**(1. + self.alpha))
dterms_taylor = total_counts * (c0 + 2 * c1 * (x - x0))
dterms[itaylor] = dterms_taylor[itaylor]
dterms = _np.where(counts == 0, self.zero_freq_dterms(total_counts, probs), dterms)
return dterms
def hterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the 2nd derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise 2nd derivative, i.e. the resulting values are
the 2nd-derivatives of the `local_function` at each
(probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
raise NotImplementedError("Hessian not implemented for ChiAlpha function yet")
def hlsvec(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the 2nd derivatives of the least-squares vector of this objective function.
Note that because each `lsvec` element only depends on the corresponding probability,
this is just an element-wise 2nd derivative, i.e. the resulting values are
the 2nd-derivatives of `sqrt(local_function)` at each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
raise NotImplementedError("Hessian not implemented for ChiAlpha function yet")
#Required zero-term methods for omitted probs support in model-based objective functions
def _zero_freq_terms_harsh(self, total_counts, probs):
a = self.radius
return total_counts * _np.where(probs >= a, probs,
(-1.0 / (3 * a**2)) * probs**3 + probs**2 / a + a / 3.0)
def _zero_freq_dterms_harsh(self, total_counts, probs):
a = self.radius
return total_counts * _np.where(probs >= a, 1.0, (-1.0 / a**2) * probs**2 + 2 * probs / a)
def _zero_freq_terms_relaxed(self, total_counts, probs):
c1 = (0.5 / self.fmin) * (1. + self.alpha) / (self.x1**(2 + self.alpha))
p0 = 1.0 / c1
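# Note (illustrative): at the stitch point p0 = 1/c1 the quadratic c1*p**2 equals p,
# so the patch below matches N*p in value (though not in slope) at p = p0.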
return total_counts * _np.where(probs > p0, probs, c1 * probs**2)
def _zero_freq_dterms_relaxed(self, total_counts, probs):
c1 = (0.5 / self.fmin) * (1. + self.alpha) / (self.x1**(2 + self.alpha))
p0 = 1.0 / c1
return total_counts * _np.where(probs > p0, 1.0, 2 * c1 * probs)
class RawFreqWeightedChi2Function(RawChi2Function):
"""
The function `N(p-f)^2 / f`
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
"""
def __init__(self, regularization=None, resource_alloc=None, name="fwchi2",
description="Sum of freq-weighted Chi^2", verbosity=0):
super().__init__(regularization, resource_alloc, name, description, verbosity)
def chi2k_distributed_qty(self, objective_function_value):
"""
Convert a value of this objective function to one that is expected to be chi2_k distributed.
Parameters
----------
objective_function_value : float
A value of this objective function, i.e. one returned from `self.fn(...)`.
Returns
-------
float
"""
return objective_function_value # default is to assume the value *is* chi2_k distributed
def set_regularization(self, min_freq_clip_for_weighting=1e-4):
"""
Set regularization values.
Parameters
----------
min_freq_clip_for_weighting : float, optional
The minimum frequency that will be used in the `1/f` weighting factor.
That is, the weighting factor is the `1 / max(f, min_freq_clip_for_weighting)`.
Returns
-------
None
"""
self.min_freq_clip_for_weighting = min_freq_clip_for_weighting
def _weights(self, p, f, total_counts):
#Note: this could be computed once and cached?
"""
Get the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
total_counts : numpy.ndarray
The total counts.
Returns
-------
numpy.ndarray
"""
return _np.sqrt(total_counts / _np.clip(f, self.min_freq_clip_for_weighting, None))
def _dweights(self, p, f, wts):
"""
Get the derivative of the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
wts : numpy.ndarray
The weights, as computed by :method:`_weights`.
Returns
-------
numpy.ndarray
"""
return _np.zeros(len(p), 'd')
def _hweights(self, p, f, wts):
"""
Get the 2nd derivative of the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
wts : numpy.ndarray
The weights, as computed by :method:`_weights`.
Returns
-------
numpy.ndarray
"""
return _np.zeros(len(p), 'd')
def zero_freq_terms(self, total_counts, probs):
"""
Evaluate objective function terms with zero frequency (where count and frequency are zero).
Such terms are treated specially because, for some objective functions,
having zero frequency is a special case and must be handled differently.
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
return total_counts * probs**2 / self.min_freq_clip_for_weighting # N * p^2 / fmin
def zero_freq_dterms(self, total_counts, probs):
"""
Evaluate the derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
return 2 * total_counts * probs / self.min_freq_clip_for_weighting
def zero_freq_hterms(self, total_counts, probs):
"""
Evaluate the 2nd derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
2nd derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
return 2 * total_counts / self.min_freq_clip_for_weighting
class RawCustomWeightedChi2Function(RawChi2Function):
"""
The function `custom_weight^2 (p-f)^2`, with custom weights that default to 1.
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
custom_weights : numpy.ndarray, optional
One-dimensional array of the custom weights, which linearly multiply the
*least-squares* terms, i.e. `(p - f)`. If `None`, then unit weights are
used and the objective function computes the sum of unweighted squares.
"""
def __init__(self, regularization=None, resource_alloc=None, name="cwchi2",
description="Sum of custom-weighted Chi^2", verbosity=0, custom_weights=None):
super().__init__(regularization, resource_alloc, name, description, verbosity)
self.custom_weights = custom_weights
def set_regularization(self):
"""
Set regularization values.
Returns
-------
None
"""
pass
def _weights(self, p, f, total_counts):
#Note: this could be computed once and cached?
"""
Get the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
total_counts : numpy.ndarray
The total counts.
Returns
-------
numpy.ndarray
"""
if self.custom_weights is not None:
return self.custom_weights
else:
return _np.ones(len(p), 'd')
def _dweights(self, p, f, wts):
"""
Get the derivative of the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
wts : numpy.ndarray
The weights, as computed by :method:`_weights`.
Returns
-------
numpy.ndarray
"""
return _np.zeros(len(p), 'd')
def _hweights(self, p, f, wts):
"""
Get the 2nd derivative of the chi2 weighting factor.
Parameters
----------
p : numpy.ndarray
The probabilities.
f : numpy.ndarray
The frequencies
wts : numpy.ndarray
The weights, as computed by :method:`_weights`.
Returns
-------
numpy.ndarray
"""
return _np.zeros(len(p), 'd')
def zero_freq_terms(self, total_counts, probs):
"""
Evaluate objective function terms with zero frequency (where count and frequency are zero).
Such terms are treated specially because, for some objective functions,
having zero frequency is a special case and must be handled differently.
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
if self.custom_weights is not None:
return self.custom_weights**2 * probs**2 # elementwise cw^2 * p^2
else:
return probs**2 # p^2
def zero_freq_dterms(self, total_counts, probs):
"""
Evaluate the derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
if self.custom_weights is not None:
return 2 * self.custom_weights**2 * probs
else:
return 2 * probs  # derivative of p^2
def zero_freq_hterms(self, total_counts, probs):
"""
Evaluate the 2nd derivative of zero-frequency objective function terms.
Zero frequency terms are treated specially because, for some objective functions,
these are a special case and must be handled differently. Derivatives are
evaluated element-wise, i.e. the i-th element of the returned array is the
2nd derivative of the i-th term with respect to the i-th probability (derivatives
with respect to all other probabilities are zero because of the function structure).
Parameters
----------
total_counts : numpy.ndarray
The total counts.
probs : numpy.ndarray
The probabilities.
Returns
-------
numpy.ndarray
A 1D array of the same length as `total_counts` and `probs`.
"""
if self.custom_weights is not None:
return 2 * self.custom_weights**2
else:
return 2 * _np.ones(len(probs))
# The log(Likelihood) within the Poisson picture is: # noqa
# # noqa
# L = prod_{i,sl} lambda_{i,sl}^N_{i,sl} e^{-lambda_{i,sl}} / N_{i,sl}! # noqa
# # noqa
# Where lamba_{i,sl} := p_{i,sl}*N[i] is a rate, i indexes the operation sequence, # noqa
# and sl indexes the spam label. N[i] is the total counts for the i-th circuit, and # noqa
# so sum_{sl} N_{i,sl} == N[i]. We can ignore the p-independent N_j! and take the log: # noqa
# # noqa
# log L = sum_{i,sl} N_{i,sl} log(N[i]*p_{i,sl}) - N[i]*p_{i,sl} # noqa
# = sum_{i,sl} N_{i,sl} log(p_{i,sl}) - N[i]*p_{i,sl} (where we ignore the p-independent log(N[i]) terms) # noqa
# # noqa
# The objective function computes the negative log(Likelihood) as a vector of leastsq # noqa
# terms, where each term == sqrt( N_{i,sl} * -log(p_{i,sl}) + N[i] * p_{i,sl} ) # noqa
# # noqa
# See LikelihoodFunctions.py for details on patching # noqa
# The log(Likelihood) within the standard picture is:
#
# L = prod_{i,sl} p_{i,sl}^N_{i,sl}
#
# Where i indexes the operation sequence, and sl indexes the spam label.
# N[i] is the total counts for the i-th circuit, and
# so sum_{sl} N_{i,sl} == N[i]. We take the log:
#
# log L = sum_{i,sl} N_{i,sl} log(p_{i,sl})
#
# The objective function computes the negative log(Likelihood) as a vector of leastsq
# terms, where each term == sqrt( N_{i,sl} * -log(p_{i,sl}) )
#
# See LikelihoodFunction.py for details on patching
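#
# Illustrative numeric check for the Poisson-picture function below (not used by the code):
# for a single 2-outcome circuit with N = 100, p+ = 0.6 and f+ = 0.5, the terms
# N*f*log(f/p) - N*(f-p) are
#   50*log(0.5/0.6) + 10 ~= 0.88   and   50*log(0.5/0.4) - 10 ~= 1.16,
# so 2*deltaLogL ~= 4.1, roughly agreeing with the corresponding chi^2 value
# 100*(0.6-0.5)**2/(0.6*0.4) ~= 4.17, as expected when p is near f.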
class RawPoissonPicDeltaLogLFunction(RawObjectiveFunction):
"""
The function `N*f*log(f/p) - N*(f-p)`.
Note that this equals `Nf(-log(x) - 1 + x)` where `x := p/f`.
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
"""
def __init__(self, regularization=None,
resource_alloc=None, name='dlogl', description="2*Delta(log(L))", verbosity=0):
super().__init__(regularization, resource_alloc, name, description, verbosity)
def chi2k_distributed_qty(self, objective_function_value):
"""
Convert a value of this objective function to one that is expected to be chi2_k distributed.
Parameters
----------
objective_function_value : float
A value of this objective function, i.e. one returned from `self.fn(...)`.
Returns
-------
float
"""
return 2 * objective_function_value # 2 * deltaLogL is what is chi2_k distributed
def set_regularization(self, min_prob_clip=1e-4, pfratio_stitchpt=None, pfratio_derivpt=None,
radius=1e-4, fmin=None):
"""
Set regularization values.
Parameters
----------
min_prob_clip : float, optional
The probability below which the objective function is replaced with its
second order Taylor expansion. This must be `None` if `pfratio_stitchpt`
is not None; the latter specifies an alternate stitching method where the
stitch-point is given in `x = p/f` units.
pfratio_stitchpt : float, optional
The x-value (x = probability/frequency ratio) below which the function is
replaced with its second-order Taylor expansion. Conflicts with
`min_prob_clip`, which specifies an alternate stitching method.
pfratio_derivpt : float, optional
Specified if and only if `pfratio_stitchpt` is. The x-value at which the
Taylor expansion derivatives are evaluated at. If this is the same as
`pfratio_stitchpt` then the function is smooth to 2nd order at this point.
However, choosing a larger value of `pfratio_derivpt` will make the stitched
part of the function less steep, which is sometimes more helpful to an
optimizer than having the stitch-point be smooth.
radius : float, optional
If `radius` is not None then a "harsh" method of regularizing the zero-frequency
terms (where the local function = `N*p`) is used. Specifically, for `p < radius`
we splice in the cubic polynomial, `-(1/3)*p^3/r^2 + p^2/r + (1/3)*r` (where `r == radius`).
This has the nice properties that 1) it matches the value, first-derivative,
and second derivative of `N*p` at `p=r` and 2) it, like `N*p` has a minimum at `p=0`
with value `0`. The `radius` dictates the amount of curvature or sharpness of this
stitching function, with smaller values making the function more pointed. We recommend
making this value smaller than the smallest expected frequencies, so as not to alter
the objective function in regions near the ML point. If `radius` is None, then
`fmin` is used to handle the zero-frequency terms.
fmin : float, optional
The minimum expected frequency. When `radius` is None a "relaxed" regularization of
the zero-frequency terms is used that stitches the quadratic `N * C * p^2` to `N*p` when
`p < 1/C`, with `C = 1/(2 fmin) / pfratio_derivpt^2`. This
matches the value of the stitch and `N*p` at `p=1/C` but *not* the derivative; it
makes up for this by being less steep - the value of `C` is chosen so that the derivative
(steepness) of the zero-frequency terms at the stitch point is similar to the regular
nonzero-frequency terms at their stitch points.
Returns
-------
None
"""
if min_prob_clip is not None:
assert(pfratio_stitchpt is None and pfratio_derivpt is None), \
"Cannot specify pfratio and min_prob_clip arguments as non-None!"
self.min_p = min_prob_clip
self.regtype = "minp"
else:
assert(min_prob_clip is None), "Cannot specify pfratio and min_prob_clip arguments as non-None!"
self.x0 = pfratio_stitchpt
self.x1 = pfratio_derivpt
self.regtype = "pfratio"
if radius is None:
#Infer the curvature of the regularized zero-f-term functions from
# the largest curvature we use at the stitch-points of nonzero-f terms.
assert(self.regtype == 'pfratio'), "Must specify `radius` when using the %s regularization type" % self.regtype
assert(fmin is not None), "Must specify 'fmin' when radius is None (should be the smallest allowed frequency)."
self.radius = None
self.zero_freq_terms = self._zero_freq_terms_relaxed
self.zero_freq_dterms = self._zero_freq_dterms_relaxed
self.zero_freq_hterms = self._zero_freq_hterms_relaxed
self.fmin = fmin # = max(1e-7, _np.min(freqs_nozeros)) # lowest non-zero frequency
else:
#Use radius to specify the curvature/"roundness" of f == 0 terms,
# though this uses a more aggressive p^3 function to penalize negative probs.
assert(fmin is None), "Cannot specify 'fmin' when radius is specified."
self.radius = radius
self.zero_freq_terms = self._zero_freq_terms_harsh
self.zero_freq_dterms = self._zero_freq_dterms_harsh
self.zero_freq_hterms = self._zero_freq_hterms_harsh
self.fmin = None
def _intermediates(self, probs, counts, total_counts, freqs):
""" Intermediate values used by both terms(...) and dterms(...) """
# Quantities depending on data only (not probs): could be computed once and
# passed in as arguments to this (and other) functions?
freqs_nozeros = _np.where(counts == 0, 1.0, freqs)
if self.regtype == 'pfratio':
x0 = self.x0
x1 = self.x1
x = probs / freqs_nozeros # objective is -Nf*(log(x) + 1 - x)
pos_x = _np.where(x < x0, x0, x)
c0 = counts * (1 - 1 / x1)  # deriv wrt x at x == x1 (the Taylor deriv-point)
c1 = 0.5 * counts / (x1**2) # 0.5 * 2nd deriv at x1
return x, pos_x, c0, c1, freqs_nozeros
elif self.regtype == 'minp':
freq_term = counts * (_np.log(freqs_nozeros) - 1.0)
pos_probs = _np.where(probs < self.min_p, self.min_p, probs)
c0 = total_counts - counts / self.min_p
c1 = 0.5 * counts / (self.min_p**2)
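# Illustrative note: for p >= min_p the term is counts*(log(f)-1) - counts*log(p) + total_counts*p,
# whose 1st and 2nd p-derivatives at p = min_p give c0 = total_counts - counts/min_p
# and c1 = 0.5*counts/min_p**2, the Taylor-patch coefficients used below min_p.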
return freq_term, pos_probs, c0, c1, freqs_nozeros
else:
raise ValueError("Invalid regularization type: %s" % self.regtype)
def terms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the terms of the objective function.
The "terms" are the per-(probability, count, total-count) values
that get summed together to result in the objective function value.
These are the "local" or "per-element" values of the objective function.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
if self.regtype == 'pfratio':
x0 = self.x0
x, pos_x, c0, c1, _ = intermediates
terms = -counts * (1.0 - pos_x + _np.log(pos_x))
#Note: order of +/- terms above is important to avoid roundoff errors when x is near 1.0
# (see patching line below). For example, using log(x) + 1 - x causes significant loss
# of precision because log(x) is tiny and so is |1-x| but log(x) + 1 == 1.0.
# remove small negative elements due to roundoff error (above expression *cannot* really be negative)
terms = _np.maximum(terms, 0)
# quadratic extrapolation of logl at x0 for probabilities/frequencies < x0
terms = _np.where(x < x0, terms + c0 * (x - x0) + c1 * (x - x0)**2, terms)
#terms = _np.where(x > 1 / x0, terms + T * (x - x0) + T2 * (x - x0)**2, terms)
elif self.regtype == 'minp':
freq_term, pos_probs, c0, c1, _ = intermediates
terms = freq_term - counts * _np.log(pos_probs) + total_counts * pos_probs
# remove small negative elements due to roundoff error (above expression *cannot* really be negative)
terms = _np.maximum(terms, 0)
# quadratic extrapolation of logl at min_p for probabilities < min_p
terms = _np.where(probs < self.min_p,
terms + c0 * (probs - self.min_p) + c1 * (probs - self.min_p)**2, terms)
else:
raise ValueError("Invalid regularization type: %s" % self.regtype)
terms = _np.where(counts == 0, self.zero_freq_terms(total_counts, probs), terms)
# special handling for f == 0 terms
# using cubic rounding of a function that smooths N*p for p>0:
# has minimum at p=0; matches value, 1st, & 2nd derivs at p=a.
if terms.size > 0 and _np.min(terms) < 0.0:
#Since we set terms = _np.maximum(terms, 0) above we know it was the regularization that caused this
if self.regtype == 'minp':
raise ValueError(("Regularization => negative terms! Is min_prob_clip (%g) too large? "
"(it should be smaller than the smallest frequency)") % self.min_p)
else:
raise ValueError("Regularization => negative terms!")
return terms
def lsvec(self, probs, counts, total_counts, freqs, intermediates=None):
# lsvec = sqrt(terms), but don't use base class fn b/c of special taylor patch...
"""
Compute the least-squares vector of the objective function.
This is the square-root of the terms-vector returned from :method:`terms`.
This vector is the objective function value used by a least-squares
optimizer when optimizing this objective function. Note that the existence
of this quantity requires that the terms be non-negative. If this is not
the case, an error is raised.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
lsvec = _np.sqrt(self.terms(probs, counts, total_counts, freqs, intermediates))
if self.regtype == "pfratio": # post-sqrt(v) 1st order taylor patch for x near 1.0 - maybe unnecessary
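# Illustrative reasoning: near x = 1, x - 1 - log(x) ~= (x-1)**2 / 2, so
# sqrt(term) ~= sqrt(counts/2) * |x - 1|; computing it this way avoids the precision
# loss of taking sqrt of a tiny difference of nearly-equal quantities.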
freqs_nozeros = _np.where(counts == 0, 1.0, freqs)
x = probs / freqs_nozeros # objective is -Nf*(log(x) + 1 - x)
lsvec = _np.where(_np.abs(x - 1) < 1e-6, _np.sqrt(counts) * _np.abs(x - 1) / _np.sqrt(2), lsvec)
return lsvec
def dterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise derivative (or, the diagonal of a jacobian matrix),
i.e. the resulting values are the derivatives of the `local_function` at
each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
if self.regtype == 'pfratio':
x0 = self.x0
x, pos_x, c0, c1, freqs_nozeros = intermediates
dterms = (total_counts * (-1 / pos_x + 1))
dterms_taylor = (c0 + 2 * c1 * (x - x0)) / freqs_nozeros
#dterms_taylor2 = (T + 2 * T2 * (x - x0)) / self.freqs_nozeros
dterms = _np.where(x < x0, dterms_taylor, dterms)
#terms = _np.where(x > 1 / x0, dprobs_taylor2, dterms)
elif self.regtype == 'minp':
_, pos_probs, c0, c1, freqs_nozeros = intermediates
dterms = total_counts - counts / pos_probs
dterms_taylor = c0 + 2 * c1 * (probs - self.min_p)
dterms = _np.where(probs < self.min_p, dterms_taylor, dterms)
dterms_zerofreq = self.zero_freq_dterms(total_counts, probs)
dterms = _np.where(counts == 0, dterms_zerofreq, dterms)
return dterms
def hterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the 2nd derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise 2nd derivative, i.e. the resulting values are
the 2nd-derivatives of the `local_function` at each
(probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
# terms = Nf*(log(f)-log(p)) + N*(p-f) OR const + S*(p - minp) + S2*(p - minp)^2
# dterms/dp = -Nf/p + N OR c0 + 2*S2*(p - minp)
# d2terms/dp2 = Nf/p^2 OR 2*S2
if(self.regtype != "minp"):
raise NotImplementedError("Hessian only implemented for 'minp' regularization type so far.")
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
_, pos_probs, c0, c1, freqs_nozeros = intermediates
d2terms_dp2 = _np.where(probs < self.min_p, 2 * c1, counts / pos_probs**2)
zfc = _np.where(probs >= self.radius, 0.0,
total_counts * ((-2.0 / self.radius**2) * probs + 2.0 / self.radius))
d2terms_dp2 = _np.where(counts == 0, zfc, d2terms_dp2)
return d2terms_dp2 # a 1D array of d2(logl)/dprobs2 values; shape = (nEls,)
#Required zero-term methods for omitted probs support in model-based objective functions
def _zero_freq_terms_harsh(self, total_counts, probs):
a = self.radius
return total_counts * _np.where(probs >= a, probs,
(-1.0 / (3 * a**2)) * probs**3 + probs**2 / a + a / 3.0)
def _zero_freq_dterms_harsh(self, total_counts, probs):
a = self.radius
return total_counts * _np.where(probs >= a, 1.0, (-1.0 / a**2) * probs**2 + 2 * probs / a)
def _zero_freq_hterms_harsh(self, total_counts, probs):
a = self.radius
return total_counts * _np.where(probs >= a, 0.0, (-2.0 / a**2) * probs + 2 / a)
def _zero_freq_terms_relaxed(self, total_counts, probs):
# quadratic N*C0*p^2 that == N*p at p=1/C0.
# Pick C0 so it is ~ magnitude of curvature at patch-pt p/f = x1
# Note that at d2f/dx2 at x1 is 0.5 N*f / x1^2 so d2f/dp2 = 0.5 (N/f) / x1^2 (dx/dp = 1/f)
# Thus, we want C0 ~ 0.5(N/f)/x1^2; the largest this value can be is when f=fmin
c1 = (0.5 / self.fmin) * 1.0 / (self.x1**2)
p0 = 1.0 / c1
return total_counts * _np.where(probs > p0, probs, c1 * probs**2)
def _zero_freq_dterms_relaxed(self, total_counts, probs):
c1 = (0.5 / self.fmin) * 1.0 / (self.x1**2)
p0 = 1.0 / c1
return total_counts * _np.where(probs > p0, 1.0, 2 * c1 * probs)
def _zero_freq_hterms_relaxed(self, total_counts, probs):
raise NotImplementedError() # This is straightforward, but do it later.
class RawDeltaLogLFunction(RawObjectiveFunction):
"""
The function `N*f*log(f/p)`.
Note that this equals `-Nf log(x)` where `x := p/f`.
Parameters
----------
regularization : dict, optional
Regularization values.
resource_alloc : ResourceAllocation, optional
Available resources and how they should be allocated for computations.
name : str, optional
A name for this objective function (can be anything).
description : str, optional
A description for this objective function (can be anything)
verbosity : int, optional
Level of detail to print to stdout.
"""
def __init__(self, regularization=None,
resource_alloc=None, name='dlogl', description="2*Delta(log(L))", verbosity=0):
super().__init__(regularization, resource_alloc, name, description, verbosity)
def chi2k_distributed_qty(self, objective_function_value):
"""
Convert a value of this objective function to one that is expected to be chi2_k distributed.
Parameters
----------
objective_function_value : float
A value of this objective function, i.e. one returned from `self.fn(...)`.
Returns
-------
float
"""
return 2 * objective_function_value # 2 * deltaLogL is what is chi2_k distributed
def set_regularization(self, min_prob_clip=1e-4, pfratio_stitchpt=None, pfratio_derivpt=None):
"""
Set regularization values.
Parameters
----------
min_prob_clip : float, optional
The probability below which the objective function is replaced with its
            second-order Taylor expansion. This must be `None` if `pfratio_stitchpt`
            is not None, since the latter specifies an alternate stitching method where
            the stitch-point is given in `x = p/f` units.
pfratio_stitchpt : float, optional
            The x-value (x = probability/frequency ratio) below which the function is
            replaced with its second-order Taylor expansion. Conflicts with
`min_prob_clip`, which specifies an alternate stitching method.
pfratio_derivpt : float, optional
Specified if and only if `pfratio_stitchpt` is. The x-value at which the
            Taylor expansion derivatives are evaluated. If this is the same as
`pfratio_stitchpt` then the function is smooth to 2nd order at this point.
However, choosing a larger value of `pfratio_derivpt` will make the stitched
part of the function less steep, which is sometimes more helpful to an
optimizer than having the stitch-point be smooth.
Returns
-------
None
"""
if min_prob_clip is not None:
assert(pfratio_stitchpt is None and pfratio_derivpt is None), \
"Cannot specify pfratio and min_prob_clip arguments as non-None!"
self.min_p = min_prob_clip
self.regtype = "minp"
else:
assert(min_prob_clip is None), "Cannot specify pfratio and min_prob_clip arguments as non-None!"
self.x0 = pfratio_stitchpt
self.x1 = pfratio_derivpt
self.regtype = "pfratio"
def _intermediates(self, probs, counts, total_counts, freqs):
""" Intermediate values used by both terms(...) and dterms(...) """
# Quantities depending on data only (not probs): could be computed once and
# passed in as arguments to this (and other) functions?
freqs_nozeros = _np.where(counts == 0, 1.0, freqs)
if self.regtype == 'pfratio':
x0 = self.x0
x1 = self.x1
x = probs / freqs_nozeros # objective is -Nf*log(x)
pos_x = _np.where(x < x0, x0, x)
c0 = -counts * (1 / x1) # deriv wrt x at x == x1 (=min_p)
c1 = 0.5 * counts / (x1**2) # 0.5 * 2nd deriv at x1
return x, pos_x, c0, c1, freqs_nozeros
elif self.regtype == 'minp':
freq_term = counts * _np.log(freqs_nozeros) # objective is Nf*(log(f) - log(p))
pos_probs = _np.where(probs < self.min_p, self.min_p, probs)
c0 = -counts / self.min_p
c1 = 0.5 * counts / (self.min_p**2)
return freq_term, pos_probs, c0, c1, freqs_nozeros
else:
raise ValueError("Invalid regularization type: %s" % self.regtype)
def terms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the terms of the objective function.
The "terms" are the per-(probability, count, total-count) values
that get summed together to result in the objective function value.
These are the "local" or "per-element" values of the objective function.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
if self.regtype == 'pfratio':
x0 = self.x0
x, pos_x, c0, c1, freqs_nozeros = intermediates
terms = -counts * _np.log(pos_x)
terms = _np.where(x < x0, terms + c0 * (x - x0) + c1 * (x - x0)**2, terms)
elif self.regtype == 'minp':
freq_term, pos_probs, c0, c1, _ = intermediates
terms = freq_term - counts * _np.log(pos_probs)
terms = _np.where(probs < self.min_p,
terms + c0 * (probs - self.min_p) + c1 * (probs - self.min_p)**2, terms)
else:
raise ValueError("Invalid regularization type: %s" % self.regtype)
terms = _np.where(counts == 0, 0.0, terms)
#Note: no penalty for omitted probabilities (objective fn == 0 whenever counts == 0)
return terms
def dterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise derivative (or, the diagonal of a jacobian matrix),
i.e. the resulting values are the derivatives of the `local_function` at
each (probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
if self.regtype == 'pfratio':
x0 = self.x0
x, pos_x, c0, c1, freqs_nozeros = intermediates
dterms = total_counts * (-1 / pos_x) # note Nf/p = N/x
dterms_taylor = (c0 + 2 * c1 * (x - x0)) / freqs_nozeros # (...) is df/dx and want df/dp = df/dx * (1/f)
dterms = _np.where(x < x0, dterms_taylor, dterms)
elif self.regtype == 'minp':
_, pos_probs, c0, c1, freqs_nozeros = intermediates
dterms = -counts / pos_probs
dterms_taylor = c0 + 2 * c1 * (probs - self.min_p)
dterms = _np.where(probs < self.min_p, dterms_taylor, dterms)
dterms = _np.where(counts == 0, 0.0, dterms)
return dterms
def hterms(self, probs, counts, total_counts, freqs, intermediates=None):
"""
Compute the 2nd derivatives of the terms of this objective function.
Note that because each term only depends on the corresponding probability,
this is just an element-wise 2nd derivative, i.e. the resulting values are
the 2nd-derivatives of the `local_function` at each
(probability, count, total-count) value.
Parameters
----------
probs : numpy.ndarray
Array of probability values.
counts : numpy.ndarray
Array of count values.
total_counts : numpy.ndarray
Array of total count values.
freqs : numpy.ndarray
Array of frequency values. This should always equal `counts / total_counts`
but is supplied separately to increase performance.
intermediates : tuple, optional
Used internally to speed up computations.
Returns
-------
numpy.ndarray
A 1D array of length equal to that of each array argument.
"""
        # terms = Nf*(log(f) - log(p))  OR  const + S*(p - minp) + S2*(p - minp)^2
        # dterms/dp = -Nf/p             OR  S + 2*S2*(p - minp)
        # d2terms/dp2 = Nf/p^2          OR  2*S2
if(self.regtype != "minp"):
raise NotImplementedError("Hessian only implemented for 'minp' regularization type so far.")
if intermediates is None:
intermediates = self._intermediates(probs, counts, total_counts, freqs)
_, pos_probs, c0, c1, freqs_nozeros = intermediates
d2terms_dp2 = _np.where(probs < self.min_p, 2 * c1, counts / pos_probs**2)
        d2terms_dp2 = _np.where(counts == 0, 0.0, d2terms_dp2)
        return d2terms_dp2  # a 1D array of d2(logl)/dprobs2 values; shape = (nEls,)
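# ---------------------------------------------------------------------------
# Minimal sketch (not part of the class above; illustrative names only): how the
# "minp" regularization in RawDeltaLogLFunction stitches a 2nd-order Taylor
# expansion onto -N*f*log(p) below p = min_p, keeping the term and its first
# derivative continuous at the stitch point.
import numpy as np
def stitched_logl_term(p, n, f, min_p=1e-4):
    """N*f*(log(f) - log(p)), Taylor-stitched below min_p (sketch)."""
    pos_p = np.where(p < min_p, min_p, p)
    c0 = -n * f / min_p                 # d/dp of -N*f*log(p), evaluated at min_p
    c1 = 0.5 * n * f / min_p**2         # half of the 2nd derivative at min_p
    t = n * f * np.log(f) - n * f * np.log(pos_p)
    return np.where(p < min_p, t + c0 * (p - min_p) + c1 * (p - min_p)**2, t)
if __name__ == "__main__":
    n, f, min_p = 100.0, 0.05, 1e-4
    eps = 1e-12
    below = stitched_logl_term(np.array([min_p - eps]), n, f, min_p)
    above = stitched_logl_term(np.array([min_p + eps]), n, f, min_p)
    assert abs(below[0] - above[0]) < 1e-6   # continuous across the stitch point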
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 11:27:05 2020
@author: Mark
"""
import tkinter
import tkinter.ttk as tk
from tkinter import TOP
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backend_bases import MouseEvent, LocationEvent
from matplotlib.widgets import RectangleSelector
import numpy as np
class Figure:
"""Parameters
----------
frame : TK Frame
The container for the figure.
x : ARRAY or LIST of FLOATS
The x-values that should be used for the plot.
y : ARRAY or LIST of FLOATS
The y-values used for the plot.
params : DICTIONARY
Should contain keys 'xlabel', 'ylabel', 'title'. Values should be
STRING.
dpi : INT, optional
        Resolution of the figure in dots per inch. The default is 100.
Returns
-------
None.
"""
def __init__(self, frame, x, y, params, dpi = 100):
self.frame = frame
self.x = x
self.y = y
self.normalized = False
self._getParams(params)
self.fig, self.ax = plt.subplots(figsize=self.size, dpi=dpi)
self.fig.patch.set_facecolor('0.9411')
self.ax.plot(x, y)
self._setLabels()
self._setUp()
def _getParams(self, params):
if 'title' in params.keys():
self.title = params['title']
else:
self.title = ''
if 'xlabel' in params.keys():
self.xlabel = params['xlabel']
else:
self.xlabel = ''
if 'ylabel' in params.keys():
self.ylabel = params['ylabel']
else:
self.ylabel = ''
if 'colours' in params.keys():
self.colours = params['colours']
else:
self.colours = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd','#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
if 'size' in params.keys():
self.size = params['size']
else:
self.size = (5,4)
if 'axis_label_fontsize' in params.keys():
self.axis_label_fontsize = params['axis_label_fontsize']
else:
self.axis_label_fontsize = 12
if 'axis_tick_font' in params.keys():
self.axis_tick_font = params['axis_tick_font']
else:
self.axis_tick_font = 8
def _setLabels(self):
self.ax.set_xlabel(self.xlabel, fontsize=self.axis_label_fontsize)
self.ax.set_ylabel(self.ylabel, fontsize=self.axis_label_fontsize)
self.ax.set_title(self.title)
def _setUp(self):
self.ax.tick_params(direction='out', length=4, width=1,
colors='black', grid_color='black',
labelsize=self.axis_tick_font, grid_alpha=0.5)
self.fig.tight_layout()
self.chart = FigureCanvasTkAgg(self.fig, self.frame)
self.chart.mpl_connect('button_press_event', self.doubleClkChart)
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.2, fill=True)
self.RS = RectangleSelector(self.ax,
self.selector,
drawtype='box', useblit=True,
button=[1, 3], # don't use middle button
minspanx=5, minspany=5,
spancoords='pixels',
interactive=True,
rectprops = rectprops)
self.RS.set_active(True)
self.chart.get_tk_widget().pack(side=TOP)
def doubleClkChart(self, event):
if event.dblclick:
self.zoomOut()
def rePlotFig(self, data, params):
"""
        This function re-plots the figure and re-sets the x and y limits.
        Parameters
        ----------
        data : LIST of DICTIONARIES, where the keys in the DICTIONARIES are
            'x' and 'y', whose values are lists of floats, and optionally
            'idx', which is the index of the plotted line.
params : DICTIONARY
Can contain key 'rescale', with value 0 or 1. This determines
if the plot is rescaled.
Returns
-------
None.
"""
left,right = self.ax.get_xlim()
bottom, top = self.ax.get_ylim()
self.ax.clear()
self._setLabels()
def _getIdx(i):
if 'idx' in i.keys():
idx = i['idx']
else:
idx=0
return idx
def plot(x,y,idx):
colour_idx = self._getColourIdx(idx)
self.ax.plot(x,y,c=self.colours[colour_idx])
if self.normalized:
for i in data:
x = i['x']
                # assumed completion: normalize each curve to its own maximum, then re-plot it
                y = (np.array(i['y'])
                     / np.max(np.array(i['y'])))
                plot(x, y, _getIdx(i))
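# ---------------------------------------------------------------------------
# Minimal usage sketch for the Figure class above (illustrative values only;
# assumes the class definition above is complete and in scope as `Figure`).
# The `params` keys shown are the ones the class docstring documents.
import numpy as np
import tkinter
import tkinter.ttk
def _figure_demo():
    root = tkinter.Tk()
    frame = tkinter.ttk.Frame(root)
    frame.pack()
    x = np.linspace(0, 10, 200)
    y = np.sin(x)
    params = {'xlabel': 'x', 'ylabel': 'Intensity', 'title': 'Demo', 'size': (5, 4)}
    Figure(frame, x, y, params, dpi=100)   # Figure is defined in the module above
    root.mainloop()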
# coding=utf-8
import numpy.matlib as np
import numpy
npfloat = numpy.float64  # numpy.float (an alias of the builtin float) is removed in recent NumPy
# To use:
# - Call threedize(xs, ys, view, cameraposor, laserposor), where:
# - xs and ys are numpy arrays
# - view is created with View
# - the *posors are created with Posor(pos, theta, phi, psi)
# - pos is created with coords
class Posor(object):
def __init__(self, pos, theta, phi, psi):
self.pos = pos
self.theta = theta
self.phi = phi
self.psi = psi
class View(object):
def __init__(self, centerx, centery, dist, angle):
self.centerx = centerx
self.centery = centery
self.dist = dist
self.angle = angle
# data is a list of pairs whose first element is phi and whose second element is
# an array of x--y pairs. Return an array of x--y--z triples
def threedize_phi_angles(data, view, cameraposor, laserpos, lasertheta):
"""Call threedize on each element of data. Data is a list of pairs whose first
element is phi and whose second element is an array of x--y pairs. phi is
combined with laserpos and lasertheta to create laserposor."""
per_angles = [threedize(xys, view,
cameraposor,
Posor(laserpos, lasertheta, phi, 0))
for (phi, xys) in data]
return np.concatenate(per_angles)
# Take an array of pairs xys, along with the view, represented as an object
# with centerx, centery, dist, angle, and two objects camerapos, with
# pos, theta, phi, and psi, and laserpos, with pos, theta, and phi.
# Return an array of x--y--z triples, giving the corresponding points in
# absolute coördinates
# Laser starts out as the plane with normal y = z = 0. Orientation is created
# in the following way: Start by looking along the positive x axis, with z up.
# Rotate theta radians clockwise along the z axis, towards positive y. Rotate
# phi radians upwards along the new y axis, towards positive z. Finally, rotate
# psi radians counterclockwise along the new x axis, towards positive z.
# The transformation from camera coördinates to absolute coördinates is then
# given by the matrix product:
# [ cos th -sin th 0 ] [ cos ph 0 -sin ph ] [ 1 0 0 ]
# [ sin th cos th 0 ] [ 0 1 0 ] [ 0 cos ps -sin ps ]
# [ 0 0 1 ] [ sin ph 0 cos ph ] [ 0 sin ps cos ps ]
def threedize(xys, view, cameraposor, laserposor):
"""Calculate, as an array of x-y-z triples, the 3d points corresponding to the
pixels xys, an array of x-y pairs, as seen by a camera with view and posor
cameraposor and generated by a laser with posor laserposor."""
plane = calc_plane(laserposor)
return threedize_plane(xys, view, cameraposor, plane)
def threedize_plane(xys, view, cameraposor, plane):
"""Like threedize, but providing the laser plane explicitly instead of
calculating it from the laser posor."""
rays = calc_rays(xys, view)
rot_rays = rotate(rays, cameraposor)
threed_points = intersect(plane, cameraposor.pos, rot_rays)
return threed_points
class Plane(object):
def __init__(self, pos, normal):
self.pos = pos
self.normal = normal
# Take a posor object and return another object with point x, y, z and normal
# dx, dy, dz
def calc_plane(posor):
"""Calculate the plane associated with the laser posor"""
normal = rotate(coord(1, 0, 0), posor)
return Plane(posor.pos, normal)
# points is a matrix
def rotate(points, posor):
"""Rotate the matrix of column vectors points according to posor"""
rot_matrix = calc_rot_matrix(posor)
return rot_matrix * points
def unrotate(points, posor):
"""Rotate the matrix of column vectors points according to posor, i.e., from
absolute coordinates to camera coordinates"""
rot_matrix = calc_rot_matrix(posor)
return rot_matrix.I * points
def calc_rays(xys, view):
"""Return a matrix of column vectors of the rays corresponding to the pixels
xys, given as a list of pairs. view defines the camera. The results are in
camera coordinates."""
something = view_number(view)
cxys = xys - np.array([view.centerx, view.centery])
return np.mat([np.full(len(xys), something), cxys[:, 0], -cxys[:, 1]], dtype=npfloat)
def view_number(view):
return view.dist/np.tan(view.angle)
def calc_rot_matrix(posor):
"""Calculate the rotation matrix that takes a column vector from the camera
coordinates associated with posor to absolute coordinates"""
th = posor.theta
    # R_z(theta) * R_y(phi) * R_x(psi), matching the product documented above threedize()
    theta = np.mat([[np.cos(th), -np.sin(th), 0],
                    [np.sin(th), np.cos(th), 0],
                    [0, 0, 1]])
    ph = posor.phi
    phi = np.mat([[np.cos(ph), 0, -np.sin(ph)],
                  [0, 1, 0],
                  [np.sin(ph), 0, np.cos(ph)]])
    ps = posor.psi
    psi = np.mat([[1, 0, 0],
                  [0, np.cos(ps), -np.sin(ps)],
                  [0, np.sin(ps), np.cos(ps)]])
    return theta * phi * psi
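# Quick check of the orientation convention documented above (illustrative; assumes
# calc_rot_matrix and Posor from this module are in scope): rotating the +x view
# direction by theta = pi/2 about z should point it along +y.
#   r = calc_rot_matrix(Posor(None, np.pi / 2, 0.0, 0.0))
#   r * np.mat([[1.0], [0.0], [0.0]])   # ~= column vector (0, 1, 0)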
# Author: <NAME> at 15/02/2022 <<EMAIL>>
# Licence: MIT License
# Copyright: <NAME> (2018) <<EMAIL>>
import numpy as np
import pytest
from numpy.testing import assert_equal
from .._base import DistantFeedback, check_n_sequences, check_one_sequence, check_xy
from .dummy_nodes import *
def idfn(val):
if isinstance(val, np.ndarray):
return str(val.shape)
if isinstance(val, list):
return f"list[{len(val)}]"
if isinstance(val, dict):
return str(val)
else:
return val
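# Examples of the ids idfn() generates for parametrized cases (values shown are
# illustrative): np.ones((1, 5)) -> "(1, 5)", [1, 2, 3] -> "list[3]",
# {"expected_dim": 6} -> "{'expected_dim': 6}", "foo" -> "foo".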
@pytest.mark.parametrize(
"x,kwargs,expects",
[
(np.ones((1, 5)), {}, np.ones((1, 5))),
(np.ones((5,)), {}, np.ones((1, 5))),
(np.ones((1, 5)), {"expected_dim": 6}, ValueError),
("foo", {}, TypeError),
(1, {}, np.ones((1, 1))),
        # assumed completion: a ragged list of sequences should raise
        ([np.ones((1, 5)), np.ones((1, 6))], {}, ValueError),
    ],
    ids=idfn,
)
# Imports
import _pickle as cPickle
import numpy as np
import os
import cv2
# Keras Imports
from keras.applications import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.layers import Input,Conv2D,MaxPooling2D,Conv2DTranspose, multiply, concatenate, Dense, Flatten, Dropout, Lambda
from keras.callbacks import ModelCheckpoint
from keras import losses
from keras import backend as K
# ISBI Model Imports
from code.isbi_model.isbi_model_train_generator import Generator
from code.isbi_model.isbi_model_utilities import create_isbi_model
# CUDA Environment Variables (adapt them to your personal settings)
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="2"
# GLOBAL VARIABLES
# ISBI Model Results Directory
# Main Results Directory
results_directory = 'results'
if not os.path.isdir(results_directory):
    os.mkdir(results_directory)
# ISBI Model Results Directory
isbi_model_results_dir = os.path.join(results_directory, 'isbi-model')
if not os.path.isdir(isbi_model_results_dir):
    os.mkdir(isbi_model_results_dir)
# ISBI Model Weights Directory
isbi_model_results_weights_dir = os.path.join(isbi_model_results_dir, 'weights')
if not os.path.isdir(isbi_model_results_weights_dir):
    os.mkdir(isbi_model_results_weights_dir)
# Data directory
data_dir = 'data/resized'
# X data (images)
with open(os.path.join(data_dir, 'X_train_221.pickle'), 'rb') as fp:
X_train = cPickle.load(fp)
with open(os.path.join(data_dir, 'X_test_221.pickle'), 'rb') as fp:
X_test = cPickle.load(fp)
X = np.concatenate((X_train, X_test))
# Heatmaps data (heatmaps)
with open(os.path.join(data_dir, 'heatmaps_train_221.pickle'), 'rb') as fp:
heatmaps_train = cPickle.load(fp)
with open(os.path.join(data_dir, 'heatmaps_test_221.pickle'), 'rb') as fp:
heatmaps_test = cPickle.load(fp)
heatmaps = np.concatenate((heatmaps_train, heatmaps_test))
# y data (keypoints)
with open(os.path.join(data_dir, 'y_train_221.pickle'), 'rb') as fp:
y_train = cPickle.load(fp)
with open(os.path.join(data_dir, 'y_test_221.pickle'), 'rb') as fp:
y_test = cPickle.load(fp)
y = np.concatenate((y_train, y_test))
# Quantile utilities for processing MERRA/AIRS data
import numpy
import numpy.ma as ma
import calculate_VPD
import netCDF4
from netCDF4 import Dataset
from numpy import random, linalg
import datetime
import pandas
import os, sys
from scipy import stats
import h5py
def quantile_cloud_locmask(airsdr, mtdr, indr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, msk):
# Construct cloud variable quantiles and z-scores, with a possibly irregular location mask
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (airsdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f[msk][:,:]
latmet = f['plat'][:]
lonmet = f['plon'][:]
f.close()
mask[mask <= 0] = 0
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
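    # i.e. restrict to the bounding box of grid cells where the mask is nonzero:
    # lnsm/ltsm count masked points per longitude/latitude, and lnmn:lnmx / ltmn:ltmx
    # below are the first-through-last indices that contain any masked point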
lnsm = numpy.sum(mask,axis=0)
print(lnsq.shape)
print(lnsm.shape)
print(lnsm)
ltsm = numpy.sum(mask,axis=1)
print(ltsq.shape)
print(ltsm.shape)
print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
#latflt = latin.flatten()
#lonflt = lonin.flatten()
#mskflt = mask.flatten()
#lcsq = numpy.arange(mskflt.shape[0])
#lcsb = lcsq[mskflt > 0]
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mask[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
print(tmhld.shape)
print(numpy.amin(tmhld))
print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing_IncludesCloudParams.h5' % (indr,yrlst[k],hrchc)
f = h5py.File(fnm,'r')
ctyp1 = f['/ctype'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f['/ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f['/cprtop'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f['/cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f['/cprbot'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f['/cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc1 = f['/cfrac'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc2 = f['/cfrac2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc12 = f['/cfrac12'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f['/cngwat'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f['/cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f['/cstemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f['/cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(mtnm,'r')
psfc = f.variables['spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = ctyp1.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
ctyp1 = ctyp1.flatten()
ctyp2 = ctyp2.flatten()
cfrc1 = cfrc1.flatten()
cfrc2 = cfrc2.flatten()
cfrc12 = cfrc12.flatten()
cngwt1 = cngwt1.flatten()
cngwt2 = cngwt2.flatten()
cttp1 = cttp1.flatten()
cttp2 = cttp2.flatten()
psfc = psfc.flatten()
# Number of slabs
nslbtmp = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
nslbtmp[(ctyp1 > 100) & (ctyp2 > 100)] = 2
nslbtmp[(ctyp1 > 100) & (ctyp2 < 100)] = 1
if tsmp == 0:
nslabout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
nslabout[:] = nslbtmp[msksb]
else:
nslabout = numpy.append(nslabout,nslbtmp[msksb])
flsq = numpy.arange(ctyp1.shape[0])
# For two slabs, slab 1 must have highest cloud bottom pressure
cprt1 = cprt1.flatten()
cprt2 = cprt2.flatten()
cprb1 = cprb1.flatten()
cprb2 = cprb2.flatten()
slabswap = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
swpsq = flsq[(nslbtmp == 2) & (cprb1 < cprb2)]
slabswap[swpsq] = 1
print(numpy.mean(slabswap))
# Cloud Pressure variables
pbttmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp1[nslbtmp >= 1] = cprb1[nslbtmp >= 1]
pbttmp1[swpsq] = cprb2[swpsq]
ptptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp1[nslbtmp >= 1] = cprt1[nslbtmp >= 1]
ptptmp1[swpsq] = cprt2[swpsq]
pbttmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp2[nslbtmp == 2] = cprb2[nslbtmp == 2]
pbttmp2[swpsq] = cprb1[swpsq]
ptptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp2[nslbtmp == 2] = cprt2[nslbtmp == 2]
ptptmp2[swpsq] = cprt1[swpsq]
# DP Cloud transformation
dptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp1[nslbtmp >= 1] = pbttmp1[nslbtmp >= 1] - ptptmp1[nslbtmp >= 1]
dpslbtmp = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dpslbtmp[nslbtmp == 2] = ptptmp1[nslbtmp == 2] - pbttmp2[nslbtmp == 2]
dptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp2[nslbtmp == 2] = pbttmp2[nslbtmp == 2] - ptptmp2[nslbtmp == 2]
# Adjust negative DPSlab values
dpnsq = flsq[(nslbtmp == 2) & (dpslbtmp < 0.0) & (dpslbtmp > -1000.0)]
dpadj = numpy.zeros((ctyp1.shape[0],))
dpadj[dpnsq] = numpy.absolute(dpslbtmp[dpnsq])
dpslbtmp[dpnsq] = 1.0
dptmp1[dpnsq] = dptmp1[dpnsq] / 2.0
dptmp2[dpnsq] = dptmp2[dpnsq] / 2.0
# Sigma / Logit Adjustments
zpbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp1tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdslbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp2tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
ncldct = 0
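        # For each sample, express the cloud layering as fractions of surface pressure:
        # [column below slab 1 bottom, slab 1 thickness, gap between the slabs,
        #  slab 2 thickness, remainder above slab 2], forced to sum to 1 before
        # calculate_VPD.lgtzs maps the proportions to the z* logit values stored below.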
for t in range(psfc.shape[0]):
if ( (pbttmp1[t] >= 0.0) and (dpslbtmp[t] >= 0.0) ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], dpslbtmp[t] / psfc[t], \
dptmp2[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
prptmp[4] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2] - prptmp[3]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = ztmp[2]
zdp2tmp[t] = ztmp[3]
elif ( pbttmp1[t] >= 0.0 ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
else:
zpbtmp[t] = -9999.0
zdp1tmp[t] = -9999.0
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
str1 = 'Cloud Bot Pres Below Sfc: %d ' % (ncldct)
print(str1)
if tsmp == 0:
psfcout = numpy.zeros((msksb.shape[0],)) - 9999.0
psfcout[:] = psfc[msksb]
prsbot1out = numpy.zeros((msksb.shape[0],)) - 9999.0
prsbot1out[:] = zpbtmp[msksb]
dpcld1out = numpy.zeros((msksb.shape[0],)) - 9999.0
dpcld1out[:] = zdp1tmp[msksb]
dpslbout = numpy.zeros((msksb.shape[0],)) - 9999.0
dpslbout[:] = zdslbtmp[msksb]
dpcld2out = numpy.zeros((msksb.shape[0],)) - 9999.0
dpcld2out[:] = zdp2tmp[msksb]
else:
psfcout = numpy.append(psfcout,psfc[msksb])
prsbot1out = numpy.append(prsbot1out,zpbtmp[msksb])
dpcld1out = numpy.append(dpcld1out,zdp1tmp[msksb])
dpslbout = numpy.append(dpslbout,zdslbtmp[msksb])
dpcld2out = numpy.append(dpcld2out,zdp2tmp[msksb])
# Slab Types: 101.0 = Liquid, 201.0 = Ice, None else
# Output: 0 = Liquid, 1 = Ice
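        # e.g. ctype 101.0 -> (101.0 - 1.0) / 100.0 - 1.0 = 0 (liquid); 201.0 -> 1 (ice)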
typtmp1 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp1[nslbtmp >= 1] = (ctyp1[nslbtmp >= 1] - 1.0) / 100.0 - 1.0
typtmp1[swpsq] = (ctyp2[swpsq] - 1.0) / 100.0 - 1.0
typtmp2 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp2[nslbtmp == 2] = (ctyp2[nslbtmp == 2] - 1.0) / 100.0 - 1.0
typtmp2[swpsq] = (ctyp1[swpsq] - 1.0) / 100.0 - 1.0
if tsmp == 0:
slbtyp1out = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
slbtyp1out[:] = typtmp1[msksb]
slbtyp2out = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
slbtyp2out[:] = typtmp2[msksb]
else:
slbtyp1out = numpy.append(slbtyp1out,typtmp1[msksb])
slbtyp2out = numpy.append(slbtyp2out,typtmp2[msksb])
# Cloud Fraction Logit, still account for swapping
z1tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
z2tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
z12tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
for t in range(z1tmp.shape[0]):
if ( (cfrc1[t] > 0.0) and (cfrc2[t] > 0.0) and (cfrc12[t] > 0.0) ):
# Must adjust amounts
if (slabswap[t] == 0):
prptmp = numpy.array( [cfrc1[t]-cfrc12[t], cfrc2[t]-cfrc12[t], cfrc12[t], 0.0] )
else:
prptmp = numpy.array( [cfrc2[t]-cfrc12[t], cfrc1[t]-cfrc12[t], cfrc12[t], 0.0] )
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = ztmp[1]
z12tmp[t] = ztmp[2]
elif ( (cfrc1[t] > 0.0) and (cfrc2[t] > 0.0) ):
if (slabswap[t] == 0):
prptmp = numpy.array( [cfrc1[t], cfrc2[t], 0.0] )
else:
prptmp = numpy.array( [cfrc2[t], cfrc1[t], 0.0] )
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = ztmp[1]
z12tmp[t] = -9999.0
elif ( cfrc1[t] > 0.0 ):
prptmp = numpy.array( [cfrc1[t], 1.0 - cfrc1[t] ] )
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = -9999.0
z12tmp[t] = -9999.0
else:
z1tmp[t] = -9999.0
z2tmp[t] = -9999.0
z12tmp[t] = -9999.0
if tsmp == 0:
cfclgt1out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt1out[:] = z1tmp[msksb]
cfclgt2out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt2out[:] = z2tmp[msksb]
cfclgt12out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt12out[:] = z12tmp[msksb]
else:
cfclgt1out = numpy.append(cfclgt1out,z1tmp[msksb])
cfclgt2out = numpy.append(cfclgt2out,z2tmp[msksb])
cfclgt12out = numpy.append(cfclgt12out,z12tmp[msksb])
# Cloud Non-Gas Water
ngwttmp1 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp1[nslbtmp >= 1] = cngwt1[nslbtmp >= 1]
ngwttmp1[swpsq] = cngwt2[swpsq]
ngwttmp2 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp2[nslbtmp == 2] = cngwt2[nslbtmp == 2]
ngwttmp2[swpsq] = cngwt1[swpsq]
if tsmp == 0:
ngwt1out = numpy.zeros((msksb.shape[0],)) - 9999.0
ngwt1out[:] = ngwttmp1[msksb]
ngwt2out = numpy.zeros((msksb.shape[0],)) - 9999.0
ngwt2out[:] = ngwttmp2[msksb]
else:
ngwt1out = numpy.append(ngwt1out,ngwttmp1[msksb])
ngwt2out = numpy.append(ngwt2out,ngwttmp2[msksb])
# Cloud Top Temperature
cttptmp1 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp1[nslbtmp >= 1] = cttp1[nslbtmp >= 1]
cttptmp1[swpsq] = cttp2[swpsq]
cttptmp2 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp2[nslbtmp == 2] = cttp2[nslbtmp == 2]
cttptmp2[swpsq] = cttp1[swpsq]
if tsmp == 0:
cttp1out = numpy.zeros((msksb.shape[0],)) - 9999.0
cttp1out[:] = cttptmp1[msksb]
cttp2out = numpy.zeros((msksb.shape[0],)) - 9999.0
cttp2out[:] = cttptmp2[msksb]
else:
cttp1out = numpy.append(cttp1out,cttptmp1[msksb])
cttp2out = numpy.append(cttp2out,cttptmp2[msksb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((msksb.shape[0],)) - 9999.0
latout[:] = lthld[msksb]
lonout = numpy.zeros((msksb.shape[0],)) - 9999.0
lonout[:] = lnhld[msksb]
yrout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[msksb]
else:
latout = numpy.append(latout,lthld[msksb])
lonout = numpy.append(lonout,lnhld[msksb])
yrtmp = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[msksb])
tsmp = tsmp + msksb.shape[0]
# Process quantiles
nslbqs = calculate_VPD.quantile_msgdat_discrete(nslabout,prbs)
str1 = '%.2f Number Slab Quantile: %d' % (prbs[53],nslbqs[53])
print(str1)
print(nslbqs)
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f Surface Pressure Quantile: %.3f' % (prbs[53],psfcqs[53])
print(str1)
prsbt1qs = calculate_VPD.quantile_msgdat(prsbot1out,prbs)
str1 = '%.2f CldBot1 Pressure Quantile: %.3f' % (prbs[53],prsbt1qs[53])
print(str1)
dpcld1qs = calculate_VPD.quantile_msgdat(dpcld1out,prbs)
str1 = '%.2f DPCloud1 Quantile: %.3f' % (prbs[53],dpcld1qs[53])
print(str1)
dpslbqs = calculate_VPD.quantile_msgdat(dpslbout,prbs)
str1 = '%.2f DPSlab Quantile: %.3f' % (prbs[53],dpslbqs[53])
print(str1)
dpcld2qs = calculate_VPD.quantile_msgdat(dpcld2out,prbs)
str1 = '%.2f DPCloud2 Quantile: %.3f' % (prbs[53],dpcld2qs[53])
print(str1)
slb1qs = calculate_VPD.quantile_msgdat_discrete(slbtyp1out,prbs)
str1 = '%.2f Type1 Quantile: %d' % (prbs[53],slb1qs[53])
print(str1)
slb2qs = calculate_VPD.quantile_msgdat_discrete(slbtyp2out,prbs)
str1 = '%.2f Type2 Quantile: %d' % (prbs[53],slb2qs[53])
print(str1)
lgt1qs = calculate_VPD.quantile_msgdat(cfclgt1out,prbs)
str1 = '%.2f Logit 1 Quantile: %.3f' % (prbs[53],lgt1qs[53])
print(str1)
lgt2qs = calculate_VPD.quantile_msgdat(cfclgt2out,prbs)
str1 = '%.2f Logit 2 Quantile: %.3f' % (prbs[53],lgt2qs[53])
print(str1)
lgt12qs = calculate_VPD.quantile_msgdat(cfclgt12out,prbs)
str1 = '%.2f Logit 1/2 Quantile: %.3f' % (prbs[53],lgt12qs[53])
print(str1)
ngwt1qs = calculate_VPD.quantile_msgdat(ngwt1out,prbs)
str1 = '%.2f NGWater1 Quantile: %.3f' % (prbs[53],ngwt1qs[53])
print(str1)
ngwt2qs = calculate_VPD.quantile_msgdat(ngwt2out,prbs)
str1 = '%.2f NGWater2 Quantile: %.3f' % (prbs[53],ngwt2qs[53])
print(str1)
cttp1qs = calculate_VPD.quantile_msgdat(cttp1out,prbs)
str1 = '%.2f CTTemp1 Quantile: %.3f' % (prbs[53],cttp1qs[53])
print(str1)
cttp2qs = calculate_VPD.quantile_msgdat(cttp2out,prbs)
str1 = '%.2f CTTemp2 Quantile: %.3f' % (prbs[53],cttp2qs[53])
print(str1)
# Should be no missing for number of slabs
print('Slab summary')
print(numpy.amin(nslabout))
print(numpy.amax(nslabout))
print(tsmp)
# Output Quantiles
mstr = dyst.strftime('%b')
qfnm = '%s/%s_US_JJA_%02dUTC_%04d_Cloud_Quantile.nc' % (dtdr,rgchc,hrchc,yrlst[k])
qout = Dataset(qfnm,'w')
dimp = qout.createDimension('probability',nprb)
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
varnslb = qout.createVariable('NumberSlab_quantile','i2',['probability'], fill_value = -99)
varnslb[:] = nslbqs
varnslb.long_name = 'Number of cloud slabs quantiles'
varnslb.units = 'Count'
varnslb.missing_value = -99
varcbprs = qout.createVariable('CloudBot1Logit_quantile','f4',['probability'], fill_value = -9999)
varcbprs[:] = prsbt1qs
varcbprs.long_name = 'Slab 1 cloud bottom pressure logit quantiles'
varcbprs.units = 'hPa'
varcbprs.missing_value = -9999
vardpc1 = qout.createVariable('DPCloud1Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc1[:] = dpcld1qs
vardpc1.long_name = 'Slab 1 cloud pressure depth logit quantiles'
vardpc1.units = 'hPa'
vardpc1.missing_value = -9999
vardpslb = qout.createVariable('DPSlabLogit_quantile','f4',['probability'], fill_value = -9999)
vardpslb[:] = dpslbqs
vardpslb.long_name = 'Two-slab vertical separation logit quantiles'
vardpslb.units = 'hPa'
vardpslb.missing_value = -9999
vardpc2 = qout.createVariable('DPCloud2Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc2[:] = dpcld2qs
vardpc2.long_name = 'Slab 2 cloud pressure depth logit quantiles'
vardpc2.units = 'hPa'
vardpc2.missing_value = -9999
vartyp1 = qout.createVariable('CType1_quantile','i2',['probability'], fill_value = -99)
vartyp1[:] = slb1qs
vartyp1.long_name = 'Slab 1 cloud type quantiles'
vartyp1.units = 'None'
vartyp1.missing_value = -99
vartyp1.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
vartyp2 = qout.createVariable('CType2_quantile','i2',['probability'], fill_value = -99)
vartyp2[:] = slb2qs
vartyp2.long_name = 'Slab 2 cloud type quantiles'
vartyp2.units = 'None'
vartyp2.missing_value = -99
vartyp2.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
varlgt1 = qout.createVariable('CFrcLogit1_quantile','f4',['probability'], fill_value = -9999)
varlgt1[:] = lgt1qs
varlgt1.long_name = 'Slab 1 cloud fraction (cfrac1x) logit quantiles'
varlgt1.units = 'None'
varlgt1.missing_value = -9999
varlgt2 = qout.createVariable('CFrcLogit2_quantile','f4',['probability'], fill_value = -9999)
varlgt2[:] = lgt2qs
varlgt2.long_name = 'Slab 2 cloud fraction (cfrac2x) logit quantiles'
varlgt2.units = 'None'
varlgt2.missing_value = -9999
varlgt12 = qout.createVariable('CFrcLogit12_quantile','f4',['probability'], fill_value = -9999)
varlgt12[:] = lgt12qs
varlgt12.long_name = 'Slab 1/2 overlap fraction (cfrac12) logit quantiles'
varlgt12.units = 'None'
varlgt12.missing_value = -9999
varngwt1 = qout.createVariable('NGWater1_quantile','f4',['probability'], fill_value = -9999)
varngwt1[:] = ngwt1qs
varngwt1.long_name = 'Slab 1 cloud non-gas water quantiles'
varngwt1.units = 'g m^-2'
varngwt1.missing_value = -9999
varngwt2 = qout.createVariable('NGWater2_quantile','f4',['probability'], fill_value = -9999)
varngwt2[:] = ngwt2qs
varngwt2.long_name = 'Slab 2 cloud non-gas water quantiles'
varngwt2.units = 'g m^-2'
varngwt2.missing_value = -9999
varcttp1 = qout.createVariable('CTTemp1_quantile','f4',['probability'], fill_value = -9999)
varcttp1[:] = cttp1qs
varcttp1.long_name = 'Slab 1 cloud top temperature'
varcttp1.units = 'K'
varcttp1.missing_value = -9999
varcttp2 = qout.createVariable('CTTemp2_quantile','f4',['probability'], fill_value = -9999)
varcttp2[:] = cttp2qs
varcttp2.long_name = 'Slab 2 cloud top temperature'
varcttp2.units = 'K'
varcttp2.missing_value = -9999
qout.close()
# Set up transformations
znslb = calculate_VPD.std_norm_quantile_from_obs(nslabout, nslbqs, prbs, msgval=-99)
zpsfc = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
zprsbt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(prsbot1out, prsbt1qs, prbs, msgval=-9999.)
zdpcld1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld1out, dpcld1qs, prbs, msgval=-9999.)
zdpslb = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpslbout, dpslbqs, prbs, msgval=-9999.)
zdpcld2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld2out, dpcld2qs, prbs, msgval=-9999.)
zctyp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp1out, slb1qs, prbs, msgval=-99)
zctyp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp2out, slb2qs, prbs, msgval=-99)
zlgt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt1out, lgt1qs, prbs, msgval=-9999.)
zlgt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt2out, lgt2qs, prbs, msgval=-9999.)
zlgt12 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt12out, lgt12qs, prbs, msgval=-9999.)
zngwt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt1out, ngwt1qs, prbs, msgval=-9999.)
zngwt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt2out, ngwt2qs, prbs, msgval=-9999.)
zcttp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp1out, cttp1qs, prbs, msgval=-9999.)
zcttp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp2out, cttp2qs, prbs, msgval=-9999.)
# Output transformed quantile samples
zfnm = '%s/%s_US_JJA_%02dUTC_%04d_Cloud_StdGausTrans.nc' % (dtdr,rgchc,hrchc,yrlst[k])
zout = Dataset(zfnm,'w')
dimsmp = zout.createDimension('sample',tsmp)
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varnslb = zout.createVariable('NumberSlab_StdGaus','f4',['sample'], fill_value = -9999)
varnslb[:] = znslb
varnslb.long_name = 'Quantile transformed number of cloud slabs'
varnslb.units = 'None'
varnslb.missing_value = -9999.
varcbprs = zout.createVariable('CloudBot1Logit_StdGaus','f4',['sample'], fill_value = -9999)
varcbprs[:] = zprsbt1
varcbprs.long_name = 'Quantile transformed slab 1 cloud bottom pressure logit'
varcbprs.units = 'None'
varcbprs.missing_value = -9999.
vardpc1 = zout.createVariable('DPCloud1Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc1[:] = zdpcld1
vardpc1.long_name = 'Quantile transformed slab 1 cloud pressure depth logit'
vardpc1.units = 'None'
vardpc1.missing_value = -9999.
vardpslb = zout.createVariable('DPSlabLogit_StdGaus','f4',['sample'], fill_value = -9999)
vardpslb[:] = zdpslb
vardpslb.long_name = 'Quantile transformed two-slab vertical separation logit'
vardpslb.units = 'None'
vardpslb.missing_value = -9999.
vardpc2 = zout.createVariable('DPCloud2Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc2[:] = zdpcld2
vardpc2.long_name = 'Quantile transformed slab 2 cloud pressure depth logit'
vardpc2.units = 'None'
vardpc2.missing_value = -9999.
vartyp1 = zout.createVariable('CType1_StdGaus','f4',['sample'], fill_value = -9999)
vartyp1[:] = zctyp1
vartyp1.long_name = 'Quantile transformed slab 1 cloud type logit'
vartyp1.units = 'None'
vartyp1.missing_value = -9999.
vartyp2 = zout.createVariable('CType2_StdGaus','f4',['sample'], fill_value = -9999)
vartyp2[:] = zctyp2
vartyp2.long_name = 'Quantile transformed slab 2 cloud type'
vartyp2.units = 'None'
vartyp2.missing_value = -9999.
varlgt1 = zout.createVariable('CFrcLogit1_StdGaus','f4',['sample'], fill_value = -9999)
varlgt1[:] = zlgt1
varlgt1.long_name = 'Quantile transformed slab 1 cloud fraction logit'
varlgt1.units = 'None'
varlgt1.missing_value = -9999.
varlgt2 = zout.createVariable('CFrcLogit2_StdGaus','f4',['sample'], fill_value = -9999)
varlgt2[:] = zlgt2
varlgt2.long_name = 'Quantile transformed slab 2 cloud fraction logit'
varlgt2.units = 'None'
varlgt2.missing_value = -9999.
varlgt12 = zout.createVariable('CFrcLogit12_StdGaus','f4',['sample'], fill_value = -9999)
varlgt12[:] = zlgt12
varlgt12.long_name = 'Quantile transformed slab 1/2 overlap fraction logit'
varlgt12.units = 'None'
varlgt12.missing_value = -9999.
varngwt1 = zout.createVariable('NGWater1_StdGaus','f4',['sample'], fill_value = -9999)
varngwt1[:] = zngwt1
varngwt1.long_name = 'Quantile transformed slab 1 non-gas water'
varngwt1.units = 'None'
varngwt1.missing_value = -9999.
varngwt2 = zout.createVariable('NGWater2_StdGaus','f4',['sample'], fill_value = -9999)
varngwt2[:] = zngwt2
varngwt2.long_name = 'Quantile transformed slab 2 non-gas water'
varngwt2.units = 'None'
varngwt2.missing_value = -9999.
varcttp1 = zout.createVariable('CTTemp1_StdGaus','f4',['sample'], fill_value = -9999)
varcttp1[:] = zcttp1
varcttp1.long_name = 'Quantile transformed slab 1 cloud top temperature'
varcttp1.units = 'None'
varcttp1.missing_value = -9999.
varcttp2 = zout.createVariable('CTTemp2_StdGaus','f4',['sample'], fill_value = -9999)
varcttp2[:] = zcttp2
varcttp2.long_name = 'Quantile transformed slab 2 cloud top temperature'
varcttp2.units = 'None'
varcttp2.missing_value = -9999.
zout.close()
return
# Temp/RH Quantiles
def quantile_profile_locmask(airsdr, mtdr, indr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, msk):
# Construct profile/sfc variable quantiles and z-scores, with a possibly irregular location mask
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (airsdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
nzout = 101
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f[msk][:,:]
latmet = f['plat'][:]
lonmet = f['plon'][:]
f.close()
mask[mask <= 0] = 0
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
lnsm = numpy.sum(mask,axis=0)
print(lnsq.shape)
print(lnsm.shape)
print(lnsm)
ltsm = numpy.sum(mask,axis=1)
print(ltsq.shape)
print(ltsm.shape)
print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mask[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
tmhld = numpy.repeat(jdsq,nx*ny)
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = h5py.File(mtnm,'r')
stparr = f['/stemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
psfarr = f['/spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
salarr = f['/salti'][ltmn:ltmx,lnmn:lnmx]
f.close()
nt = psfarr.shape[0]
msksq1 = numpy.arange(mskflt.shape[0])
msksb1 = msksq1[mskflt > 0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
stparr = stparr.flatten()
psfarr = psfarr.flatten()
salarr = salarr.flatten()
if tsmp == 0:
sftmpout = numpy.zeros((msksb.shape[0],)) - 9999.0
sftmpout[:] = stparr[msksb]
psfcout = numpy.zeros((msksb.shape[0],)) - 9999.0
psfcout[:] = psfarr[msksb]
sfaltout = numpy.zeros((msksb.shape[0],)) - 9999.0
sfaltout[:] = numpy.tile(salarr[msksb1],nt)
else:
sftmpout = numpy.append(sftmpout,stparr[msksb])
psfcout = numpy.append(psfcout,psfarr[msksb])
sfaltout = numpy.append(sfaltout,numpy.tile(salarr[msksb1],nt))
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((msksb.shape[0],)) - 9999.0
latout[:] = lthld[msksb]
lonout = numpy.zeros((msksb.shape[0],)) - 9999.0
lonout[:] = lnhld[msksb]
yrout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[msksb]
else:
latout = numpy.append(latout,lthld[msksb])
lonout = numpy.append(lonout,lnhld[msksb])
yrtmp = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[msksb])
tsmp = tsmp + msksb.shape[0]
# Vertical profiles
tmpmerout = numpy.zeros((tsmp,nzout)) - 9999.
h2omerout = numpy.zeros((tsmp,nzout)) - 9999.
altout = numpy.zeros((tsmp,nzout)) - 9999.
sidx = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
tmhld = numpy.repeat(jdsq,nx*ny)
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = h5py.File(mtnm,'r')
tmparr = f['/ptemp'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
h2oarr = f['/rh'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
altarr = f['/palts'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = tmparr.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
fidx = sidx + msksb.shape[0]
for j in range(nzout):
tmpvec = tmparr[:,j,:,:].flatten()
tmpvec[tmpvec > 1e30] = -9999.
tmpmerout[sidx:fidx,j] = tmpvec[msksb]
altvec = altarr[:,j,:,:].flatten()
altout[sidx:fidx,j] = altvec[msksb]
h2ovec = h2oarr[:,j,:,:].flatten()
h2ovec[h2ovec > 1e30] = -9999.
h2omerout[sidx:fidx,j] = h2ovec[msksb]
sidx = sidx + msksb.shape[0]
# Quantiles
ztmpout = numpy.zeros((tsmp,nzout)) - 9999.
zrhout = numpy.zeros((tsmp,nzout)) - 9999.
zsftmpout = numpy.zeros((tsmp,)) - 9999.
zsfaltout = numpy.zeros((tsmp,)) - 9999.
zpsfcout = numpy.zeros((tsmp,)) - 9999.
for j in range(nzout):
tmptmp = calculate_VPD.quantile_msgdat(tmpmerout[:,j],prbs)
tmpqout[j,:] = tmptmp[:]
str1 = 'Plev %.2f, %.2f Temp Quantile: %.3f' % (plev[j],prbs[103],tmptmp[103])
print(str1)
# Transform
ztmp = calculate_VPD.std_norm_quantile_from_obs(tmpmerout[:,j], tmptmp, prbs, msgval=-9999.)
ztmpout[:,j] = ztmp[:]
alttmp = calculate_VPD.quantile_msgdat(altout[:,j],prbs)
altmed[j] = alttmp[103]
str1 = 'Plev %.2f, %.2f Alt Quantile: %.3f' % (plev[j],prbs[103],alttmp[103])
print(str1)
        # Clip RH above 100 % (RH is stored as a fraction, so cap it at 1.0)
rhadj = h2omerout[:,j]
rhadj[rhadj > 1.0] = 1.0
rhqtmp = calculate_VPD.quantile_msgdat(rhadj,prbs)
rhqout[j,:] = rhqtmp[:]
str1 = 'Plev %.2f, %.2f RH Quantile: %.4f' % (plev[j],prbs[103],rhqtmp[103])
print(str1)
zrh = calculate_VPD.std_norm_quantile_from_obs(rhadj, rhqtmp, prbs, msgval=-9999.)
zrhout[:,j] = zrh[:]
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f PSfc Quantile: %.2f' % (prbs[103],psfcqs[103])
print(str1)
zpsfcout = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
sftpqs = calculate_VPD.quantile_msgdat(sftmpout,prbs)
str1 = '%.2f SfcTmp Quantile: %.2f' % (prbs[103],sftpqs[103])
print(str1)
zsftmpout = calculate_VPD.std_norm_quantile_from_obs(sftmpout, sftpqs, prbs, msgval=-9999.)
sfalqs = calculate_VPD.quantile_msgdat(sfaltout,prbs)
str1 = '%.2f SfcAlt Quantile: %.2f' % (prbs[103],sfalqs[103])
print(str1)
zsfaltout = calculate_VPD.std_norm_quantile_from_obs(sfaltout, sfalqs, prbs, msgval=-9999.)
# Output Quantiles
mstr = dyst.strftime('%b')
qfnm = '%s/%s_US_JJA_%02dUTC_%04d_TempRHSfc_Quantile.nc' % (dtdr,rgchc,hrchc,yrlst[k])
qout = Dataset(qfnm,'w')
dimz = qout.createDimension('level',nzout)
dimp = qout.createDimension('probability',nprb)
varlvl = qout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
# Altitude grid
varalt = qout.createVariable('Altitude_median', 'f4', ['level'], fill_value = -9999)
varalt[:] = altmed
varalt.long_name = 'Altitude median value'
varalt.units = 'm'
varalt.missing_value = -9999
vartmp = qout.createVariable('Temperature_quantile', 'f4', ['level','probability'], fill_value = -9999)
vartmp[:] = tmpqout
vartmp.long_name = 'Temperature quantiles'
vartmp.units = 'K'
vartmp.missing_value = -9999.
varrh = qout.createVariable('RH_quantile', 'f4', ['level','probability'], fill_value = -9999)
varrh[:] = rhqout
varrh.long_name = 'Relative humidity quantiles'
varrh.units = 'Unitless'
varrh.missing_value = -9999.
varstmp = qout.createVariable('SfcTemp_quantile', 'f4', ['probability'], fill_value = -9999)
varstmp[:] = sftpqs
varstmp.long_name = 'Surface temperature quantiles'
varstmp.units = 'K'
varstmp.missing_value = -9999.
varpsfc = qout.createVariable('SfcPres_quantile', 'f4', ['probability'], fill_value = -9999)
varpsfc[:] = psfcqs
varpsfc.long_name = 'Surface pressure quantiles'
varpsfc.units = 'hPa'
varpsfc.missing_value = -9999.
varsalt = qout.createVariable('SfcAlt_quantile', 'f4', ['probability'], fill_value = -9999)
varsalt[:] = sfalqs
varsalt.long_name = 'Surface altitude quantiles'
varsalt.units = 'm'
varsalt.missing_value = -9999.
qout.close()
# Output transformed quantile samples
zfnm = '%s/%s_US_JJA_%02dUTC_%04d_TempRHSfc_StdGausTrans.nc' % (dtdr,rgchc,hrchc,yrlst[k])
zout = Dataset(zfnm,'w')
dimz = zout.createDimension('level',nzout)
dimsmp = zout.createDimension('sample',tsmp)
varlvl = zout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varsrt3 = zout.createVariable('Temperature_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt3[:] = ztmpout
varsrt3.long_name = 'Quantile transformed temperature'
varsrt3.units = 'None'
varsrt3.missing_value = -9999.
varsrt4 = zout.createVariable('RH_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt4[:] = zrhout
varsrt4.long_name = 'Quantile transformed relative humidity'
varsrt4.units = 'None'
varsrt4.missing_value = -9999.
varsrts1 = zout.createVariable('SfcTemp_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts1[:] = zsftmpout
varsrts1.long_name = 'Quantile transformed surface temperature'
varsrts1.units = 'None'
varsrts1.missing_value = -9999.
varsrts2 = zout.createVariable('SfcPres_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts2[:] = zpsfcout
varsrts2.long_name = 'Quantile transformed surface pressure'
varsrts2.units = 'None'
varsrts2.missing_value = -9999.
varsrts3 = zout.createVariable('SfcAlt_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts3[:] = zsfaltout
varsrts3.long_name = 'Quantile transformed surface altitude'
varsrts3.units = 'None'
varsrts3.missing_value = -9999.
zout.close()
return
def expt_near_sfc_summary(inpdr, outdr, expfl, qclrfl, outfnm):
# Produce experiment near-surface summaries
# inpdr: Name of input directory
# outdr: Name of output directory
# expfl: Name of file with experiment results
# qclrfl: Input quantile file
# outfnm: Output file name
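# Illustrative call (all paths and file names below are assumptions, not shipped data):
# expt_near_sfc_summary(inpdr='/data/in', outdr='/data/out',
#                       expfl='/data/in/sim_retrieval_results.h5',
#                       qclrfl='/data/in/US_JJA_00UTC_2015_TempRHSfc_Quantile.nc',
#                       outfnm='/data/out/Sfc_UQ_Output.h5')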
nzairs = 100
nzsrt = 101
# Read simulation results
f = h5py.File(expfl,'r')
tmprtr = f['airs_ptemp'][:,:]
h2ortr = f['airs_h2o'][:,:]
tqflg = f['airs_ptemp_qc'][:,:]
hqflg = f['airs_h2o_qc'][:,:]
tmpsrt = f['ptemp'][:,1:nzsrt]
h2osrt = f['gas_1'][:,1:nzsrt]
psfc = f['spres'][:]
lvs = f['level'][1:nzsrt]
f.close()
nszout = tmprtr.shape[0]
tqflg = tqflg.astype(numpy.int16)
hqflg = hqflg.astype(numpy.int16)
# Altitude info
qin = Dataset(qclrfl,'r')
alts = qin['Altitude_median'][:]
qin.close()
alth2o = numpy.zeros((nszout,nzsrt))
alth2o[:,nzsrt-4] = alts[nzsrt-4]
curdlt = 0.0
for j in range(nzsrt-5,-1,-1):
#str1 = 'Level %d: %.4f' % (j,curdlt)
#print(str1)
if (alts[j] > alts[j+1]):
curdlt = alts[j] - alts[j+1]
alth2o[:,j] = alts[j]
else:
alth2o[:,j] = alts[j+1] + curdlt * 2.0
curdlt = curdlt * 2.0
alth2o[:,97] = 0.0
tsfcsrt = calculate_VPD.near_sfc_temp(tmpsrt, lvs, psfc, passqual = False, qual = None)
print(tsfcsrt[0:10])
tsfcrtr, tqflgsfc = calculate_VPD.near_sfc_temp(tmprtr, lvs, psfc, passqual = True, qual = tqflg)
print(tsfcrtr[0:10])
print(tqflgsfc[0:10])
qvsrt, rhsrt, vpdsrt = calculate_VPD.calculate_QV_and_VPD(h2osrt,tmpsrt,lvs,alth2o[:,1:nzsrt])
qvrtr, rhrtr, vpdrtr = calculate_VPD.calculate_QV_and_VPD(h2ortr,tmprtr,lvs,alth2o[:,1:nzsrt])
qsfsrt, rhsfsrt = calculate_VPD.near_sfc_qv_rh(qvsrt, tsfcsrt, lvs, psfc, passqual = False, qual = None)
qsfrtr, rhsfrtr, qflgsfc = calculate_VPD.near_sfc_qv_rh(qvrtr, tsfcrtr, lvs, psfc, passqual = True, qual = hqflg)
print(tqflgsfc.dtype)
print(qflgsfc.dtype)
# Output: Sfc Temp and qflg, Sfc QV, RH and qflg
fldbl = numpy.array([-9999.],dtype=numpy.float64)
flflt = numpy.array([-9999.],dtype=numpy.float32)
flshrt = numpy.array([-99],dtype=numpy.int16)
#outfnm = '%s/MAGIC_%s_%s_%02dUTC_SR%02d_Sfc_UQ_Output.h5' % (outdr,rgchc,mnchc,hrchc,scnrw)
f = h5py.File(outfnm,'w')
dft1 = f.create_dataset('TSfcAir_True',data=tsfcsrt)
dft1.attrs['missing_value'] = fldbl
dft1.attrs['_FillValue'] = fldbl
dft2 = f.create_dataset('TSfcAir_Retrieved',data=tsfcrtr)
dft2.attrs['missing_value'] = fldbl
dft2.attrs['_FillValue'] = fldbl
dft3 = f.create_dataset('TSfcAir_QC',data=tqflgsfc)
dfq1 = f.create_dataset('QVSfcAir_True',data=qsfsrt)
dfq1.attrs['missing_value'] = fldbl
dfq1.attrs['_FillValue'] = fldbl
dfq2 = f.create_dataset('QVSfcAir_Retrieved',data=qsfrtr)
dfq2.attrs['missing_value'] = fldbl
dfq2.attrs['_FillValue'] = fldbl
dfq3 = f.create_dataset('RHSfcAir_True',data=rhsfsrt)
dfq3.attrs['missing_value'] = fldbl
dfq3.attrs['_FillValue'] = fldbl
dfq4 = f.create_dataset('RHSfcAir_Retrieved',data=rhsfrtr)
dfq4.attrs['missing_value'] = fldbl
dfq4.attrs['_FillValue'] = fldbl
dfq5 = f.create_dataset('RHSfcAir_QC',data=qflgsfc)
dfp1 = f.create_dataset('SfcPres',data=psfc)
dfp1.attrs['missing_value'] = fldbl
dfp1.attrs['_FillValue'] = fldbl
f.close()
return
def quantile_cfrac_locmask_conus(rfdr, mtdr, csdr, airdr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, mskvr, mskvl):
# Construct cloud variable quantiles and z-scores, with a possibly irregular location mask
# rfdr: Directory for reference data (Levels/Quantiles)
# mtdr: Directory for MERRA data
# csdr: Directory for cloud slab data
# airdr: Directory for AIRS cloud fraction
# dtdr: Output directory
# yrlst: List of years to process
# mnst: Starting Month
# mnfn: Ending Month
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# mskvr: Name of region mask variable
# mskvl: Value of region mask for Region Choice
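# Illustrative call (directory names, years, and mask settings are assumptions):
# quantile_cfrac_locmask_conus(rfdr='/ref', mtdr='/merra', csdr='/cloudslab', airdr='/airs',
#                              dtdr='/out', yrlst=[2015,2016], mnst=6, mnfn=8,
#                              hrchc=0, rgchc='SGP', mskvr='NCA_mask', mskvl=4)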
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
# RN generator
sdchc = 542354 + yrlst[0] + hrchc
random.seed(sdchc)
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f.variables[mskvr][:,:]
latmet = f.variables['plat'][:]
lonmet = f.variables['plon'][:]
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
print(mskvl)
mskind[mask == mskvl] = 1
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
lnsm = numpy.sum(mskind,axis=0)
#print(lnsq.shape)
#print(lnsm.shape)
#print(lnsm)
ltsm = numpy.sum(mskind,axis=1)
#print(ltsq.shape)
#print(ltsm.shape)
#print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mskind[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(fnm,'r')
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
tmunit = tmunit.replace("days since ","")
dybs = datetime.datetime.strptime(tmunit,"%Y-%m-%d %H:%M:%S")
print(dybs)
dy0 = dybs + datetime.timedelta(days=tminf[0])
dyinit = datetime.date(dy0.year,dy0.month,dy0.day)
print(dyinit)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
#print(tmhld.shape)
#print(numpy.amin(tmhld))
#print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing_IncludesCloudParams.h5' % (csdr,yrlst[k],hrchc)
f = h5py.File(fnm,'r')
tms = f['/time'][:,dystidx:dyfnidx]
ctyp1 = f['/ctype'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f['/ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f['/cprtop'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f['/cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f['/cprbot'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f['/cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc1 = f['/cfrac'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc2 = f['/cfrac2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc12 = f['/cfrac12'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f['/cngwat'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f['/cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f['/cstemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f['/cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
tmflt = tms.flatten()
nt = tmflt.shape[0]
lnhld = numpy.tile(lnrp,nt)
lthld = numpy.tile(ltrp,nt)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(mtnm,'r')
psfc = f.variables['spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = ctyp1.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
# lthld = numpy.tile(ltrp,nt)
# lnhld = numpy.tile(lnrp,nt)
nslbtmp = numpy.zeros((ctyp1.shape),dtype=numpy.int16)
nslbtmp[(ctyp1 > 100) & (ctyp2 > 100)] = 2
nslbtmp[(ctyp1 > 100) & (ctyp2 < 100)] = 1
# AIRS clouds
anm = '%s/CONUS_AIRS_CldFrc_Match_JJA_%d_%02d_UTC.nc' % (airdr,yrlst[k],hrchc)
f = Dataset(anm,'r')
arsfrc1 = f.variables['AIRS_CldFrac_1'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
arsfrc2 = f.variables['AIRS_CldFrac_2'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
# Sum
frctot = arsfrc1 + arsfrc2
# Construct Clr/PC/Ovc indicator for AIRS total cloud frac
totclr = numpy.zeros(frctot.shape,dtype=numpy.int16)
totclr[frctot == 0.0] = -1
totclr[frctot == 1.0] = 1
totclr = ma.masked_array(totclr, mask = frctot.mask)
frc0 = frctot[0,:,:,:]
frc0 = frc0.flatten()
frcsq = numpy.arange(tmhld.shape[0])
# Subset by AIRS matchup and location masks
frcsb = frcsq[(numpy.logical_not(frc0.mask)) & (mskall > 0)]
nairs = frcsb.shape[0]
print(tmhld.shape)
print(frcsb.shape)
ctyp1 = ctyp1.flatten()
ctyp2 = ctyp2.flatten()
nslbtmp = nslbtmp.flatten()
cngwt1 = cngwt1.flatten()
cngwt2 = cngwt2.flatten()
cttp1 = cttp1.flatten()
cttp2 = cttp2.flatten()
psfc = psfc.flatten()
# Number of slabs
if tsmp == 0:
nslabout = numpy.zeros((nairs,),dtype=numpy.int16)
nslabout[:] = nslbtmp[frcsb]
else:
nslabout = numpy.append(nslabout,nslbtmp[frcsb])
# For two slabs, slab 1 must have highest cloud bottom pressure
cprt1 = cprt1.flatten()
cprt2 = cprt2.flatten()
cprb1 = cprb1.flatten()
cprb2 = cprb2.flatten()
slabswap = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
swpsq = frcsq[(nslbtmp == 2) & (cprb1 < cprb2)]
slabswap[swpsq] = 1
# Cloud Pressure variables
pbttmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp1[nslbtmp >= 1] = cprb1[nslbtmp >= 1]
pbttmp1[swpsq] = cprb2[swpsq]
ptptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp1[nslbtmp >= 1] = cprt1[nslbtmp >= 1]
ptptmp1[swpsq] = cprt2[swpsq]
pbttmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp2[nslbtmp == 2] = cprb2[nslbtmp == 2]
pbttmp2[swpsq] = cprb1[swpsq]
ptptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp2[nslbtmp == 2] = cprt2[nslbtmp == 2]
ptptmp2[swpsq] = cprt1[swpsq]
# DP Cloud transformation
dptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp1[nslbtmp >= 1] = pbttmp1[nslbtmp >= 1] - ptptmp1[nslbtmp >= 1]
dpslbtmp = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dpslbtmp[nslbtmp == 2] = ptptmp1[nslbtmp == 2] - pbttmp2[nslbtmp == 2]
dptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp2[nslbtmp == 2] = pbttmp2[nslbtmp == 2] - ptptmp2[nslbtmp == 2]
# Adjust negative DPSlab values
dpnsq = frcsq[(nslbtmp == 2) & (dpslbtmp < 0.0) & (dpslbtmp > -1000.0)]
dpadj = numpy.zeros((ctyp1.shape[0],))
dpadj[dpnsq] = numpy.absolute(dpslbtmp[dpnsq])
dpslbtmp[dpnsq] = 1.0
dptmp1[dpnsq] = dptmp1[dpnsq] / 2.0
dptmp2[dpnsq] = dptmp2[dpnsq] / 2.0
# Sigma / Logit Adjustments
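# The block below expresses the cloud pressure layout as proportions of surface pressure
# (clear air below slab 1, slab 1 depth, inter-slab gap, slab 2 depth, remainder above),
# which sum to 1; calculate_VPD.lgtzs then maps those proportions to logits.
# Minimal sketch with illustrative numbers (not taken from the data):
#   psfc = 1000.; pbot1 = 900.; dp1 = 150.; dpslab = 200.; dp2 = 100.
#   prp = [(psfc - pbot1)/psfc, dp1/psfc, dpslab/psfc, dp2/psfc]   # [0.1, 0.15, 0.2, 0.1]
#   prp.append(1.0 - sum(prp))                                     # remainder 0.45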
zpbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp1tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdslbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp2tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
ncldct = 0
for t in range(psfc.shape[0]):
if ( (pbttmp1[t] >= 0.0) and (dpslbtmp[t] >= 0.0) ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], dpslbtmp[t] / psfc[t], \
dptmp2[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
prptmp[4] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2] - prptmp[3]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = ztmp[2]
zdp2tmp[t] = ztmp[3]
elif ( pbttmp1[t] >= 0.0 ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
else:
zpbtmp[t] = -9999.0
zdp1tmp[t] = -9999.0
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
str1 = 'Cloud Bot Pres Below Sfc: %d ' % (ncldct)
print(str1)
if tsmp == 0:
psfcout = numpy.zeros((frcsb.shape[0],)) - 9999.0
psfcout[:] = psfc[frcsb]
prsbot1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
prsbot1out[:] = zpbtmp[frcsb]
dpcld1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld1out[:] = zdp1tmp[frcsb]
dpslbout = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpslbout[:] = zdslbtmp[frcsb]
dpcld2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld2out[:] = zdp2tmp[frcsb]
else:
psfcout = numpy.append(psfcout,psfc[frcsb])
prsbot1out = numpy.append(prsbot1out,zpbtmp[frcsb])
dpcld1out = numpy.append(dpcld1out,zdp1tmp[frcsb])
dpslbout = numpy.append(dpslbout,zdslbtmp[frcsb])
dpcld2out = numpy.append(dpcld2out,zdp2tmp[frcsb])
# Slab Types: 101.0 = Liquid, 201.0 = Ice, None else
# Output: 0 = Liquid, 1 = Ice
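# Worked example of the mapping used below: liquid ctype 101.0 -> (101-1)/100 - 1 = 0,
# ice ctype 201.0 -> (201-1)/100 - 1 = 1 (stored as int16).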
typtmp1 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp1[nslbtmp >= 1] = (ctyp1[nslbtmp >= 1] - 1.0) / 100.0 - 1.0
typtmp1[swpsq] = (ctyp2[swpsq] - 1.0) / 100.0 - 1.0
typtmp2 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp2[nslbtmp == 2] = (ctyp2[nslbtmp == 2] - 1.0) / 100.0 - 1.0
typtmp2[swpsq] = (ctyp1[swpsq] - 1.0) / 100.0 - 1.0
if tsmp == 0:
slbtyp1out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp1out[:] = typtmp1[frcsb]
slbtyp2out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp2out[:] = typtmp2[frcsb]
else:
slbtyp1out = numpy.append(slbtyp1out,typtmp1[frcsb])
slbtyp2out = numpy.append(slbtyp2out,typtmp2[frcsb])
# Cloud Cover Indicators
totclrtmp = numpy.zeros((frcsb.shape[0],3,3),dtype=numpy.int16)
cctr = 0
for frw in range(3):
for fcl in range(3):
clrvec = totclr[cctr,:,:,:].flatten()
totclrtmp[:,frw,fcl] = clrvec[frcsb]
cctr = cctr + 1
if tsmp == 0:
totclrout = numpy.zeros(totclrtmp.shape,dtype=numpy.int16)
totclrout[:,:,:] = totclrtmp
else:
totclrout = numpy.append(totclrout,totclrtmp,axis=0)
# Cloud Fraction Logit, still account for swapping
z1tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z2tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z12tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
# Cloud Fraction
cctr = 0
for frw in range(3):
for fcl in range(3):
frcvect = frctot[cctr,:,:,:].flatten()
frcvec1 = arsfrc1[cctr,:,:,:].flatten()
frcvec2 = arsfrc2[cctr,:,:,:].flatten()
# Quick fix for totals over 1.0: rescale components so the combined fraction is capped at 1
fvsq = numpy.arange(frcvect.shape[0])
fvsq2 = fvsq[frcvect > 1.0]
frcvec1[fvsq2] = frcvec1[fvsq2] / frcvect[fvsq2]
frcvec2[fvsq2] = frcvec2[fvsq2] / frcvect[fvsq2]
frcvect[fvsq2] = 1.0
for t in range(nairs):
crslb = nslbtmp[frcsb[t]]
crclr = totclrtmp[t,frw,fcl]
if ( (crslb == 0) or (crclr == -1) ):
z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 1) ):
z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 0) ):
prptmp = numpy.array( [frcvect[frcsb[t]], 1.0 - frcvect[frcsb[t]] ] )
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
# For 2 slabs, recall AIRS cloud layers are ordered upper/lower while ours are the opposite
# Also apply random overlap and adjust AIRS zero values
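# Sketch of the random-overlap construction below (illustrative numbers only):
#   frcs = [0.4, 0.3]; c12 drawn from Uniform(0, min(frcs))
#   prp  = [frcs[0] - c12*frcs[1], frcs[1] - c12*frcs[0], c12, remainder]
# so the slab-only fractions, the overlap, and the clear remainder sum to 1 before lgtzs.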
elif ( (crslb == 2) and (crclr == 0) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if (numpy.sum(frcs) < 0.01):
frcs[0] = 0.005
frcs[1] = 0.005
elif frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [frcs[0] - c12tmp[0]*frcs[1], \
frcs[1] - c12tmp[0]*frcs[0], c12tmp[0], 0.0])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[1]
z12tmp[t,frw,fcl] = ztmp[2]
elif ( (crslb == 2) and (crclr == 1) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [0.999 * (frcs[0] - c12tmp[0]*frcs[1]), \
0.999 * (frcs[1] - c12tmp[0]*frcs[0]), 0.999 * c12tmp[0], 0.001])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[1]
z12tmp[t,frw,fcl] = ztmp[2]
cctr = cctr + 1
if tsmp == 0:
cfclgt1out = numpy.zeros(z1tmp.shape)
cfclgt1out[:,:,:] = z1tmp
cfclgt2out = numpy.zeros(z2tmp.shape)
cfclgt2out[:,:,:] = z2tmp
cfclgt12out = numpy.zeros(z12tmp.shape)
cfclgt12out[:,:,:] = z12tmp
else:
cfclgt1out = numpy.append(cfclgt1out,z1tmp,axis=0)
cfclgt2out = numpy.append(cfclgt2out,z2tmp,axis=0)
cfclgt12out = numpy.append(cfclgt12out,z12tmp,axis=0)
# Cloud Non-Gas Water
ngwttmp1 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp1[nslbtmp >= 1] = cngwt1[nslbtmp >= 1]
ngwttmp1[swpsq] = cngwt2[swpsq]
ngwttmp2 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp2[nslbtmp == 2] = cngwt2[nslbtmp == 2]
ngwttmp2[swpsq] = cngwt1[swpsq]
if tsmp == 0:
ngwt1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt1out[:] = ngwttmp1[frcsb]
ngwt2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt2out[:] = ngwttmp2[frcsb]
else:
ngwt1out = numpy.append(ngwt1out,ngwttmp1[frcsb])
ngwt2out = numpy.append(ngwt2out,ngwttmp2[frcsb])
# Cloud Top Temperature
cttptmp1 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp1[nslbtmp >= 1] = cttp1[nslbtmp >= 1]
cttptmp1[swpsq] = cttp2[swpsq]
cttptmp2 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp2[nslbtmp == 2] = cttp2[nslbtmp == 2]
cttptmp2[swpsq] = cttp1[swpsq]
if tsmp == 0:
cttp1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp1out[:] = cttptmp1[frcsb]
cttp2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp2out[:] = cttptmp2[frcsb]
else:
cttp1out = numpy.append(cttp1out,cttptmp1[frcsb])
cttp2out = numpy.append(cttp2out,cttptmp2[frcsb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((frcsb.shape[0],)) - 9999.0
latout[:] = lthld[frcsb]
lonout = numpy.zeros((frcsb.shape[0],)) - 9999.0
lonout[:] = lnhld[frcsb]
yrout = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[frcsb]
else:
latout = numpy.append(latout,lthld[frcsb])
lonout = numpy.append(lonout,lnhld[frcsb])
yrtmp = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[frcsb])
tsmp = tsmp + nairs
# Process quantiles
nslbqs = calculate_VPD.quantile_msgdat_discrete(nslabout,prbs)
str1 = '%.2f Number Slab Quantile: %d' % (prbs[103],nslbqs[103])
print(str1)
print(nslbqs)
# psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
# str1 = '%.2f Surface Pressure Quantile: %.3f' % (prbs[53],psfcqs[53])
# print(str1)
prsbt1qs = calculate_VPD.quantile_msgdat(prsbot1out,prbs)
str1 = '%.2f CldBot1 Pressure Quantile: %.3f' % (prbs[103],prsbt1qs[103])
print(str1)
dpcld1qs = calculate_VPD.quantile_msgdat(dpcld1out,prbs)
str1 = '%.2f DPCloud1 Quantile: %.3f' % (prbs[103],dpcld1qs[103])
print(str1)
dpslbqs = calculate_VPD.quantile_msgdat(dpslbout,prbs)
str1 = '%.2f DPSlab Quantile: %.3f' % (prbs[103],dpslbqs[103])
print(str1)
dpcld2qs = calculate_VPD.quantile_msgdat(dpcld2out,prbs)
str1 = '%.2f DPCloud2 Quantile: %.3f' % (prbs[103],dpcld2qs[103])
print(str1)
slb1qs = calculate_VPD.quantile_msgdat_discrete(slbtyp1out,prbs)
str1 = '%.2f Type1 Quantile: %d' % (prbs[103],slb1qs[103])
print(str1)
slb2qs = calculate_VPD.quantile_msgdat_discrete(slbtyp2out,prbs)
str1 = '%.2f Type2 Quantile: %d' % (prbs[103],slb2qs[103])
print(str1)
# Indicators
totclrqout = numpy.zeros((3,3,nprb)) - 99
lgt1qs = numpy.zeros((3,3,nprb)) - 9999.0
lgt2qs = numpy.zeros((3,3,nprb)) - 9999.0
lgt12qs = numpy.zeros((3,3,nprb)) - 9999.0
for frw in range(3):
for fcl in range(3):
tmpclr = calculate_VPD.quantile_msgdat_discrete(totclrout[:,frw,fcl],prbs)
totclrqout[frw,fcl,:] = tmpclr[:]
str1 = 'Clr/Ovc Indicator %d, %d %.2f Quantile: %d' % (frw,fcl,prbs[103],tmpclr[103])
print(str1)
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt1out[:,frw,fcl],prbs)
lgt1qs[frw,fcl,:] = tmplgtq[:]
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt2out[:,frw,fcl],prbs)
lgt2qs[frw,fcl,:] = tmplgtq[:]
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt12out[:,frw,fcl],prbs)
lgt12qs[frw,fcl,:] = tmplgtq[:]
str1 = 'CFrac Logit %d, %d %.2f Quantile: %.3f, %.3f, %.3f' % (frw,fcl,prbs[103], \
lgt1qs[frw,fcl,103],lgt2qs[frw,fcl,103],lgt12qs[frw,fcl,103])
print(str1)
ngwt1qs = calculate_VPD.quantile_msgdat(ngwt1out,prbs)
str1 = '%.2f NGWater1 Quantile: %.3f' % (prbs[103],ngwt1qs[103])
print(str1)
ngwt2qs = calculate_VPD.quantile_msgdat(ngwt2out,prbs)
str1 = '%.2f NGWater2 Quantile: %.3f' % (prbs[103],ngwt2qs[103])
print(str1)
cttp1qs = calculate_VPD.quantile_msgdat(cttp1out,prbs)
str1 = '%.2f CTTemp1 Quantile: %.3f' % (prbs[103],cttp1qs[103])
print(str1)
cttp2qs = calculate_VPD.quantile_msgdat(cttp2out,prbs)
str1 = '%.2f CTTemp2 Quantile: %.3f' % (prbs[103],cttp2qs[103])
print(str1)
# Output Quantiles
qfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_Quantile.nc' % (dtdr,yrlst[k],hrchc,rgchc)
qout = Dataset(qfnm,'w')
dimp = qout.createDimension('probability',nprb)
dimfov1 = qout.createDimension('fovrow',3)
dimfov2 = qout.createDimension('fovcol',3)
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
varnslb = qout.createVariable('NumberSlab_quantile','i2',['probability'], fill_value = -99)
varnslb[:] = nslbqs
varnslb.long_name = 'Number of cloud slabs quantiles'
varnslb.units = 'Count'
varnslb.missing_value = -99
varcbprs = qout.createVariable('CloudBot1Logit_quantile','f4',['probability'], fill_value = -9999)
varcbprs[:] = prsbt1qs
varcbprs.long_name = 'Slab 1 cloud bottom pressure logit quantiles'
varcbprs.units = 'hPa'
varcbprs.missing_value = -9999
vardpc1 = qout.createVariable('DPCloud1Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc1[:] = dpcld1qs
vardpc1.long_name = 'Slab 1 cloud pressure depth logit quantiles'
vardpc1.units = 'hPa'
vardpc1.missing_value = -9999
vardpslb = qout.createVariable('DPSlabLogit_quantile','f4',['probability'], fill_value = -9999)
vardpslb[:] = dpslbqs
vardpslb.long_name = 'Two-slab vertical separation logit quantiles'
vardpslb.units = 'hPa'
vardpslb.missing_value = -9999
vardpc2 = qout.createVariable('DPCloud2Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc2[:] = dpcld2qs
vardpc2.long_name = 'Slab 2 cloud pressure depth logit quantiles'
vardpc2.units = 'hPa'
vardpc2.missing_value = -9999
vartyp1 = qout.createVariable('CType1_quantile','i2',['probability'], fill_value = -99)
vartyp1[:] = slb1qs
vartyp1.long_name = 'Slab 1 cloud type quantiles'
vartyp1.units = 'None'
vartyp1.missing_value = -99
vartyp1.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
vartyp2 = qout.createVariable('CType2_quantile','i2',['probability'], fill_value = -99)
vartyp2[:] = slb2qs
vartyp2.long_name = 'Slab 2 cloud type quantiles'
vartyp2.units = 'None'
vartyp2.missing_value = -99
vartyp2.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
varcvr = qout.createVariable('CCoverInd_quantile','i2',['fovrow','fovcol','probability'], fill_value = -99)
varcvr[:] = totclrqout
varcvr.long_name = 'Cloud cover indicator quantiles'
varcvr.units = 'None'
varcvr.missing_value = -99
varcvr.comment = 'Cloud cover indicators: -1=Clear, 0=Partly cloudy, 1=Overcast'
varlgt1 = qout.createVariable('CFrcLogit1_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt1[:] = lgt1qs
varlgt1.long_name = 'Slab 1 cloud fraction (cfrac1x) logit quantiles'
varlgt1.units = 'None'
varlgt1.missing_value = -9999
varlgt2 = qout.createVariable('CFrcLogit2_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt2[:] = lgt2qs
varlgt2.long_name = 'Slab 2 cloud fraction (cfrac2x) logit quantiles'
varlgt2.units = 'None'
varlgt2.missing_value = -9999
varlgt12 = qout.createVariable('CFrcLogit12_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt12[:] = lgt12qs
varlgt12.long_name = 'Slab 1/2 overlap fraction (cfrac12) logit quantiles'
varlgt12.units = 'None'
varlgt12.missing_value = -9999
varngwt1 = qout.createVariable('NGWater1_quantile','f4',['probability'], fill_value = -9999)
varngwt1[:] = ngwt1qs
varngwt1.long_name = 'Slab 1 cloud non-gas water quantiles'
varngwt1.units = 'g m^-2'
varngwt1.missing_value = -9999
varngwt2 = qout.createVariable('NGWater2_quantile','f4',['probability'], fill_value = -9999)
varngwt2[:] = ngwt2qs
varngwt2.long_name = 'Slab 2 cloud non-gas water quantiles'
varngwt2.units = 'g m^-2'
varngwt2.missing_value = -9999
varcttp1 = qout.createVariable('CTTemp1_quantile','f4',['probability'], fill_value = -9999)
varcttp1[:] = cttp1qs
varcttp1.long_name = 'Slab 1 cloud top temperature quantiles'
varcttp1.units = 'K'
varcttp1.missing_value = -9999
varcttp2 = qout.createVariable('CTTemp2_quantile','f4',['probability'], fill_value = -9999)
varcttp2[:] = cttp2qs
varcttp2.long_name = 'Slab 2 cloud top temperature quantiles'
varcttp2.units = 'K'
varcttp2.missing_value = -9999
qout.close()
# Set up transformations
zccvout = numpy.zeros((tsmp,3,3,)) - 9999.
zlgt1 = numpy.zeros((tsmp,3,3)) - 9999.
zlgt2 = numpy.zeros((tsmp,3,3)) - 9999.
zlgt12 = numpy.zeros((tsmp,3,3)) - 9999.
znslb = calculate_VPD.std_norm_quantile_from_obs(nslabout, nslbqs, prbs, msgval=-99)
zprsbt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(prsbot1out, prsbt1qs, prbs, msgval=-9999.)
zdpcld1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld1out, dpcld1qs, prbs, msgval=-9999.)
zdpslb = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpslbout, dpslbqs, prbs, msgval=-9999.)
zdpcld2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld2out, dpcld2qs, prbs, msgval=-9999.)
zctyp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp1out, slb1qs, prbs, msgval=-99)
zctyp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp2out, slb2qs, prbs, msgval=-99)
for frw in range(3):
for fcl in range(3):
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(totclrout[:,frw,fcl], totclrqout[frw,fcl,:], \
prbs, msgval=-99)
zccvout[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt1out[:,frw,fcl], lgt1qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt1[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt2out[:,frw,fcl], lgt2qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt2[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt12out[:,frw,fcl], lgt12qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt12[:,frw,fcl] = ztmp[:]
zngwt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt1out, ngwt1qs, prbs, msgval=-9999.)
zngwt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt2out, ngwt2qs, prbs, msgval=-9999.)
zcttp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp1out, cttp1qs, prbs, msgval=-9999.)
zcttp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp2out, cttp2qs, prbs, msgval=-9999.)
# Output transformed quantile samples
zfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_StdGausTrans.nc' % (dtdr,yrlst[k],hrchc,rgchc)
zout = Dataset(zfnm,'w')
dimsmp = zout.createDimension('sample',tsmp)
dimfov1 = zout.createDimension('fovrow',3)
dimfov2 = zout.createDimension('fovcol',3)
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varnslb = zout.createVariable('NumberSlab_StdGaus','f4',['sample'], fill_value = -9999)
varnslb[:] = znslb
varnslb.long_name = 'Quantile transformed number of cloud slabs'
varnslb.units = 'None'
varnslb.missing_value = -9999.
varcbprs = zout.createVariable('CloudBot1Logit_StdGaus','f4',['sample'], fill_value = -9999)
varcbprs[:] = zprsbt1
varcbprs.long_name = 'Quantile transformed slab 1 cloud bottom pressure logit'
varcbprs.units = 'None'
varcbprs.missing_value = -9999.
vardpc1 = zout.createVariable('DPCloud1Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc1[:] = zdpcld1
vardpc1.long_name = 'Quantile transformed slab 1 cloud pressure depth logit'
vardpc1.units = 'None'
vardpc1.missing_value = -9999.
vardpslb = zout.createVariable('DPSlabLogit_StdGaus','f4',['sample'], fill_value = -9999)
vardpslb[:] = zdpslb
vardpslb.long_name = 'Quantile transformed two-slab vertical separation logit'
vardpslb.units = 'None'
vardpslb.missing_value = -9999.
vardpc2 = zout.createVariable('DPCloud2Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc2[:] = zdpcld2
vardpc2.long_name = 'Quantile transformed slab 2 cloud pressure depth logit'
vardpc2.units = 'None'
vardpc2.missing_value = -9999.
vartyp1 = zout.createVariable('CType1_StdGaus','f4',['sample'], fill_value = -9999)
vartyp1[:] = zctyp1
vartyp1.long_name = 'Quantile transformed slab 1 cloud type'
vartyp1.units = 'None'
vartyp1.missing_value = -9999.
vartyp2 = zout.createVariable('CType2_StdGaus','f4',['sample'], fill_value = -9999)
vartyp2[:] = zctyp2
vartyp2.long_name = 'Quantile transformed slab 2 cloud type'
vartyp2.units = 'None'
vartyp2.missing_value = -9999.
varcov = zout.createVariable('CCoverInd_StdGaus','f4',['sample','fovrow','fovcol'], fill_value= -9999)
varcov[:] = zccvout
varcov.long_name = 'Quantile transformed cloud cover indicator'
varcov.units = 'None'
varcov.missing_value = -9999.
varlgt1 = zout.createVariable('CFrcLogit1_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt1[:] = zlgt1
varlgt1.long_name = 'Quantile transformed slab 1 cloud fraction logit'
varlgt1.units = 'None'
varlgt1.missing_value = -9999.
varlgt2 = zout.createVariable('CFrcLogit2_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt2[:] = zlgt2
varlgt2.long_name = 'Quantile transformed slab 2 cloud fraction logit'
varlgt2.units = 'None'
varlgt2.missing_value = -9999.
varlgt12 = zout.createVariable('CFrcLogit12_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt12[:] = zlgt12
varlgt12.long_name = 'Quantile transformed slab 1/2 overlap fraction logit'
varlgt12.units = 'None'
varlgt12.missing_value = -9999.
varngwt1 = zout.createVariable('NGWater1_StdGaus','f4',['sample'], fill_value = -9999)
varngwt1[:] = zngwt1
varngwt1.long_name = 'Quantile transformed slab 1 non-gas water'
varngwt1.units = 'None'
varngwt1.missing_value = -9999.
varngwt2 = zout.createVariable('NGWater2_StdGaus','f4',['sample'], fill_value = -9999)
varngwt2[:] = zngwt2
varngwt2.long_name = 'Quantile transformed slab 2 non-gas water'
varngwt2.units = 'None'
varngwt2.missing_value = -9999.
varcttp1 = zout.createVariable('CTTemp1_StdGaus','f4',['sample'], fill_value = -9999)
varcttp1[:] = zcttp1
varcttp1.long_name = 'Quantile transformed slab 1 cloud top temperature'
varcttp1.units = 'None'
varcttp1.missing_value = -9999.
varcttp2 = zout.createVariable('CTTemp2_StdGaus','f4',['sample'], fill_value = -9999)
varcttp2[:] = zcttp2
varcttp2.long_name = 'Quantile transformed slab 2 cloud top temperature'
varcttp2.units = 'None'
varcttp2.missing_value = -9999.
zout.close()
return
def quantile_profile_locmask_conus(rfdr, mtdr, csdr, airdr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, mskvr, mskvl):
# Construct profile/sfc variable quantiles and z-scores, with a possibly irregular location mask
# rfdr: Directory for reference data (Levels/Quantiles)
# mtdr: Directory for MERRA data
# csdr: Directory for cloud slab data
# airdr: Directory for AIRS cloud fraction
# dtdr: Output directory
# yrlst: List of years to process
# mnst: Starting Month
# mnfn: Ending Month
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# mskvr: Name of region mask variable
# mskvl: Value of region mask for Region Choice
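# Illustrative call (directory names, years, and mask settings are assumptions):
# quantile_profile_locmask_conus(rfdr='/ref', mtdr='/merra', csdr='/cloudslab', airdr='/airs',
#                                dtdr='/out', yrlst=[2015,2016], mnst=6, mnfn=8,
#                                hrchc=0, rgchc='SGP', mskvr='NCA_mask', mskvl=4)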
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
nzout = 101
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f.variables[mskvr][:,:]
latmet = f.variables['plat'][:]
lonmet = f.variables['plon'][:]
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
print(mskvl)
mskind[mask == mskvl] = 1
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
lnsm = numpy.sum(mskind,axis=0)
#print(lnsq.shape)
#print(lnsm.shape)
#print(lnsm)
ltsm = numpy.sum(mskind,axis=1)
#print(ltsq.shape)
#print(ltsm.shape)
#print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mskind[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(fnm,'r')
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
tmunit = tmunit.replace("days since ","")
dybs = datetime.datetime.strptime(tmunit,"%Y-%m-%d %H:%M:%S")
print(dybs)
dy0 = dybs + datetime.timedelta(days=tminf[0])
dyinit = datetime.date(dy0.year,dy0.month,dy0.day)
print(dyinit)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
#print(tmhld.shape)
#print(numpy.amin(tmhld))
#print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
# MERRA variables
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(fnm,'r')
tms = f.variables['time'][:]
stparr = f['/stemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
psfarr = f['/spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
salarr = f['/salti'][ltmn:ltmx,lnmn:lnmx]
tmparr = f['/ptemp'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
h2oarr = f['/rh'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
altarr = f['/palts'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
f.close()
tmflt = tms.flatten()
nt = tmflt.shape[0]
lnhld = numpy.tile(lnrp,nt)
lthld = numpy.tile(ltrp,nt)
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
# AIRS Clouds
anm = '%s/CONUS_AIRS_CldFrc_Match_JJA_%d_%02d_UTC.nc' % (airdr,yrlst[k],hrchc)
f = Dataset(anm,'r')
arsfrc1 = f.variables['AIRS_CldFrac_1'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
arsfrc2 = f.variables['AIRS_CldFrac_2'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
# Sum
frctot = arsfrc1 + arsfrc2
frc0 = frctot[0,:,:,:]
frc0 = frc0.flatten()
frcsq = numpy.arange(tmhld.shape[0])
# Subset by AIRS matchup and location masks
frcsb = frcsq[(numpy.logical_not(frc0.mask)) & (mskall > 0)]
nairs = frcsb.shape[0]
print(tmhld.shape)
print(frcsb.shape)
tmptmp = numpy.zeros((nairs,nzout))
h2otmp = numpy.zeros((nairs,nzout))
alttmp = numpy.zeros((nairs,nzout))
for j in range(nzout):
tmpvec = tmparr[:,j,:,:].flatten()
tmpvec[tmpvec > 1e30] = -9999.
tmptmp[:,j] = tmpvec[frcsb]
altvec = altarr[:,j,:,:].flatten()
alttmp[:,j] = altvec[frcsb]
h2ovec = h2oarr[:,j,:,:].flatten()
h2ovec[h2ovec > 1e30] = -9999.
h2otmp[:,j] = h2ovec[frcsb]
if tsmp == 0:
tmpmerout = numpy.zeros(tmptmp.shape)
tmpmerout[:,:] = tmptmp
h2omerout = numpy.zeros(h2otmp.shape)
h2omerout[:,:] = h2otmp
altout = numpy.zeros(alttmp.shape)
altout[:,:] = alttmp
else:
tmpmerout = numpy.append(tmpmerout,tmptmp,axis=0)
h2omerout = numpy.append(h2omerout,h2otmp,axis=0)
altout = numpy.append(altout,alttmp,axis=0)
stparr = stparr.flatten()
psfarr = psfarr.flatten()
salarr = salarr.flatten()
salfl = numpy.tile(salarr[:],nt)
if tsmp == 0:
sftmpout = numpy.zeros((nairs,)) - 9999.0
sftmpout[:] = stparr[frcsb]
psfcout = numpy.zeros((nairs,)) - 9999.0
psfcout[:] = psfarr[frcsb]
sfaltout = numpy.zeros((nairs,)) - 9999.0
sfaltout[:] = salfl[frcsb]
else:
sftmpout = numpy.append(sftmpout,stparr[frcsb])
psfcout = numpy.append(psfcout,psfarr[frcsb])
sfaltout = numpy.append(sfaltout,salfl[frcsb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((nairs,)) - 9999.0
latout[:] = lthld[frcsb]
lonout = numpy.zeros((nairs,)) - 9999.0
lonout[:] = lnhld[frcsb]
yrout = numpy.zeros((nairs,),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((nairs,),dtype=numpy.int16)
jdyout[:] = tmhld[frcsb]
else:
latout = numpy.append(latout,lthld[frcsb])
lonout = numpy.append(lonout,lnhld[frcsb])
yrtmp = numpy.zeros((nairs,),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[frcsb])
tsmp = tsmp + nairs
# Quantiles
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
ztmpout = numpy.zeros((tsmp,nzout)) - 9999.
zrhout = numpy.zeros((tsmp,nzout)) - 9999.
zsftmpout = numpy.zeros((tsmp,)) - 9999.
zsfaltout = numpy.zeros((tsmp,)) - 9999.
zpsfcout = numpy.zeros((tsmp,)) - 9999.
# Quantiles
for j in range(nzout):
tmptmp = calculate_VPD.quantile_msgdat(tmpmerout[:,j],prbs)
tmpqout[j,:] = tmptmp[:]
str1 = 'Plev %.2f, %.2f Temp Quantile: %.3f' % (plev[j],prbs[103],tmptmp[103])
print(str1)
# Transform
ztmp = calculate_VPD.std_norm_quantile_from_obs(tmpmerout[:,j], tmptmp, prbs, msgval=-9999.)
ztmpout[:,j] = ztmp[:]
alttmp = calculate_VPD.quantile_msgdat(altout[:,j],prbs)
altmed[j] = alttmp[103]
str1 = 'Plev %.2f, %.2f Alt Quantile: %.3f' % (plev[j],prbs[103],alttmp[103])
print(str1)
# Cap RH values above 100% (stored as a fraction) at 1.0
rhadj = h2omerout[:,j]
rhadj[rhadj > 1.0] = 1.0
rhqtmp = calculate_VPD.quantile_msgdat(rhadj,prbs)
rhqout[j,:] = rhqtmp[:]
str1 = 'Plev %.2f, %.2f RH Quantile: %.4f' % (plev[j],prbs[103],rhqtmp[103])
print(str1)
zrh = calculate_VPD.std_norm_quantile_from_obs(rhadj, rhqtmp, prbs, msgval=-9999.)
zrhout[:,j] = zrh[:]
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f PSfc Quantile: %.2f' % (prbs[103],psfcqs[103])
print(str1)
zpsfcout = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
sftpqs = calculate_VPD.quantile_msgdat(sftmpout,prbs)
str1 = '%.2f SfcTmp Quantile: %.2f' % (prbs[103],sftpqs[103])
print(str1)
zsftmpout = calculate_VPD.std_norm_quantile_from_obs(sftmpout, sftpqs, prbs, msgval=-9999.)
sfalqs = calculate_VPD.quantile_msgdat(sfaltout,prbs)
str1 = '%.2f SfcAlt Quantile: %.2f' % (prbs[103],sfalqs[103])
print(str1)
zsfaltout = calculate_VPD.std_norm_quantile_from_obs(sfaltout, sfalqs, prbs, msgval=-9999.)
# Output Quantiles
qfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_TempRHSfc_Quantile.nc' % (dtdr,yrlst[k],hrchc,rgchc)
qout = Dataset(qfnm,'w')
dimz = qout.createDimension('level',nzout)
dimp = qout.createDimension('probability',nprb)
varlvl = qout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
# Altitude grid
varalt = qout.createVariable('Altitude_median', 'f4', ['level'], fill_value = -9999)
varalt[:] = altmed
varalt.long_name = 'Altitude median value'
varalt.units = 'm'
varalt.missing_value = -9999
vartmp = qout.createVariable('Temperature_quantile', 'f4', ['level','probability'], fill_value = -9999)
vartmp[:] = tmpqout
vartmp.long_name = 'Temperature quantiles'
vartmp.units = 'K'
vartmp.missing_value = -9999.
varrh = qout.createVariable('RH_quantile', 'f4', ['level','probability'], fill_value = -9999)
varrh[:] = rhqout
varrh.long_name = 'Relative humidity quantiles'
varrh.units = 'Unitless'
varrh.missing_value = -9999.
varstmp = qout.createVariable('SfcTemp_quantile', 'f4', ['probability'], fill_value = -9999)
varstmp[:] = sftpqs
varstmp.long_name = 'Surface temperature quantiles'
varstmp.units = 'K'
varstmp.missing_value = -9999.
varpsfc = qout.createVariable('SfcPres_quantile', 'f4', ['probability'], fill_value = -9999)
varpsfc[:] = psfcqs
varpsfc.long_name = 'Surface pressure quantiles'
varpsfc.units = 'hPa'
varpsfc.missing_value = -9999.
varsalt = qout.createVariable('SfcAlt_quantile', 'f4', ['probability'], fill_value = -9999)
varsalt[:] = sfalqs
varsalt.long_name = 'Surface altitude quantiles'
varsalt.units = 'm'
varsalt.missing_value = -9999.
qout.close()
# Output transformed quantile samples
zfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_TempRHSfc_StdGausTrans.nc' % (dtdr,yrlst[k],hrchc,rgchc)
zout = Dataset(zfnm,'w')
dimz = zout.createDimension('level',nzout)
dimsmp = zout.createDimension('sample',tsmp)
varlvl = zout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varsrt3 = zout.createVariable('Temperature_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt3[:] = ztmpout
varsrt3.long_name = 'Quantile transformed temperature'
varsrt3.units = 'None'
varsrt3.missing_value = -9999.
varsrt4 = zout.createVariable('RH_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt4[:] = zrhout
varsrt4.long_name = 'Quantile transformed relative humidity'
varsrt4.units = 'None'
varsrt4.missing_value = -9999.
varsrts1 = zout.createVariable('SfcTemp_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts1[:] = zsftmpout
varsrts1.long_name = 'Quantile transformed surface temperature'
varsrts1.units = 'None'
varsrts1.missing_value = -9999.
varsrts2 = zout.createVariable('SfcPres_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts2[:] = zpsfcout
varsrts2.long_name = 'Quantile transformed surface pressure'
varsrts2.units = 'None'
varsrts2.missing_value = -9999.
varsrts3 = zout.createVariable('SfcAlt_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts3[:] = zsfaltout
varsrts3.long_name = 'Quantile transformed surface altitude'
varsrts3.units = 'None'
varsrts3.missing_value = -9999.
zout.close()
return
def airscld_invtransf_mix_cloud9_conus_nosfc(rfdr, dtdr, yrchc, hrchc, rgchc, rfmn, rfdy, rfgrn, scnrw, nrep = 10, \
l2dir = '/archive/AIRSOps/airs/gdaac/v6'):
# Read in mixture model parameters, draw random samples and set up SARTA input files
# Use AIRS FOV cloud fraction information
# Use designated AIRS reference granule, and pull surface pressure temperature from there
# dtdr: Output directory
# yrchc: Template Year Choice
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# rfmn: Month for reference granule
# rfdy: Day for reference granule
# rfgrn: Reference granule number
# scnrw: Scan row for experiment
# nrep: Number of replicate granules
# l2dir: Local AIRS Level 2 directory (to retrieve reference info)
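# Illustrative call (year, granule, and directory values are assumptions):
# airscld_invtransf_mix_cloud9_conus_nosfc(rfdr='/ref', dtdr='/out', yrchc=2015, hrchc=0,
#                                          rgchc='SGP', rfmn=7, rfdy=15, rfgrn=100,
#                                          scnrw=23, nrep=10)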
# RN Generator
sdchc = 165434 + yrchc + hrchc
random.seed(sdchc)
cldprt = numpy.array([0.4,0.2,0.08])
nszout = 45 * 30 * nrep
sfrps = 45 * nrep
nlvsrt = 98
msgdbl = -9999.0
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
airs_sarta_levs = f['level'][:]
f.close()
# Get reference granule info
airsdr = '%s/%04d/%02d/%02d/airs2sup' % (l2dir,yrchc,rfmn,rfdy)
if (os.path.exists(airsdr)):
fllst = os.listdir(airsdr)
l2str = 'AIRS.%04d.%02d.%02d.%03d' % (yrchc,rfmn,rfdy,rfgrn)
rffd = -1
j = 0
while ( (j < len(fllst)) and (rffd < 0) ):
lncr = len(fllst[j])
l4 = lncr - 4
if ( (fllst[j][l4:lncr] == '.hdf') and (l2str in fllst[j])):
l2fl = '%s/%s' % (airsdr,fllst[j])
ncl2 = Dataset(l2fl)
psfc = ncl2.variables['PSurfStd'][:,:]
topg = ncl2.variables['topog'][:,:]
ncl2.close()
rffd = j
j = j + 1
else:
print('L2 directory not found')
# Surface replicates
psfcvc = psfc[scnrw-1,:]
topgvc = topg[scnrw-1,:]
spres = numpy.tile(psfcvc,(sfrps,))
salti = numpy.tile(topgvc,(sfrps,))
# Variable list
clrlst = ['Temperature','RH','SfcTemp']
clrst = [1,64,0]
clrct = [98,35,1]
cldlst = ['NumberSlab','CloudBot1Logit','DPCloud1Logit','DPSlabLogit','DPCloud2Logit', \
'CType1','CType2','CCoverInd','CFrcLogit1','CFrcLogit2','CFrcLogit12', \
'NGWater1','NGWater2','CTTemp1','CTTemp2']
cldst = [0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0]
cldct = [1,1,1,1,1, 1,1,9,9,9,9, 1,1,1,1]
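# With these counts the clear block has 98 + 35 + 1 = 134 state-vector elements and the
# cloud block has 11*1 + 4*9 = 47, so nvar computed below totals 181.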
nvar = 0
for q in range(len(clrct)):
nvar = nvar + clrct[q]
nclr = nvar
for q in range(len(cldlst)):
nvar = nvar + cldct[q]
ncld = nvar - nclr
# Discrete/Continuous Indicator
typind = []
for q in range(len(clrct)):
for p in range(clrct[q]):
typind.append('Continuous')
cldtypind = ['Discrete','Continuous','Continuous','Continuous','Continuous', \
'Discrete','Discrete','Discrete','Continuous','Continuous','Continuous', \
'Continuous','Continuous','Continuous','Continuous']
for q in range(len(cldct)):
for p in range(cldct[q]):
typind.append(cldtypind[q])
# Quantile files
qclrnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_TempRHSfc_Quantile.nc' % (dtdr,yrchc,hrchc,rgchc)
qcldnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_Quantile.nc' % (dtdr,yrchc,hrchc,rgchc)
qin = Dataset(qclrnm,'r')
prbs = qin.variables['probability'][:]
nprb = prbs.shape[0]
qsclr = numpy.zeros((nclr,nprb))
lvs = qin.variables['level'][:]
alts = qin.variables['Altitude_median'][:]
rhmd = qin.variables['RH_quantile'][:,103]
nlvl = lvs.shape[0]
cctr = 0
for j in range(len(clrlst)):
print(clrlst[j])
if clrst[j] == 0:
vr1 = '%s_quantile' % (clrlst[j])
qsclr[cctr,:] = qin.variables[vr1][:]
else:
inst = clrst[j] - 1
infn = inst + clrct[j]
otst = cctr
otfn = cctr + clrct[j]
vr1 = '%s_quantile' % (clrlst[j])
qsclr[otst:otfn,:] = qin.variables[vr1][inst:infn,:]
cctr = cctr + clrct[j]
qin.close()
print('Clear medians')
print(qsclr[:,103])
cldnmout = []
qin = Dataset(qcldnm,'r')
qscld = numpy.zeros((ncld,nprb))
dctr = 0
for j in range(len(cldlst)):
print(cldlst[j])
vr1 = '%s_quantile' % (cldlst[j])
vrinf = qin.variables[vr1]
if cldct[j] == 1:
qscld[dctr,:] = qin.variables[vr1][:]
dctr = dctr + 1
cldnmout.append(cldlst[j])
elif (len(vrinf.shape) == 2):
inst = cldst[j]
infn = inst + cldct[j]
for n2 in range(inst,infn):
clnm = '%s_%d' % (cldlst[j],n2)
cldnmout.append(clnm)
otst = dctr
otfn = dctr + cldct[j]
vr1 = '%s_quantile' % (cldlst[j])
qscld[otst:otfn,:] = qin.variables[vr1][inst:infn,:]
dctr = dctr + cldct[j]
elif (len(vrinf.shape) == 3):
for cl0 in range(vrinf.shape[0]):
for rw0 in range(vrinf.shape[1]):
otst = dctr
otfn = dctr + 1
qscld[otst:otfn,:] = qin.variables[vr1][cl0,rw0,:]
clnm = '%s_%d_%d' % (cldlst[j],cl0,rw0)
cldnmout.append(clnm)
dctr = dctr + 1
qin.close()
print('Cloud medians')
print(qscld[:,103])
# Read GMM Results
gmmnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_GMM_parameters.nc' % (dtdr,yrchc,hrchc,rgchc)
gmin = Dataset(gmmnm,'r')
gmnms = gmin['State_Vector_Names'][:,:]
gmmean = gmin['Mean'][:,:]
gmpkcv = gmin['Packed_Covariance'][:,:]
gmprps = gmin['Mixture_Proportion'][:]
gmin.close()
nmclps = gmnms.tolist()
strvrs = list(map(calculate_VPD.clean_byte_list,nmclps))
if sys.version_info[0] < 3:
print('Version 2')
strvrs = map(str,strvrs)
nmix = gmmean.shape[0]
nmxvar = gmmean.shape[1]
mrgcv = numpy.zeros((nmix,nmxvar,nmxvar),dtype=numpy.float64)
for j in range(nmix):
mrgcv[j,:,:] = calculate_VPD.unpackcov(gmpkcv[j,:], nelm=nmxvar)
# Component sizes
dtall = numpy.zeros((nszout,nmxvar),dtype=numpy.float64)
cmpidx = numpy.zeros((nszout,),dtype=numpy.int16)
csmp = random.multinomial(nszout,pvals=gmprps)
cmsz = 0
for j in range(nmix):
cvfl = mrgcv[j,:,:]
s1 = numpy.sqrt(numpy.diagonal(cvfl))
crmt = calculate_VPD.cov2cor(cvfl)
sdmt = numpy.diag(numpy.sqrt(cvfl.diagonal()))
w, v = linalg.eig(crmt)
print(numpy.amin(w))
sdfn = cmsz + csmp[j]
dtz = random.multivariate_normal(numpy.zeros((nmxvar,)),crmt,size=csmp[j])
dttmp = numpy.tile(gmmean[j,:],(csmp[j],1)) + numpy.dot(dtz,sdmt)
dtall[cmsz:sdfn,:] = dttmp[:,:]
cmpidx[cmsz:sdfn] = j + 1
cmsz = cmsz + csmp[j]
# Re-shuffle
ssq = numpy.arange(nszout)
sqsmp = random.choice(ssq,size=nszout,replace=False)
csmpshf = cmpidx[sqsmp]
dtshf = dtall[sqsmp,:]
print(dtshf.shape)
### Inverse Transform
qout = numpy.zeros(dtshf.shape)
for j in range(nclr):
if typind[j] == 'Discrete':
qout[:,j] = calculate_VPD.data_quantile_from_std_norm_discrete(dtshf[:,j],qsclr[j,:],prbs,minval=qsclr[j,0],maxval=qsclr[j,nprb-1])
else:
qout[:,j] = calculate_VPD.data_quantile_from_std_norm(dtshf[:,j],qsclr[j,:],prbs,minval=qsclr[j,0],maxval=qsclr[j,nprb-1])
for j in range(nclr,nvar):
if typind[j] == 'Discrete':
qout[:,j] = calculate_VPD.data_quantile_from_std_norm_discrete(dtshf[:,j],qscld[j-nclr,:],prbs,minval=qscld[j-nclr,0],maxval=qscld[j-nclr,nprb-1])
else:
qout[:,j] = calculate_VPD.data_quantile_from_std_norm(dtshf[:,j],qscld[j-nclr,:],prbs,minval=qscld[j-nclr,0],maxval=qscld[j-nclr,nprb-1])
### Prepare for SARTA
varlstout = ['cngwat','cngwat2','cprbot','cprbot2','cprtop','cprtop2', \
'cpsize','cpsize2','cstemp','cstemp2','ctype','ctype2','salti','spres','stemp']
# Adjust altitudes
alth2o = numpy.zeros((nszout,nlvsrt+3))
alth2o[:,nlvsrt-1] = alts[nlvsrt-1]
curdlt = 0.0
for j in range(nlvsrt-2,-1,-1):
str1 = 'Level %d: %.4f' % (j,curdlt)
print(str1)
if (alts[j] > alts[j+1]):
curdlt = alts[j] - alts[j+1]
alth2o[:,j] = alts[j]
else:
alth2o[:,j] = alts[j+1] + curdlt * 2.0
curdlt = curdlt * 2.0
alth2o[:,97] = 0.0
# Convert cloud items to data frame
smpfrm = pandas.DataFrame(data=qout[:,nclr:nvar],columns=cldnmout)
dtout = numpy.zeros((nszout,len(varlstout)), dtype=numpy.float64)
frmout = pandas.DataFrame(data=dtout,columns=varlstout)
# Cloud Types
frmout['ctype'] = (smpfrm['CType1'] + 1.0) * 100.0 + 1.0
frmout['ctype2'] = (smpfrm['CType2'] + 1.0) * 100.0 + 1.0
frmout.loc[(smpfrm.NumberSlab == 0),'ctype'] = msgdbl
frmout.loc[(smpfrm.NumberSlab < 2),'ctype2'] = msgdbl
# Met/Sfc Components, arrays sized for SARTA and AIRS
cctr = 0
prhout = numpy.zeros((nszout,nlvsrt+3)) - 9999.0
ptmpout = numpy.zeros((nszout,nlvsrt+3)) - 9999.0
for j in range(len(clrst)):
if clrst[j] == 0:
if clrlst[j] == 'SfcTemp':
frmout['stemp'] = qout[:,cctr]
elif clrlst[j] == 'Temperature':
inst = clrst[j] - 1
infn = inst + clrct[j]
otst = cctr
otfn = cctr + clrct[j]
ptmpout[:,inst:infn] = qout[:,otst:otfn]
elif clrlst[j] == 'RH':
inst = clrst[j] - 1
infn = inst + clrct[j]
otst = cctr
otfn = cctr + clrct[j]
prhout[:,inst:infn] = qout[:,otst:otfn]
bsrh = rhmd[inst]
for k in range(inst-1,-1,-1):
if ma.is_masked(rhmd[k]):
prhout[:,k] = bsrh / 2.0
t2 = 'RH masked: %d' % (k)
print(t2)
elif rhmd[k] < 0:
t2 = 'RH below 0: %d' % (k)
print(t2)
prhout[:,k] = bsrh
else:
prhout[:,k] = rhmd[k]
bsrh = rhmd[k]
cctr = cctr + clrct[j]
str1 = '''RH at Level 1: %.4e, %.4e ''' % (numpy.amin(prhout[:,0]),rhmd[0])
str2 = '''RH at Level 2: %.4e, %.4e ''' % (numpy.amin(prhout[:,1]),rhmd[1])
print(str1)
print(str2)
h2oout = calculate_VPD.calculate_h2odens(prhout,ptmpout,airs_sarta_levs,alth2o)
# Surface from reference
frmout['salti'] = salti
# Need for clouds
frmout['spres'] = spres
smpfrm['SfcPres'] = spres
# Pressure Variables
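# Inverse of the logit setup used when building the quantiles: calculate_VPD.lgttoprp turns
# the sampled logits back into proportions of surface pressure, and the running sums give the
# slab boundaries, e.g. cprbot = spres*(1 - p_below) and cprtop = spres*(1 - p_below - p_slab1).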
for i in range(nszout):
if smpfrm['NumberSlab'][smpfrm.index[i]] == 0:
frmout.at[i,'cprbot'] = msgdbl
frmout.at[i,'cprtop'] = msgdbl
frmout.at[i,'cprbot2'] = msgdbl
frmout.at[i,'cprtop2'] = msgdbl
elif smpfrm['NumberSlab'][smpfrm.index[i]] == 1:
tmplgts = numpy.array( [smpfrm['CloudBot1Logit'][smpfrm.index[i]], \
smpfrm['DPCloud1Logit'][smpfrm.index[i]] ] )
frctmp = calculate_VPD.lgttoprp(tmplgts)
frmout.at[i,'cprbot'] = smpfrm['SfcPres'][smpfrm.index[i]] * (1.0 - frctmp[0])
frmout.at[i,'cprtop'] = smpfrm['SfcPres'][smpfrm.index[i]] * (1.0 - frctmp[0] - frctmp[1])
frmout.at[i,'cprbot2'] = msgdbl
frmout.at[i,'cprtop2'] = msgdbl
elif smpfrm['NumberSlab'][smpfrm.index[i]] == 2:
tmplgts = numpy.array( [smpfrm['CloudBot1Logit'][smpfrm.index[i]], \
smpfrm['DPCloud1Logit'][smpfrm.index[i]], \
smpfrm['DPSlabLogit'][smpfrm.index[i]], \
smpfrm['DPCloud2Logit'][smpfrm.index[i]] ] )
frctmp = calculate_VPD.lgttoprp(tmplgts)
frmout.at[i,'cprbot'] = smpfrm['SfcPres'][smpfrm.index[i]] * (1.0 - frctmp[0])
frmout.at[i,'cprtop'] = smpfrm['SfcPres'][smpfrm.index[i]] * (1.0 - frctmp[0] - frctmp[1])
frmout.at[i,'cprbot2'] = smpfrm['SfcPres'][smpfrm.index[i]] * (1.0 - frctmp[0] - frctmp[1] - frctmp[2])
frmout.at[i,'cprtop2'] = smpfrm['SfcPres'][smpfrm.index[i]] * (1.0 - frctmp[0] - frctmp[1] - frctmp[2] - frctmp[3])
# Non-Gas Water
frmout['cngwat'] = smpfrm['NGWater1']
frmout.loc[(smpfrm.NumberSlab == 0),'cngwat'] = msgdbl
frmout['cngwat2'] = smpfrm['NGWater2']
frmout.loc[(smpfrm.NumberSlab < 2),'cngwat2'] = msgdbl
# Temperature
frmout['cstemp'] = smpfrm['CTTemp1']
frmout.loc[(smpfrm.NumberSlab == 0),'cstemp'] = msgdbl
frmout['cstemp2'] = smpfrm['CTTemp2']
frmout.loc[(smpfrm.NumberSlab < 2),'cstemp2'] = msgdbl
# Particle Size, from Sergio's paper
# 20 for water, 80 for ice
#'cpsize','cpsize2','cstemp','cstemp2','ctype','ctype2']
frmout.loc[(frmout.ctype == 101.0),'cpsize'] = 20
frmout.loc[(frmout.ctype == 201.0),'cpsize'] = 80
frmout.loc[(frmout.ctype < 0.0),'cpsize'] = msgdbl
frmout.loc[(frmout.ctype2 == 101.0),'cpsize2'] = 20
frmout.loc[(frmout.ctype2 == 201.0),'cpsize2'] = 80
frmout.loc[(frmout.ctype2 < 0.0),'cpsize2'] = msgdbl
# Fractions, 3D Arrays
cfrc1out = numpy.zeros((nszout,3,3)) - 9999.0
cfrc2out = numpy.zeros((nszout,3,3)) - 9999.0
cfrc12out = numpy.zeros((nszout,3,3)) - 9999.0
for i in range(nszout):
if smpfrm['NumberSlab'][smpfrm.index[i]] == 0:
cfrc1out[i,:,:] = 0.0
cfrc2out[i,:,:] = 0.0
cfrc12out[i,:,:] = 0.0
elif smpfrm['NumberSlab'][smpfrm.index[i]] == 1:
for q in range(3):
for p in range(3):
ccvnm = 'CCoverInd_%d_%d' % (q,p)
lgtnm1 = 'CFrcLogit1_%d_%d' % (q,p)
if (smpfrm[ccvnm][smpfrm.index[i]] == -1):
cfrc1out[i,q,p] = 0.0
elif (smpfrm[ccvnm][smpfrm.index[i]] == 1):
cfrc1out[i,q,p] = 1.0
else:
tmplgts = numpy.array( [smpfrm[lgtnm1][smpfrm.index[i]]] )
frctmp = calculate_VPD.lgttoprp(tmplgts)
cfrc1out[i,q,p] = frctmp[0]
cfrc2out[i,:,:] = 0.0
cfrc12out[i,:,:] = 0.0
elif smpfrm['NumberSlab'][smpfrm.index[i]] == 2:
for q in range(3):
for p in range(3):
ccvnm = 'CCoverInd_%d_%d' % (q,p)
lgtnm1 = 'CFrcLogit1_%d_%d' % (q,p)
lgtnm2 = 'CFrcLogit2_%d_%d' % (q,p)
lgtnm12 = 'CFrcLogit12_%d_%d' % (q,p)
if (smpfrm[ccvnm][smpfrm.index[i]] == -1):
cfrc1out[i,q,p] = 0.0
cfrc2out[i,q,p] = 0.0
cfrc12out[i,q,p] = 0.0
elif (smpfrm[ccvnm][smpfrm.index[i]] == 1):
tmplgts = numpy.array( [smpfrm[lgtnm1][smpfrm.index[i]], \
smpfrm[lgtnm2][smpfrm.index[i]], \
smpfrm[lgtnm12][smpfrm.index[i]]] )
frctmp = calculate_VPD.lgttoprp(tmplgts)
frcadj = 1.0 - frctmp[3]
cfrc1out[i,q,p] = (frctmp[0] + frctmp[2]) / frcadj
cfrc2out[i,q,p] = (frctmp[1] + frctmp[2]) / frcadj
cfrc12out[i,q,p] = frctmp[2] / frcadj
else:
tmplgts = numpy.array( [smpfrm[lgtnm1][smpfrm.index[i]], \
smpfrm[lgtnm2][smpfrm.index[i]], \
smpfrm[lgtnm12][smpfrm.index[i]]] )
frctmp = calculate_VPD.lgttoprp(tmplgts)
cfrc1out[i,q,p] = frctmp[0] + frctmp[2]
cfrc2out[i,q,p] = frctmp[1] + frctmp[2]
cfrc12out[i,q,p] = frctmp[2]
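# Illustrative check (values are made up): if lgttoprp gives
# [f1, f2, f12, clear] = [0.3, 0.2, 0.1, 0.4], then slab-1 cover is f1 + f12 = 0.4,
# slab-2 cover is f2 + f12 = 0.3, and the overlap is f12 = 0.1; in the fully
# overcast branch the same sums are rescaled by 1 / (1 - clear).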
# Write Sample Output
print(frmout[166:180])
fldbl = numpy.array([-9999.],dtype=numpy.float64)
flflt = numpy.array([-9999.],dtype=numpy.float32)
flshrt = numpy.array([-99],dtype=numpy.int16)
dfnm = '%s/SampledStateVectors/CONUS_AIRS_JJA_%04d_%02dUTC_%s_SR%02d_SimSARTAStates_Mix_CloudFOV.h5' % (dtdr,yrchc,hrchc,rgchc,scnrw)
f = h5py.File(dfnm,'w')
for j in range(len(varlstout)):
dftmp = f.create_dataset(varlstout[j],data=frmout[varlstout[j]])
dftmp.attrs['missing_value'] = -9999.
dftmp.attrs['_FillValue'] = -9999.
dfpt = f.create_dataset('ptemp',data=ptmpout)
dfpt.attrs['missing_value'] = fldbl
dfpt.attrs['_FillValue'] = fldbl
dfrh = f.create_dataset('relative_humidity',data=prhout)
dfrh.attrs['missing_value'] = fldbl
dfrh.attrs['_FillValue'] = fldbl
dfgs = f.create_dataset('gas_1',data=h2oout)
dfgs.attrs['missing_value'] = fldbl
dfgs.attrs['_FillValue'] = fldbl
dfcf1 = f.create_dataset('cfrac',data=cfrc1out)
dfcf1.attrs['missing_value'] = fldbl
dfcf1.attrs['_FillValue'] = fldbl
dfcf2 = f.create_dataset('cfrac2',data=cfrc2out)
dfcf2.attrs['missing_value'] = fldbl
dfcf2.attrs['_FillValue'] = fldbl
dfcf12 = f.create_dataset('cfrac12',data=cfrc12out)
dfcf12.attrs['missing_value'] = fldbl
dfcf12.attrs['_FillValue'] = fldbl
dfcsmp = f.create_dataset('mixture_component',data=csmpshf)
dfcsmp.attrs['missing_value'] = flshrt
dfcsmp.attrs['_FillValue'] = flshrt
dflv = f.create_dataset('level',data=airs_sarta_levs)
f.close()
return
def setup_airs_cloud(flnm, tms, lats, lons, tmunit = 'Seconds since 1993-01-01 00:00:00'):
# Set up matched AIRS/MERRA cloud file
# flnm: Name of output file
# tms: Time variable array
# lats: Latitude variable array
# lons: Longitude variable array
ntm = tms.shape[0]
nlat = lats.shape[0]
nlon = lons.shape[0]
# Create Output file
qout = Dataset(flnm,'w')
dimln = qout.createDimension('lon',nlon)
dimlt = qout.createDimension('lat',nlat)
dimtm = qout.createDimension('time',ntm)
dimtrk = qout.createDimension('AIRSFOV',9)
if (lons.dtype == numpy.float32):
lntp = 'f4'
else:
lntp = 'f8'
varlon = qout.createVariable('lon',lntp,['lon'], fill_value = -9999)
varlon[:] = lons
varlon.long_name = 'longitude'
varlon.units='degrees_east'
varlon.missing_value = -9999
if (lats.dtype == numpy.float32):
lttp = 'f4'
else:
lttp = 'f8'
varlat = qout.createVariable('lat',lttp,['lat'], fill_value = -9999)
varlat[:] = lats
varlat.long_name = 'latitude'
varlat.units='degrees_north'
varlat.missing_value = -9999
if (tms.dtype == numpy.float32):
tmtp = 'f4'
else:
tmtp = 'f8'
vartm = qout.createVariable('time',tmtp,['time'], fill_value = -9999)
vartm[:] = tms
vartm.long_name = 'time'
vartm.units = tmunit
vartm.missing_value = -9999
# Other output variables
varcfrc1 = qout.createVariable('AIRS_CldFrac_1','f4',['time','lat','lon','AIRSFOV'], fill_value = -9999)
varcfrc1.long_name = 'AIRS cloud fraction, upper level'
varcfrc1.units = 'unitless'
varcfrc1.missing_value = -9999
varcfrc2 = qout.createVariable('AIRS_CldFrac_2','f4',['time','lat','lon','AIRSFOV'], fill_value = -9999)
varcfrc2.long_name = 'AIRS cloud fraction, lower level'
varcfrc2.units = 'unitless'
varcfrc2.missing_value = -9999
varcqc1 = qout.createVariable('AIRS_CldFrac_QC_1','i2',['time','lat','lon','AIRSFOV'], fill_value = -99)
varcqc1.long_name = 'AIRS cloud fraction quality flag, upper level'
varcqc1.units = 'unitless'
varcqc1.missing_value = -99
varcqc2 = qout.createVariable('AIRS_CldFrac_QC_2','i2',['time','lat','lon','AIRSFOV'], fill_value = -99)
varcqc2.long_name = 'AIRS cloud fraction quality flag, lower level'
varcqc2.units = 'unitless'
varcqc2.missing_value = -99
varncld = qout.createVariable('AIRS_nCld','i2',['time','lat','lon','AIRSFOV'], fill_value = -99)
varncld.long_name = 'AIRS number of cloud layers'
varncld.units = 'unitless'
varncld.missing_value = -99
qout.close()
return
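# Hypothetical usage sketch for setup_airs_cloud; the file name and coordinate
# arrays below are placeholders, not values taken from this workflow:
# tms = numpy.arange(8) * 10800.0
# lats = numpy.arange(25.0, 50.5, 0.5)
# lons = numpy.arange(-125.0, -66.875, 0.625)
# setup_airs_cloud('airs_merra_cloud_match.nc', tms, lats, lons)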
def airs_cfrac_match_merra(flnm, tmidx, tmday, lats, lons, msgvl = -9999, \
l2srch = '/archive/AIRSOps/airs/gdaac/v6'):
# Set up matched AIRS/MERRA cloud file
# flnm: Name of output file
# tmidx: Time index in the output file
# tmday: Datetime object with time information
# lats: Latitude variable array
# lons: Longitude variable array
# Search AIRS Level 2
airsdr = '%s/%04d/%02d/%02d/airs2ret' % (l2srch,tmday.year,tmday.month,tmday.day)
dsclst = []
asclst = []
nlat = lats.shape[0]
nlon = lons.shape[0]
lonmn = lons[0] - 5.0
lonmx = lons[nlon-1] + 5.0
latmn = lats[0] - 5.0
latmx = lats[nlat-1] + 5.0
d0 = datetime.datetime(1993,1,1,0,0,0)
ddif = tmday - d0
bsdif = ddif.total_seconds()
# Set up reference frame
ltrp = numpy.repeat(lats,nlon)
ltidx = numpy.repeat(numpy.arange(nlat),nlon)
lnrp = numpy.tile(lons,nlat)
lnidx = numpy.tile(numpy.arange(nlon),nlat)
merfrm = pandas.DataFrame({'GridLonIdx': lnidx, 'GridLatIdx': ltidx, \
'GridLon': lnrp, 'GridLat': ltrp})
if (os.path.exists(airsdr)):
fllst = os.listdir(airsdr)
#print(fllst)
for j in range(len(fllst)):
lncr = len(fllst[j])
l4 = lncr - 4
if (fllst[j][l4:lncr] == '.hdf'):
l2fl = '%s/%s' % (airsdr,fllst[j])
ncl2 = Dataset(l2fl)
slrzn = ncl2.variables['solzen'][:,:]
l2lat = ncl2.variables['Latitude'][:,:]
l2lon = ncl2.variables['Longitude'][:,:]
l2tm = ncl2.variables['Time'][:,:]
ncl2.close()
# Check lat/lon ranges and asc/dsc
l2tmdf = numpy.absolute(l2tm - bsdif)
l2mntm = numpy.min(l2tmdf)
# Within 4 hours
if l2mntm < 14400.0:
ltflt = l2lat.flatten()
lnflt = l2lon.flatten()
latsb = ltflt[(ltflt >= latmn) & (ltflt <= latmx)]
lonsb = lnflt[(lnflt >= lonmn) & (lnflt <= lonmx)]
if ( (latsb.shape[0] > 0) and (lonsb.shape[0] > 0) ):
asclst.append(fllst[j])
sstr = '%s %.2f' % (fllst[j], l2mntm)
print(sstr)
# Set up outputs
cld1arr = numpy.zeros((nlat,nlon,9),dtype=numpy.float32) + msgvl
cld2arr = numpy.zeros((nlat,nlon,9),dtype=numpy.float32) + msgvl
cld1qc = numpy.zeros((nlat,nlon,9),dtype=numpy.int16) - 99
cld2qc = numpy.zeros((nlat,nlon,9),dtype=numpy.int16) - 99
ncldarr = numpy.zeros((nlat,nlon,9),dtype=numpy.int16) - 99
#print(asclst)
if (len(asclst) > 0):
# Start matchups
for j in range(len(asclst)):
l2fl = '%s/%s' % (airsdr,asclst[j])
ncl2 = Dataset(l2fl)
l2lat = ncl2.variables['Latitude'][:,:]
l2lon = ncl2.variables['Longitude'][:,:]
cfrcair = ncl2.variables['CldFrcStd'][:,:,:,:,:]
cfrcaqc = ncl2.variables['CldFrcStd_QC'][:,:,:,:,:]
ncldair = ncl2.variables['nCld'][:,:,:,:]
ncl2.close()
nairtrk = l2lat.shape[0]
nairxtk = l2lat.shape[1]
# Data Frame
tkidx = numpy.repeat(numpy.arange(nairtrk),nairxtk)
xtidx = numpy.tile(numpy.arange(nairxtk),nairtrk)
l2lnflt = l2lon.flatten().astype(numpy.float64)
l2ltflt = l2lat.flatten().astype(numpy.float64)
l2frm = pandas.DataFrame({'L2LonIdx': xtidx, 'L2LatIdx': tkidx, \
'L2Lon': l2lnflt, 'L2Lat': l2ltflt})
l2frm['GridLon'] = numpy.around(l2frm['L2Lon']/0.625) * 0.625
l2frm['GridLat'] = numpy.around(l2frm['L2Lat']/0.5) * 0.5
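# Illustrative rounding (made-up point): L2Lon = -100.2 and L2Lat = 37.3 snap to
# the nearest MERRA grid cell at GridLon = numpy.around(-100.2/0.625)*0.625 = -100.0
# and GridLat = numpy.around(37.3/0.5)*0.5 = 37.5 before the merge with merfrm.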
l2mrg = pandas.merge(l2frm,merfrm,on=['GridLon','GridLat'])
print(l2mrg.shape)
#if j == 0:
# print(asclst[j])
# print(l2mrg[0:15])
# Output data if available
for k in range(l2mrg.shape[0]):
yidxout = l2mrg['GridLatIdx'].values[k]
xidxout = l2mrg['GridLonIdx'].values[k]
yidxl2 = l2mrg['L2LatIdx'].values[k]
xidxl2 = l2mrg['L2LonIdx'].values[k]
cld1arr[yidxout,xidxout,:] = cfrcair[yidxl2,xidxl2,:,:,0].flatten().astype(numpy.float32)
cld2arr[yidxout,xidxout,:] = cfrcair[yidxl2,xidxl2,:,:,1].flatten().astype(numpy.float32)
cld1qc[yidxout,xidxout,:] = cfrcaqc[yidxl2,xidxl2,:,:,0].flatten().astype(numpy.int16)
cld2qc[yidxout,xidxout,:] = cfrcaqc[yidxl2,xidxl2,:,:,1].flatten().astype(numpy.int16)
ncldarr[yidxout,xidxout,:] = ncldair[yidxl2,xidxl2,:,:].flatten().astype(numpy.int16)
# Output
qout = Dataset(flnm,'r+')
varcfrc1 = qout.variables['AIRS_CldFrac_1']
varcfrc1[tmidx,:,:,:] = cld1arr[:,:,:]
varcfrc2 = qout.variables['AIRS_CldFrac_2']
varcfrc2[tmidx,:,:,:] = cld2arr[:,:,:]
varcfqc1 = qout.variables['AIRS_CldFrac_QC_1']
varcfqc1[tmidx,:,:,:] = cld1qc[:,:,:]
varcfqc2 = qout.variables['AIRS_CldFrac_QC_2']
varcfqc2[tmidx,:,:,:] = cld2qc[:,:,:]
varncld = qout.variables['AIRS_nCld']
varncld[tmidx,:,:,:] = ncldarr[:,:,:]
qout.close()
return
def quantile_allstate_locmask_conus(rfdr, mtlst, cslst, airslst, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, sstr, mskvr, mskvl):
# Construct quantiles and z-scores, with a possibly irregular location mask,
# for joint atmospheric state (AIRS/SARTA)
# rfdr: Directory for reference data (Levels/Quantiles)
# mtlst: Meteorology (MERRA) file list
# cslst: Cloud slab file list
# airslst: AIRS cloud fraction file list
# dtdr: Output directory
# yrlst: List of years to process
# mnst: Starting Month
# mnfn: Ending Month
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# sstr: Season string
# mskvr: Name of region mask variable
# mskvl: Value of region mask for Region Choice
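# Hypothetical call sketch (all arguments below are placeholders, not paths or
# settings from this project):
# quantile_allstate_locmask_conus('RefDir', ['merra_2015.nc'], ['cslab_2015.h5'],
#     ['airs_cfrac_2015.nc'], 'OutDir', [2015], 6, 8, 0, 'CONUS', 'JJA',
#     'region_mask', 1)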
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
# RN generator
sdchc = 542354 + yrlst[0] + hrchc
random.seed(sdchc)
# Mask, lat, lon
f = Dataset(mtlst[0],'r')
mask = f.variables[mskvr][:,:]
latmet = f.variables['lat'][:]
lonmet = f.variables['lon'][:]
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
print(mskvl)
mskind[mask == mskvl] = 1
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
lnsm = numpy.sum(mskind,axis=0)
ltsm = numpy.sum(mskind,axis=1)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
nzout = 101
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mskind[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
f = Dataset(mtlst[k],'r')
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
tmunit = tmunit.replace("days since ","")
dybs = datetime.datetime.strptime(tmunit,"%Y-%m-%d %H:%M:%S")
print(dybs)
dy0 = dybs + datetime.timedelta(days=tminf[0])
dyinit = datetime.date(dy0.year,dy0.month,dy0.day)
print(dyinit)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < mnst:
dyfn = datetime.date(yrlst[k]+1,mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jftmp = tt31.tm_yday + 1
jsq1 = numpy.arange(jst,jftmp)
jsq2 = numpy.arange(1,jfn)
jdsq = numpy.append(jsq1,jsq2)
elif mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
jdsq = numpy.arange(jst,jfn)
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
jdsq = numpy.arange(jst,jfn)
print(dyst)
print(dyfn)
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
# Cloud slab: HDF5 or NetCDF
lncr = len(cslst[k])
l3 = lncr - 3
if (cslst[k][l3:lncr] == '.h5'):
f = h5py.File(cslst[k],'r')
tms = f['/time'][:,dystidx:dyfnidx]
ctyp1 = f['/ctype'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f['/ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f['/cprtop'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f['/cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f['/cprbot'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f['/cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc1 = f['/cfrac'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc2 = f['/cfrac2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc12 = f['/cfrac12'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f['/cngwat'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f['/cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f['/cstemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f['/cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
elif (cslst[k][l3:lncr] == '.nc'):
f = Dataset(cslst[k],'r')
tms = f.variables['time'][dystidx:dyfnidx]
ctyp1 = f.variables['ctype1'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f.variables['ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f.variables['cprtop1'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f.variables['cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f.variables['cprbot1'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f.variables['cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f.variables['cngwat1'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f.variables['cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f.variables['cstemp1'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f.variables['cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
tmflt = tms.flatten()
nt = tmflt.shape[0]
lnhld = numpy.tile(lnrp,nt)
lthld = numpy.tile(ltrp,nt)
# MERRA variables
f = Dataset(mtlst[k],'r')
psfc = f.variables['spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
stparr = f.variables['stemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
salinf = f.variables['salti']
if salinf.ndim == 3:
salarr = f.variables['salti'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
elif salinf.ndim == 2:
salarr = f.variables['salti'][ltmn:ltmx,lnmn:lnmx]
tmparr = f.variables['ptemp'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
h2oarr = f.variables['rh'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
altarr = f.variables['palts'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
f.close()
# Mask
print(ctyp1.shape)
nt = ctyp1.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
nslbtmp = numpy.zeros((ctyp1.shape),dtype=numpy.int16)
nslbtmp[(ctyp1 > 100) & (ctyp2 > 100)] = 2
nslbtmp[(ctyp1 > 100) & (ctyp2 < 100)] = 1
# AIRS clouds
f = Dataset(airslst[k],'r')
arsfrc1 = f.variables['AIRS_CldFrac_1'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx,:]
arsfrc2 = f.variables['AIRS_CldFrac_2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx,:]
f.close()
# Sum
frctot = arsfrc1 + arsfrc2
# Construct Clr/PC/Ovc indicator for AIRS total cloud frac
totclr = numpy.zeros(frctot.shape,dtype=numpy.int16)
totclr[frctot == 0.0] = -1
totclr[frctot == 1.0] = 1
totclr = ma.masked_array(totclr, mask = frctot.mask)
frc0 = frctot[:,:,:,0]
frc0 = frc0.flatten()
frcsq = numpy.arange(tmhld.shape[0])
# Subset by AIRS matchup and location masks
frcsb = frcsq[(numpy.logical_not(frc0.mask)) & (mskall > 0)]
nairs = frcsb.shape[0]
print(tmhld.shape)
print(frcsb.shape)
ctyp1 = ctyp1.flatten()
ctyp2 = ctyp2.flatten()
nslbtmp = nslbtmp.flatten()
cngwt1 = cngwt1.flatten()
cngwt2 = cngwt2.flatten()
cttp1 = cttp1.flatten()
cttp2 = cttp2.flatten()
psfc = psfc.flatten()
# Number of slabs
if tsmp == 0:
nslabout = numpy.zeros((nairs,),dtype=numpy.int16)
nslabout[:] = nslbtmp[frcsb]
else:
nslabout = numpy.append(nslabout,nslbtmp[frcsb])
# For two slabs, slab 1 must have highest cloud bottom pressure
cprt1 = cprt1.flatten()
cprt2 = cprt2.flatten()
cprb1 = cprb1.flatten()
cprb2 = cprb2.flatten()
slabswap = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
swpsq = frcsq[(nslbtmp == 2) & (cprb1 < cprb2)]
slabswap[swpsq] = 1
# Cloud Pressure variables
pbttmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp1[nslbtmp >= 1] = cprb1[nslbtmp >= 1]
pbttmp1[swpsq] = cprb2[swpsq]
ptptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp1[nslbtmp >= 1] = cprt1[nslbtmp >= 1]
ptptmp1[swpsq] = cprt2[swpsq]
pbttmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp2[nslbtmp == 2] = cprb2[nslbtmp == 2]
pbttmp2[swpsq] = cprb1[swpsq]
ptptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp2[nslbtmp == 2] = cprt2[nslbtmp == 2]
ptptmp2[swpsq] = cprt1[swpsq]
# DP Cloud transformation
dptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp1[nslbtmp >= 1] = pbttmp1[nslbtmp >= 1] - ptptmp1[nslbtmp >= 1]
dpslbtmp = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dpslbtmp[nslbtmp == 2] = ptptmp1[nslbtmp == 2] - pbttmp2[nslbtmp == 2]
dptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp2[nslbtmp == 2] = pbttmp2[nslbtmp == 2] - ptptmp2[nslbtmp == 2]
# Adjust negative DPSlab values
dpnsq = frcsq[(nslbtmp == 2) & (dpslbtmp <= 0.0) & (dpslbtmp > -1000.0)]
dpadj = numpy.zeros((ctyp1.shape[0],))
dpadj[dpnsq] = numpy.absolute(dpslbtmp[dpnsq])
dpslbtmp[dpnsq] = 10.0
dptmp1[dpnsq] = dptmp1[dpnsq] / 2.0
dptmp2[dpnsq] = dptmp2[dpnsq] / 2.0
# Sigma / Logit Adjustments
zpbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp1tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdslbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp2tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
ncldct = 0
for t in range(psfc.shape[0]):
if ( (pbttmp1[t] >= 0.0) and (dpslbtmp[t] >= 0.0) ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], dpslbtmp[t] / psfc[t], \
dptmp2[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
prptmp[4] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2] - prptmp[3]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = ztmp[2]
zdp2tmp[t] = ztmp[3]
elif ( pbttmp1[t] >= 0.0 ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
else:
zpbtmp[t] = -9999.0
zdp1tmp[t] = -9999.0
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
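# Illustrative proportions (made-up numbers): psfc = 1000, pbttmp1 = 800,
# dptmp1 = 100, dpslbtmp = 200, dptmp2 = 100 give prptmp = [0.2, 0.1, 0.2, 0.1, 0.4],
# i.e. fractions of surface pressure for below-cloud air, cloud 1, the inter-slab
# gap, cloud 2, and the remainder, which calculate_VPD.lgtzs maps to logit-type z scores.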
str1 = 'Cloud Bot Pres Below Sfc: %d ' % (ncldct)
print(str1)
if tsmp == 0:
psfcout = numpy.zeros((frcsb.shape[0],)) - 9999.0
psfcout[:] = psfc[frcsb]
prsbot1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
prsbot1out[:] = zpbtmp[frcsb]
dpcld1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld1out[:] = zdp1tmp[frcsb]
dpslbout = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpslbout[:] = zdslbtmp[frcsb]
dpcld2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld2out[:] = zdp2tmp[frcsb]
else:
psfcout = numpy.append(psfcout,psfc[frcsb])
prsbot1out = numpy.append(prsbot1out,zpbtmp[frcsb])
dpcld1out = numpy.append(dpcld1out,zdp1tmp[frcsb])
dpslbout = numpy.append(dpslbout,zdslbtmp[frcsb])
dpcld2out = numpy.append(dpcld2out,zdp2tmp[frcsb])
# Slab Types: 101.0 = Liquid, 201.0 = Ice, None else
# Output: 0 = Liquid, 1 = Ice
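# Worked example of the mapping below: (101.0 - 1.0) / 100.0 - 1.0 = 0 (liquid)
# and (201.0 - 1.0) / 100.0 - 1.0 = 1 (ice).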
typtmp1 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp1[nslbtmp >= 1] = (ctyp1[nslbtmp >= 1] - 1.0) / 100.0 - 1.0
typtmp1[swpsq] = (ctyp2[swpsq] - 1.0) / 100.0 - 1.0
typtmp2 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp2[nslbtmp == 2] = (ctyp2[nslbtmp == 2] - 1.0) / 100.0 - 1.0
typtmp2[swpsq] = (ctyp1[swpsq] - 1.0) / 100.0 - 1.0
if tsmp == 0:
slbtyp1out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp1out[:] = typtmp1[frcsb]
slbtyp2out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp2out[:] = typtmp2[frcsb]
else:
slbtyp1out = numpy.append(slbtyp1out,typtmp1[frcsb])
slbtyp2out = numpy.append(slbtyp2out,typtmp2[frcsb])
# Cloud Cover Indicators
totclrtmp = numpy.zeros((frcsb.shape[0],3,3),dtype=numpy.int16)
frctottmp = numpy.zeros((frcsb.shape[0],3,3),dtype=frctot.dtype)
cctr = 0
for frw in range(3):
for fcl in range(3):
clrvec = totclr[:,:,:,cctr].flatten()
frcvec = frctot[:,:,:,cctr].flatten()
totclrtmp[:,frw,fcl] = clrvec[frcsb]
frctottmp[:,frw,fcl] = frcvec[frcsb]
cctr = cctr + 1
if tsmp == 0:
totclrout = numpy.zeros(totclrtmp.shape,dtype=numpy.int16)
totclrout[:,:,:] = totclrtmp
frctotout = numpy.zeros(frctottmp.shape,dtype=frctottmp.dtype)
frctotout[:,:,:] = frctottmp
else:
totclrout = numpy.append(totclrout,totclrtmp,axis=0)
frctotout = numpy.append(frctotout,frctottmp,axis=0)
# Cloud Fraction Logit, still account for swapping
#z1tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z2tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z12tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
# Cloud Fraction
cctr = 0
for frw in range(3):
for fcl in range(3):
frcvect = frctot[:,:,:,cctr].flatten()
frcvec1 = arsfrc1[:,:,:,cctr].flatten()
frcvec2 = arsfrc2[:,:,:,cctr].flatten()
# Quick fix for totals over 1.0: rescale so the combined fraction does not exceed 1
fvsq = numpy.arange(frcvect.shape[0])
fvsq2 = fvsq[frcvect > 1.0]
frcvec1[fvsq2] = frcvec1[fvsq2] / frcvect[fvsq2]
frcvec2[fvsq2] = frcvec2[fvsq2] / frcvect[fvsq2]
frcvect[fvsq2] = 1.0
for t in range(nairs):
crslb = nslbtmp[frcsb[t]]
crclr = totclrtmp[t,frw,fcl]
if ( (crslb == 0) or (crclr == -1) ):
#z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 1) ):
#z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 0) ):
#prptmp = numpy.array( [frcvect[frcsb[t]], 1.0 - frcvect[frcsb[t]] ] )
#ztmp = calculate_VPD.lgtzs(prptmp)
#z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
# For 2 slabs, recall AIRS cloud layers are ordered upper/lower, while ours are the opposite
# Also apply random overlap and adjust AIRS zero fractions
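# Illustrative draw (numbers are made up): AIRS fractions 0.3 (upper) and 0.6 (lower)
# become frcs = [0.6, 0.3]; c12 is drawn uniformly from [0, 0.3], and the resulting
# [frcs[0]-c12*frcs[1], frcs[1]-c12*frcs[0], c12, clear] vector is renormalized over
# the cloudy part before the logit transform.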
elif ( (crslb == 2) and (crclr == 0) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if (numpy.sum(frcs) < 0.01):
frcs[0] = 0.005
frcs[1] = 0.005
elif frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [frcs[0] - c12tmp[0]*frcs[1], \
frcs[1] - c12tmp[0]*frcs[0], c12tmp[0], 0.0])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
prpcld = (prptmp[0] + prptmp[1] + prptmp[2])
prpfnl = numpy.array([prptmp[1] / prpcld, prptmp[2] / prpcld, prptmp[0] / prpcld])
ztmp = calculate_VPD.lgtzs(prpfnl)
#z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[0]
z12tmp[t,frw,fcl] = ztmp[1]
elif ( (crslb == 2) and (crclr == 1) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [0.999 * (frcs[0] - c12tmp[0]*frcs[1]), \
0.999 * (frcs[1] - c12tmp[0]*frcs[0]), 0.999 * c12tmp[0], 0.001])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
prpcld = (prptmp[0] + prptmp[1] + prptmp[2])
prpfnl = numpy.array([prptmp[1] / prpcld, prptmp[2] / prpcld, prptmp[0] / prpcld])
ztmp = calculate_VPD.lgtzs(prpfnl)
#z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[0]
z12tmp[t,frw,fcl] = ztmp[1]
cctr = cctr + 1
if tsmp == 0:
#cfclgt1out = numpy.zeros(z1tmp.shape)
#cfclgt1out[:,:,:] = z1tmp
cfclgt2out = numpy.zeros(z2tmp.shape)
cfclgt2out[:,:,:] = z2tmp
cfclgt12out = numpy.zeros(z12tmp.shape)
cfclgt12out[:,:,:] = z12tmp
else:
#cfclgt1out = numpy.append(cfclgt1out,z1tmp,axis=0)
cfclgt2out = numpy.append(cfclgt2out,z2tmp,axis=0)
cfclgt12out = numpy.append(cfclgt12out,z12tmp,axis=0)
# Cloud Non-Gas Water
ngwttmp1 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp1[nslbtmp >= 1] = cngwt1[nslbtmp >= 1]
ngwttmp1[swpsq] = cngwt2[swpsq]
ngwttmp2 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp2[nslbtmp == 2] = cngwt2[nslbtmp == 2]
ngwttmp2[swpsq] = cngwt1[swpsq]
if tsmp == 0:
ngwt1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt1out[:] = ngwttmp1[frcsb]
ngwt2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt2out[:] = ngwttmp2[frcsb]
else:
ngwt1out = numpy.append(ngwt1out,ngwttmp1[frcsb])
ngwt2out = numpy.append(ngwt2out,ngwttmp2[frcsb])
# Cloud Top Temperature
cttptmp1 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp1[nslbtmp >= 1] = cttp1[nslbtmp >= 1]
cttptmp1[swpsq] = cttp2[swpsq]
cttptmp2 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp2[nslbtmp == 2] = cttp2[nslbtmp == 2]
cttptmp2[swpsq] = cttp1[swpsq]
if tsmp == 0:
cttp1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp1out[:] = cttptmp1[frcsb]
cttp2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp2out[:] = cttptmp2[frcsb]
else:
cttp1out = numpy.append(cttp1out,cttptmp1[frcsb])
cttp2out = numpy.append(cttp2out,cttptmp2[frcsb])
# Temp/RH profiles
tmptmp = numpy.zeros((nairs,nzout))
h2otmp = numpy.zeros((nairs,nzout))
alttmp = numpy.zeros((nairs,nzout))
for j in range(nzout):
tmpvec = tmparr[:,j,:,:].flatten()
tmpvec[tmpvec > 1e30] = -9999.
tmptmp[:,j] = tmpvec[frcsb]
altvec = altarr[:,j,:,:].flatten()
alttmp[:,j] = altvec[frcsb]
h2ovec = h2oarr[:,j,:,:].flatten()
h2ovec[h2ovec > 1e30] = -9999.
h2otmp[:,j] = h2ovec[frcsb]
if tsmp == 0:
tmpmerout = numpy.zeros(tmptmp.shape)
tmpmerout[:,:] = tmptmp
h2omerout = numpy.zeros(h2otmp.shape)
h2omerout[:,:] = h2otmp
altout = numpy.zeros(alttmp.shape)
altout[:,:] = alttmp
else:
tmpmerout = numpy.append(tmpmerout,tmptmp,axis=0)
h2omerout = numpy.append(h2omerout,h2otmp,axis=0)
altout = numpy.append(altout,alttmp,axis=0)
# Surface
stparr = stparr.flatten()
psfarr = psfc.flatten()
if salarr.ndim == 2:
salarr = salarr.flatten()
salfl = numpy.tile(salarr[:],nt)
elif salarr.ndim == 3:
salfl = salarr.flatten()
if tsmp == 0:
sftmpout = numpy.zeros((nairs,)) - 9999.0
sftmpout[:] = stparr[frcsb]
psfcout = numpy.zeros((nairs,)) - 9999.0
psfcout[:] = psfarr[frcsb]
sfaltout = numpy.zeros((nairs,)) - 9999.0
sfaltout[:] = salfl[frcsb]
else:
sftmpout = numpy.append(sftmpout,stparr[frcsb])
psfcout = numpy.append(psfcout,psfarr[frcsb])
sfaltout = numpy.append(sfaltout,salfl[frcsb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((frcsb.shape[0],)) - 9999.0
latout[:] = lthld[frcsb]
lonout =
|
numpy.zeros((frcsb.shape[0],))
|
numpy.zeros
|
# -*- coding: utf-8 -*-
import collections.abc as abc
import pickle
import unittest
from copy import deepcopy
from itertools import islice
import numpy as np
from crystals import Atom, AtomicStructure, Crystal
np.random.seed(23)
class TestAtomicStructure(unittest.TestCase):
def setUp(self):
self.substructure = AtomicStructure(atoms=[Atom("U", [0, 0, 0])])
self.structure = AtomicStructure(
atoms=[Atom("Ag", [0, 0, 0]), Atom("Ag", [1, 1, 1])],
substructures=[self.substructure],
)
def test_iteration(self):
""" Test iteration of AtomicStructure yields from orphan atoms and substructure atoms alike """
elements = [atm.element for atm in self.structure]
self.assertEqual(len(elements), 3)
def test_addition_trivial(self):
""" Test that the addition of two AtomicStructures, one being empty, works as expected """
addition = self.structure + AtomicStructure()
self.assertEqual(addition, self.structure)
self.assertIsNot(addition, self.structure)
def test_addition_uniqueness(self):
""" Test that the addition of two AtomicStructures, works as expected regarding unique atoms """
self.assertEqual(self.structure + self.structure, self.structure)
def test_addition(self):
""" Test the addition of two different AtomicStructures works as expected. """
new_struct = AtomicStructure(
atoms=[Atom("U", [0, 1, 0])],
substructures=[
AtomicStructure(
atoms=[Atom("Ag", [0.5, 0, 0]), Atom("Ag", [1, 0.3, 1])]
)
],
)
addition = self.structure + new_struct
self.assertEqual(len(new_struct) + len(self.structure), len(addition))
self.assertEqual(
len(new_struct.atoms) + len(self.structure.atoms), len(addition.atoms)
)
self.assertEqual(
len(new_struct.substructures) + len(self.structure.substructures),
len(addition.substructures),
)
def test_addition_subclasses(self):
""" Test that the addition of two subclass of AtomicStructures is preserved under addition. """
class NewAtomicStructure(AtomicStructure):
pass
addition = NewAtomicStructure() + NewAtomicStructure()
self.assertIs(type(addition), NewAtomicStructure)
def test_truthiness(self):
""" Test that empty AtomicStructures are falsey, and truthy otherwise. """
empty_structure = AtomicStructure()
self.assertFalse(empty_structure)
self.assertTrue(self.structure)
def test_trivial_transformation(self):
""" Test that the identity transformation of an AtomicStructure works as expected. """
transformed = self.structure.transform(np.eye(3))
# transformed structure should be different, but equal, to original structure
self.assertIsNot(transformed, self.structure)
self.assertEqual(transformed, self.structure)
def test_transformations_inversions(self):
""" Test that symmetry operations work as expected when inverted. """
operator = np.random.random(size=(3, 3))
inv_op =
|
np.linalg.inv(operator)
|
numpy.linalg.inv
|
#!/usr/bin/python3
import sklearn
import sklearn.model_selection
import sklearn.naive_bayes
import sklearn.linear_model
import sklearn.ensemble
import sklearn.neighbors
import sklearn.mixture
import sklearn.metrics
import numpy as np
import argparse
import modules
import utils
import config
def main(fname: str):
# Read data
features_header, identifier, features_str, label_str = utils.read_csv(fname, config.FEATURE_COLS, config.LABEL_COL)
# Feature Preprocessing
features = np.array(list(map(lambda x: utils.process_features(x, feature_map_fcn = {
41: lambda x: 1 if x == '1' else 0,
42: lambda x: np.char.count(x, '1'),
51: lambda x: utils.process_prov_feature(x, config.PROV_FEATURE_TYPE_1),
52: lambda x: utils.process_prov_feature(x, config.PROV_FEATURE_TYPE_1),
53: lambda x: utils.process_prov_feature(x, config.PROV_FEATURE_TYPE_1),
54: lambda x: utils.process_prov_feature(x, config.PROV_FEATURE_TYPE_2)
}), features_str)))
label = np.array(list(map(lambda y: utils.process_label(y, config.LABEL_DICT, config.UNLABELED), label_str)))
# Dataset Split
pt_idx, train_l_idx, train_u_idx, test_idx = utils.dataset_split(identifier, label, config.UNLABELED)
identifier_pt = identifier[pt_idx]
features_pt = features[pt_idx]
label_pt = label[pt_idx]
identifier_train_l = identifier[train_l_idx]
features_train_l = features[train_l_idx]
label_train_l = label[train_l_idx]
identifier_train_u = identifier[train_u_idx]
features_train_u = features[train_u_idx]
identifier_test = identifier[test_idx]
features_test = features[test_idx]
label_test = label[test_idx]
print(f'PreTraining Dataset Size: {np.shape(features_pt), np.shape(label_pt)}')
print(f'Training Dataset Size: {np.shape(features_train_l), np.shape(label_train_l)}, {
|
np.shape(features_train_u)
|
numpy.shape
|
import re
import numpy as np
from sklearn.linear_model import LogisticRegression
import CS6140_A_MacLeay.utils.Adaboost as adar
import CS6140_A_MacLeay.utils.Adaboost_compare as adac
import pandas as pd
import multiprocessing
import os
from sklearn.feature_selection import SelectKBest
from sklearn.tree import DecisionTreeClassifier
__author__ = ''
def get_bits(val, length):
"""\
Gets an array of bits for the given integer (most significant digits first),
padded with 0s up to the desired length
"""
bits = [int(bit_val) for bit_val in '{:b}'.format(val)]
padding = [0] * max(0, length - len(bits))
return padding + bits
class ECOCClassifier(object):
"""Implements multiclass prediction for any binary learner using Error Correcting Codes"""
def __init__(self, learner=adac.AdaboostOptimal, verbose=False, encoding_type='exhaustive'):
#def __init__(self, learner=adac.AdaboostOptimal, verbose=False, encoding_type='exhaustive'):
"""\
:param learner: binary learner to use
"""
self.learner = learner
self.verbose = verbose
assert encoding_type in {'exhaustive', 'one_vs_all'}
self.encoding_type = encoding_type
# Things we'll estimate from data
self.ordered_y = None
self.encoding_table = None
self.classifiers = None
def _create_one_vs_all_encoding(self, ordered_y):
"""\
Creates an identity encoding table. Much faster than exhaustive but gives suboptimal results.
Useful for debugging.
"""
return np.identity(len(ordered_y))
def _create_exhaustive_encoding(self, ordered_y):
"""\
Creates an exhaustive encoding table for the given set of unique label values
:param ordered_y: unique labels present in the data, in a fixed order
:return: matrix of size #unique labels x #ECOC functions
"""
# Given k labels, the maximum number of unique binary encodings is 2 ** k. Of those, two are unary
# (all 0s and all 1s), and as such not useful. This gives a total number of useful encodings equal to
# 2 ** k - 2. Now we note that inverted encodings are equivalent, which means that the practical
# number of unique, non-redundant encodings is only (2 ** k - 2)/2 = 2 ** (k - 1) - 1.
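# Worked example: with k = 4 labels there are 2 ** 4 - 2 = 14 non-unary encodings;
# pairing each encoding with its inverse leaves 2 ** (4 - 1) - 1 = 7 unique ECOC functions.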
n_functions = 2 ** (len(ordered_y) - 1) - 1
# We generate the signature for each function by enumerating binary numbers between 1 and 2 ** k - 1,
# making sure we don't include inverses.
encodings = []
for j in xrange(2 ** len(ordered_y) - 2):
enc = tuple(get_bits(j + 1, len(ordered_y)))
inv_enc = tuple([1 - x for x in enc])
if enc not in encodings and inv_enc not in encodings:
encodings.append(enc)
encoding_table = np.array(encodings).T
assert encoding_table.shape[1] == n_functions
# Sanity check 1: make sure all functions have non-trivial encodings (with both 0s and 1s)
for j in xrange(encoding_table.shape[1]):
if len(set(encoding_table[:, j])) < 2:
raise ValueError('Bad encoding. Function {} is unary.'.format(j))
# Sanity check 2: make sure all encodings are unique
encodings = [tuple(encoding_table[:, j]) for j in xrange(encoding_table.shape[1])]
if len(encodings) != len(set(encodings)):
raise ValueError('Some encodings are duplicated')
if self.verbose:
print('Encoding OK')
return encoding_table
def _encode_y(self, y, function_idx):
"""\
Binarizes a multi-class vector y using the given function.
:param y: multi-class label vector
:param function_idx: which function to use for the encoding (between 0 and self.encoding_table.shape[1] - 1)
:return: label vector binarized into 0s and 1s
"""
def encode_one(y_val):
"""\
Encodes a single multi-class label.
:param y_val: single label value
:return: y_val encoded as either 0 or 1
"""
y_idx = self.ordered_y.index(y_val)
return self.encoding_table[y_idx, function_idx]
# Check that the requested function is valid
assert 0 <= function_idx < self.encoding_table.shape[1]
# Binarize using the function's encoding
return np.asarray([encode_one(y_val) for y_val in y])
def fit(self, X, y):
"""Fits the classifier on data"""
self.ordered_y = sorted(set(y))
if self.encoding_type == 'exhaustive':
self.encoding_table = self._create_exhaustive_encoding(self.ordered_y)
else:
self.encoding_table = self._create_one_vs_all_encoding(self.ordered_y)
self.classifiers = []
pool = multiprocessing.Pool()
async_results = []
for function_idx in xrange(self.encoding_table.shape[1]):
encoded_y = self._encode_y(y, function_idx)
async_results.append(pool.apply_async(_fit_one, [X, encoded_y]))
pool.close()
for function_idx, result in zip(xrange(self.encoding_table.shape[1]), async_results):
if self.verbose:
print('Fit function {}/{}'.format(function_idx + 1, self.encoding_table.shape[1]))
self.classifiers.append(result.get())
return self
def predict(self, X):
"""Predicts crisp labels for samples in a matrix"""
def predict_one(idx, signature):
"""Predicts the label for a single sample"""
# Compute hamming distance between our prediction and each label's encoding
hamming_dist = {}
for y_val, row in zip(self.ordered_y, self.encoding_table):
hamming_dist[y_val] = np.sum(
|
np.abs(signature - row)
|
numpy.abs
|
# This is only meant to add docs to objects defined in C-extension modules.
# The purpose is to allow easier editing of the docstrings without
# requiring a re-compile.
# NOTE: Many of the methods of ndarray have corresponding functions.
# If you update these docstrings, please keep also the ones in
# core/fromnumeric.py, core/defmatrix.py up-to-date.
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', or 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index :
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews :
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index :
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value :
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the Numpy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lamdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style pattern do-while
pattern. For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an
object whose __array__ method returns an array, or any
(nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`dtype`, `order`, etc.).
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, fill
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major) or
Fortran (column-major) order in memory.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'count_nonzero',
"""
count_nonzero(a)
Counts the number of non-zero values in the array ``a``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
Returns
-------
count : int
Number of non-zero values in the array.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
5
""")
add_newdoc('numpy.core.multiarray','set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset; default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays together.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimensions of `a` and `b` have different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core','fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray','correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
out : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray','_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray','_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same
shape as `condition`.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second-to-last row, etc.
Parameters
----------
keys : (k,N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print ind
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
can_cast(from, totype, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
True
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i1', 'S8')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: invalid type promotion
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray','newbuffer',
"""newbuffer(size)
Return a new uninitialized buffer object of size bytes
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it's the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
""")
add_newdoc('numpy.core', 'einsum',
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : data-type, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions.
order : {'C', 'F', 'A', or 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscript labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
""")
add_newdoc('numpy.core', 'alterdot',
"""
Change `dot`, `vdot`, and `innerproduct` to use accelerated BLAS functions.
Typically, as a user of Numpy, you do not explicitly call this function. If
Numpy is built with an accelerated BLAS, this function is automatically
called when Numpy is imported.
When Numpy is built with an accelerated BLAS like ATLAS, these functions
are replaced to make use of the faster implementations. The faster
implementations only affect float32, float64, complex64, and complex128
arrays. Furthermore, the BLAS API only includes matrix-matrix,
matrix-vector, and vector-vector products. Products of arrays with larger
dimensionalities use the built in functions and are not accelerated.
See Also
--------
restoredot : `restoredot` undoes the effects of `alterdot`.
""")
add_newdoc('numpy.core', 'restoredot',
"""
Restore `dot`, `vdot`, and `innerproduct` to the default non-BLAS
implementations.
Typically, the user will only need to call this when troubleshooting an
installation problem, reproducing the conditions of a build without an
accelerated BLAS, or when being very careful about benchmarking linear
algebra operations.
See Also
--------
alterdot : `restoredot` undoes the effects of `alterdot`.
""")
add_newdoc('numpy.core', 'vdot',
"""
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major or column-major order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as self._array_interface_['data'][0].
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``_as_parameter_`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and strides are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if the array already is in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(t)
Copy of the array, cast to a specified type.
Parameters
----------
t : str or dtype
Typecode or data-type to which the array is cast.
Raises
------
ComplexWarning :
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace)
Swap the bytes of the array elements
Toggle between low-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace: bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out: ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(a_min, a_max, out=None)
Return an array whose values are limited to ``[a_min, a_max]``.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
By default, the result is stored in C-contiguous (row-major) order in
memory. If `order` is `F`, the result has 'Fortran' (column-major)
order. If order is 'A' ('Any'), then the result has the same order
as the input.
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals.
Refer to `numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A'}, optional
Whether to flatten in C (row-major) or Fortran (column-major) order,
or preserve the C/Fortran ordering from `a`.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset)
Returns a field of the given array as a certain type.
A field is a view of the array data with each itemsize determined
by the given type and the offset into the current array, i.e. from
``offset * dtype.itemsize`` to ``(offset+1) * dtype.itemsize``.
Parameters
----------
dtype : str
String denoting the data type of the field.
offset : int
Number of `dtype.itemsize`'s to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 1.+1.j]])
>>> x.dtype
dtype('complex128')
>>> x.getfield('complex64', 0) # Note how this != x
array([[ 0.+1.875j, 0.+0.j ],
[ 0.+0.j , 0.+1.875j]], dtype=complex64)
>>> x.getfield('complex64',1) # Note how different this is than x
array([[ 0. +5.87173204e-39j, 0. +0.00000000e+00j],
[ 0. +0.00000000e+00j, 0. +5.87173204e-39j]], dtype=complex64)
>>> x.getfield('complex128', 0) # == x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 1.+1.j]])
If the argument dtype is the same as x.dtype, then offset != 0 raises
a ValueError:
>>> x.getfield('complex128', 1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Need 0 <= offset <= 0 for requested type but received offset = 1
>>> x.getfield('float64', 0)
array([[ 1., 0.],
[ 0., 1.]])
>>> x.getfield('float64', 1)
array([[ 1.77658241e-307, 0.00000000e+000],
[ 0.00000000e+000, 1.77658241e-307]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and define the last argument
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setasflat',
"""
a.setasflat(arr)
Equivalent to a.flat = arr.flat, but is generally more efficient.
This function does not check for overlap, so if ``arr`` and ``a``
are viewing the same data with different strides, the results will
be unpredictable.
Parameters
----------
arr : array_like
The array to copy into a.
Examples
--------
>>> a = np.arange(2*4).reshape(2,4)[:,:-1]; a
array([[0, 1, 2],
[4, 5, 6]])
>>> b = np.arange(3*3, dtype='f4').reshape(3,3).T[::-1,:-1]; b
array([[ 2., 5.],
[ 1., 4.],
[ 0., 3.]], dtype=float32)
>>> a.setasflat(b)
>>> a
array([[2, 5, 1],
[4, 0, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
above. `new_order` codes can be any of::
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
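Examples
--------
For instance, the same pair of ``int16`` values reinterpreted under the
opposite byte order (the underlying bytes are unchanged):
>>> A = np.array([1, 256], dtype='<i2')
>>> A
array([  1, 256], dtype=int16)
>>> A.newbyteorder()
array([256,   1], dtype=int16)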
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left')
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze()
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tostring())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
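Examples
--------
A minimal round-trip sketch; note that it writes ``data.txt`` into the
current working directory:
>>> a = np.arange(4)
>>> a.tofile('data.txt', sep=',')
>>> np.fromfile('data.txt', dtype=a.dtype, sep=',')
array([0, 1, 2, 3])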
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated, ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring',
"""
a.tostring(order='C')
Construct a Python string containing the raw data bytes in the array.
Constructs a Python string showing a copy of the raw contents of
data memory. The string can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
Parameters
----------
order : {'C', 'F', None}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : str
A Python string exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tostring()
'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tostring('C') == x.tostring()
True
>>> x.tostring('F')
'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
The default, None, results in the view having the same data-type
as `a`.
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used in two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print type(y)
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print x
[(1, 20) (3, 4)]
Using a view to convert an array to a record array:
>>> z = x.view(np.recarray)
>>> z.a
array([1], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frexp',
"""
Return normalized fraction and exponent of 2 of input array, element-wise.
Returns (`out1`, `out2`) from equation ``x = out1 * 2**out2``.
Parameters
----------
x : array_like
Input array.
Returns
-------
(out1, out2) : tuple of ndarrays, (float, int)
`out1` is a float array with values between -1 and 1.
`out2` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a Numpy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a Numpy universal function (``ufunc``) object.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array([012, 036, 0144], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['012', '036', '0144'],
dtype='|S4')
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Compute y = x1 * 2**x2.
Parameters
----------
x1 : array_like
The array of multipliers.
x2 : array_like
The array of exponents.
Returns
-------
y : array_like
The output array, the result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, the inverse of `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`; if used by itself, it is
clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
...                     invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# lib._compiled_base functions
#
##############################################################################
add_newdoc('numpy.lib._compiled_base', 'digitize',
"""
digitize(x, bins)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate.
Parameters
----------
x : array_like
Input array to be binned. It has to be 1-dimensional.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If the input is not 1-dimensional, or if `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
""")
add_newdoc('numpy.lib._compiled_base', 'bincount',
"""
bincount(x, weights=None, minlength=None)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
.. versionadded:: 1.6.0
A minimum number of bins for the output array.
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is non-positive.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.lib._compiled_base', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.lib._compiled_base', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
.. versionadded:: 1.6.0
Determines whether the indices should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.lib._compiled_base', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, a RuntimeError is raised.
If this routine does not know how to add a docstring to the object,
a TypeError is raised.
""")
add_newdoc('numpy.lib._compiled_base', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An integer type array whose elements should be packed to bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.lib._compiled_base', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
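Examples
--------
A short broadcasting illustration, using `np.add` as the binary ufunc:
>>> np.add(np.arange(3), 5)
array([5, 6, 7])
>>> np.add(np.arange(6).reshape(2, 3), np.array([10, 20, 30]))
array([[10, 21, 32],
       [13, 24, 35]])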
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print np.exp.identity
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in xrange(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : int, optional
The axis along which to apply the reduction.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in xrange(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are two exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(a) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in xrange(len(A)):
for j in xrange(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Record, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Record, one field named 'f1', in itself containing a record with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Record, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
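Examples
--------
The exact values depend on the platform and compiler; on a typical 64-bit
system:
>>> np.dtype(np.int32).alignment
4
>>> np.dtype(np.float64).alignment
8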
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
Array-interface compliant full description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
"""))
|
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
|
numpy.lib.add_newdoc
|
from gym.spaces import Box
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.rllib.agents.dqn.dqn_policy import postprocess_nstep_and_prio
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Deterministic
from ray.rllib.utils.annotations import override
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.tf_ops import huber_loss, minimize_and_clip, scope_vars
tf = try_import_tf()
ACTION_SCOPE = "action"
POLICY_SCOPE = "policy"
POLICY_TARGET_SCOPE = "target_policy"
Q_SCOPE = "critic"
Q_TARGET_SCOPE = "target_critic"
TWIN_Q_SCOPE = "twin_critic"
TWIN_Q_TARGET_SCOPE = "twin_target_critic"
# Importance sampling weights for prioritized replay
PRIO_WEIGHTS = "weights"
class DDPGPostprocessing:
"""Implements n-step learning and param noise adjustments."""
@override(Policy)
def postprocess_trajectory(self,
sample_batch,
other_agent_batches=None,
episode=None):
if self.config["parameter_noise"]:
# adjust the sigma of parameter space noise
states, noisy_actions = [
list(x) for x in sample_batch.columns(
[SampleBatch.CUR_OBS, SampleBatch.ACTIONS])
]
self.sess.run(self.remove_parameter_noise_op)
# TODO(sven): This won't work if exploration != Noise, which is
# probably fine as parameter_noise will soon be its own
# Exploration class.
clean_actions, cur_noise_scale = self.sess.run(
[self.output_actions,
self.exploration.get_info()],
feed_dict={
self.cur_observations: states,
self._is_exploring: False,
})
distance_in_action_space = np.sqrt(
np.mean(np.square(clean_actions - noisy_actions)))
self.pi_distance = distance_in_action_space
if distance_in_action_space < \
self.config["exploration_config"].get("ou_sigma", 0.2) * \
cur_noise_scale:
# multiplying the sampled OU noise by noise scale is
# equivalent to multiplying the sigma of OU by noise scale
self.parameter_noise_sigma_val *= 1.01
else:
self.parameter_noise_sigma_val /= 1.01
self.parameter_noise_sigma.load(
self.parameter_noise_sigma_val, session=self.sess)
return postprocess_nstep_and_prio(self, sample_batch)
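# --- hedged sketch (added for illustration; not part of the RLlib source) ---
# The adaptation above, in isolation: grow the parameter-noise sigma while the
# clean/noisy action distance stays below ou_sigma * current_noise_scale,
# otherwise shrink it. The 1.01 factor mirrors the hard-coded value used above.
def _adapt_parameter_noise_sigma_sketch(sigma, distance, ou_sigma=0.2, noise_scale=1.0):
    """Illustrative helper only, not an RLlib API."""
    return sigma * 1.01 if distance < ou_sigma * noise_scale else sigma / 1.01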
class DDPGTFPolicy(DDPGPostprocessing, TFPolicy):
def __init__(self, observation_space, action_space, config):
config = dict(ray.rllib.agents.ddpg.ddpg.DEFAULT_CONFIG, **config)
if not isinstance(action_space, Box):
raise UnsupportedSpaceException(
"Action space {} is not supported for DDPG.".format(
action_space))
if len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space has multiple dimensions "
"{}. ".format(action_space.shape) +
"Consider reshaping this into a single dimension, "
"using a Tuple action space, or the multi-agent API.")
self.config = config
# Create global step for counting the number of update operations.
self.global_step = tf.train.get_or_create_global_step()
# Create sampling timestep placeholder.
timestep = tf.placeholder(tf.int32, (), name="timestep")
# use separate optimizers for actor & critic
self._actor_optimizer = tf.train.AdamOptimizer(
learning_rate=self.config["actor_lr"])
self._critic_optimizer = tf.train.AdamOptimizer(
learning_rate=self.config["critic_lr"])
# Observation inputs.
self.cur_observations = tf.placeholder(
tf.float32,
shape=(None, ) + observation_space.shape,
name="cur_obs")
with tf.variable_scope(POLICY_SCOPE) as scope:
policy_out, self.policy_model = self._build_policy_network(
self.cur_observations, observation_space, action_space)
self.policy_vars = scope_vars(scope.name)
# Noise vars for P network except for layer normalization vars
if self.config["parameter_noise"]:
self._build_parameter_noise([
var for var in self.policy_vars if "LayerNorm" not in var.name
])
# Create exploration component.
self.exploration = self._create_exploration(action_space, config)
explore = tf.placeholder_with_default(True, (), name="is_exploring")
# Action outputs
with tf.variable_scope(ACTION_SCOPE):
self.output_actions, _ = self.exploration.get_exploration_action(
policy_out, Deterministic, self.policy_model, explore,
timestep)
# Replay inputs
self.obs_t = tf.placeholder(
tf.float32,
shape=(None, ) + observation_space.shape,
name="observation")
self.act_t = tf.placeholder(
tf.float32, shape=(None, ) + action_space.shape, name="action")
self.rew_t = tf.placeholder(tf.float32, [None], name="reward")
self.obs_tp1 = tf.placeholder(
tf.float32, shape=(None, ) + observation_space.shape)
self.done_mask = tf.placeholder(tf.float32, [None], name="done")
self.importance_weights = tf.placeholder(
tf.float32, [None], name="weight")
# policy network evaluation
with tf.variable_scope(POLICY_SCOPE, reuse=True) as scope:
prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.policy_t, _ = self._build_policy_network(
self.obs_t, observation_space, action_space)
policy_batchnorm_update_ops = list(
set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) -
prev_update_ops)
# target policy network evaluation
with tf.variable_scope(POLICY_TARGET_SCOPE) as scope:
policy_tp1, _ = self._build_policy_network(
self.obs_tp1, observation_space, action_space)
target_policy_vars = scope_vars(scope.name)
# Action outputs
with tf.variable_scope(ACTION_SCOPE, reuse=True):
if config["smooth_target_policy"]:
target_noise_clip = self.config["target_noise_clip"]
clipped_normal_sample = tf.clip_by_value(
tf.random_normal(
tf.shape(policy_tp1),
stddev=self.config["target_noise"]),
-target_noise_clip, target_noise_clip)
policy_tp1_smoothed = tf.clip_by_value(
policy_tp1 + clipped_normal_sample,
action_space.low * tf.ones_like(policy_tp1),
action_space.high * tf.ones_like(policy_tp1))
else:
# no smoothing, just use deterministic actions
policy_tp1_smoothed = policy_tp1
# q network evaluation
prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
with tf.variable_scope(Q_SCOPE) as scope:
# Q-values for given actions & observations in given current
q_t, self.q_model = self._build_q_network(
self.obs_t, observation_space, action_space, self.act_t)
self.q_func_vars = scope_vars(scope.name)
self.stats = {
"mean_q": tf.reduce_mean(q_t),
"max_q": tf.reduce_max(q_t),
"min_q": tf.reduce_min(q_t),
}
with tf.variable_scope(Q_SCOPE, reuse=True):
# Q-values for current policy (no noise) in given current state
q_t_det_policy, _ = self._build_q_network(
self.obs_t, observation_space, action_space, self.policy_t)
if self.config["twin_q"]:
with tf.variable_scope(TWIN_Q_SCOPE) as scope:
twin_q_t, self.twin_q_model = self._build_q_network(
self.obs_t, observation_space, action_space, self.act_t)
self.twin_q_func_vars = scope_vars(scope.name)
q_batchnorm_update_ops = list(
set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) - prev_update_ops)
# target q network evaluation
with tf.variable_scope(Q_TARGET_SCOPE) as scope:
q_tp1, _ = self._build_q_network(self.obs_tp1, observation_space,
action_space, policy_tp1_smoothed)
target_q_func_vars = scope_vars(scope.name)
if self.config["twin_q"]:
with tf.variable_scope(TWIN_Q_TARGET_SCOPE) as scope:
twin_q_tp1, _ = self._build_q_network(
self.obs_tp1, observation_space, action_space,
policy_tp1_smoothed)
twin_target_q_func_vars = scope_vars(scope.name)
if self.config["twin_q"]:
self.critic_loss, self.actor_loss, self.td_error \
= self._build_actor_critic_loss(
q_t, q_tp1, q_t_det_policy, twin_q_t=twin_q_t,
twin_q_tp1=twin_q_tp1)
else:
self.critic_loss, self.actor_loss, self.td_error \
= self._build_actor_critic_loss(
q_t, q_tp1, q_t_det_policy)
if config["l2_reg"] is not None:
for var in self.policy_vars:
if "bias" not in var.name:
self.actor_loss += (config["l2_reg"] * tf.nn.l2_loss(var))
for var in self.q_func_vars:
if "bias" not in var.name:
self.critic_loss += (config["l2_reg"] * tf.nn.l2_loss(var))
if self.config["twin_q"]:
for var in self.twin_q_func_vars:
if "bias" not in var.name:
self.critic_loss += (
config["l2_reg"] * tf.nn.l2_loss(var))
# update_target_fn will be called periodically to copy Q network to
# target Q network
self.tau_value = config.get("tau")
self.tau = tf.placeholder(tf.float32, (), name="tau")
update_target_expr = []
for var, var_target in zip(
sorted(self.q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(
var_target.assign(self.tau * var +
(1.0 - self.tau) * var_target))
if self.config["twin_q"]:
for var, var_target in zip(
sorted(self.twin_q_func_vars, key=lambda v: v.name),
sorted(twin_target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(
var_target.assign(self.tau * var +
(1.0 - self.tau) * var_target))
for var, var_target in zip(
sorted(self.policy_vars, key=lambda v: v.name),
sorted(target_policy_vars, key=lambda v: v.name)):
update_target_expr.append(
var_target.assign(self.tau * var +
(1.0 - self.tau) * var_target))
self.update_target_expr = tf.group(*update_target_expr)
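# (added note, not from the original source) Each assign above is a Polyak /
# soft update, target <- tau * online + (1 - tau) * target; the hard update at
# the end of __init__ runs the same op with tau=1.0 to copy weights outright.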
self.sess = tf.get_default_session()
self.loss_inputs = [
(SampleBatch.CUR_OBS, self.obs_t),
(SampleBatch.ACTIONS, self.act_t),
(SampleBatch.REWARDS, self.rew_t),
(SampleBatch.NEXT_OBS, self.obs_tp1),
(SampleBatch.DONES, self.done_mask),
(PRIO_WEIGHTS, self.importance_weights),
]
input_dict = dict(self.loss_inputs)
if self.config["use_state_preprocessor"]:
# Model self-supervised losses
self.actor_loss = self.policy_model.custom_loss(
self.actor_loss, input_dict)
self.critic_loss = self.q_model.custom_loss(
self.critic_loss, input_dict)
if self.config["twin_q"]:
self.critic_loss = self.twin_q_model.custom_loss(
self.critic_loss, input_dict)
TFPolicy.__init__(
self,
observation_space,
action_space,
self.config,
self.sess,
obs_input=self.cur_observations,
sampled_action=self.output_actions,
loss=self.actor_loss + self.critic_loss,
loss_inputs=self.loss_inputs,
update_ops=q_batchnorm_update_ops + policy_batchnorm_update_ops,
explore=explore,
timestep=timestep)
self.sess.run(tf.global_variables_initializer())
# Note that this encompasses both the policy and Q-value networks and
# their corresponding target networks
self.variables = ray.experimental.tf_utils.TensorFlowVariables(
tf.group(q_t_det_policy, q_tp1), self.sess)
# Hard initial update
self.update_target(tau=1.0)
@override(TFPolicy)
def optimizer(self):
# we don't use this because we have two separate optimisers
return None
@override(TFPolicy)
def build_apply_op(self, optimizer, grads_and_vars):
# for the policy gradient, update the policy net once for every
# `policy_delay` updates of the critic net
should_apply_actor_opt = tf.equal(
tf.mod(self.global_step, self.config["policy_delay"]), 0)
def make_apply_op():
return self._actor_optimizer.apply_gradients(
self._actor_grads_and_vars)
actor_op = tf.cond(
should_apply_actor_opt,
true_fn=make_apply_op,
false_fn=lambda: tf.no_op())
critic_op = self._critic_optimizer.apply_gradients(
self._critic_grads_and_vars)
# increment global step & apply ops
with tf.control_dependencies([tf.assign_add(self.global_step, 1)]):
return tf.group(actor_op, critic_op)
@override(TFPolicy)
def gradients(self, optimizer, loss):
if self.config["grad_norm_clipping"] is not None:
actor_grads_and_vars = minimize_and_clip(
self._actor_optimizer,
self.actor_loss,
var_list=self.policy_vars,
clip_val=self.config["grad_norm_clipping"])
critic_grads_and_vars = minimize_and_clip(
self._critic_optimizer,
self.critic_loss,
var_list=self.q_func_vars + self.twin_q_func_vars
if self.config["twin_q"] else self.q_func_vars,
clip_val=self.config["grad_norm_clipping"])
else:
actor_grads_and_vars = self._actor_optimizer.compute_gradients(
self.actor_loss, var_list=self.policy_vars)
if self.config["twin_q"]:
critic_vars = self.q_func_vars + self.twin_q_func_vars
else:
critic_vars = self.q_func_vars
critic_grads_and_vars = self._critic_optimizer.compute_gradients(
self.critic_loss, var_list=critic_vars)
# save these for later use in build_apply_op
self._actor_grads_and_vars = [(g, v) for (g, v) in actor_grads_and_vars
if g is not None]
self._critic_grads_and_vars = [(g, v)
for (g, v) in critic_grads_and_vars
if g is not None]
grads_and_vars = self._actor_grads_and_vars \
+ self._critic_grads_and_vars
return grads_and_vars
@override(TFPolicy)
def extra_compute_grad_fetches(self):
return {
"td_error": self.td_error,
LEARNER_STATS_KEY: self.stats,
}
@override(TFPolicy)
def get_weights(self):
return self.variables.get_weights()
@override(TFPolicy)
def set_weights(self, weights):
self.variables.set_weights(weights)
def _build_q_network(self, obs, obs_space, action_space, actions):
if self.config["use_state_preprocessor"]:
q_model = ModelCatalog.get_model({
"obs": obs,
"is_training": self._get_is_training_placeholder(),
}, obs_space, action_space, 1, self.config["model"])
q_out = tf.concat([q_model.last_layer, actions], axis=1)
else:
q_model = None
q_out = tf.concat([obs, actions], axis=1)
activation = getattr(tf.nn, self.config["critic_hidden_activation"])
for hidden in self.config["critic_hiddens"]:
q_out = tf.layers.dense(q_out, units=hidden, activation=activation)
q_values = tf.layers.dense(q_out, units=1, activation=None)
return q_values, q_model
def _build_policy_network(self, obs, obs_space, action_space):
if self.config["use_state_preprocessor"]:
model = ModelCatalog.get_model({
"obs": obs,
"is_training": self._get_is_training_placeholder(),
}, obs_space, action_space, 1, self.config["model"])
action_out = model.last_layer
else:
model = None
action_out = obs
activation = getattr(tf.nn, self.config["actor_hidden_activation"])
for hidden in self.config["actor_hiddens"]:
if self.config["parameter_noise"]:
import tensorflow.contrib.layers as layers
action_out = layers.fully_connected(
action_out,
num_outputs=hidden,
activation_fn=activation,
normalizer_fn=layers.layer_norm)
else:
action_out = tf.layers.dense(
action_out, units=hidden, activation=activation)
action_out = tf.layers.dense(
action_out, units=action_space.shape[0], activation=None)
# Use sigmoid to scale to [0,1], but also double magnitude of input to
# emulate behaviour of tanh activation used in DDPG and TD3 papers.
sigmoid_out = tf.nn.sigmoid(2 * action_out)
# Rescale to actual env policy scale
# (shape of sigmoid_out is [batch_size, dim_actions], so we reshape to
# get same dims)
action_range = (action_space.high - action_space.low)[None]
low_action = action_space.low[None]
actions = action_range * sigmoid_out + low_action
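# (added note, not from the original source) Since sigmoid(2x) == (tanh(x) + 1) / 2,
# this is exactly low + (high - low) * (tanh(action_out) + 1) / 2, i.e. the usual
# tanh squashing rescaled to the environment's action bounds.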
return actions, model
def _build_actor_critic_loss(self,
q_t,
q_tp1,
q_t_det_policy,
twin_q_t=None,
twin_q_tp1=None):
twin_q = self.config["twin_q"]
gamma = self.config["gamma"]
n_step = self.config["n_step"]
use_huber = self.config["use_huber"]
huber_threshold = self.config["huber_threshold"]
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if twin_q:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 = tf.minimum(q_tp1, twin_q_tp1)
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (1.0 - self.done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = tf.stop_gradient(
self.rew_t + gamma**n_step * q_tp1_best_masked)
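# (added note, not from the original source) In symbols:
#   y = r_t + gamma^n_step * (1 - done) * Q_target(s_{t+n}, pi_target(s_{t+n}))
# where, with twin_q, the target critic value is min(Q1', Q2') (clipped
# double-Q, as in TD3); stop_gradient keeps y fixed w.r.t. the online critic.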
# compute the error (potentially clipped)
if twin_q:
td_error = q_t_selected - q_t_selected_target
twin_td_error = twin_q_t_selected - q_t_selected_target
td_error = td_error + twin_td_error
if use_huber:
errors = huber_loss(td_error, huber_threshold) \
+ huber_loss(twin_td_error, huber_threshold)
else:
errors = 0.5 * tf.square(td_error) + 0.5 * tf.square(
twin_td_error)
else:
td_error = q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold)
else:
errors = 0.5 * tf.square(td_error)
critic_loss = tf.reduce_mean(self.importance_weights * errors)
actor_loss = -tf.reduce_mean(q_t_det_policy)
return critic_loss, actor_loss, td_error
def _build_parameter_noise(self, pnet_params):
self.parameter_noise_sigma_val = \
self.config["exploration_config"].get("ou_sigma", 0.2)
self.parameter_noise_sigma = tf.get_variable(
initializer=tf.constant_initializer(
self.parameter_noise_sigma_val),
name="parameter_noise_sigma",
shape=(),
trainable=False,
dtype=tf.float32)
self.parameter_noise = list()
# No need to add any noise on LayerNorm parameters
for var in pnet_params:
noise_var = tf.get_variable(
name=var.name.split(":")[0] + "_noise",
shape=var.shape,
initializer=tf.constant_initializer(.0),
trainable=False)
self.parameter_noise.append(noise_var)
remove_noise_ops = list()
for var, var_noise in zip(pnet_params, self.parameter_noise):
remove_noise_ops.append(tf.assign_add(var, -var_noise))
self.remove_parameter_noise_op = tf.group(*tuple(remove_noise_ops))
generate_noise_ops = list()
for var_noise in self.parameter_noise:
generate_noise_ops.append(
tf.assign(
var_noise,
tf.random_normal(
shape=var_noise.shape,
stddev=self.parameter_noise_sigma)))
with tf.control_dependencies(generate_noise_ops):
add_noise_ops = list()
for var, var_noise in zip(pnet_params, self.parameter_noise):
add_noise_ops.append(tf.assign_add(var, var_noise))
self.add_noise_op = tf.group(*tuple(add_noise_ops))
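# (added note, not from the original source) add_noise_op first redraws every
# noise variable, then adds it to the matching weight; remove_parameter_noise_op
# subtracts the stored noise, so the clean weights can always be recovered
# before the next perturbation.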
self.pi_distance = None
def compute_td_error(self, obs_t, act_t, rew_t, obs_tp1, done_mask,
importance_weights):
td_err = self.sess.run(
self.td_error,
feed_dict={
self.obs_t: [
|
np.array(ob)
|
numpy.array
|
import unittest
from nose.plugins.skip import SkipTest
import numpy
import theano
from theano.tensor import dmatrix, iscalar, lscalar, dmatrices
from theano import tensor
from theano.compile import In
from theano.compile import pfunc
from theano.compile import shared
from theano.compile import config
def data_of(s):
"""Return the raw value of a shared variable"""
return s.container.storage[0]
class Test_pfunc(unittest.TestCase):
def test_doc(self):
"""Ensure the code given in pfunc.txt works as expected"""
# Example #1.
a = lscalar()
b = shared(1)
f1 = pfunc([a], (a + b))
f2 = pfunc([In(a, value=44)], a + b, updates={b: b + 1})
self.assertTrue(b.get_value() == 1)
self.assertTrue(f1(3) == 4)
self.assertTrue(f2(3) == 4)
self.assertTrue(b.get_value() == 2)
self.assertTrue(f1(3) == 5)
b.set_value(0)
self.assertTrue(f1(3) == 3)
# Example #2.
a = tensor.lscalar()
b = shared(7)
f1 = pfunc([a], a + b)
f2 = pfunc([a], a * b)
self.assertTrue(f1(5) == 12)
b.set_value(8)
self.assertTrue(f1(5) == 13)
self.assertTrue(f2(4) == 32)
def test_shared(self):
# CHECK: two functions (f1 and f2) can share w
w = shared(numpy.random.rand(2, 2), 'w')
wval = w.get_value(borrow=False)
x = dmatrix()
out1 = w + x
out2 = w * x
f1 = pfunc([x], [out1])
f2 = pfunc([x], [out2])
xval = numpy.random.rand(2, 2)
assert numpy.all(f1(xval) == xval + wval)
assert numpy.all(f2(xval) == xval * wval)
# CHECK: updating a shared value
f3 = pfunc([x], out1, updates=[(w, (w - 1))])
# f3 changes the value of w
assert numpy.all(f3(xval) == xval + wval)
# this same value is read by f1
assert numpy.all(f1(xval) == xval + (wval - 1))
w.set_value(w.get_value(borrow=True) * 10, borrow=True)
# this same value is read by f1
assert numpy.all(f1(xval) == xval + w.get_value(borrow=True))
def test_no_shared_as_input(self):
"""Test that shared variables cannot be used as function inputs."""
w_init = numpy.random.rand(2, 2)
w = shared(w_init.copy(), 'w')
try:
pfunc([w], theano.tensor.sum(w * w))
assert False
except TypeError as e:
msg = 'Cannot use a shared variable (w) as explicit input'
if str(e).find(msg) < 0:
raise
def test_default_container(self):
# Ensure it is possible to (implicitly) use a shared variable in a
# function, as a 'state' that can be updated at will.
rng = numpy.random.RandomState(1827)
w_init = rng.rand(5)
w = shared(w_init.copy(), 'w')
reg = theano.tensor.sum(w * w)
f = pfunc([], reg)
assert f() == numpy.sum(w_init * w_init)
# Change the value of w and ensure the output changes accordingly.
w.set_value(w.get_value(borrow=True) + 1.0, borrow=True)
assert f() == numpy.sum((w_init + 1) ** 2)
def test_default_scalar_container(self):
# Similar in spirit to test_default_container, but updating a scalar
# variable. This is a sanity check for non mutable types.
x = shared(0.0, 'x')
f = pfunc([], x)
assert f() == 0
x.set_value(x.get_value(borrow=True) + 1, borrow=True)
assert f() == 1
def test_param_strict(self):
a = tensor.dvector()
b = shared(7)
out = a + b
f = pfunc([In(a, strict=False)], [out])
# works, rand generates float64 by default
f(numpy.random.rand(8))
# works, casting is allowed
f(numpy.array([1, 2, 3, 4], dtype='int32'))
f = pfunc([In(a, strict=True)], [out])
try:
# fails, f expects float64
f(numpy.array([1, 2, 3, 4], dtype='int32'))
except TypeError:
pass
def test_param_mutable(self):
a = tensor.dvector()
a_out = a * 2 # assuming the op which makes this "in place" triggers
# using mutable=True will let fip change the value in aval
fip = pfunc([In(a, mutable=True)], [a_out], mode='FAST_RUN')
aval = numpy.random.rand(10)
aval2 = aval.copy()
assert numpy.all(fip(aval) == (aval2 * 2))
assert not numpy.all(aval == aval2)
# using mutable=False should leave the input untouched
f = pfunc([In(a, mutable=False)], [a_out], mode='FAST_RUN')
aval = numpy.random.rand(10)
aval2 = aval.copy()
assert numpy.all(f(aval) == (aval2 * 2))
assert numpy.all(aval == aval2)
def test_shared_mutable(self):
bval = numpy.arange(5)
b = shared(bval)
b_out = b * 2
# shared vars copy args.
assert b.get_value(borrow=True) is not bval
# so we do this to get at the underlying data
bval = data_of(b)
# by default, shared are not mutable unless doing an explicit update
f = pfunc([], [b_out], mode='FAST_RUN')
assert (f() == numpy.arange(5) * 2).all()
assert numpy.all(b.get_value(borrow=True) == numpy.arange(5))
# using updates, b is now a mutable parameter
f = pfunc([], [b_out], updates=[(b, b_out)], mode='FAST_RUN')
assert (f() == (numpy.arange(5) * 2)).all()
# because of the update
assert (b.get_value(borrow=True) == (numpy.arange(5) * 2)).all()
assert (bval == (numpy.arange(5) * 2)).all() # because of mutable=True
# do not depend on updates being in-place though!
bval = numpy.arange(5)
b.set_value(bval, borrow=True)
bval = data_of(b)
f = pfunc([], [b_out], updates=[(b, (b_out + 3))], mode='FAST_RUN')
assert (f() == (numpy.arange(5) * 2)).all()
# because of the update
assert (b.get_value(borrow=True) == ((numpy.arange(5) * 2) + 3)).all()
# bval got modified to something...
assert not (bval == numpy.arange(5)).all()
# ... but not to b.value !
assert not (bval == b.get_value(borrow=True)).all()
def test_param_allow_downcast_int(self):
a = tensor.wvector('a') # int16
b = tensor.bvector('b') # int8
c = tensor.bscalar('c') # int8
f = pfunc([In(a, allow_downcast=True),
In(b, allow_downcast=False),
In(c, allow_downcast=None)],
(a + b + c))
# Both values are in range. Since they're not ndarrays (but lists),
# they will be converted, and their value checked.
assert numpy.all(f([3], [6], 1) == 10)
# Values are in range, but a dtype too large has explicitly been given
# For performance reasons, no check of the data is explicitly performed
# (It might be OK to change this in the future.)
self.assertRaises(TypeError, f,
[3], numpy.array([6], dtype='int16'), 1)
# Value too big for a, silently ignored
assert numpy.all(f([2 ** 20], numpy.ones(1, dtype='int8'), 1) == 2)
# Value too big for b, raises TypeError
self.assertRaises(TypeError, f, [3], [312], 1)
# Value too big for c, raises TypeError
self.assertRaises(TypeError, f, [3], [6], 806)
def test_param_allow_downcast_floatX(self):
a = tensor.fscalar('a')
b = tensor.fscalar('b')
c = tensor.fscalar('c')
f = pfunc([In(a, allow_downcast=True),
In(b, allow_downcast=False),
In(c, allow_downcast=None)],
(a + b + c))
# If the values can be accurately represented, everything is OK
assert numpy.all(f(0, 0, 0) == 0)
# If allow_downcast is True, idem
assert numpy.allclose(f(0.1, 0, 0), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, 0, 0.1, 0)
# If allow_downcast is None, it should work iff floatX=float32
if config.floatX == 'float32':
assert numpy.allclose(f(0, 0, 0.1), 0.1)
else:
self.assertRaises(TypeError, f, 0, 0, 0.1)
def test_param_allow_downcast_vector_floatX(self):
a = tensor.fvector('a')
b = tensor.fvector('b')
c = tensor.fvector('c')
f = pfunc([In(a, allow_downcast=True),
In(b, allow_downcast=False),
In(c, allow_downcast=None)],
(a + b + c))
# If the values can be accurately represented, everything is OK
z = [0]
assert numpy.all(f(z, z, z) == 0)
# If allow_downcast is True, idem
assert numpy.allclose(f([0.1], z, z), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, z, [0.1], z)
# If allow_downcast is None, like False
self.assertRaises(TypeError, f, z, z, [0.1])
def test_allow_input_downcast_int(self):
a = tensor.wvector('a') # int16
b = tensor.bvector('b') # int8
c = tensor.bscalar('c') # int8
f = pfunc([a, b, c], (a + b + c), allow_input_downcast=True)
# Value too big for a, b, or c, silently ignored
assert f([2 ** 20], [1], 0) == 1
assert f([3], [312], 0) == 59
assert f([3], [1], 806) == 42
g = pfunc([a, b, c], (a + b + c), allow_input_downcast=False)
# All values are in range. Since they're not ndarrays (but lists
# or scalars), they will be converted, and their value checked.
assert numpy.all(g([3], [6], 0) == 9)
# Values are in range, but a dtype too large has explicitly been given
# For performance reasons, no check of the data is explicitly performed
# (It might be OK to change this in the future.)
self.assertRaises(TypeError, g,
[3], numpy.array([6], dtype='int16'), 0)
# Value too big for b, raises TypeError
self.assertRaises(TypeError, g, [3], [312], 0)
h = pfunc([a, b, c], (a + b + c)) # Default: allow_input_downcast=None
# Everything here should behave like with False
assert numpy.all(h([3], [6], 0) == 9)
self.assertRaises(TypeError, h,
[3], numpy.array([6], dtype='int16'), 0)
self.assertRaises(TypeError, h, [3], [312], 0)
def test_allow_downcast_floatX(self):
a = tensor.fscalar('a')
b = tensor.fvector('b')
f = pfunc([a, b], (a + b), allow_input_downcast=True)
g = pfunc([a, b], (a + b), allow_input_downcast=False)
h = pfunc([a, b], (a + b), allow_input_downcast=None)
# If the values can be accurately represented, OK
assert numpy.all(f(0, [0]) == 0)
assert numpy.all(g(0, [0]) == 0)
assert numpy.all(h(0, [0]) == 0)
# For the vector: OK iff allow_input_downcast is True
assert numpy.allclose(f(0, [0.1]), 0.1)
self.assertRaises(TypeError, g, 0, [0.1])
self.assertRaises(TypeError, h, 0, [0.1])
# For the scalar: OK if allow_input_downcast is True,
# or None and floatX==float32
assert numpy.allclose(f(0.1, [0]), 0.1)
self.assertRaises(TypeError, g, 0.1, [0])
if config.floatX == 'float32':
assert numpy.allclose(h(0.1, [0]), 0.1)
else:
self.assertRaises(TypeError, h, 0.1, [0])
def test_update(self):
"""Test update mechanism in different settings."""
# Simple value assignment.
x = shared(0)
assign = pfunc([], [], updates={x: 3})
assign()
self.assertTrue(x.get_value() == 3)
# Basic increment function.
x.set_value(0)
inc = pfunc([], [], updates={x: x + 1})
inc()
self.assertTrue(x.get_value() == 1)
# Increment by a constant value.
x.set_value(-1)
y = shared(2)
inc_by_y = pfunc([], [], updates={x: x + y})
inc_by_y()
self.assertTrue(x.get_value() == 1)
def test_update_err_broadcast(self):
# Test that broadcastable dimensions raise error
data = numpy.random.rand(10, 10).astype('float32')
output_var = shared(name="output", value=data)
# the update_var has type matrix, while the update expression
# is a broadcasted scalar; this mismatch should raise a TypeError.
self.assertRaises(TypeError, theano.function, inputs=[], outputs=[],
updates={output_var: output_var.sum().dimshuffle('x', 'x')})
def test_duplicate_updates(self):
x, y = dmatrices('x', 'y')
z = shared(numpy.ones((2, 3)))
self.assertRaises(ValueError, theano.function, [x, y], [z],
updates=[(z, (z + x + y)), (z, (z - x))])
def test_givens(self):
x = shared(0)
assign = pfunc([], x, givens={x: 3})
assert assign() == 3
assert x.get_value(borrow=True) == 0
y = tensor.ivector()
f = pfunc([y], (y * x), givens={x: 6})
assert numpy.all(f([1, 1, 1]) == [6, 6, 6])
assert x.get_value() == 0
z = tensor.ivector()
c = z * y
f = pfunc([y], (c + 7),
givens={z: theano._asarray([4, 4, 4], dtype='int32')})
assert numpy.all(f([1, 1, 1]) == [11, 11, 11])
assert x.get_value() == 0
def test_clone0(self):
x = shared(numpy.asarray([4, 4, 4]))
y = shared(numpy.asarray([4, 4, 4]))
z = shared(numpy.asarray([2, 2, 2]))
up = pfunc([], [], updates={
x: (x * 5),
y: ((x * 5) + y),
z: (((x * 5) + y) ** z)})
up()
assert numpy.all(x.get_value() == 20)
assert numpy.all(y.get_value() == 24)
assert numpy.all(z.get_value() == (24 ** 2))
def test_default_updates(self):
x = shared(0)
x.default_update = x + 1
f = pfunc([], [x])
f()
assert x.get_value() == 1
del x.default_update
f()
assert x.get_value() == 2
g = pfunc([], [x])
g()
assert x.get_value() == 2
def test_no_default_updates(self):
x = shared(0)
y = shared(1)
x.default_update = x + 2
# Test that the default update is taken into account in the right cases
f1 = pfunc([], [x], no_default_updates=True)
f1()
assert x.get_value() == 0
f2 = pfunc([], [x], no_default_updates=[x])
f2()
assert x.get_value() == 0
f3 = pfunc([], [x], no_default_updates=[x, y])
f3()
assert x.get_value() == 0
f4 = pfunc([], [x], no_default_updates=[y])
f4()
assert x.get_value() == 2
f5 = pfunc([], [x], no_default_updates=[])
f5()
assert x.get_value() == 4
f5 = pfunc([], [x], no_default_updates=False)
f5()
assert x.get_value() == 6
self.assertRaises(TypeError, pfunc, [], [x], no_default_updates=(x))
self.assertRaises(TypeError, pfunc, [], [x], no_default_updates=x)
self.assertRaises(TypeError, pfunc, [], [x],
no_default_updates='canard')
# Mix explicit updates and no_default_updates
g1 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=True)
g1()
assert x.get_value() == 5
g2 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[x])
g2()
assert x.get_value() == 4
g3 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[x, y])
g3()
assert x.get_value() == 3
g4 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[y])
g4()
assert x.get_value() == 2
g5 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[])
g5()
assert x.get_value() == 1
g5 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=False)
g5()
assert x.get_value() == 0
def test_default_updates_expressions(self):
x = shared(0)
y = shared(1)
a = lscalar('a')
z = a * x
x.default_update = x + y
f1 = pfunc([a], z)
f1(12)
assert x.get_value() == 1
f2 = pfunc([a], z, no_default_updates=True)
assert f2(7) == 7
assert x.get_value() == 1
f3 = pfunc([a], z, no_default_updates=[x])
assert f3(9) == 9
assert x.get_value() == 1
def test_default_updates_multiple(self):
x = shared(0)
y = shared(1)
x.default_update = x - 1
y.default_update = y + 1
f1 = pfunc([], [x, y])
f1()
assert x.get_value() == -1
assert y.get_value() == 2
f2 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=[y])
f2()
assert x.get_value() == -3
assert y.get_value() == 2
f3 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=True)
f3()
assert x.get_value() == -5
assert y.get_value() == 2
f4 = pfunc([], [x, y], updates=[(y, (y - 2))])
f4()
assert x.get_value() == -6
assert y.get_value() == 0
def test_default_updates_chained(self):
x = shared(2)
y = shared(1)
z = shared(-1)
x.default_update = x - y
y.default_update = z
z.default_update = z - 1
f1 = pfunc([], [x])
f1()
assert x.get_value() == 1
assert y.get_value() == -1
assert z.get_value() == -2
f2 = pfunc([], [x, y])
f2()
assert x.get_value() == 2
assert y.get_value() == -2
assert z.get_value() == -3
f3 = pfunc([], [y])
f3()
assert x.get_value() == 2
assert y.get_value() == -3
assert z.get_value() == -4
f4 = pfunc([], [x, y], no_default_updates=[x])
f4()
assert x.get_value() == 2
assert y.get_value() == -4
assert z.get_value() == -5
f5 = pfunc([], [x, y, z], no_default_updates=[z])
f5()
assert x.get_value() == 6
assert y.get_value() == -5
assert z.get_value() == -5
def test_default_updates_input(self):
x = shared(0)
y = shared(1)
if theano.configdefaults.python_int_bitwidth() == 32:
a = iscalar('a')
else:
a = lscalar('a')
x.default_update = y
y.default_update = y + a
f1 = pfunc([], x, no_default_updates=True)
f1()
assert x.get_value() == 0
assert y.get_value() == 1
f2 = pfunc([], x, no_default_updates=[x])
f2()
assert x.get_value() == 0
assert y.get_value() == 1
f3 = pfunc([], x, no_default_updates=[y])
f3()
assert x.get_value() == 1
assert y.get_value() == 1
f4 = pfunc([a], x)
f4(2)
assert x.get_value() == 1
assert y.get_value() == 3
f5 = pfunc([], x, updates={y: (y - 1)})
f5()
assert x.get_value() == 3
assert y.get_value() == 2
# a is needed as input if y.default_update is used
self.assertRaises(theano.gof.MissingInputError, pfunc, [], x)
def test_default_updates_partial_graph(self):
a = shared(0)
a.default_update = a + 1 # Increment a each time it is used
b = 2 * a
# Use only the tip of the graph, a is not used
f = pfunc([b], b)
assert a.get_value() == 0
f(21)
assert a.get_value() == 0
def test_givens_replaces_shared_variable(self):
a = shared(1., 'a')
a.default_update = a + 3.
b = tensor.dscalar('b')
c = a + 10
f = pfunc([b], c, givens={a: b})
assert len(f.maker.fgraph.inputs) == 1
assert len(f.maker.fgraph.outputs) == 1
def test_givens_replaces_shared_variable2(self):
a = shared(1., 'a')
a.default_update = a + 3
c = a + 10
f = pfunc([], c, givens={a: (a + 10)})
assert f() == 21
assert f() == 34
def test_duplicate_inputs(self):
x = theano.tensor.lscalar('x')
self.assertRaises(theano.compile.UnusedInputError,
theano.function, [x, x, x], x)
def test_update_same(self):
# There was a bug in CVM, triggered when a shared variable
# was its own update expression.
a = shared(1., 'a')
b = shared(numpy.ones((2, 3)), 'b')
# The order of the variables is not determined, so we try
# both shared variables.
# TODO: explain the above comment. By "not determined" does
# this mean "not deterministic"?
# This test originally wrote the updates using dictionaries,
# and iterating over the dictionary was not deterministic.
# Is that all the comment above meant, or is the CVM intended
# to add extra non-determinism? Or is the CVM meant to
# deterministically but arbitrarily pick an order for the updates?
f = theano.function([], [], updates=[(a, a), (b, (2 * b))])
g = theano.function([], [], updates=[(a, (a * 2)), (b, b)])
f()
assert a.get_value(borrow=True).shape == (), a.get_value()
assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
g()
assert a.get_value(borrow=True).shape == (), a.get_value()
assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
def test_update_equiv(self):
# Like test_update_same, but the update expression is simplified until
# it is found to be equal to the original variable
a = shared(1., 'a')
b = shared(numpy.ones((2, 3)), 'b')
# See comment in test_update_same about why we try both
# shared variables.
f = theano.function([], [], updates=[(a, a), (b, (2 * b - b))])
g = theano.function([], [], updates=[(a, (a * 2 - a)), (b, b)])
f()
assert a.get_value(borrow=True).shape == (), a.get_value()
assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
g()
assert a.get_value(borrow=True).shape == (), a.get_value()
assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
class Test_aliasing_rules(unittest.TestCase):
"""
1. Theano manages its own memory space, which typically does not overlap
with the memory of normal python variables that the user uses.
2. shared variables are allocated in this memory space, as are the
temporaries used for Function evaluation.
3. Physically, this managed memory space may be spread across the host,
on a GPU device(s), or even on a remote machine.
4. Theano assumes that shared variables are never aliased to one another,
and tries to make it impossible to accidentally alias them.
5. Theano's managed data is constant while Theano Functions are not running
and theano library code is not running.
6. The default behaviour of Function is to return user-space values for
outputs, but this can be overridden (borrow=True) for better performance,
in which case the returned value may be aliased to managed memory, and
potentially invalidated by the next Theano Function call or call to theano
library code.
"""
def shared(self, x):
return tensor._shared(x)
def test_shared_constructor_copies(self):
# shared constructor makes copy
# (rule #2)
orig_a =
|
numpy.zeros((2, 2))
|
numpy.zeros
|
# rtgym interfaces for Trackmania
# standard library imports
import platform
import logging
import time
from collections import deque
# third-party imports
import cv2
import gym.spaces as spaces
import mss
import numpy as np
if platform.system() == "Windows":
import vgamepad as vg
# third-party imports
from rtgym import RealTimeGymInterface
# local imports
import tmrl.config.config_constants as cfg
from tmrl.custom.utils.compute_reward import RewardFunction
from tmrl.custom.utils.control_gamepad import control_gamepad
from tmrl.custom.utils.key_event import apply_control, keyres
from tmrl.custom.utils.mouse_event import mouse_close_finish_pop_up_tm20, wait_for_popup_save_replay_and_improve_tm20
from tmrl.custom.utils.tools import Lidar, TM2020OpenPlanetClient, get_speed, load_digits
# Globals ==============================================================================================================
NB_OBS_FORWARD = 500 # this allows (and rewards) 50m cuts
# Interface for Trackmania 2020 ========================================================================================
class TM2020Interface(RealTimeGymInterface):
"""
This is the API needed for the algorithm to control Trackmania2020
"""
def __init__(self, img_hist_len: int = 4, gamepad: bool = False, min_nb_steps_before_early_done: int = int(3.5 * 20), save_replay: bool = False):
"""
Args:
"""
self.monitor = {"top": 32, "left": 1, "width": 256, "height": 127}
self.sct = None
self.last_time = None
self.digits = None
self.img_hist_len = img_hist_len
self.img_hist = None
self.img = None
self.reward_function = None
self.client = None
self.gamepad = gamepad
self.j = None
self.min_nb_steps_before_early_done = min_nb_steps_before_early_done
self.save_replay = save_replay
self.initialized = False
def initialize(self):
if self.gamepad:
assert platform.system() == "Windows", "Sorry, only Windows is supported for gamepad control"
self.j = vg.VX360Gamepad()
logging.debug(" virtual joystick in use")
self.sct = mss.mss()
self.last_time = time.time()
self.digits = load_digits()
self.img_hist = deque(maxlen=self.img_hist_len)
self.img = None
self.reward_function = RewardFunction(reward_data_path=cfg.REWARD_PATH,
nb_obs_forward=NB_OBS_FORWARD,
nb_obs_backward=10,
nb_zero_rew_before_early_done=10,
min_nb_steps_before_early_done=self.min_nb_steps_before_early_done)
self.client = TM2020OpenPlanetClient()
self.initialized = True
def send_control(self, control):
"""
Non-blocking function
Applies the action given by the RL policy
If control is None, does nothing (e.g. to record)
Args:
control: np.array: [forward,backward,right,left]
"""
if self.gamepad:
if control is not None:
control_gamepad(self.j, control)
else:
if control is not None:
actions = []
if control[0] > 0:
actions.append('f')
if control[1] > 0:
actions.append('b')
if control[2] > 0.5:
actions.append('r')
elif control[2] < -0.5:
actions.append('l')
apply_control(actions)
def grab_data_and_img(self):
img = np.asarray(self.sct.grab(self.monitor))[:, :, :3]
img = np.moveaxis(img, -1, 0)
data = self.client.retrieve_data()
self.img = img # for render()
return data, img
def reset(self):
"""
obs must be a list of numpy arrays
"""
if not self.initialized:
self.initialize()
self.send_control(self.get_default_action())
keyres()
time.sleep(cfg.SLEEP_TIME_AT_RESET) # must be long enough for image to be refreshed
data, img = self.grab_data_and_img()
speed = np.array([
data[0],
], dtype='float32')
gear = np.array([
data[9],
], dtype='float32')
rpm = np.array([
data[10],
], dtype='float32')
for _ in range(self.img_hist_len):
self.img_hist.append(img)
imgs = np.array(list(self.img_hist))
obs = [speed, gear, rpm, imgs]
self.reward_function.reset()
return obs
def wait(self):
"""
Non-blocking function
The agent stays 'paused', waiting in position
"""
self.send_control(self.get_default_action())
keyres()
time.sleep(0.5)
mouse_close_finish_pop_up_tm20(small_window=True)
def get_obs_rew_done_info(self):
"""
returns the observation, the reward, and a done signal for end of episode
obs must be a list of numpy arrays
"""
data, img = self.grab_data_and_img()
speed = np.array([
data[0],
], dtype='float32')
gear = np.array([
data[9],
], dtype='float32')
rpm = np.array([
data[10],
], dtype='float32')
rew, done = self.reward_function.compute_reward(pos=np.array([data[2], data[3], data[4]]))
rew = np.float32(rew)
self.img_hist.append(img)
imgs = np.array(list(self.img_hist))
obs = [speed, gear, rpm, imgs]
end_of_track = bool(data[8])
info = {}
if end_of_track:
done = True
info["__no_done"] = True
if self.save_replay:
wait_for_popup_save_replay_and_improve_tm20(True)
return obs, rew, done, info
def get_observation_space(self):
"""
must be a Tuple
"""
speed = spaces.Box(low=0.0, high=1000.0, shape=(1, ))
gear = spaces.Box(low=0.0, high=6, shape=(1, ))
rpm = spaces.Box(low=0.0, high=np.inf, shape=(1, ))
img = spaces.Box(low=0.0, high=255.0, shape=(self.img_hist_len, 3, 127, 256))
return spaces.Tuple((speed, gear, rpm, img))
def get_action_space(self):
"""
must return a Box
"""
return spaces.Box(low=-1.0, high=1.0, shape=(3, ))
def get_default_action(self):
"""
initial action at episode start
"""
return np.array([0.0, 0.0, 0.0], dtype='float32')
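# --- hedged usage sketch (illustration only; assumes rtgym's DEFAULT_CONFIG_DICT and
# its "real-time-gym-v0" environment id, which are not shown in this file) ---
def _make_tm2020_env_sketch():
    """Wrap the interface above in a real-time Gym environment (assumed rtgym usage)."""
    import gym
    from rtgym import DEFAULT_CONFIG_DICT
    config = dict(DEFAULT_CONFIG_DICT)
    config["interface"] = TM2020Interface  # rtgym instantiates the interface class itself
    return gym.make("real-time-gym-v0", config=config)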
class TM2020InterfaceLidar(TM2020Interface):
def __init__(self, img_hist_len=1, gamepad=False, min_nb_steps_before_early_done=int(20 * 3.5), road_point=(440, 479), record=False, save_replay: bool = False):
super().__init__(img_hist_len, gamepad, min_nb_steps_before_early_done, save_replay)
self.monitor = {"top": 30, "left": 0, "width": 958, "height": 490}
self.lidar = Lidar(monitor=self.monitor, road_point=road_point)
self.record = record
def grab_lidar_speed_and_data(self):
img = np.asarray(self.sct.grab(self.monitor))[:, :, :3]
data = self.client.retrieve_data()
speed = np.array([
data[0],
], dtype='float32')
lidar = self.lidar.lidar_20(im=img, show=False)
return lidar, speed, data
def reset(self):
"""
obs must be a list of numpy arrays
"""
if not self.initialized:
self.initialize()
self.send_control(self.get_default_action())
keyres()
time.sleep(cfg.SLEEP_TIME_AT_RESET) # must be long enough for image to be refreshed
img, speed, data = self.grab_lidar_speed_and_data()
for _ in range(self.img_hist_len):
self.img_hist.append(img)
imgs = np.array(list(self.img_hist), dtype='float32')
obs = [speed, imgs]
self.reward_function.reset()
return obs # if not self.record else data
def wait(self):
"""
Non-blocking function
The agent stays 'paused', waiting in position
"""
self.send_control(self.get_default_action())
keyres()
time.sleep(0.5)
mouse_close_finish_pop_up_tm20(small_window=False)
def get_obs_rew_done_info(self):
"""
returns the observation, the reward, and a done signal for end of episode
obs must be a list of numpy arrays
"""
img, speed, data = self.grab_lidar_speed_and_data()
rew, done = self.reward_function.compute_reward(pos=np.array([data[2], data[3], data[4]]))
rew =
|
np.float32(rew)
|
numpy.float32
|
import copy
import errno
import glob
import logging # as logging
import logging.config
import math
import os
import pickle
import struct
import sys
import time
import configuration
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
# from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
from frontend.parameter_generation import ParameterGeneration
from io_funcs.binary_io import BinaryIOCollection
# import matplotlib.pyplot as plt
# our custom logging class that can also plot
# from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, SingleWeightMatrixPlot
from lxml import etree
from utils.providers import ListDataProviderWithProjectionIndex, get_unexpanded_projection_inputs # ListDataProvider
from util import file_util, math_statis
from util.file_util import load_binary_file_frame
# from frontend.feature_normalisation_base import FeatureNormBase
## This should always be True -- tidy up later
expand_by_minibatch = True
if expand_by_minibatch:
proj_type = 'int32'
else:
proj_type = theano.config.floatX
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
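# --- hedged usage sketch (illustration only; the '.cmp' extension is an assumption) ---
def _prepare_output_paths_sketch(label_file_list, out_dir):
    """Turn a list of label paths into matching output paths using the helpers above."""
    file_id_list = extract_file_id_list(label_file_list)
    return prepare_file_path_list(file_id_list, out_dir, '.cmp')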
def visualize_dnn(dnn):
layer_num = len(dnn.params) // 2  ## integer division; including input and output
for i in range(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i + 1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num - 1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i * 2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def infer_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
'''
Unlike the same function in run_tpdnn.py this *DOESN'T* save model at the
end -- just returns array of the learned projection weights
'''
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
######### data providers ##########
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list=train_x_file_list,
y_file_list=train_y_file_list, n_ins=n_ins, n_outs=n_outs,
buffer_size=buffer_size, shuffle=True,
index_to_project=index_to_project,
projection_insize=projection_insize,
indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list=valid_x_file_list,
y_file_list=valid_y_file_list, n_ins=n_ins, n_outs=n_outs,
buffer_size=buffer_size, shuffle=False,
index_to_project=index_to_project,
projection_insize=projection_insize,
indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
####################################
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
############## load existing dnn #####
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
####################################
logger.info('fine-tuning the %s model' % (model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
logger.info('fine-tuning the %s model' % (model_type))
dnn_model.initialise_projection_weights()
inference_epochs = 20 ## <-------- hard coded !!!!!!!!!!
current_finetune_lr = previous_finetune_lr = finetune_lr
warmup_epoch_3 = 10 # 10 ## <-------- hard coded !!!!!!!!!!
# warmup_epoch_3 = epoch + warmup_epoch_3
# inference_epochs += epoch
while (epoch < inference_epochs):
epoch = epoch + 1
current_momentum = momentum
if epoch > warmup_epoch_3:
previous_finetune_lr = current_finetune_lr
current_finetune_lr = previous_finetune_lr * 0.5
dev_error = []
sub_start_time = time.clock()
## osw -- inferring word reps on validation set in a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('infer word representations for validation set')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size
for minibatch_index in range(n_valid_batches):
v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# valid_error = infer_projections_fn(current_finetune_lr, current_momentum)
# this_validation_loss = numpy.mean(valid_error)
# if plot:
# ## add dummy validation loss so that plot works:
# plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
# plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
#
sub_end_time = time.clock()
logger.info('INFERENCE epoch %i, validation error %f, time spent %.2f' % (
epoch, this_validation_loss, (sub_end_time - sub_start_time)))
# if cfg.hyper_params['model_type'] == 'TPDNN':
# if not os.path.isdir(cfg.projection_weights_output_dir):
# os.mkdir(cfg.projection_weights_output_dir)
# weights = dnn_model.get_projection_weights()
# fname = os.path.join(cfg.projection_weights_output_dir, 'proj_INFERENCE_epoch_%s'%(epoch))
# numpy.savetxt(fname, weights)
#
best_dnn_model = dnn_model ## always update
end_time = time.clock()
##cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
final_weights = dnn_model.get_projection_weights()
logger.info(
'overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
# if plot:
# plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
#
### ========================================================
# if cfg.hyper_params['model_type'] == 'TPDNN':
# os.system('python %s %s'%('/afs/inf.ed.ac.uk/user/o/owatts/scripts_NEW/plot_weights_multiple_phases.py', cfg.projection_weights_output_dir))
return final_weights
def dnn_generation_PROJECTION(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, cfg=None,
synth_mode='constant', projection_end=0, projection_weights_to_use=None,
save_weights_to_file=None):
'''
Use the (training/dev/test) projections learned in training, but shuffled, for test tokens.
-- projection_end is *real* value for last projection index (or some lower value)
-- this is so the samples / means are of real values learned on training data
'''
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation_PROJECTION')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
## 'remove' word representations by randomising them. As model is unpickled and
## not re-saved, this does not throw trained parameters away.
if synth_mode == 'sampled_training':
## use randomly chosen training projection -- shuffling in place is the same as sampling without replacement
P = dnn_model.get_projection_weights()
numpy.random.shuffle(P[:, :projection_end]) ## shuffle in place along 1st dim (reorder rows)
dnn_model.params[0].set_value(P, borrow=True)
elif synth_mode == 'uniform':
## generate utt embeddings uniformly at random within the min-max of the training set (i.e. from a (hyper)-rectangle)
P = dnn_model.get_projection_weights()
column_min = numpy.min(P[:, :projection_end], axis=0) ## vector like a row of P with min of its columns
column_max = numpy.max(P[:, :projection_end], axis=0)
random_proj = numpy.random.uniform(low=column_min, high=column_max, size=numpy.shape(P))
random_proj = random_proj.astype(numpy.float32)
dnn_model.params[0].set_value(random_proj, borrow=True)
elif synth_mode == 'constant':
## use mean projection
P = dnn_model.get_projection_weights()
mean_row = P[:, :projection_end].mean(axis=0)
print('mean row used for projection:')
print(mean_row)
P = numpy.ones(
|
numpy.shape(P)
|
numpy.shape
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph algorithm generators.
Currently implements the following:
- Depth-first search (Moore, 1959)
- Breadth-first search (Moore, 1959)
- Topological sorting (Knuth, 1973)
- Articulation points
- Bridges
- Kosaraju's strongly-connected components (Aho et al., 1974)
- Kruskal's minimum spanning tree (Kruskal, 1956)
- Prim's minimum spanning tree (Prim, 1957)
- Bellman-Ford's single-source shortest path (Bellman, 1958)
- Dijkstra's single-source shortest path (Dijkstra, 1959)
- DAG shortest path
- Floyd-Warshall's all-pairs shortest paths (Floyd, 1962)
- Edmonds-Karp bipartite matching (Edmonds & Karp, 1972)
See "Introduction to Algorithms" 3ed (CLRS3) for more information.
"""
# pylint: disable=invalid-name
from typing import Tuple
import chex
from clrs._src import probing
from clrs._src import specs
import numpy as np
_Array = np.ndarray
_Out = Tuple[_Array, probing.ProbesDict]
_OutputClass = specs.OutputClass
def dfs(A: _Array) -> _Out:
"""Depth-first search (Moore, 1959)."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['dfs'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
color = np.zeros(A.shape[0], dtype=np.int32)
pi = np.arange(A.shape[0])
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
pi[v] = u
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
break
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)})
probing.finalize(probes)
return pi, probes
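# A minimal usage sketch (illustrative only; `_dfs_demo` is not part of the
# original library) showing how the generators above are called and what they
# return: the algorithm output together with the recorded probes.
def _dfs_demo() -> _Out:
  """Run `dfs` on a hypothetical 4-node directed graph."""
  A_demo = np.array([[0., 1., 0., 0.],
                     [0., 0., 1., 0.],
                     [1., 0., 0., 1.],
                     [0., 0., 0., 0.]])
  pi_demo, probes_demo = dfs(A_demo)
  return pi_demo, probes_demo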
def bfs(A: _Array, s: int) -> _Out:
"""Breadth-first search (Moore, 1959)."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['bfs'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
's': probing.mask_one(s, A.shape[0]),
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
reach = np.zeros(A.shape[0])
pi = np.arange(A.shape[0])
reach[s] = 1
while True:
prev_reach = np.copy(reach)
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'reach_h': np.copy(prev_reach),
'pi_h': np.copy(pi)
})
for i in range(A.shape[0]):
for j in range(A.shape[0]):
if A[i, j] > 0 and prev_reach[i] == 1:
if pi[j] == j and j != s:
pi[j] = i
reach[j] = 1
if np.all(reach == prev_reach):
break
probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)})
probing.finalize(probes)
return pi, probes
def topological_sort(A: _Array) -> _Out:
"""Topological sorting (Knuth, 1973)."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['topological_sort'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
color = np.zeros(A.shape[0], dtype=np.int32)
topo = np.arange(A.shape[0])
s_prev = np.arange(A.shape[0])
topo_head = 0
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
while True:
if color[u] == 0:
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
break
if s_last == u:
color[u] = 2
if color[topo_head] == 2:
topo[u] = topo_head
topo_head = u
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(
probes,
specs.Stage.OUTPUT,
next_probe={
'topo': np.copy(topo),
'topo_head': probing.mask_one(topo_head, A.shape[0])
})
probing.finalize(probes)
return topo, probes
def articulation_points(A: _Array) -> _Out:
"""Articulation points."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['articulation_points'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
color = np.zeros(A.shape[0], dtype=np.int32)
pi = np.arange(A.shape[0])
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
low = np.zeros(A.shape[0])
child_cnt = np.zeros(A.shape[0])
is_cut = np.zeros(A.shape[0])
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
low[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
pi[v] = u
color[v] = 1
s_prev[v] = s_last
s_last = v
child_cnt[u] += 0.01
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
break
elif v != pi[u]:
low[u] = min(low[u], d[v])
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
for v in range(A.shape[0]):
if pi[v] == u:
low[u] = min(low[u], low[v])
if pi[u] != u and low[v] >= d[u]:
is_cut[u] = 1
if pi[u] == u and child_cnt[u] > 0.01:
is_cut[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(
probes,
specs.Stage.OUTPUT,
next_probe={'is_cut': np.copy(is_cut)},
)
probing.finalize(probes)
return is_cut, probes
def bridges(A: _Array) -> _Out:
"""Bridges."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['bridges'])
A_pos = np.arange(A.shape[0])
adj = probing.graph(np.copy(A))
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': adj
})
color = np.zeros(A.shape[0], dtype=np.int32)
pi = np.arange(A.shape[0])
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
low = np.zeros(A.shape[0])
is_bridge = (
np.zeros((A.shape[0], A.shape[0])) + _OutputClass.MASKED.value + adj)
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
low[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
pi[v] = u
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
break
elif v != pi[u]:
low[u] = min(low[u], d[v])
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
for v in range(A.shape[0]):
if pi[v] == u:
low[u] = min(low[u], low[v])
if low[v] > d[u]:
is_bridge[u, v] = 1
is_bridge[v, u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(
probes,
specs.Stage.OUTPUT,
next_probe={'is_bridge': np.copy(is_bridge)},
)
probing.finalize(probes)
return is_bridge, probes
def strongly_connected_components(A: _Array) -> _Out:
"""Kosaraju's strongly-connected components (Aho et al., 1974)."""
chex.assert_rank(A, 2)
probes = probing.initialize(
specs.SPECS['strongly_connected_components'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
scc_id = np.arange(A.shape[0])
color = np.zeros(A.shape[0], dtype=np.int32)
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
A_t = np.transpose(A)
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 0
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 0
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 0
})
break
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 0
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
color = np.zeros(A.shape[0], dtype=np.int32)
s_prev = np.arange(A.shape[0])
for s in np.argsort(-f):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 1
})
while True:
scc_id[u] = s
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 1
})
for v in range(A.shape[0]):
if A_t[u, v] != 0:
if color[v] == 0:
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 1
})
break
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 1
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(
probes,
specs.Stage.OUTPUT,
next_probe={'scc_id': np.copy(scc_id)},
)
probing.finalize(probes)
return scc_id, probes
def mst_kruskal(A: _Array) -> _Out:
"""Kruskal's minimum spanning tree (Kruskal, 1956)."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['mst_kruskal'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
pi = np.arange(A.shape[0])
def mst_union(u, v, in_mst, probes):
root_u = u
root_v = v
mask_u = np.zeros(in_mst.shape[0])
mask_v = np.zeros(in_mst.shape[0])
mask_u[u] = 1
mask_v[v] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'in_mst_h': np.copy(in_mst),
'pi': np.copy(pi),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
'root_u': probing.mask_one(root_u, A.shape[0]),
'root_v': probing.mask_one(root_v, A.shape[0]),
'mask_u': np.copy(mask_u),
'mask_v': np.copy(mask_v),
'phase': probing.mask_one(1, 3)
})
while pi[root_u] != root_u:
root_u = pi[root_u]
for i in range(mask_u.shape[0]):
if mask_u[i] == 1:
pi[i] = root_u
mask_u[root_u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'in_mst_h': np.copy(in_mst),
'pi': np.copy(pi),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
'root_u': probing.mask_one(root_u, A.shape[0]),
'root_v': probing.mask_one(root_v, A.shape[0]),
'mask_u':
|
np.copy(mask_u)
|
numpy.copy
|
# Advent of Code 2017, Day 14
# (c) blu3r4y
import numpy as np
X, Y = 128, 128
def knot_hash(text):
"""
Advent of Code 2017, Day 10
"""
def knot_round(nums, lens, pos=0, skip=0):
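        # One "knot-tying" round (descriptive comment): for each length le,
        # reverse le elements of the circular list starting at pos (wrapping
        # around the end), then advance pos by le + skip and increment skip.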
for le in lens:
sel = np.take(nums, np.arange(pos, pos + le), mode='wrap')
np.put(nums, np.arange(pos + le - 1, pos - 1, step=-1), sel, mode='wrap')
pos = (pos + le + skip) % len(nums)
skip += 1
return nums, pos, skip
lens = np.array([ord(ch) for ch in text] + [17, 31, 73, 47, 23])
sparse, pos, skip = np.arange(256), 0, 0
for _ in range(64):
sparse, pos, skip = knot_round(sparse, lens, pos, skip)
return np.array([np.bitwise_xor.reduce(sparse[i:i + 16]) for i in
|
np.arange(0, 256, 16)
|
numpy.arange
|
import pytest
from SciDataTool import DataLinspace, DataTime, DataPattern
from numpy import meshgrid, linspace, array
from numpy.testing import assert_array_almost_equal
@pytest.mark.validation
# @pytest.mark.DEV
def test_slice():
"""Test slicing"""
X = DataLinspace(name="X", unit="m", initial=0, final=10, number=11)
Y = DataLinspace(name="Y", unit="m", initial=0, final=100, number=11)
y, x = meshgrid(Y.get_values(), X.get_values())
field = x + y
Field = DataTime(name="Example field", symbol="Z", axes=[X, Y], values=field)
# Extract data by axis value
# 'X=1'
result = Field.get_along("X=1", "Y")
assert_array_almost_equal(field[1, :], result["Z"])
# 'X=[0, 1]'
result = Field.get_along("X=[0, 1]", "Y")
expected = field[0:2, :]
assert_array_almost_equal(expected, result["Z"])
# 'X<2' #TODO result in an error
result = Field.get_along("X<2", "Y")
expected = field[0:2, :]
# assert_array_almost_equal(expected, result["Z"])
# Extract data by operator
# mean value 'X=mean'
result = Field.get_along("X=mean", "Y")
expected = field.mean(axis=0)
assert_array_almost_equal(expected, result["Z"])
# sum 'X=sum'
result = Field.get_along("X=sum", "Y")
expected = field.sum(axis=0)
assert_array_almost_equal(expected, result["Z"])
# rms value 'X=rms'
result = Field.get_along("X=rms", "Y")
expected = (field ** 2).mean(axis=0) ** (1 / 2)
|
assert_array_almost_equal(expected, result["Z"])
|
numpy.testing.assert_array_almost_equal
|
import os
import numpy as np
import scipy.stats as st
import seaborn as sns
import matplotlib.pyplot as plt
agg_icc=False # whether aggregate ICC output or not, set it to 'True' for the first time and 'False' otherwise
datain=os.environ.get('RUN_ICC_OUTPUT') # path to ICC output from run_ICC.py
scan_type='diff' # 'same' for same data; 'diff' for test-retest data
for corr_type in ['pearson']:
def plot_mean_and_CI(mean, lb, ub, color_mean=None, color_shading=None):
# plot the shaded range of the confidence intervals
plt.fill_between(range(len(mean)), ub, lb,
color=color_shading, alpha=.5)
# plot the mean on top
plt.plot(mean, color_mean)
plt.show()
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
#x_grid = np.linspace(-4.5, 3.5, 1000)
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
        kde = st.gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
return kde.evaluate(x_grid)
    def re_sample(x,y,x_grid):
        """Resample a step function defined by the knots (x, y) onto the regular grid x_grid."""
y_out=np.linspace(0, 0, len(x_grid))
stop=0
for jj in range((len(x)-1)):
for ii in range(len(x_grid)):
grid=x_grid[ii]
if grid >= x[jj] and grid <= x[jj+1]:
y_out[ii]=y[jj]
if x[jj]== x[jj+1] and x[jj]==1 and stop == 0:
if grid >= x[jj]:
y_out[ii]=y[jj]
stop=1
return y_out
num_pair=72
num_rand_times=10
binnum=2000
x_grid=np.linspace(0, 1, binnum)
# combine different random runs together
if agg_icc:
for pl in range(0,num_pair):
for ses in range(1,4):
if os.path.isfile(datain+'/Random_All_Pipeline-'+str(pl)+'_Ses_'+str(ses)+'.txt'):
continue
# each is a single plot
dataall = np.empty((num_rand_times,binnum))
for random_num in range(0,num_rand_times):
file=datain+'/Random-'+str(random_num)+'_Pipeline-'+str(pl)+'_Ses_'+str(ses)+'.txt'
if os.path.isfile(file):
data=np.loadtxt(file)
data=data[data>
|
np.finfo(np.float32)
|
numpy.finfo
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Code to extract images patches that maximally activate particular neurons
# in an Atari convnet. Note that right now this code is specifically fit
# to the Atari convnet structure, and would need adaptation and generalization
# to fit to arbitrary structures. This would likely be non-trivial because it
# requires some reflection on the structure of the network (reasoning about
# pooling / convs) to calculate receptive fields at particular layers, etc.
import sys
import tensorflow as tf
import lucid
import numpy as np
import atari_zoo
from atari_zoo import MakeAtariModel
from atari_zoo.rollout import generate_rollout
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.transform as transform
import lucid.optvis.render as render
import atari_zoo.utils
from atari_zoo.utils import conv_activations_to_canvas
from atari_zoo.utils import fc_activations_to_canvas
from lucid.optvis.render import import_model
from matplotlib.pyplot import *
#from IPython import embed
# receptive field at conv3 is 36
# receptive field at conv2 is 20
# receptive field at conv1 is 8 (8x8 conv...)
def pad_image(image, padSize, pad_values=0.):
"""
Function that pads an image on all 4 sides, each side having the same padding.
simulating the receptive field that can be larger than original image
image: shape (batch, h, w, c) or (h, w, c)
padSize: integer. Number of pixels to pad each side
pad_values: what value to pad it with
"""
if len(image.shape) == 4: # (batch, h, w, c)
pads = ((0,0), (padSize,padSize),(padSize,padSize), (0,0))
elif len(image.shape) == 3: # (h, w, c)
pads = ((padSize,padSize),(padSize,padSize), (0,0))
else:
raise ValueError('Unsupported representation shape {}'.format(image.shape))
ret = np.pad(image, pads, 'constant', constant_values=pad_values)
return ret
def get_obs_patch(observation, ii, jj, receptive_stride=(36,8), pad_each_side=4+2*4+1*8,plot=False):
""" Function that get a patch from an observation matrix, according to
a (ii, jj) location at a layer higher up
observation: (batch, h, w, c), normally (batch, 84, 84, 4)
ii: integer index in the h dimension
jj: integer index in the w dimension
receptive_stride: a tuple of (receptive field size, stride size) indicating from this higher-up
layer where (ii, jj) is located, the size of receptive field and stride into the observation.
For networks used in this application, the three conv layers have, respectively,
(8,4), (20,8), (36,8)
onto the original observation.
    pad_each_side: how much the observation should be padded, because the receptive field can at
    some point extend outside of the original image. There are 3 conv layers, with
    filter sizes of 8, 4, and 3 and strides of 4, 2, and 1. Under the "same" padding they use, the eventual
    padding is 4 + 4*2 + 1*2*4 = 20
"""
repp = pad_image(observation, pad_each_side) # pad to (112,112,4)
(rec_size, stride) = receptive_stride
# the field to look at in observation
top = int(ii*stride-rec_size/2)
bot = int(ii*stride+rec_size/2)
left = int(jj*stride-rec_size/2)
right = int(jj*stride+rec_size/2)
#print('Before pad: ', top, bot, left, right)
print('bottom left location in original obs: ({},{})'.format(bot, left))
[new_top, new_bot, new_left, new_right] = [k+pad_each_side for k in [top,bot,left,right]]
#print('After pad: ', new_top, new_bot, new_left, new_right)
#figure(figsize=(10,4))
if plot:
for cc in range(observation.shape[-1]):
subplot(101+observation.shape[-1]*10+cc)
#print('bottom left location in padded obs: ({},{})'.format(bot+pad_each_side, left+pad_each_side))
matshow(repp[new_top:new_bot,new_left:new_right,cc], fignum=0)
#print(repp[new_top:new_bot,new_left:new_right,cc].shape)
return repp[new_top:new_bot,new_left:new_right,observation.shape[-1]-1], (top, left)
def build_model_get_act(algo, env, run_id=1, tag='final', local=True, which_layer=2):
""" Function that builds/loads a model given algorithm algo and environment env, etc.,
and obtain activations at a specific layer.
which_layer: the index into layers. 0->Conv1, 1->Conv2, 2->Conv3, 3->FC
"""
# Activation map shapes:
# 0 Online/Conv/Relu (21, 21, 32)
# 1 Online/Conv_1/Relu (11, 11, 64)
# 2 Online/Conv_2/Relu (11, 11, 64)
# 3 Online/fully_connected/Relu (512)
#
#TODO
# load model
m = MakeAtariModel(algo, env, run_id, tag=tag)()
nA = atari_zoo.game_action_counts[env]
acts_shapes = [(0,21,21,32), (0,11,11,64), (0,11,11,64), (0,512),(0,nA)]
# getting frames, observations
obs = m.get_observations()
frames = m.get_frames()
    # get the flow ready from the observation to the layer activation you want
m.load_graphdef()
#get a tf session
session = atari_zoo.utils.get_session()
#create a placeholder input to the network
X_t = tf.placeholder(tf.float32, [None] + m.image_shape)
#now get access to a dictionary that grabs output layers from the model
T = import_model(m,X_t,X_t)
# the activation tensor we want
acts_T = T(m.layers[which_layer]['name'])
try:
acts = session.run(acts_T, {X_t: obs})
    except Exception:
        # some models do not allow a batch size > 1, so run them one observation at a time
acts = np.empty(acts_shapes[which_layer])
for obs_1 in obs:
obs_1 =
|
np.expand_dims(obs_1, axis=0)
|
numpy.expand_dims
|
"""classic Acrobot task"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
from rlpy.Tools import wrap, bound, lines, fromAtoB, rk4
from .Domain import Domain
import numpy as np
import matplotlib.pyplot as plt
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>"]
__license__ = "BSD 3-Clause"
__author__ = "<NAME> <<EMAIL>>"
class Acrobot(Domain):
"""
Acrobot is a 2-link pendulum with only the second joint actuated
    Initially, both links point downwards. The goal is to swing the
    end-effector to a height of at least one link length above the base.
Both links can swing freely and can pass by each other, i.e., they don't
collide when they have the same angle.
**STATE:**
The state consists of the two rotational joint angles and their velocities
    [theta1 theta2 thetaDot1 thetaDot2]. An angle of 0 corresponds
to the respective link pointing downwards (angles are in world coordinates).
**ACTIONS:**
The action is either applying +1, 0 or -1 torque on the joint between
the two pendulum links.
.. note::
The dynamics equations were missing some terms in the NIPS paper which
        are present in the book. R. Sutton confirmed in personal correspondence
that the experimental results shown in the paper and the book were
generated with the equations shown in the book.
However, there is the option to run the domain with the paper equations
by setting book_or_nips = 'nips'
**REFERENCE:**
.. seealso::
R. Sutton: Generalization in Reinforcement Learning:
Successful Examples Using Sparse Coarse Coding (NIPS 1996)
.. seealso::
<NAME> and <NAME>:
Reinforcement learning: An introduction.
Cambridge: MIT press, 1998.
.. warning::
This version of the domain uses the Runge-Kutta method for integrating
the system dynamics and is more realistic, but also considerably harder
than the original version which employs Euler integration,
see the AcrobotLegacy class.
"""
episodeCap = 1000
dt = .2
continuous_dims = np.arange(4)
discount_factor = 1.
LINK_LENGTH_1 = 1. # [m]
LINK_LENGTH_2 = 1. # [m]
LINK_MASS_1 = 1. #: [kg] mass of link 1
LINK_MASS_2 = 1. #: [kg] mass of link 2
LINK_COM_POS_1 = 0.5 #: [m] position of the center of mass of link 1
LINK_COM_POS_2 = 0.5 #: [m] position of the center of mass of link 2
LINK_MOI = 1. #: moments of inertia for both links
MAX_VEL_1 = 4 * np.pi
MAX_VEL_2 = 9 * np.pi
AVAIL_TORQUE = [-1., 0., +1]
torque_noise_max = 0.
statespace_limits = np.array([[-np.pi, np.pi]] * 2
+ [[-MAX_VEL_1, MAX_VEL_1]]
+ [[-MAX_VEL_2, MAX_VEL_2]])
#: use dynamics equations from the nips paper or the book
book_or_nips = "book"
action_arrow = None
domain_fig = None
actions_num = 3
def s0(self):
self.state = np.zeros((4))
return np.zeros((4)), self.isTerminal(), self.possibleActions()
def isTerminal(self):
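        # Both links have unit length, so the height of the end-effector above
        # the pivot is -cos(theta1) - cos(theta1 + theta2); the episode terminates
        # once the tip is raised more than one link length above the base.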
s = self.state
return -np.cos(s[0]) - np.cos(s[1] + s[0]) > 1.
def step(self, a):
s = self.state
torque = self.AVAIL_TORQUE[a]
# Add noise to the force action
if self.torque_noise_max > 0:
torque += self.random_state.uniform(-
self.torque_noise_max, self.torque_noise_max)
# Now, augment the state with our force action so it can be passed to
# _dsdt
s_augmented = np.append(s, torque)
ns = rk4(self._dsdt, s_augmented, [0, self.dt])
# only care about final timestep of integration returned by integrator
ns = ns[-1]
ns = ns[:4] # omit action
# ODEINT IS TOO SLOW!
# ns_continuous = integrate.odeint(self._dsdt, self.s_continuous, [0, self.dt])
# self.s_continuous = ns_continuous[-1] # We only care about the state
# at the ''final timestep'', self.dt
ns[0] = wrap(ns[0], -np.pi, np.pi)
ns[1] = wrap(ns[1], -np.pi, np.pi)
ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1)
ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2)
self.state = ns.copy()
terminal = self.isTerminal()
reward = -1. if not terminal else 0.
return reward, ns, terminal, self.possibleActions()
def _dsdt(self, s_augmented, t):
m1 = self.LINK_MASS_1
m2 = self.LINK_MASS_2
l1 = self.LINK_LENGTH_1
lc1 = self.LINK_COM_POS_1
lc2 = self.LINK_COM_POS_2
I1 = self.LINK_MOI
I2 = self.LINK_MOI
g = 9.8
a = s_augmented[-1]
s = s_augmented[:-1]
theta1 = s[0]
theta2 = s[1]
dtheta1 = s[2]
dtheta2 = s[3]
d1 = m1 * lc1 ** 2 + m2 * \
(l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * np.cos(theta2)) + I1 + I2
d2 = m2 * (lc2 ** 2 + l1 * lc2 * np.cos(theta2)) + I2
phi2 = m2 * lc2 * g * np.cos(theta1 + theta2 - old_div(np.pi, 2.))
phi1 = - m2 * l1 * lc2 * dtheta2 ** 2 * np.sin(theta2) \
- 2 * m2 * l1 * lc2 * dtheta2 * dtheta1 *
|
np.sin(theta2)
|
numpy.sin
|
# Copyright 2021 The TEMPO Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module on physical information of the system.
"""
from typing import Callable, Dict, List, Optional, Text
from copy import copy
from functools import lru_cache
import numpy as np
from numpy import ndarray
from time_evolving_mpo.base_api import BaseAPIClass
from time_evolving_mpo.config import NpDtype
from time_evolving_mpo.util import acommutator, commutator
from time_evolving_mpo.util import left_right_super
def _check_hamiltonian(hamiltonian):
"""Input checking for a single Hamiltonian. """
try:
__hamiltonian = np.array(hamiltonian, dtype=NpDtype)
__hamiltonian.setflags(write=False)
except Exception as e:
raise AssertionError("Coupling operator must be numpy array") from e
assert len(__hamiltonian.shape) == 2, \
"Coupling operator is not a matrix."
assert __hamiltonian.shape[0] == \
__hamiltonian.shape[1], \
"Coupling operator is not a square matrix."
return __hamiltonian
def _liouvillian(hamiltonian, gammas, lindblad_operators):
"""Lindbladian for a specific Hamiltonian, gammas and lindblad_operators.
"""
liouvillian = -1j * commutator(hamiltonian)
for gamma, op in zip(gammas, lindblad_operators):
op_dagger = op.conjugate().T
liouvillian += gamma * (left_right_super(op, op_dagger) \
- 0.5 * acommutator(np.dot(op_dagger, op)))
return liouvillian
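# A minimal sketch (illustrative only; `_example_two_level_liouvillian` is not
# part of the original API) of how `_liouvillian` composes the super-operator
# of a hypothetical driven two-level system with a single decay channel.
def _example_two_level_liouvillian():
    """Return the Liouvillian of a two-level system, a d*d-by-d*d super-operator (here 4x4)."""
    sigma_x = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=NpDtype)
    sigma_minus = np.array([[0.0, 0.0], [1.0, 0.0]], dtype=NpDtype)
    return _liouvillian(0.5 * sigma_x, [0.1], [sigma_minus])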
class BaseSystem(BaseAPIClass):
"""Base class for systems. """
def __init__(
self,
dimension: int,
name: Optional[Text] = None,
description: Optional[Text] = None,
description_dict: Optional[Dict] = None) -> None:
"""Create a BaseSystem object."""
self._dimension = dimension
super().__init__(name, description, description_dict)
@property
def dimension(self) -> ndarray:
"""Hilbert space dimension of the system. """
return self._dimension
def liouvillian(self, t: Optional[float] = None) -> ndarray:
r"""
Returns the Liouvillian super-operator :math:`\mathcal{L}(t)` with
.. math::
\mathcal{L}(t)\rho = -i [\hat{H}(t), \rho]
+ \sum_n^N \gamma_n \left(
\hat{A}_n(t) \rho \hat{A}_n^\dagger(t)
- \frac{1}{2} \hat{A}_n^\dagger(t) \hat{A}_n(t) \rho
- \frac{1}{2} \rho \hat{A}_n^\dagger(t) \hat{A}_n(t)
\right),
with time :math:`t`.
Parameters
----------
t: float (default = None)
time :math:`t`.
Returns
-------
liouvillian : ndarray
Liouvillian :math:`\mathcal{L}(t)` at time :math:`t`.
"""
raise NotImplementedError(
"Class {} has no liouvillian implementation.".format(
type(self).__name__))
class System(BaseSystem):
r"""
Represents a system (without any coupling to a non-Markovian bath).
It is possible to include Lindblad terms in the master equation.
The equations of motion for a system density matrix (without any coupling
to a non-Markovian bath) is then:
.. math::
\frac{d}{dt}\rho(t) = &-i [\hat{H}, \rho(t)] \\
&+ \sum_n^N \gamma_n \left(
\hat{A}_n \rho(t) \hat{A}_n^\dagger
- \frac{1}{2} \hat{A}_n^\dagger \hat{A}_n \rho(t)
- \frac{1}{2} \rho(t) \hat{A}_n^\dagger \hat{A}_n \right)
    with `hamiltonian` :math:`\hat{H}`, the rates `gammas` :math:`\gamma_n` and
    `lindblad_operators` :math:`\hat{A}_n`.
Parameters
----------
hamiltonian: ndarray
System-only Hamiltonian :math:`\hat{H}`.
gammas: List(float)
The rates :math:`\gamma_n`.
lindblad_operators: list(ndarray)
The Lindblad operators :math:`\hat{A}_n`.
name: str
An optional name for the system.
description: str
An optional description of the system.
description_dict: dict
An optional dictionary with descriptive data.
"""
def __init__(
self,
hamiltonian: ndarray,
gammas: Optional[List[float]] = None,
lindblad_operators: Optional[List[ndarray]] = None,
name: Optional[Text] = None,
description: Optional[Text] = None,
description_dict: Optional[Dict] = None) -> None:
"""Create a System object. """
# input check for Hamiltonian.
self._hamiltonian = _check_hamiltonian(hamiltonian)
__dimension = self._hamiltonian.shape[0]
# input check gammas and lindblad_operators
if gammas is None:
gammas = []
if lindblad_operators is None:
lindblad_operators = []
assert isinstance(gammas, list), \
"Argument `gammas` must be a list)]."
assert isinstance(lindblad_operators, list), \
"Argument `lindblad_operators` must be a list."
assert len(gammas) == len(lindblad_operators), \
"Lists `gammas` and `lindblad_operators` must have the same length."
try:
__gammas = []
for gamma in gammas:
__gammas.append(float(gamma))
except Exception as e:
raise AssertionError("All elements of `gammas` must be floats.") \
from e
try:
__lindblad_operators = []
for lindblad_operator in lindblad_operators:
__lindblad_operators.append(
|
np.array(lindblad_operator, dtype=NpDtype)
|
numpy.array
|
"""Code for simulations in [1].
[1] <NAME> and <NAME>. Continuous and Discrete-Time Survival Prediction
with Neural Networks. arXiv preprint arXiv:1910.06724, 2019.
https://arxiv.org/pdf/1910.06724.pdf
"""
import numpy as np
import pandas as pd
import torchtuples as tt
from pycox.simulations import base
_TIMES = np.linspace(0, 100, 1001)
class SimBase(base._SimBase):
times = _TIMES
num_weights = NotImplemented
def __init__(self, covs_per_weight=5, betas=None):
self.covs_per_weight = covs_per_weight
self.betas = betas if betas else self.make_betas()
def make_betas(self, func=lambda m: np.random.normal(0, 1, m)):
return tuple(func(self.covs_per_weight) for _ in range(self.num_weights))
@staticmethod
def _sample_uniform(n):
return np.random.uniform(-1, 1, (n, 1))
def sample_weights(self, n):
return [self._sample_uniform(n) for _ in range(self.num_weights)]
def sample_covs(self, weights):
return [self._conditional_covariate_sampling(beta, weight)
for beta, weight in zip(self.betas, weights)]
def surv_df(self, logit_haz):
assert len(self.times) == (logit_haz.shape[1] + 1), 'Need dims to be correct'
haz = sigmoid(logit_haz)
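        # Discrete-time survival from hazards: S(t_k) = prod_{j <= k} (1 - h(t_j)),
        # presumably what `haz2surv` computes for each row below.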
surv = np.ones((len(self.times), len(haz)))
surv[1:, :] = haz2surv(haz).transpose()
return pd.DataFrame(surv, index=self.times)
@staticmethod
def _conditional_covariate_sampling(beta, weight):
beta, weight = beta.reshape(-1), weight.reshape(-1)
size = len(weight), len(beta)
u = np.random.uniform(-1, 1, size=size)
u[:, 0] = weight
x = np.empty_like(u)
x[:, :-1] = -
|
np.diff(u)
|
numpy.diff
|
import os
import torch
import cv2
import numpy as np
from easydict import EasyDict
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
import time
from core.policy import LBCBirdviewPolicy, LBCImagePolicy
from core.data import LBCImageDataset
from core.utils.simulator_utils.carla_utils import visualize_birdview
from core.utils.learner_utils.log_saver_utils import Experiment
lbc_config = dict(
exp_name='lbc_img_train_p0',
data=dict(
train=dict(
root_dir='lbc_datasets_train',
gap=5,
n_step=5,
),
val=dict(
root_dir='lbc_datasets_val',
gap=5,
n_step=5,
),
),
policy=dict(
cudnn=True,
cuda=True,
model=dict(
backbone='resnet34',
pretrained=True,
all_branch=False,
),
learn=dict(
epoches=2,
log_freq=1000,
batch_size=128,
loss='l1',
lr=1e-4,
),
gap=5,
dt=0.1,
camera_args=dict(
w=384,
h=160,
fov=90,
world_y=1.4,
fixed_offset=4.0,
n_step=5,
crop_size=192,
pixels_per_meter=5,
)
),
teacher_policy=dict(ckpt_path='model-256.th', ),
)
main_config = EasyDict(lbc_config)
CROP_SIZE = main_config.policy.camera_args.crop_size
class CoordConverter():
def __init__(
self,
w=384,
h=160,
fov=90,
world_y=1.4,
fixed_offset=4.0,
n_step=5,
crop_size=192,
pixels_per_meter=5,
device='cuda'
):
self._w = w
self._h = h
self._img_size = torch.FloatTensor([w, h]).to(device)
self._fov = fov
self._world_y = world_y
self._fixed_offset = fixed_offset
self._n_step = n_step
self._crop_size = crop_size
self._pixels_per_meter = pixels_per_meter
self._tran = np.array([0., 0., 0.])
self._rot = np.array([0., 0., 0.])
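        # Pinhole-camera focal length in pixels, derived from the horizontal
        # field of view (in degrees): f = w / (2 * tan(fov / 2)).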
f = self._w / (2 * np.tan(self._fov * np.pi / 360))
self._A =
|
np.array([[f, 0., self._w / 2], [0, f, self._h / 2], [0., 0., 1.]])
|
numpy.array
|
import PhishingDetector as PD
import SpamDetector as SD
import numpy as np
import random
import datetime
feature_size_phishing = 30
feature_size_spam = 141
model_phishing = 1
model_spam = 0
classifier_neural_network = 1
classifier_svm = 0
class Agent():
# Init an Agent
def __init__(self, features_size):
"""
        Initialise the chromosome as a random list of 0s and 1s of length features_size,
        e.g. if features_size = 5 the chromosome could be [1, 1, 1, 0, 1].
        features_size represents the number of features.
"""
self.chromosome = []
for x in range(features_size):
self.chromosome.append(random.randint(0,1))
self.chromosome = np.array(self.chromosome)
self.fitness = -1
def __str__(self):
return "Chromosome: " + str(self.chromosome) + ", with fitness " + str(self.fitness)
population = 20
generations = 100
selection_size = int(0.3 * population)
def ga(model = model_phishing, classifier = classifier_svm, features_size = feature_size_phishing):
agents = Agent.init_agents(Agent.population, features_size)
# the agent with the best fitness
best_agent = Agent(features_size)
# The generation the best agent was created
generation_best_agent = -1
for generation in range(Agent.generations):
print("Generation: "+str(generation))
agents = Agent.fitness(agents, model, classifier)
agents = Agent.selection(agents, features_size)
# check if the best new agent is better than the best_agent
if agents[0].fitness > best_agent.fitness:
            # a newly created agent has a better fitness
best_agent.chromosome = agents[0].chromosome
best_agent.fitness = agents[0].fitness
generation_best_agent = generation
agents = Agent.crossover(agents, features_size)
agents = Agent.mutation(agents, features_size)
print('----------------------------------------Best Agent So Far in '+ str(generation_best_agent)+'----------------------------------')
print(best_agent)
print('----------------------------------------Best Agent So Far in '+ str(generation_best_agent)+'----------------------------------')
if any(agent.fitness >= 0.9 for agent in agents):
print("Found an agent")
print('\n'.join(map(str, agents)))
#get the best agent with minimum value of 0.9
best_agent = max(agents, key = lambda agent: agent.fitness)
Agent.print_best_agent(best_agent, generation_best_agent, model, classifier)
#break
exit(0)
# get the best agent at the end of the generation
Agent.print_best_agent(best_agent, generation_best_agent, model, classifier)
# This function creates initial population using the Agent class, the return is a list
# size population and each agent in the population must be size features_size
def init_agents(population, features_size):
return [Agent(features_size) for _ in range(population)]
    # This function will calculate the fitness of each member of the population
def fitness(agents, model, classifier):
print("---------------------------------fitness-------------------------------")
if model is model_phishing and classifier is classifier_svm:
# Generate a phishing_detector for each agent with SVM
for agent in agents:
if agent.fitness is -1:
pd = PD.phishing_detector(agent.chromosome)
agent.fitness = float(pd.test_features_svm())
#agent.fitness = random.random()
print(agent)
elif model is model_phishing and classifier is classifier_neural_network:
# Generate a phishing_detector for each agent with ANN
for agent in agents:
pd = PD.phishing_detector(agent.chromosome)
agent.fitness = float(pd.test_features_neural_network())
print(agent)
elif model is model_spam and classifier is classifier_svm:
# Generate a spam detector for each agent with SVM
for agent in agents:
if agent.fitness is -1:
sd = SD.spam_detector(agent.chromosome)
agent.fitness = float(sd.test_features_svm())
print(agent)
elif model is model_spam and classifier is classifier_neural_network:
# Generate a spam detector for each agent with ANN
for agent in agents:
sd = SD.spam_detector(agent.chromosome)
agent.fitness = float(sd.test_features_neural_network())
print(agent)
return agents
    # Selection keeps the agents that go on to the next generation: the population
    # is sorted by fitness and only the top `selection_size` agents survive
    # (truncation selection).
def selection(agents, features_size):
print("---------------------------------selection-------------------------------")
agents = sorted(agents, key = lambda agent: agent.fitness, reverse = True)
agents = agents[:Agent.selection_size]
print('\n'.join(map(str, agents)))
return agents
# The crossover will combine the agents that were selected in the selection function
def crossover(agents, features_size):
print("---------------------------------crossover-------------------------------")
# Method 1: Add new population and keep part of the old population
new_blood = []
for _ in range(int((Agent.population - len(agents))/ 2)):
parent1 = random.choice(agents)
parent2 = random.choice(agents)
child1 = Agent(features_size)
child2 = Agent(features_size)
split_point = random.randint(0, features_size)
child1.chromosome = np.concatenate((parent1.chromosome[0:split_point], parent2.chromosome[split_point:features_size]))
child2.chromosome =
|
np.concatenate((parent2.chromosome[0:split_point], parent1.chromosome[split_point:features_size]))
|
numpy.concatenate
|
import logging
import subprocess
import sys
import numpy as np
import os
import scipy.integrate
from scipy.special import erf
from scipy.interpolate import UnivariateSpline
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching
from copy import deepcopy
import matplotlib as mpl
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import NullFormatter
import matplotlib.pyplot as plt
from plotbin import sauron_colormap as pb_sauron_colormap
from plotbin import display_pixels
# from loess.loess_2d import loess_2d
from dynamite import kinematics
from dynamite import weight_solvers
from dynamite import physical_system as physys
class ReorderLOSVDError(Exception):
pass
class Plotter():
"""Class to hold plotting routines
Class containing methods for plotting results. Each plotting method saves a
plot in the `outplot/plots` directory, and returns a `matplotlib` `figure`
object.
Parameters
----------
config : a ``dyn.config_reader.Configuration`` object
"""
def __init__(self, config=None):
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
if config is None:
text = f'{__class__.__name__} needs configuration object, ' \
'None provided.'
self.logger.error(text)
raise ValueError(text)
self.config = config
self.system = config.system
self.settings = config.settings
self.all_models = config.all_models
self.input_directory = config.settings.io_settings['input_directory']
self.plotdir = config.settings.io_settings['plot_directory']
self.modeldir = config.settings.io_settings['model_directory']
pb_sauron_colormap.register_sauron_colormap()
def make_chi2_vs_model_id_plot(self, which_chi2=None, figtype=None):
"""
Generates a (kin)chi2 vs. model id plot
Parameters
----------
which_chi2 : STR, optional
Determines whether chi2 or kinchi2 is used. If None, the setting
in the configuration file's parameter settings is used.
Must be None, 'chi2', or 'kinchi2'. The default is None.
figtype : STR, optional
Determines the file extension to use when saving the figure.
If None, the default setting is used ('.png').
Raises
------
ValueError
If which_chi2 is not one of None, 'chi2', or 'kinchi2'.
Returns
-------
fig : matplotlib.pyplot.figure
Figure instance.
"""
if figtype is None:
figtype = '.png'
if which_chi2 is None:
which_chi2 = self.settings.parameter_space_settings['which_chi2']
if which_chi2 not in ('chi2', 'kinchi2'):
text = 'which_chi2 needs to be chi2 or kinchi2, ' \
f'but it is {which_chi2}'
self.logger.error(text)
raise ValueError(text)
n_models = len(self.all_models.table)
fig = plt.figure()
plt.plot([i for i in range(n_models)],
self.all_models.table[which_chi2],
'rx')
plt.gca().set_title(f'{which_chi2} vs. model id')
plt.xlabel('model id')
plt.ylabel(which_chi2)
fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
self.logger.info(f'{which_chi2} vs. model id plot created '
f'({n_models} models).')
figname = self.plotdir + which_chi2 + '_progress_plot' + figtype
fig.savefig(figname)
self.logger.info(f'Plot {figname} saved in {self.plotdir}')
return fig
def make_chi2_plot(self, which_chi2=None, n_excl=0, figtype=None):
"""
Generates a chisquare plot
The models generated are shown on a grid of parameter space.
The best-fit model is marked with a black cross.
The coloured circles represent models within 3 sigma
confidence level (light colours and larger circles
indicate smaller values of the chisquare). The small
black dots indicate the models outside this confidence region.
Parameters
----------
which_chi2 : STR, optional
Determines whether chi2 or kinchi2 is used. If None, the setting
in the configuration file's parameter settings is used.
Must be None, 'chi2', or 'kinchi2'. The default is None.
        n_excl : integer, optional
Determines how many models (in the initial burn-in phase of
the fit) to exclude from the plot. Must be an integer number.
Default is 0 (all models are shown). Use this with caution!
figtype : STR, optional
Determines the file extension to use when saving the figure.
If None, the default setting is used ('.png').
Raises
------
ValueError
If which_chi2 is not one of None, 'chi2', or 'kinchi2'.
Returns
-------
fig : matplotlib.pyplot.figure
Figure instance.
"""
if figtype is None:
figtype = '.png'
if which_chi2 is None:
which_chi2 = self.settings.parameter_space_settings['which_chi2']
if which_chi2 not in ('chi2', 'kinchi2'):
text = 'which_chi2 needs to be chi2 or kinchi2, ' \
f'but it is {which_chi2}'
self.logger.error(text)
raise ValueError(text)
self.logger.info(f'Making chi2 plot scaled according to {which_chi2}')
pars = self.config.parspace
val = deepcopy(self.all_models.table)
# exclude the first 50, 100 (specified by the user)
# models in case the values were really off there
# (or alternatively based on too big Delta chi2)
val = val[n_excl:]
#only use models that are finished
val=val[val['all_done']==True]
# add black hole scaling
scale_factor = np.zeros(len(val))
for i in range(len(val)):
chi2val = val[which_chi2][i]
model_id=np.where(self.all_models.table[which_chi2]==chi2val)[0][0]
scale_factor[i] = \
self.all_models.get_model_velocity_scaling_factor( \
model_id=model_id)
dh = self.system.get_all_dark_non_plummer_components()
dh = dh[0] # take the first as there should only be one of these
if type(dh) is physys.NFW:
val[f'c-{dh.name}'] = val[f'c-{dh.name}']
val[f'f-{dh.name}'] = val[f'f-{dh.name}']
elif type(dh) is physys.NFW_m200_c:
pass
elif type(dh) is physys.Hernquist:
val[f'rhoc-{dh.name}']= val[f'rhoc-{dh.name}']*scale_factor**2
elif type(dh) is physys.TriaxialCoredLogPotential:
val[f'Vc-{dh.name}'] = val[f'Vc-{dh.name}']*scale_factor
elif type(dh) is physys.GeneralisedNFW:
val[f'Mvir-{dh.name}'] = val[f'Mvir-{dh.name}']*scale_factor**2
else:
text = f'unknown dark halo type component'
self.logger.error(text)
raise ValueError(text)
# get the plummer component i.e. black hole
bh = self.system.get_component_from_class(physys.Plummer)
val[f'm-{bh.name}'] = np.log10(val[f'm-{bh.name}']*scale_factor**2)
#get number and names of parameters that are not fixed
nofix_sel=[]
nofix_name=[]
nofix_latex=[]
nofix_islog=[]
for i in np.arange(len(pars)):
if pars[i].fixed==False:
pars[i].name
nofix_sel.append(i)
if pars[i].name == 'ml':
nofix_name.insert(0, 'ml')
nofix_latex.insert(0, pars[i].LaTeX)
nofix_islog.insert(0, pars[i].logarithmic)
else:
nofix_name.append(pars[i].name)
nofix_latex.append(pars[i].LaTeX)
nofix_islog.append(pars[i].logarithmic)
nnofix=len(nofix_sel)
nf=len(val)
## 1 sigma confidence level
chlim = np.sqrt(self.config.get_2n_obs())
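        # (a chi-square variable with N degrees of freedom has standard deviation
        # sqrt(2N), so sqrt(2 * n_obs) is used here as the 1-sigma fluctuation
        # scale for Delta chi2)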
chi2pmin=np.min(val[which_chi2])
chi2t = val[which_chi2] - chi2pmin
val.add_column(chi2t, name='chi2t')
val.sort(['chi2t'])
#start of the plotting
figname = self.plotdir + which_chi2 + '_plot' + figtype
colormap_orig = mpl.cm.viridis
colormap = mpl.cm.get_cmap('viridis_r')
fig = plt.figure(figsize=(10, 10))
for i in range(0, nnofix - 1):
for j in range(nnofix-1, i, -1):
xtit = ''
ytit = ''
if i==0 : ytit = nofix_latex[j]
xtit = nofix_latex[i]
pltnum = (nnofix-1-j) * (nnofix-1) + i+1
ax = plt.subplot(nnofix-1, nnofix-1, pltnum)
plt.plot(val[nofix_name[i]],val[nofix_name[j]], 'D',
color='black', markersize=2)
for k in range(nf - 1, -1, -1):
if val['chi2t'][k]/chlim<=3: #only significant chi2 values
color = colormap(val['chi2t'][k]/chlim)
# * 240) #colours the significant chi2
markersize = 10-3*(val['chi2t'][k]/(chlim))
#smaller chi2 become bigger :)
plt.plot((val[nofix_name[i]])[k],
(val[nofix_name[j]])[k], 'o',
markersize=markersize, color=color)
if val['chi2t'][k]==0:
plt.plot((val[nofix_name[i]])[k],
(val[nofix_name[j]])[k], 'x',
markersize=10, color='k')
if nofix_islog[i]:
ax.set_xscale('log')
if nofix_islog[j]:
ax.set_yscale('log')
if j==i+1:
ax.set_xlabel(xtit, fontsize=12)
ax.set_xmargin(0.5)
nbins = len(ax.get_xticklabels())
ax.xaxis.set_major_locator(MaxNLocator(nbins=nbins, prune='lower'))
else:
ax.set_xticks([])
if i==0:
ax.set_ylabel(ytit, fontsize=12)
else:
ax.yaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_minor_formatter(NullFormatter())
plt.subplots_adjust(hspace=0)
plt.subplots_adjust(wspace=0)
axcb = fig.add_axes([0.75, 0.07, 0.2, 0.02])
cb = mpl.colorbar.ColorbarBase(axcb,
cmap=plt.get_cmap('viridis_r'),
norm=mpl.colors.Normalize(vmin=0., vmax=3),
orientation='horizontal')
plt.subplots_adjust(top=0.99, right=0.99, bottom=0.07, left=0.1)
fig.savefig(figname)
self.logger.info(f'Plot {figname} saved in {self.plotdir}')
return fig
def make_contour_plot(self):
# first version written by sabine, will add in the weekend
#
pass
def plot_kinematic_maps(self,
model=None,
kin_set=0,
cbar_lims='default',
figtype=None,
**kwargs):
"""
Generates a kinematic map of a model with v, sigma, h3, h4...
Maps of the surface brightness, mean line-of-sight velocity,
velocity dispersion, and higher order Gauss–Hermite moments
are shown. The first row are data, the second row the best-fit
model, and the third row the residuals.
Parameters
----------
model : model, optional
Determines which model is used for the plot.
If model = None, the model corresponding to the minimum
chisquare (so far) is used; the setting in the configuration
file's parameter settings is used to determine which chisquare
to consider. The default is None.
kin_set : integer or 'all'
Determines which kinematic set to use for the plot.
The value of this parameter should be the index of the data
set (e.g. kin_set=0 , kin_set=1). The default is kin_set=0.
If kin_set='all', several kinematic maps are produced, one
for each kinematic dataset. A list of (fig,kin_set_name) is
returned where fig are figure objects and kin_set_name are
the names of the kinematics sets.
cbar_lims : STR
Determines which set of values is used to determine the
limiting values defining the colorbar used in the plots.
Accepted values: 'model', 'data', 'combined', 'default'.
The default is 'data' for GaussHermite kinematics, and [0,3] for
BayesLOSVD kinematics where reduced chi2 values are plotted.
figtype : STR, optional
Determines the file extension to use when saving the figure.
If None, the default setting is used ('.png').
Raises
------
ValueError
If kin_set is not smaller than the number of kinematic sets.
ValueError
If cbar_lims is not one of 'model', 'data', or 'combined'.
Returns
-------
list or `matplotlib.pyplot.figure`
If kin_set == 'all', returns a list of `(matplotlib.pyplot.figure, string)`
tuples, i.e. the figure instances along with the names of the kinematic sets;
otherwise, returns a single `matplotlib.pyplot.figure`.
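Examples
--------
A minimal usage sketch (illustrative; assumes ``plotter`` is an instance of
this plotting class for a run with finished models):

    >>> fig = plotter.plot_kinematic_maps(kin_set=0)
    >>> for fig, kin_name in plotter.plot_kinematic_maps(kin_set='all'):
    ...     print(f'plotted kinematic set {kin_name}')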
"""
# Taken from schw_kin.py.
if figtype is None:
figtype = '.png'
stars = \
self.system.get_component_from_class(physys.TriaxialVisibleComponent)
n_kin = len(stars.kinematic_data)
#########################################
if kin_set == 'all':
self.logger.info(f'Plotting kinematic maps for {n_kin} kin_sets.')
figures = []
for i in range(n_kin):
fig = self.plot_kinematic_maps(model=model,
kin_set=i,
cbar_lims=cbar_lims)
figures.append((fig, stars.kinematic_data[i].name))
return figures # returns a list of (fig,kin_name) tuples
#########################################
if kin_set >= n_kin:
text = f'kin_set must be < {n_kin}, but it is {kin_set}'
self.logger.error(text)
raise ValueError(text)
kin_name = stars.kinematic_data[kin_set].name
self.logger.info(f'Plotting kinematic maps for kin_set no {kin_set}: '
f'{kin_name}')
if model is None:
which_chi2 = self.settings.parameter_space_settings['which_chi2']
models_done = np.where(self.all_models.table['all_done'])
min_chi2 = min(m[which_chi2]
for m in self.all_models.table[models_done])
t = self.all_models.table.copy(copy_data=True) # deep copy!
t.add_index(which_chi2)
model_id = t.loc_indices[min_chi2]
model = self.all_models.get_model_from_row(model_id)
kin_type = type(stars.kinematic_data[kin_set])
ws_type = self.settings.weight_solver_settings['type']
if kin_type is kinematics.GaussHermite:
if ws_type == 'LegacyWeightSolver':
if cbar_lims=='default':
cbar_lims = 'data'
fig = self._plot_kinematic_maps_gaussherm(
model,
kin_set,
cbar_lims=cbar_lims,
**kwargs)
else:
self.logger.info('Gauss Hermite kinematic maps can only be '
'plotted if LegacyWeightSolver is used')
fig = plt.figure(figsize=(27, 12))
elif kin_type is kinematics.BayesLOSVD:
if cbar_lims=='default':
cbar_lims = [0,3]
fig = self._plot_kinematic_maps_bayeslosvd(
model,
kin_set,
cbar_lims=cbar_lims,
**kwargs)
figname = self.plotdir + f'kinematic_map_{kin_name}' + figtype
fig.savefig(figname, dpi=300)
return fig
def _plot_kinematic_maps_bayeslosvd(self,
model,
kin_set,
cmap=None,
cbar_lims=[0,3],
color_dat='0.3',
color_mod='C2'):
"""Short summary.
Parameters
----------
model : type
Description of parameter `model`.
kin_set : type
Description of parameter `kin_set`.
cmap : type
Description of parameter `cmap`.
cbar_lims : type
Description of parameter `cbar_lims`.
color_dat : type
Description of parameter `color_dat`.
color_mod : type
Description of parameter `color_mod`.
Returns
-------
type
Description of returned object.
"""
# get the data
stars = \
self.system.get_component_from_class(physys.TriaxialVisibleComponent)
kin_set = stars.kinematic_data[kin_set]
# helper function to decide which losvds to plot
def dissimilar_subset_greedy_search(distance_matrix, target_size):
"""Greedy algorithm to find dissimilar subsets
Args:
distance_matrix (array): 2D matrix of pairwise distances.
target_size (int): Desired size of subset.
Returns:
tuple: (list of index values of subset in distance_matrix,
minimum pairwise distance in this subset)
"""
n = distance_matrix.shape[0]
idx = np.unravel_index(np.argmax(distance_matrix), distance_matrix.shape)
idx = list(idx)
tmp = distance_matrix[idx][:,idx]
for n0 in range(3, target_size+1):
iii = list(range(n))
for idx0 in idx:
iii.remove(idx0)
ttt = []
for i in iii:
idx_tmp = idx + [i]
tmp = distance_matrix[idx_tmp][:,idx_tmp]
ttt += [np.min(tmp[np.triu_indices(n0, k=1)])]
idx += [iii[np.argmax(ttt)]]
tmp = distance_matrix[idx][:,idx]
min_pairwise_dist = np.min(tmp[np.triu_indices(target_size, k=1)])
return idx, min_pairwise_dist
# Copyright 2020 The TensorFlow Recommenders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint-as: python3
"""Tests for factorized top K layers."""
import itertools
import os
from typing import Any, Dict, Iterator
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_recommenders.layers import factorized_top_k
def test_cases(
k=(5, 10),
batch_size=(3, 16),
num_queries=(3, 15, 16),
num_candidates=(1024, 128),
indices_dtype=(str, None),  # np.str was removed in NumPy 1.24; the builtin str is equivalent
use_exclusions=(True, False)) -> Iterator[Dict[str, Any]]:
"""Generates test cases.
Generates all possible combinations of input arguments as test cases.
Args:
k: The number of candidates to retrieve.
batch_size: The query batch size.
num_queries: Number of queries.
num_candidates: Number of candidates.
indices_dtype: The type of indices.
use_exclusions: Whether to test exclusions.
Yields:
Keyword argument dicts.
"""
keys = ("k", "batch_size", "num_queries", "num_candidates", "indices_dtype",
"use_exclusions")
for values in itertools.product(k, batch_size, num_queries, num_candidates,
indices_dtype, use_exclusions):
yield dict(zip(keys, values))
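# Illustrative note (not part of the original test file): each yielded dict is a
# keyword-argument set such as
#   {'k': 5, 'batch_size': 3, 'num_queries': 3, 'num_candidates': 1024,
#    'indices_dtype': str, 'use_exclusions': True}
# which the parameterized tests below consume, e.g. via
# @parameterized.parameters(*test_cases()).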
class FactorizedTopKTestBase(tf.test.TestCase, parameterized.TestCase):
def run_top_k_test(self,
layer_class,
k,
batch_size,
num_queries,
num_candidates,
indices_dtype,
use_exclusions,
random_seed=42,
check_export=True):
layer = layer_class(k=k)
rng = np.random.RandomState(random_seed)
import cv2, numpy as np
import random
import datetime
'''
Class to handle information about, and predictions for, tracked objects.
'''
class track(object):
'''
Constructor for initialization of variables and the Kalman filter
Parameters
----------
pos: tuple, with position information (x, y)
id: integer, used as identifier for tracks
dt: float, delta time between measurements (default 0.004)
fps: float, frames per second of the source video (default 0)
'''
def __init__(self, pos, id, dt=0.004, fps = 0):
x = pos[0]
y = pos[1]
mp = np.array([np.float32(x), np.float32(y)])
self.dt = dt
self.kalman = cv2.KalmanFilter(4, 2, 2)
self.kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
self.kalman.transitionMatrix = np.array([[1, 0, self.dt, 0], [0, 1, 0, self.dt], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
self.kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03
self.kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 0.001
self.kalman.controlMatrix = np.array([0,1], np.float32)
self.kalman.statePost[0][0] = x
self.kalman.statePost[1][0] = y
self.pos = pos
self.meas = []
self.kalman.correct(mp)
self.pred = self.kalman.predict()
self.pred = (int(self.pred[0]), int(self.pred[1]))
self.strike = 0
self.id = id
self.color = (random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255))
self.track = []
self.veloVec = []
self.acceleration = []
self.velocity = []
self.directionVect = []
self.datetime=[]
self.fps = fps
# self.kalman.correct(mp)
self.correct = []
self.false = []
#print(self.pos, self.pred, flag)
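# Minimal usage sketch (illustrative, not from the original source):
#   t = track((10, 20), id=0, dt=0.04)
#   t.predict((12, 22))   # feed the next measurement; updates t.pred and t.velocity
#   print(t.pred, t.velocity[-1])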
'''
Function to correct the Kalman filter and predict position and velocity.
Parameters
----------
meas: 2D position tuple, used to correct the Kalman filter and predict the new state
'''
def predict(self, meas):
# time
date = datetime.datetime.now()
self.datetime.append(date)
# kalman
#print(self.kalman.controlMatrix)
self.false.append(self.pred)
px = self.pos[0]
py = self.pos[1]
self.pos = meas
x = meas[0]
y = meas[1]
mp = np.array([np.float32(x), np.float32(y)])
self.track.append(meas)
#velocity and acceleration
vx = (px - x) / self.dt
vy = (py - y) / self.dt
if x != 0:
ax = (vx*vx)/(2*x)
else:
ax = 0
if y != 0:
ay = (vy*vy)/(2*y)
else:
ay = 0
self.acceleration.append([ax, ay])
self.velocity.append([vx, vy]) # without the starting point
self.directionVect.append([x+px, y+py])
############
self.kalman.correct(mp)
self.pred = self.kalman.predict()
# velocity
if np.shape(self.pred)
import numpy as np
import matplotlib.pyplot as plt
class dbase:
def __init__(self, A_p, x, noise, M):
self.A_p = A_p.copy()
self.M = M
self.M_p, self.N = self.A_p.shape
self.P = int(self.M / self.M_p)
self.x = x
Ax = self.A_p @ self.x
if type(noise) is int:
SNRdB = 10**(0.1*noise) / self.P
self.sigma_p = np.linalg.norm(Ax)**2 / SNRdB
n = np.random.normal(0, self.sigma_p**0.5, (self.M_p, 1))
elif type(noise).__module__ == 'numpy':
self.sigma_p = np.var(noise)
n = noise.copy()
else :
raise ValueError
self.y = Ax + n
self.s = np.zeros((self.N, 1))
self.AT_p = self.A_p.T
self.trA2_p = np.trace(self.AT_p @ self.A_p)
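# Minimal usage sketch (illustrative values, not from the original source):
#   A_p = np.random.randn(50, 200) / np.sqrt(200)   # one node's sensing matrix
#   x = np.zeros((200, 1)); x[:10] = 1.0            # sparse signal
#   node = dbase(A_p, x, noise=20, M=100)           # 20 dB SNR, M = P * M_p
#   print(node.y.shape, node.sigma_p)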
class D_Base:
def __init__(self, A, x, noise, P):
self.M, self.N = A.shape
self.P = P
self.a = self.M / self.N
self.M_p = int(self.M / self.P)
self.A_p = A.reshape(P, self.M_p, self.N)
self.x = x
if type(noise) is int:
self.noise = [noise] * self.P
elif type(noise).__module__ == 'numpy':
self.noise = noise.reshape(P, self.M_p, 1)
else :
raise ValueError
self.s = np.zeros((self.N, 1))
self.mse = np.array([None])
self.communication_cost = np.array([])
#!/usr/bin/env python
# converted to python by (from C# and Java sources):
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# most of the credit belongs to:
__credits__ = ['http://msdn.microsoft.com/en-us/library/bb259689.aspx',
'http://www.klokan.cz/projects/gdal2tiles/gdal2tiles.py']
__copyright__ = 'Copyright (c) 2013, Matrix Mariner Inc.\n' + \
'Copyright (c) 2006-2009 Microsoft Corporation. All rights reserved.\n' + \
'Copyright (c) 2008, <NAME>'
__status__ = 'Development' # 'Prototype', 'Development', or 'Production'
__license__ = 'It\'s not too clear from the original source ?(Public domain)'
'''Microsoft, Google, OpenStreetMap (ZXY) tile system conversion methods to and from:
WGS84 latitude longitude, and EPSG:900913 meter
'''
import numpy
tile_size = 256
earth_radius = 6378137.
earth_circumference = 2. * numpy.pi * earth_radius # at equator
origin_shift = earth_circumference / 2. # 20037508.342789244
min_latitude = -85.05112878
max_latitude = 85.05112878
min_longitude = -180.
max_longitude = 180.
inches_per_meter = 39.3701
max_zoom_level = 23
# Following methods adapted from http://msdn.microsoft.com/en-us/library/bb259689.aspx
def clip(num, min_value, max_value):
"""num - the number to clip
min_value - minimum allowable value
max_value - maximum allowable value
"""
return numpy.minimum(numpy.maximum(num, min_value), max_value)
def map_size(level_of_detail):
"""determines the map width and height (in pixels) at a specified level of detail
level_of_detail, from 1 (lowest detail) to 23 (highest detail)
returns map height and width in pixels
"""
return float(tile_size << level_of_detail)
def map_size_tiles(level_of_detail):
"""determines the map width and height (in tiles) at a specified level of detail
level_of_detail, from 1 (lowest detail) to 23 (highest detail)
returns map height and width in number of tiles
"""
return int(map_size(level_of_detail) / tile_size)
def ground_resolution(latitude, level_of_detail):
"""determines the ground resolution (in meters per pixel) at a specifiec latitude and
level of detail
latitude - (in decimal degrees) at which to measure the ground resolution
level_of_detail, from 1 (lowest detail) to 23 (highest detail)
returns the ground resolution in meters per pixel
"""
latitude = clip(latitude, min_latitude, max_latitude)
return numpy.cos(latitude * numpy.pi / 180.) * 2 * numpy.pi * earth_radius / map_size(level_of_detail)
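# Worked example: at the equator (latitude = 0) and level_of_detail = 1 the map
# is 512 px wide, so ground_resolution(0., 1) = cos(0) * 2 * pi * 6378137 / 512
# ~= 78271.5 metres per pixel.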
def map_scale(latitude, level_of_detail, dpi):
"""determines the map scale at a specified latitude, level of detail, and dpi resolution
latitude - (in decimal degrees) at which to measure the ground resolution
level_of_detail, from 1 (lowest detail) to 23 (highest detail)
dpi - resolution in dots per inch
"""
return ground_resolution(latitude, level_of_detail) * dpi / 0.0254
def lat_lng_to_pixel_xy(latitude, longitude, level_of_detail):
"""converts latitude/longitude WGS-84 coordinates (in decimal degrees) into pixel x,y
latitude - (in decimal degrees) to convert
longitude - (in decimal degrees) to convert
level_of_detail, from 1 (lowest detail) to 23 (highest detail)
"""
latitude = clip(latitude, min_latitude, max_latitude)
longitude = clip(longitude, min_longitude, max_longitude)
x = (longitude + 180.) / 360.
sin_lat = numpy.sin(latitude * numpy.pi / 180.)
y = .5 - numpy.log((1. + sin_lat) / (1. - sin_lat)) / (4. * numpy.pi)
m_size = map_size(level_of_detail)
x = int(clip(x*m_size + .5, 0, m_size - 1))
y = int(clip(y*m_size + .5, 0, m_size - 1))
return x, y
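# Worked example: lat_lng_to_pixel_xy(0., 0., 1) maps (lat, lng) = (0, 0) to the
# centre of the 512 x 512 px level-1 map, i.e. pixel (x, y) = (256, 256).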
def lat_lng_to_tile_xy(latitude, longitude, level_of_detail):
"""gives you zxy tile coordinate for given latitude, longitude WGS-84 coordinates (in decimal degrees)
"""
x, y = lat_lng_to_pixel_xy(latitude, longitude, level_of_detail)
return pixel_xy_to_tile_xy(x, y)
def pixel_xy_to_lat_lng(x, y, level_of_detail):
"""converts a pixel x,y coordinates at a specified level of detail into
latitude,longitude WGS-84 coordinates (in decimal degrees)
x - coordinate of point in pixels
y - coordinate of point in pixels
level_of_detail, from 1 (lowest detail) to 23 (highest detail)
"""
m_size = map_size(level_of_detail)
x = (clip(x, 0, m_size - 1) / m_size) - .5
y = .5 - (clip(y, 0, m_size - 1) / m_size)
# inverse of the Mercator mapping used in lat_lng_to_pixel_xy
lat = 90. - 360. * numpy.arctan(numpy.exp(-y * 2 * numpy.pi)) / numpy.pi
lng = 360. * x
return lat, lng
########################################################################
############## Definition of all wrappers for 2D plotting ##############
########################################################################
####################################
# Level contours
####################################
def contour(z,x=None,y=None,filled=None,xlim=None,ylim=None,xinvert=False,yinvert=False,xlog=False,ylog=False,title=None,xlabel=None,
ylabel=None,lab_loc=0,ax=None,grid=None,plot_kw={},**kwargs):
"""Level contour plotting function.
This is a wrapper for pyplot.contour() and pyplot.contourf().
Parameters
----------
z : array-like
The height values to draw the contours.
x : array-like, optional
Position of data points in the x axis.
y : array-like, optional
Position of data points in the y axis.
filled: boolean, optional
If True, draws filled contours. If not given defaults to the value defined in splotch.Params.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
xinvert : bool, optional
If True, inverts the x-axis.
yinvert : bool, optional
If True, inverts the y-axis.
xlog : bool, optional
If True, the scale of the x-axis is logarithmic. If not given defaults to the value defined in splotch.Params.
ylog : bool, optional
If True, the scale of the x-axis is logarithmic. If not given defaults to the value defined in splotch.Params.
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
output : boolean, optional
If True, returns the edges and values of the underlying histogram plus the levels of the contours.
plot_kw : dict, optional
Passes the given dictionary as a kwarg to the plotting function. Valid kwargs are QuadContourSet properties.
**kwargs: QuadContourSet properties, optional
kwargs are used to specify matplotlib specific properties such as cmap, linewidths, hatches, etc.
The list of available properties can be found here:
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.contour.html
Returns
-------
bin_edges_x : array
The bin edges for the x axis.
bin_edges_y : array
The bin edges for the y axis.
n : array
The values of the underlying histogram.
l : array
The levels for the contours.
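Examples
--------
A minimal sketch with illustrative data (assuming splotch is imported as ``splt``):

    >>> import numpy as np
    >>> zz = np.random.rand(50, 50)
    >>> splt.contour(zz)                                  # line contours on a unit grid
    >>> splt.contour(zz, filled=True, xlabel='x', ylabel='y')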
"""
from numpy import shape, linspace
from matplotlib.pyplot import contour,contourf, legend
from .base_func import axes_handler,dict_splicer,plot_finalizer
if ax is not None:
old_axes=axes_handler(ax)
if filled is None:
from .defaults import Params
filled=Params.cont_filled
if x is None:
x=linspace(0,1,z.shape[0])
if y is None:
y=linspace(0,1,z.shape[1])
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par={**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
# Create 'L' number of plot kwarg dictionaries to parse into each plot call
plotf={False:contour,True:contourf}
plotf[filled](x,y,z,**plot_par)
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
####################################
# Contours from density histograms
####################################
def contourp(x,y,percent=None,filled=None,bin_type=None,bins=None,smooth=0.0,max_spacing=True,xlim=None,ylim=None,xinvert=False,yinvert=False,
xlog=False,ylog=False,title=None,plabel=None,xlabel=None,ylabel=None,lab_loc=0,ax=None,grid=None,output=None,plot_kw={},**kwargs):
"""Contour function, encircling the highest density regions that contain the given percentages of the sample.
Parameters
----------
x : array-like
Position of data points in the x axis.
y : array-like
Position of data points in the y axis.
percent : float or array-like, optional.
The percentages of the sample that the contours encircle.
bin_type : {'number','width','edges','equal'}, optional
Defines how the value given in bins is understood: 'number' for giving the desired number
of bins, 'width' for the width of the bins, 'edges' for the edges of bins, and 'equal' for
making bins with equal number of elements (or as close as possible). If not given it is
inferred from the data type of bins: 'number' if int, 'width' if float and 'edges' if ndarray.
bins : int, float, array-like, optional
Gives the values for the bins, according to bin_type.
smooth : float, optional
The standard deviation for the Gaussian kernel. Default: 0.0 (No smoothing).
max_spacing : boolean, optional
If True, maximises the separation between colours drawn from the colour map. Default: True.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
xinvert : bool, optional
If True, inverts the x-axis.
yinvert : bool, optional
If True, inverts the y-axis.
xlog : bool, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool, optional
If True, the scale of the x-axis is logarithmic.
title : str, optional
Sets the title of the plot
plabel : array-like or boolean, optional
Specifies label(s) for the contour(s). If False, do not create a legend. If an array of
strings, sets the labels for each contour. Must be of equal length to number specified by 'percent'.
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
output : boolean, optional
If True, returns the edges and values of the underlying histogram plus the levels of the contours.
plot_kw : dict, optional
Passes the given dictionary as a kwarg to the plotting function. Valid kwargs are QuadContourSet properties.
**kwargs: QuadContourSet properties, optional
kwargs are used to specify matplotlib specific properties such as cmap, linewidths, hatches, etc.
The list of available properties can be found here:
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.contour.html
Returns
-------
bin_edges_x : array
The bin edges for the x axis.
bin_edges_y : array
The bin edges for the y axis.
n : array
The values of the underlying histogram.
l : array
The levels for the contours.
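Examples
--------
A minimal sketch with illustrative data (assuming splotch is imported as ``splt``);
the percentages are the 2D 1-sigma and 2-sigma enclosed fractions:

    >>> import numpy as np
    >>> x, y = np.random.randn(10000), np.random.randn(10000)
    >>> splt.contourp(x, y, percent=[39.3, 86.5], smooth=1.0)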
"""
from warnings import warn
from matplotlib import lines, patches, rcParams
from matplotlib.cm import get_cmap, ScalarMappable
from matplotlib.colors import Normalize
from matplotlib.pyplot import gca, sca, contour, contourf, legend, Normalize, colorbar
from numpy import array, linspace, round, ndarray, ceil
from scipy.ndimage.filters import gaussian_filter
from .base_func import axes_handler,basehist2D,percent_finder,plot_finalizer,dict_splicer,is_numeric
from .defaults import Params
# Initialise defaults
if filled is None:
filled=Params.cont_filled
if percent is None:
percent=Params.contp_percent
if 'cmap' not in kwargs.keys() and 'cmap' not in plot_kw.keys() and 'colors' not in kwargs.keys() and 'colors' not in plot_kw.keys():
plot_kw['cmap']=Params.cont_cmap
if output is None:
output=Params.contp_output
func_dict={True:contourf,False:contour}
# Assign current axis
if ax is not None:
old_axes=axes_handler(ax)
else:
ax = gca()
old_axes=ax
if type(percent) is not ndarray:
percent=array([percent]).flatten()
if type(bin_type) not in [list, tuple, ndarray]:
bin_type=[bin_type]*2
if type(bins) not in [list,tuple]:
if bins is None:
bins = max([10,int(len(x)**0.4)]) # Defaults to min of 10 bins
bins=[bins]*2
percent=percent[::-1]
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par = {**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
if filled:
plot_par['extend']='max'
if not filled and len(percent)<4 and 'colors' not in plot_par.keys(): # if drawing <4 lines with no color specified, get first color of color cycler
plot_par['colors']=[next(ax._get_lines.prop_cycler)['color']]*len(percent)
if 'colors' in plot_par.keys():
if type(plot_par['colors']) is str:
plot_par['colors']=[plot_par['colors'] for i in range(len(percent))]
if 'cmap' in plot_par.keys():
plot_par.pop('cmap')
elif max_spacing:
if type(plot_par['cmap']) is str:
plot_par['cmap']=get_cmap(plot_par['cmap'])
plot_par['colors']=[plot_par['cmap'](i) for i in linspace(0,1,len(percent))]
plot_par.pop('cmap')
if not filled and len(percent)<4 and 'linestyles' not in plot_par.keys(): # if drawing <4 lines with no color specified, use 'solid', 'dashed' and then 'dotted'
plot_par['linestyles']=[['solid','dashed','dotted'][i] for i in range(len(percent))][::-1]
# Validate labels array
if (type(plabel) in [list, tuple, ndarray]):
if (len(plabel) != len(percent)):
raise ValueError(f"Length of labels ({len(plabel)}) does not match length of percent ({len(percent)}).")
else:
if plabel is None:
if rcParams['text.usetex']:
plabel=[f'{round(p,1)}\%' for p in percent]
else:
plabel=[f'{round(p,1)}%' for p in percent]
X,Y,Z=basehist2D(x,y,None,bin_type,bins,None,None,None,xlog,ylog)
X=(X[:-1]+X[1:])/2
Y=(Y[:-1]+Y[1:])/2
level=array([percent_finder(Z,p/100) for p in percent])
plot_return=func_dict[filled](X,Y,gaussian_filter(Z.T,sigma=smooth),levels=level,**plot_par)
if plabel:
plot_par['colors']=plot_return.colors
if type(plot_par['colors']) is str:
plot_par['colors']=[plot_par['colors'] for i in range(len(percent))]
plot_par['linestyles']=plot_return.linestyles
if type(plot_par['linestyles']) is str:
plot_par['linestyles']=[plot_return.linestyles for i in range(len(percent))]
elif plot_par['linestyles'] is None:
plot_par['linestyles']=['solid' for i in range(len(percent))]
plot_par['alpha']=plot_return.alpha
if type(plot_par['alpha']) is float:
plot_par['alpha']=[plot_return.alpha for i in range(len(percent))]
elif plot_par['alpha'] is None:
plot_par['alpha']=[1.0 for i in range(len(percent))]
if filled:
legend([patches.Patch(color=plot_par['colors'][i],alpha=plot_par['alpha'][i])for i in range(len(percent))],
plabel,numpoints=1,loc=lab_loc)
else:
legend([lines.Line2D([0,1],[0,1],color=plot_par['colors'][i],linestyle=plot_par['linestyles'][i],
alpha=plot_par['alpha'][i]) for i in range(len(percent))],
plabel,numpoints=1,loc=lab_loc)
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
if output:
return(X,Y,Z.T,array(level))
####################################
# Error bands
####################################
def errorband(x,y,yerr,line=False,xlim=None,ylim=None,
xinvert=False,yinvert=False,xlog=False,ylog=None,title=None,xlabel=None,ylabel=None,
label=None,lab_loc=0,ax=None,grid=None,line_kw={},band_kw={},**kwargs):
"""Error line and band plotting function.
Parameters
----------
x : array-like or list
If list it is assumed that each element is array-like.
y : array-like or list
If list it is assumed that each element is array-like.
yerr : array-like or list, optional
Defines the length of the errorbars in the y-axis. If list it is assumed that each element is array-like.
line : boolean, optional
If True, draw a line that follows the statistic defined in line_stat.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
xinvert : bool or list, optional
If True, inverts the x-axis.
yinvert : bool or list, optional
If True, inverts the y-axis.
xlog : bool or list, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool or list, optional
If True, the scale of the x-axis is logarithmic.
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
label : str, optional
Sets the label for the plot.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
plot_kw : dict, optional
Passes the given dictionary as a kwarg to the plotting function. Valid kwargs are Line2D properties.
**kwargs: Line2D properties, optional
kwargs are used to specify matplotlib specific properties such as linecolor, linewidth, antialiasing, etc.
A list of available `Line2D` properties can be found here:
https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
Returns
-------
None
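Examples
--------
A minimal sketch with illustrative data (splotch assumed imported as ``splt``):

    >>> import numpy as np
    >>> x = np.linspace(0, 10, 50)
    >>> splt.errorband(x, np.sin(x), yerr=0.2, line=True, xlabel='x', ylabel='sin(x)')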
"""
from splotch.base_func import axes_handler,bin_axis,plot_finalizer
import numpy as np
from numbers import Number
import scipy.stats as stats
from numpy import array,percentile
from functools import partial
import matplotlib.colors as clr
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
from warnings import warn
# Handle deprecated variables
deprecated = {'plabel':'label'}
for dep in deprecated:
if dep in kwargs:
warn(f"'{dep}' will be deprecated in future verions, using '{deprecated[dep]}' instead")
if (dep=='plabel'): label = kwargs.pop(dep)
if ax is not None:
old_axes=axes_handler(ax)
else:
ax=gca()
old_axes=ax
if ylog is None:
from splotch.defaults import Params
ylog=Params.hist1D_yaxis_log
if 'linewidth' not in band_kw.keys():
band_kw['linewidth']=0
if 'alpha' not in band_kw.keys():
band_kw['alpha']=0.4
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#band_par={**plot_kw, **kwargs} # For Python > 3.5
band_kw.update(kwargs)
if len(array(yerr).shape)==2:
plt.fill_between(x,y-yerr[0],y+yerr[1],label=label,**band_kw)
else:
plt.fill_between(x,y-yerr,y+yerr,label=label,**band_kw)
if line:
plt.plot(x,y,**line_kw)
if label is not None:
plt.legend(loc=lab_loc)
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
####################################
# Error bars
####################################
def errorbar(x,y,xerr=None,yerr=None,xlim=None,ylim=None,xinvert=False,yinvert=False,xlog=False,ylog=False,
title=None,xlabel=None,ylabel=None,label=None,lab_loc=0,ax=None,grid=None,plot_kw={},**kwargs):
"""Errorbar plotting function.
This is a wrapper for pyplot.errorbar().
Parameters
----------
x : array-like or list
If list it is assumed that each element is array-like.
y : array-like or list
If list it is assumed that each element is array-like.
xerr : array-like or list, optional
Defines the length of the errorbars in the x-axis. If list it is assumed that each element is array-like.
yerr : array-like or list, optional
Defines the length of the errorbars in the y-axis. If list it is assumed that each element is array-like.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
xinvert : bool or list, optional
If True, inverts the x-axis.
yinvert : bool or list, optional
If True, inverts the y-axis.
xlog : bool or list, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool or list, optional
If True, the scale of the x-axis is logarithmic.
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
label : str, optional
Sets the label for the plot.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
plot_kw : dict, optional
Passes the given dictionary as a kwarg to the plotting function. Valid kwargs are Line2D properties.
**kwargs: Line2D properties, optional
kwargs are used to specify matplotlib specific properties such as linecolor, linewidth, antialiasing, etc.
A list of available `Line2D` properties can be found here:
https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
Returns
-------
None
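Examples
--------
A minimal sketch with illustrative data (splotch assumed imported as ``splt``):

    >>> import numpy as np
    >>> x = np.arange(10)
    >>> y = x**2.0
    >>> splt.errorbar(x, y, yerr=np.sqrt(y + 1), label='data')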
"""
from .base_func import axes_handler,dict_splicer,plot_finalizer
from matplotlib.pyplot import errorbar, legend, gca
from warnings import warn
# Handle deprecated variables
deprecated = {'plabel':'label'}
for dep in deprecated:
if dep in kwargs:
warn(f"'{dep}' will be deprecated in future verions, using '{deprecated[dep]}' instead")
if (dep=='plabel'): label = kwargs.pop(dep)
if ax is not None:
old_axes=axes_handler(ax)
else:
ax=gca()
old_axes=ax
if type(x) is not list:
x=[x]
if type(y) is not list:
y=[y]
if type(xerr) is not list:
xerr=[xerr]
if type(yerr) is not list:
yerr=[yerr]
L=len(x)
if type(label) is not list:
label=[label for i in range(L)]
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par={**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
# Create 'L' number of plot kwarg dictionaries to parse into each plot call
plot_par=dict_splicer(plot_par,L,[1]*L)
for i in range(L):
errorbar(x[i],y[i],xerr=xerr[i],yerr=yerr[i],label=label[i],**plot_par[i])
if any(label):
legend(loc=lab_loc)
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
####################################
# Error boxes
####################################
def errorbox(x,y,xerr=None,yerr=None,xlim=None,ylim=None,xinvert=False,yinvert=False,xlog=False,ylog=False,box_type='ellipse',
title=None,xlabel=None,ylabel=None,label=None,grid=None,lab_loc=0,ax=None,plot_kw={},**kwargs):
"""Errorbox plotting function.
This is a wrapper around matplotlib PatchCollections with a matplotlib errorbar functionality.
Parameters
----------
x : array-like or list
If list it is assumed that each element is array-like.
y : array-like or list
If list it is assumed that each element is array-like.
xerr : array-like or list, optional
Defines the length of the errorbars in the x-axis. If list it is assumed that each element is array-like.
yerr : array-like or list, optional
Defines the length of the errorbars in the y-axis. If list it is assumed that each element is array-like.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
xinvert : bool or list, optional
If True, inverts the x-axis.
yinvert : bool or list, optional
If True, inverts the y-axis.
xlog : bool or list, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool or list, optional
If True, the scale of the x-axis is logarithmic.
box_type : str
The type of box to plot, patch types include: ellipse | rectangle (Default: ellipse).
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
label : str, optional
Sets the label for the plot.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
plot_kw : dict, optional
Passes the given dictionary as a kwarg to the plotting function. Valid kwargs are Patches properties.
**kwargs: Patch properties, optional
kwargs are used to specify matplotlib specific properties such as facecolor, linestyle, alpha, etc.
A list of available `Patch` properties can be found here:
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.patches.Rectangle.html
Returns
-------
None
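Examples
--------
A minimal sketch with illustrative data (splotch assumed imported as ``splt``):

    >>> import numpy as np
    >>> x, y = np.random.rand(20), np.random.rand(20)
    >>> splt.errorbox(x, y, xerr=0.05, yerr=0.1, box_type='rectangle', alpha=0.3)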
"""
from .base_func import axes_handler,dict_splicer,plot_finalizer
from matplotlib.pyplot import errorbar, legend, gca
from numpy import shape, full, array
from matplotlib.collections import PatchCollection
from matplotlib.patches import Ellipse, Rectangle, Patch
from warnings import warn
# Handle deprecated variables
deprecated = {'plabel':'label','boxtype':'box_type'}
for dep in deprecated:
if dep in kwargs:
warn(f"'{dep}' will be deprecated in future verions, using '{deprecated[dep]}' instead")
if (dep=='plabel'): label = kwargs.pop(dep)
if (dep=='boxtype'): box_type = kwargs.pop(dep)
if ax is not None:
old_axes=axes_handler(ax)
else:
ax=gca()
old_axes=ax
if type(x) is not list:
x=[x]
if type(y) is not list:
y=[y]
if type(xerr) is not list:
xerr=[xerr]
if type(yerr) is not list:
yerr=[yerr]
boxdict = {'rec':Rectangle,'ell':Ellipse}
if (box_type.lower()[:3] not in ['rec','ell']):
raise ValueError(f"box_type '{box_type}' not recognised.")
L=len(x)
if type(label) is not list:
label=[label for i in range(L)]
# Validate format of xerr and yerr
for i in range(L):
# x-axis errors
if (shape(xerr[i]) == ()): # single error for all points
xerr[i]=full((2,len(x[i])), xerr[i])
else:
if (len(shape(xerr[i])) == 1):
if (shape(xerr[i])[0] == len(x[i])): # single error for each point
xerr[i]=array([xerr[i], xerr[i]])
elif (shape(xerr[i])[0] == 2): # separate upper and lower errors for all points
xerr[i]=full((len(x[i]), 2), xerr[i]).T
else:
raise ValueError(f"Invalid shape ({shape(xerr[i])}) for 'xerr' array.")
elif (len(shape(xerr[i])) == 2): # separate upper and lower errors for each point
xerr[i]=array(xerr[i])
if (shape(xerr[i])[0] != 2 or shape(xerr[i])[1] != len(x[i])):
raise ValueError(f"Invalid shape ({shape(xerr[i])}) for 'xerr' array.")
# y-axis errors
if (shape(yerr[i]) == ()): # single error for all points
yerr[i]=full((2,len(y[i])), yerr[i])
else:
if (len(shape(yerr[i])) == 1):
if (shape(yerr[i])[0] == len(y[i])): # single error for each point
yerr[i]=array([yerr[i], yerr[i]])
elif (shape(yerr[i])[0] == 2): # separate upper and lower errors for all points
yerr[i]=full((len(y[i]), 2), yerr[i]).T
else:
raise ValueError(f"Invalid shape ({shape(yerr[i])}) for 'yerr' array.")
elif (len(shape(yerr[i])) == 2): # separate upper and lower errors for each point
yerr[i]=array(yerr[i])
if (shape(yerr[i])[0] != 2 or shape(yerr[i])[1] != len(y[i])):
raise ValueError(f"Invalid shape ({shape(yerr[i])}) for 'yerr' array.")
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par={**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
# Create 'L' number of plot kwarg dictionaries to parse into each plot call
plot_par=dict_splicer(plot_par,L,[1]*L)
PathColls=[]
# Loop over data points; create box/ellipse from errors at each point
boxdict = {'rec':Rectangle,'ell':Ellipse}
boxhandles = []
for i in range(L):
errorboxes=[]
for j, (xx, yy, xe, ye) in enumerate(zip(x[i], y[i], xerr[i].T, yerr[i].T)):
errorboxes.append( boxdict[box_type.lower()[:3]]((xx - xe[0], yy - ye[0]), xe.sum(), ye.sum()) )
# Create and add patch collection with specified colour/alpha
pc=PatchCollection(errorboxes, **plot_par[i])
boxhandles.append(Patch(**plot_par[i]))
ax.add_collection(pc)
if any(label):
legend(handles=boxhandles,labels=label,loc=lab_loc)
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
####################################
# Hexagonal 2D histogram
####################################
def hexbin(x,y,bins=None,binlim=None,dens=True,scale=None,
c=None,cstat=None,xlim=None,ylim=None,clim=[None,None],nmin=0,
xinvert=False,yinvert=False,cbar_invert=False,xlog=False,ylog=False,clog=None,title=None,xlabel=None,
ylabel=None,clabel=None,lab_loc=0,ax=None,grid=None,output=None,plot_kw={},**kwargs):
"""Hexagonal 2D bins function.
Parameters
----------
x : array-like
Position of data points in the x axis.
y : array-like
Position of data points in the y axis.
bins : int or list, optional
Gives the number of bins
binlim : array-like, optional
Defines the limits for the bins. It must have one dimension and contain four elements,
with the expected order being (left, right, bottom, top).
dens : bool or list, optional
If false the histogram returns raw counts.
c : array-like, optional
If a valid argument is given in cstat, defines the value used for the binned statistics.
cstat : str or function, optional
Must be one of the valid str arguments for the statistics variable in scipy.stats.binned_statistic_2d
('mean', 'median', 'count', 'sum', 'min' or 'max') or a function that takes a 1D array and
outputs an integer or float.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
clim : list, optional
Defines the limits of the colour map ranges, it must contain two elements (lower and higher limits).
nmin : int, optional (default: 0)
The minimum number of points required in a bin in order to be plotted.
xinvert : bool, optional
If True, inverts the x-axis.
yinvert : bool, optional
If True, inverts the y-axis.
cbar_invert : bool, optional
If True, inverts the direction of the colour bar (not the colour map).
xlog : bool, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool, optional
If True, the scale of the x-axis is logarithmic.
clog : bool, optional
If True, the colour map is changed from linear to logarithmic.
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
clabel : str, optional
Setting `clabel` triggers the generation of a colourbar with axis label given by its value.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
output : boolean, optional
If True, returns the edges and values of the histogram.
plot_kw : dict, optional
Explicit dictionary of kwargs to be parsed to matplotlib hexbin function.
Parameters will be overwritten if also given implicitly as a **kwarg.
**kwargs : pcolormesh properties, optional
kwargs are used to specify matplotlib specific properties such as cmap, norm, edgecolors etc.
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hexbin.html
Returns
-------
n : array
The values of the histogram. Only provided if output is True.
x_edges : array
The bin edges for the x axis. Only provided if output is True.
y_edges : array
The bin edges for the y axis. Only provided if output is True.
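Examples
--------
A minimal sketch with illustrative data (splotch assumed imported as ``splt``):

    >>> import numpy as np
    >>> x, y = np.random.randn(5000), np.random.randn(5000)
    >>> splt.hexbin(x, y, bins=30, clabel='density')
    >>> splt.hexbin(x, y, c=x + y, cstat='median', dens=False, clabel='median(x+y)')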
"""
#dens : bool or list, optional
# If false the histogram returns raw counts.
#scale : float or list, optional
# Scaling of the data counts.
from numpy import diff, log10, nan, nanmin, nanmean, nanmedian, nanmax, nanstd, unique, size, zeros, shape
from matplotlib.colors import LogNorm
from matplotlib.pyplot import hexbin, colorbar
from .base_func import axes_handler,plot_finalizer
if ax is not None:
old_axes=axes_handler(ax)
if type(bins) not in [list,tuple]:
if bins is None:
bins=max([10,int(len(x)**0.4)]) # Defaults to min of 10 bins
bins=[bins,int(bins/(3**0.5))]
if None in (clog,output):
from .defaults import Params
if clog is None:
clog=Params.hist2D_caxis_log
if output is None:
output=Params.hist2D_output
if size([x,y])==0: # Zero-sized arrays given
if (clog == True): raise ValueError("Cannot set 'clog'=True if zero-size array given.")
if (cstat != None): raise ValueError(f"Cannot compute statistic (cstat='{cstat}') on zero-size array, set cstat=None if no data given.")
temp_x=x*1.0
temp_y=y*1.0
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par={**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
if binlim:
plot_par['extent']=binlim
#plot_par[]=dens
#plot_par[]=scale
if c is not None:
plot_par['C']=c
if cstat:
cstat_func={'min':nanmin,'mean':nanmean,'median':nanmedian,'max':nanmax,'std':nanstd}
if cstat in cstat_func.keys():
plot_par['reduce_C_function']=cstat_func[cstat]
else:
plot_par['reduce_C_function']=cstat
if clim:
plot_par['vmin']=clim[0]
plot_par['vmax']=clim[1]
if nmin:
plot_par['mincnt']=nmin
if xlog:
plot_par['xscale']='log'
temp_x=log10(temp_x)
if ylog:
plot_par['yscale']='log'
temp_y=log10(temp_y)
if clog:
plot_par['bins']='log'
if 'mincnt' not in plot_par.keys():
plot_par['mincnt']=1
if dens and c is None:
# This is nasty, but seems to be the quickest way to do this without fully rewriting hexbin here
hist_return=hexbin(temp_x,temp_y,gridsize=bins)
hist_return.remove()
offsets=hist_return.get_offsets()
offsets_x=unique(offsets[:,0])
offsets_y=unique(offsets[:,1])
hex_area=diff(offsets_x)[0]*2*diff(offsets_y)[0]
def density_scaling(bin_data):
bin_dens=1.0*len(bin_data)/(hex_area)
if scale:
bin_dens/=1.0*scale
else:
bin_dens/=len(x)
return(bin_dens)
plot_par['C']=y
plot_par['reduce_C_function']=density_scaling
hist_return=hexbin(x,y,gridsize=bins,**plot_par)
if clabel is not None:
cbar=colorbar()
cbar.set_label(clabel)
if cbar_invert:
cbar.ax.invert_yaxis()
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
if output:
return(hist_return.get_array(),hist_return.get_offsets())
####################################
# 2D histogram and binned statistics
####################################
def hist2D(x,y,bin_type=None,bins=None,dens=True,scale=None,c=None,cstat=None,xlim=None,ylim=None,clim=[None,None],nmin=0,
xinvert=False,yinvert=False,cbar_invert=False,xlog=False,ylog=False,clog=None,title=None,xlabel=None,
ylabel=None,clabel=None,lab_loc=0,ax=None,grid=None,output=None,plot_kw={},**kwargs):
"""2D histogram function.
Parameters
----------
x : array-like
Position of data points in the x axis.
y : array-like
Position of data points in the y axis.
bin_type : {'number','width','edges','equal'}, optional
Defines how the value given in bins is understood: 'number' for giving the desired number of
bins, 'width' for the width of the bins, 'edges' for the edges of bins, and 'equal' for
making bins with equal number of elements (or as close as possible). If not given it is
inferred from the data type of bins: 'number' if int, 'width' if float and 'edges' if ndarray.
bins : int, float, array-like or list, optional
Gives the values for the bins, according to bin_type.
dens : bool or list, optional
If false the histogram returns raw counts.
scale : float or list, optional
Scaling of the data counts.
c : array-like, optional
If a valid argument is given in cstat, defines the value used for the binned statistics.
cstat : str or function, optional
Must be one of the valid str arguments for the statistics variable in scipy.stats.binned_statistic_2d
('mean', 'median', 'count', 'sum', 'min' or 'max') or a function that takes a 1D array and
outputs an integer or float.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
clim : list, optional
Defines the limits of the colour map ranges, it must contain two elements (lower and higher limits).
nmin : int, optional (default: 0)
The minimum number of points required in a bin in order to be plotted.
xinvert : bool, optional
If True, inverts the x-axis.
yinvert : bool, optional
If True, inverts the y-axis.
cbar_invert : bool, optional
If True, inverts the direction of the colour bar (not the colour map).
xlog : bool, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool, optional
If True, the scale of the x-axis is logarithmic.
clog : bool, optional
If True, the colour map is changed from linear to logarithmic.
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
clabel : str, optional
Setting `clabel` triggers the generation of a colourbar with axis label given by its value.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
output : boolean, optional
If True, returns the edges and values of the histogram.
plot_kw : dict, optional
Explicit dictionary of kwargs to be parsed to matplotlib pcolormesh function.
Parameters will be overwritten if also given implicitly as a **kwarg.
**kwargs : pcolormesh properties, optional
kwargs are used to specify matplotlib specific properties such as cmap, norm, edgecolors etc.
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.pcolormesh.html
Returns
-------
n : array
The values of the histogram. Only provided if output is True.
x_edges : array
The bin edges for the x axis. Only provided if output is True.
y_edges : array
The bin edges for the y axis. Only provided if output is True.
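Examples
--------
A minimal sketch with illustrative data (splotch assumed imported as ``splt``):

    >>> import numpy as np
    >>> x, y = np.random.randn(5000), np.random.randn(5000)
    >>> counts, xedges, yedges = splt.hist2D(x, y, bins=40, dens=False,
    ...                                      clabel='N', output=True)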
"""
from numpy import nan, size, zeros, shape
from matplotlib.colors import LogNorm
from matplotlib.pyplot import pcolormesh, colorbar
from .base_func import axes_handler,basehist2D,plot_finalizer
if ax is not None:
old_axes=axes_handler(ax)
if type(bin_type) is not list:
bin_type=[bin_type]*2
if type(bins) not in [list,tuple]:
if bins is None:
bins=max([10,int(len(x)**0.4)]) # Defaults to min of 10 bins
bins=[bins]*2
if None in (clog,output):
from .defaults import Params
if clog is None:
clog=Params.hist2D_caxis_log
if output is None:
output=Params.hist2D_output
if size([x,y])==0: # Zero-sized arrays given
if (clog == True): raise ValueError("Cannot set 'clog'=True if zero-size array given.")
if (cstat != None): raise ValueError(f"Cannot compute statistic (cstat='{cstat}') on zero-size array, set cstat=None if no data given.")
X,Y,Z=basehist2D(x,y,c,bin_type,bins,scale,dens,cstat,xlog,ylog)
# Also get counts for number threshold cut
if (size([x,y])==0):
counts=zeros(shape=shape(Z))
else:
_,_,counts = basehist2D(x,y,c,bin_type,bins,None,False,None,xlog,ylog)
# Cut bins which do not meet the number count threshold
Z[counts<nmin]=nan
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par={**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
if clog:
pcolormesh(X,Y,Z.T,norm=LogNorm(vmin=clim[0],vmax=clim[1],clip=False),**plot_par)
else:
pcolormesh(X,Y,Z.T,vmin=clim[0],vmax=clim[1],**plot_par)
if clabel is not None:
cbar=colorbar()
cbar.set_label(clabel)
if cbar_invert:
cbar.ax.invert_yaxis()
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
if output:
return(Z.T,X,Y)
####################################
# Image from 2D array
####################################
def img(im,x=None,y=None,xlim=None,ylim=None,clim=[None,None],cmin=0,xinvert=False,yinvert=False,cbar_invert=False,clog=None,
title=None,xlabel=None,ylabel=None,clabel=None,lab_loc=0,ax=None,grid=None,plot_kw={},**kwargs):
"""2D pixel-based image plotting function.
Parameters
----------
im : array-like
Value for each pixel in an x-y 2D array, where the first dimension is the x-position and the
second is the y-position.
x : array-like, optional
Position of data points in the x axis.
y : array-like, optional
Position of data points in the y axis.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
clim : list, optional
Defines the limits of the colour map ranges, it must contain two elements (lower and higher limits).
clog : bool, optional
If True, the colour map is changed from linear to logarithmic.
xinvert : bool, optional
If True, inverts the x-axis.
yinvert : bool, optional
If True, inverts the y-axis.
cbar_invert : bool, optional
If True, inverts the direction of the colour bar (not the colour map).
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
clabel : str, optional
Setting `clabel` triggers the generation of a colourbar with axis label given by its value.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
plot_kw : dict, optional
Explicit dictionary of kwargs to be parsed to matplotlib pcolormesh function.
Parameters will be overwritten if also given implicitly as a **kwarg.
**kwargs : pcolormesh properties, optional
kwargs are used to specify matplotlib specific properties such as `cmap`, `marker`, `norm`, etc.
A list of available `pcolormesh` properties can be found here:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.pcolormesh.html
Returns
-------
None
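Examples
--------
A minimal sketch with illustrative data (splotch assumed imported as ``splt``):

    >>> import numpy as np
    >>> im = np.random.rand(64, 64)
    >>> splt.img(im, clabel='value', clog=False)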
"""
from numpy import arange, meshgrid
from matplotlib.colors import LogNorm
from matplotlib.pyplot import pcolormesh, colorbar
from .base_func import axes_handler,plot_finalizer
if ax is not None:
old_axes=axes_handler(ax)
if x is None:
x=arange(len(im[:,0])+1)
if y is None:
y=arange(len(im[0,:])+1)
if clog is None:
from .defaults import Params
clog=Params.img_caxis_log
X, Y=meshgrid(x, y)
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par={**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
if clog:
pcolormesh(X,Y,im.T,norm=LogNorm(vmin=clim[0],vmax=clim[1],clip=True),**plot_par)
else:
pcolormesh(X,Y,im.T,vmin=clim[0],vmax=clim[1],**plot_par)
if clabel is not None:
cbar=colorbar()
cbar.set_label(clabel)
if cbar_invert:
cbar.ax.invert_yaxis()
plot_finalizer(False,False,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
####################################
# Scatter plots
####################################
def scatter(x,y,c=None,xlim=None,ylim=None,clim=None,density=False,xinvert=False,yinvert=False,cbar_invert=False,xlog=False,ylog=False,title=None,
xlabel=None,ylabel=None,clabel=None,label=None,lab_loc=0,ax=None,grid=None,plot_kw={},**kwargs):
"""2D pixel-based image plotting function.
Parameters
----------
x : array-like or list
Position of data points in the x-axis.
y : array-like or list
Position of data points in the y-axis.
c : array-like or list or str, optional
Value of data points in the z-axis (colour-axis).
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
clim : tuple-like, optional
Defines the limits of the colour-axis, it must contain two elements (lower and higher limits).
Functions equivalently to the `vmin, vmax` arguments used by `colors.Normalize`. If both are
given, `clim` takes priority.
density : bool, optional
If True, color-codes points by their spatial density to nearby points using a Gaussian
kernel density estimate. If 'c' also given, 'density' takes precedence. Default: False.
xinvert : bool, optional
If True, inverts the x-axis.
yinvert : bool, optional
If True, inverts the y-axis.
cbar_invert : bool, optional
If True, inverts the direction of the colour bar (not the colour map).
xlog : bool, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool, optional
If True, the scale of the x-axis is logarithmic.
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
clabel : str, optional
Setting `clabel` triggers the generation of a colourbar with axis label given by its value.
label : str, optional
Sets the label for the scatter plot.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
plot_kw : dict, optional
Explicit dictionary of kwargs to be parsed to matplotlib scatter function.
Parameters will be overwritten if also given implicitly as a **kwarg.
**kwargs : Collection properties, optional
kwargs are used to specify matplotlib specific properties such as cmap, marker, norm, etc.
A list of available `Collection` properties can be found here:
https://matplotlib.org/api/collections_api.html#matplotlib.collections.Collection
Returns
-------
paths
A list of PathCollection objects representing the plotted data.
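Examples
--------
A minimal sketch with illustrative data (splotch assumed imported as ``splt``):

    >>> import numpy as np
    >>> x, y = np.random.randn(1000), np.random.randn(1000)
    >>> splt.scatter(x, y, density=True, clabel='KDE density', s=5)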
"""
from numpy import array, dtype, shape, vstack
from matplotlib.pyplot import scatter, colorbar, legend
from .base_func import axes_handler,dict_splicer,plot_finalizer
from scipy.stats import gaussian_kde
from warnings import warn
# Handle deprecated variables
deprecated = {'plabel':'label'}
for dep in deprecated:
if dep in kwargs:
warn(f"'{dep}' will be deprecated in future verions, using '{deprecated[dep]}' instead")
if (dep=='plabel'): label = kwargs.pop(dep)
if ax is not None:
old_axes=axes_handler(ax)
if type(x) is not list or (len(shape(x))==1 and array(x).dtype is not dtype('O')):
x=[x]
if type(y) is not list or (len(shape(y))==1 and array(y).dtype is not dtype('O')):
y=[y]
L=len(x)
if type(c) is not list or (len(shape(c))==1 and array(c).dtype is not dtype('O')):
c=[c]
if type(c[0]) is str or c[0] is None:
c=[c[0] for i in range(L)]
if type(label) is not list:
label=[label for i in range(L)]
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#plot_par={**plot_kw, **kwargs} # For Python > 3.5
plot_par=plot_kw.copy()
plot_par.update(kwargs)
# Insert clim as vmin, vmax into **kwargs dictionary, if given.
if (clim != None):
try:
_=(e for e in clim)
if (len(clim) == 2):
plot_par['vmin']=clim[0]
plot_par['vmax']=clim[1]
else:
raise TypeError("`clim` must be of iterable type and have two values only.")
except (TypeError):
raise TypeError("`clim` must be of iterable type and have two values only.")
if (density == True):
if (all([kk is not None for kk in c])):
warn("Cannot specify both `c` and `density`, ignoring `c`.")
c=[None]*L
for i in range(L):
xy=vstack([x[i],y[i]])
c[i]=gaussian_kde(xy)(xy) # Calculate the Gaussian kernel density estimate
# Create 'L' number of plot kwarg dictionaries to parse into each scatter call
plot_par=dict_splicer(plot_par,L,[len(i) for i in x])
paths=[]
for i in range(L):
p=scatter(x[i],y[i],c=c[i],label=label[i],**plot_par[i])
paths.append(p)
if clabel is not None:
cbar=colorbar()
cbar.set_label(clabel)
if cbar_invert:
cbar.ax.invert_yaxis()
if any(label):
legend(loc=lab_loc)
plot_finalizer(xlog,ylog,xlim,ylim,title,xlabel,ylabel,xinvert,yinvert,grid)
if ax is not None:
old_axes=axes_handler(old_axes)
return paths[0] if len(paths) == 1 else paths
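# Minimal usage sketch (assumption, not part of the original module; assumes `scatter` is
# exposed at the package level as splotch.scatter):
#   import numpy as np
#   import splotch as splt
#   x, y = np.random.randn(500), np.random.randn(500)
#   splt.scatter(x, y, density=True, clabel='KDE density', xlabel='x', ylabel='y')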
####################################
# Sector plots
####################################
def sector(r,theta,rlim=(0.0,1.0),thetalim=(0.0,360.0),clim=None,rotate=0.0,rlabel="",thetalabel="",clabel=None,label=None,rstep=None,
thetastep=15.0,rticks='auto',thetaticks='auto',cbar_invert=False,fig=None,plot_kw={},**kwargs):
""" Sector Plot function
Plots a sector plot (a.k.a "pizza plot") based on data with one radial axis and an angular axis
Parameters
----------
r : array-like or list
Radial axis data.
theta : array-like or list
Angular axis data (degrees).
rlim : tuple-like, optional
The lower and upper limits for the radial axis.
thetalim : tuple-like, optional
The lower and upper limits for the angular axis (degrees).
clim : tuple-like, optional
Defines the limits of the colour-axis, it must contain two elements (lower and higher limits).
Functions equivalently to the `vmin, vmax` arguments used by `colors.Normalize`. If both are
given, `clim` takes priority.
rotate : float, optional
By how many degrees (clockwise) to rotate the entire plot (valid values in [-180, 180]).
rlabel : str, optional
Sets the label of the r-axis.
thetalabel : str, optional
Sets the label of the theta-axis.
clabel : str, optional
Setting `clabel` triggers the generation of a colourbar with axis label given by its value.
label : str, optional
Sets the label for the scatter plot.
rstep : float, optional
Sets the step size of r ticks.
thetastep : float, optional, default: 15.0
Sets the step size of theta ticks (degrees).
rticks : 'auto', or ticker
* Not implemented *
thetaticks : 'auto', or ticker
* Not implemented *
cbar_invert : bool, optional
If True, inverts the direction of the colour bar (not the colour map).
fig : pyplot.Figure, optional
Use the given figure to make the plot, defaults to the current figure.
plot_kw : dict, optional
Explicit dictionary of kwargs to be parsed to matplotlib scatter function.
Parameters will be overwritten if also given implicitly in **kwargs.
**kwargs : Collection properties, optional
kwargs are used to specify matplotlib specific properties such as cmap, marker, norm, etc.
A list of available `Collection` properties can be found here:
https://matplotlib.org/3.1.0/api/collections_api.html#matplotlib.collections.Collection
Returns
-------
ax : The pyplot.Axes object created for the sector plot.
"""
from matplotlib.transforms import Affine2D
from matplotlib.projections.polar import PolarAxes
from matplotlib.pyplot import gcf, colorbar, legend
from mpl_toolkits.axisartist import floating_axes
from mpl_toolkits.axisartist.grid_finder import (FixedLocator, MaxNLocator, DictFormatter)
import mpl_toolkits.axisartist.angle_helper as angle_helper
from numpy import array, linspace, arange, shape, sqrt, floor, round, degrees, radians, pi
if (fig == None):
fig=gcf()
# rotate a bit for better orientation
trans_rotate=Affine2D().translate(0.0, 0)
# scale degree to radians
trans_scale=Affine2D().scale(pi/180.0, 1.)
trans=trans_rotate + trans_scale + PolarAxes.PolarTransform()
# Get theta ticks
#if (thetaticks == 'auto'):
thetaticks=arange(*radians(array(thetalim)-rotate),step=radians(thetastep))
theta_gridloc=FixedLocator(thetaticks[thetaticks/(2*pi) < 1])
theta_tickfmtr=DictFormatter(dict(zip(thetaticks,[f"{(round(degrees(tck)+rotate)):g}" for tck in thetaticks])))
#tick_fmtr=DictFormatter(dict(angle_ticks))
#tick_fmtr=angle_helper.Formatter()
if (rstep == None):
rstep=0.5
r_gridloc=FixedLocator(arange(rlim[0],rlim[1],step=rstep))
grid=floating_axes.GridHelperCurveLinear(
PolarAxes.PolarTransform(),
extremes=(*radians(array(thetalim)-rotate), *rlim),
grid_locator1=theta_gridloc,
grid_locator2=r_gridloc,
tick_formatter1=theta_tickfmtr,
tick_formatter2=None,
)
ax=floating_axes.FloatingSubplot(fig, 111, grid_helper=grid)
fig.add_subplot(ax)
# tick references
thetadir_ref=['top','right','bottom','left']
rdir_ref=['bottom','left','top','right']
# adjust axes directions
ax.axis["left"].set_axis_direction('bottom') # Radius axis (displayed)
ax.axis["right"].set_axis_direction('top') # Radius axis (hidden)
ax.axis["top"].set_axis_direction('bottom') # Theta axis (outer)
ax.axis["bottom"].set_axis_direction('top') # Theta axis (inner)
# Top theta axis
ax.axis["top"].toggle(ticklabels=True, label=True)
ax.axis["top"].major_ticklabels.set_axis_direction(thetadir_ref[(int(rotate)//90)%4])
ax.axis["top"].label.set_axis_direction(thetadir_ref[(int(rotate)//90)%4])
# Bottom theta axis
ax.axis["bottom"].set_visible(False if rlim[0] < (rlim[1]-rlim[0])/3 else True)
ax.axis["bottom"].major_ticklabels.set_axis_direction(thetadir_ref[(int(rotate)//90+2)%4])
# Visible radius axis
ax.axis["left"].major_ticklabels.set_axis_direction(rdir_ref[(int(rotate)//90)%4])
ax.axis["left"].label.set_axis_direction(rdir_ref[(int(rotate)//90)%4])
# Labels
ax.axis["left"].label.set_text(rlabel)
ax.axis["top"].label.set_text(thetalabel)
# create a parasite axes whose transData in RA, cz
sector_ax=ax.get_aux_axes(trans)
# This has a side effect that the patch is drawn twice, and possibly over some other
# artists. So, we decrease the zorder a bit to prevent this.
sector_ax.patch=ax.patch
sector_ax.patch.zorder=0.9
L=shape(theta)[0] if len(shape(theta)) > 1 else 1
plot_par=plot_kw.copy()
plot_par.update(kwargs)
# Insert clim as vmin, vmax into **kwargs dictionary, if given.
if (clim != None):
try:
_=(e for e in clim)
if (len(clim) == 2):
plot_par['vmin']=clim[0]
plot_par['vmax']=clim[1]
else:
raise TypeError("`clim` must be of iterable type and have two values only.")
except (TypeError):
raise TypeError("`clim` must be of iterable type and have two values only.")
# Create 'L' number of plot kwarg dictionaries to parse into each plot call
#plot_par=dict_splicer(plot_par,L,[1]*L)
if (L == 1):
sctr=sector_ax.scatter(theta-rotate, r, label=label, **plot_par)
else:
for ii in range(L):
sctr=sector_ax.scatter(theta[ii]-rotate, r[ii], label=label[ii],**plot_par[ii])
if clabel is not None:
cbar=colorbar(sctr)
cbar.set_label(clabel)
if cbar_invert:
cbar.ax.invert_yaxis()
return sector_ax
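# Minimal usage sketch (assumption, not part of the original module): plot points with radii
# in [0.2, 1.0] over a 90-degree wedge.
#   import numpy as np
#   import splotch as splt
#   r = np.random.uniform(0.2, 1.0, 200)
#   theta = np.random.uniform(0.0, 90.0, 200)
#   ax = splt.sector(r, theta, rlim=(0.0, 1.0), thetalim=(0.0, 90.0), rlabel='r', thetalabel='angle')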
####################################
# Statistics bands
####################################
def statband(x,y,bin_type=None,bins=None,stat_mid='mean',stat_low='std',stat_high='std',from_mid=None,line=False,xlim=None,ylim=None,
xinvert=False,yinvert=False,xlog=False,ylog=None,title=None,xlabel=None,ylabel=None,
label=None,lab_loc=0,ax=None,grid=None,line_kw={},band_kw={},**kwargs):
"""Statistics line and band plotting function.
Parameters
----------
x : array-like or list
If list it is assumed that each element is array-like.
y : array-like or list
If list it is assumed that each element is array-like.
bin_type : {'number','width','edges','equal'}, optional
Defines how the value given in bins is understood: 'number' for the desired number of bins, 'width' for the width
of the bins, 'edges' for the edges of bins, and 'equal' for making bins with equal number of elements (or as close
as possible). If not given it is inferred from the data type of bins: 'number' if int, 'width' if float and 'edges'
if ndarray.
bins : int, float, array-like or list, optional
Gives the values for the bins, according to bin_type.
stat_mid : str, int, float or function, optional
Defines how to calculate the midpoint of the statistics band. When passing a string it must be either one of the options
for scipy.stats.binned_statistic(), i.e. 'mean', 'std', 'median', 'count', 'sum', 'min', 'max' or a user-defined function.
If given as an integer or float, the number represents the value for the percentile to calculate in each bin.
A function can be given which takes (only) a 1D array of values and returns a numerical statistic.
stat_low / stat_high : str, int, float or function, optional
Defines how to calculate the lower/upper limits for the statistic band. Can be given as one of the recognised strings above or as
a string combining 'std' with a number, i.e. '[n]std', where [n] is the number of standard deviations away from the line of `stat_mid`.
Can also be given as a number (integer or float) or function as described for stat_mid.
from_mid : boolean, optional
If True, the lower/upper bounds of the band are determined as the separation from the stat_mid line: i.e. stat_mid +/- stat_[low/high],
otherwise, they are set to the values returned by stat_[low/high]. Defaults to True if stat_[low/high] are standard deviations.
line : boolean, optional
If True, draw a line that follows the statistic defined in line_stat.
xlim : tuple-like, optional
Defines the limits of the x-axis, it must contain two elements (lower and higher limits).
ylim : tuple-like, optional
Defines the limits of the y-axis, it must contain two elements (lower and higher limits).
xinvert : bool or list, optional
If True, inverts the x-axis.
yinvert : bool or list, optional
If True, inverts the y-axis.
xlog : bool or list, optional
If True, the scale of the x-axis is logarithmic.
ylog : bool or list, optional
If True, the scale of the y-axis is logarithmic.
title : str, optional
Sets the title of the plot
xlabel : str, optional
Sets the label of the x-axis.
ylabel : str, optional
Sets the label of the y-axis.
label : str, optional
Sets the label for the plot.
lab_loc : int, optional
Defines the position of the legend
ax : pyplot.Axes, optional
Use the given axes to make the plot, defaults to the current axes.
grid : boolean, optional
If not given defaults to the value defined in splotch.Params.
line_kw / band_kw : dict, optional
Explicit dictionaries of kwargs passed to the line and band plotting calls, respectively. Valid kwargs are Line2D properties.
**kwargs: Line2D properties, optional
kwargs are used to specify matplotlib specific properties such as linecolor, linewidth, antialiasing, etc.
A list of available `Line2D` properties can be found here:
https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
Returns
-------
None
"""
from splotch.base_func import axes_handler,bin_axis,plot_finalizer
import numpy as np
from numbers import Number
import scipy.stats as stats
from numpy import percentile
from functools import partial
import matplotlib.colors as clr
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
from warnings import warn
if ax is not None:
old_axes=axes_handler(ax)
else:
ax=gca()
old_axes=ax
if ylog is None:
from splotch.defaults import Params
ylog=Params.hist1D_yaxis_log
if bins is None:
bins=int((len(x))**0.4)
if 'linewidth' not in band_kw.keys():
band_kw['linewidth']=0
if 'alpha' not in band_kw.keys():
band_kw['alpha']=0.4
# Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
#band_par={**plot_kw, **kwargs} # For Python > 3.5
band_kw.update(kwargs)
# Check stat_low/stat_high arguments
band_stat=np.array([None,None])
band_multi=np.ones(2)
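# Sketch only (assumption, not the original continuation): the '[n]std' strings described in the
# docstring could be split into a 'std' statistic and a multiplier roughly like this:
#   import re
#   for k, stat in enumerate([stat_low, stat_high]):
#       if isinstance(stat, str) and stat.endswith('std'):
#           m = re.fullmatch(r'(\d*\.?\d*)std', stat)
#           if m:
#               band_stat[k] = 'std'
#               band_multi[k] = float(m.group(1)) if m.group(1) else 1.0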
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 09:47:10 2020
@author: grat05
"""
import sys
sys.path.append('../../../')
import pickle
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pandas as pd
import arviz as az
import atrial_model
from atrial_model.iNa.define_sims import exp_parameters
from atrial_model.iNa.model_setup import model_param_names
import atrial_model.run_sims_functions
from atrial_model.run_sims import calc_results
from atrial_model.iNa.define_sims import sim_fs, datas, keys_all
from atrial_model.iNa.model_setup import model_params_initial, mp_locs, sub_mps, model
from multiprocessing import Pool
from functools import partial
import os
plot_trace = True
plot_sim = False
plot_regressions = False
plot_pymc_diag = False
from SaveSAP import savePlots,setAxisSizePlots
sizes = {'logp': (3.5, 3.5), 'model_param_intercept': (3.5, 3.5), 'b_temp': (3.5, 3.5),
'paper_eff Sakakibara et al': (3.5, 3.5), 'paper_eff Cai et al': (3.5,3.5),
'paper_eff Feng et al': (3.5, 3.5), 'paper_eff Schneider et al': (3.5, 3.5),
'paper_eff Lalevée et al': (3.5, 3.5), 'paper_eff Wettwer et al': (3.5, 3.5),
'paper_eff_sd': (3.5, 3.5), 'model_param_sd': (3.5, 3.5),
'model_params_legend': (2, 6), 'error_sd': (3.5, 3.5), 'sim_groups_legend': (2, 6),
'GNaFactor': (3.5, 2), 'baselineFactor': (3.5, 2), 'mss_tauFactor': (3.5, 2),
'mss_shiftFactor': (3.5, 2), 'tm_maxFactor': (3.5, 2), 'tm_tau1Factor': (3.5, 2),
'tm_shiftFactor': (3.5, 2), 'tm_tau2Factor': (3.5, 2), 'hss_tauFactor': (3.5, 2),
'hss_shiftFactor': (3.5, 2), 'thf_maxFactor': (3.5, 2), 'thf_shiftFactor': (3.5, 2),
'thf_tau1Factor': (3.5, 2), 'thf_tau2Factor': (3.5, 2), 'ths_maxFactor': (3.5, 2),
'ths_shiftFactor': (3.5, 2), 'ths_tau1Factor': (3.5, 2), 'ths_tau2Factor': (3.5, 2),
'Ahf_multFactor': (3.5, 2), 'jss_tauFactor': (3.5, 2), 'jss_shiftFactor': (3.5, 2),
'tj_maxFactor': (3.5, 2), 'tj_shiftFactor': (3.5, 2), 'tj_tau2Factor': (3.5, 2),
'tj_tau1Factor': (3.5, 2),
'model_param_corr': (6,6)}
#setAxisSizePlots(sizes)
#savePlots('R:/Hund/DanielGratz/atrial_model/plots/latest/plots/', ftype='svg')
#setAxisSizePlots([(3.5,3.5)]*40)
#setAxisSizePlots((3,3))
atrial_model.run_sims_functions.plot1 = False #sim
atrial_model.run_sims_functions.plot2 = False #diff
atrial_model.run_sims_functions.plot3 = False #tau
burn_till =0#500#2000#500#800#40000#34_000#2500#31_000 #35000
max_loc = 2688
chain = 0#7
#burn_till = 60000
stack = False
if __name__ == '__main__':
class ObjContainer():
pass
#filename = 'mcmc_OHaraRudy_wMark_INa_0824_1344'
#filename = 'mcmc_OHaraRudy_wMark_INa_0919_1832_sc'
filename = 'mcmc_OHaraRudy_wMark_INa_0924_1205'
#filename = 'mcmc_OHaraRudy_wMark_INa_0831_1043_sc'
#filename = 'mcmc_OHaraRudy_wMark_INa_0829_1748'
#filename = 'mcmc_OHaraRudy_wMark_INa_0829_1334'
#filename = 'mcmc_OHaraRudy_wMark_INa_0827_1055'
#filename = 'mcmc_OHaraRudy_wMark_INa_0826_0958'
#filename = 'mcmc_OHaraRudy_wMark_INa_0821_1132'
#filename = 'mcmc_OHaraRudy_wMark_INa_0702_1656'
filename = 'mcmc_OHaraRudy_wMark_INa_1012_1149'
filename = 'mcmc_OHaraRudy_wMark_INa_1202_1906'
filename = 'mcmc_OHaraRudy_wMark_INa_1204_1201'
filename = 'mcmc_OHaraRudy_wMark_INa_1205_1323'
filename = 'mcmc_OHaraRudy_wMark_INa_1213_1353'
filename = 'mcmc_OHaraRudy_wMark_INa_1216_1109'
filename = 'mcmc_OHaraRudy_wMark_INa_1222_1754'
filename = 'mcmc_OHaraRudy_wMark_INa_0109_1802'
# filename = 'mcmc_OHaraRudy_wMark_INa_0121_1201'
# filename = 'mcmc_OHaraRudy_wMark_INa_0121_1450'
filename = 'mcmc_OHaraRudy_wMark_INa_0121_1531'
# filename = 'mcmc_OHaraRudy_wMark_INa_0122_1447'
filename = 'mcmc_OHaraRudy_wMark_INa_0122_1607'
filename = 'mcmc_OHaraRudy_wMark_INa_0125_1328'
filename = 'mcmc_OHaraRudy_wMark_INa_0125_1346'
filename = 'mcmc_OHaraRudy_wMark_INa_0127_1333'
filename = 'mcmc_OHaraRudy_wMark_INa_0127_1525'
filename = 'mcmc_OHaraRudy_wMark_INa_0128_1623'
# filename = 'mcmc_OHaraRudy_wMark_INa_0129_1549'
filename = 'mcmc_OHaraRudy_wMark_INa_0129_1601'
filename = 'mcmc_OHaraRudy_wMark_INa_0215_0722'
# filename = 'mcmc_OHaraRudy_wMark_INa_0319_1706'
#filename = 'mcmc_OHaraRudy_wMark_INa_0322_1334'
# filename = 'mcmc_OHaraRudy_wMark_INa_0322_1603'
# # filename = 'mcmc_OHaraRudy_wMark_INa_0323_0955'
# filename = 'mcmc_OHaraRudy_wMark_INa_0323_1628'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0324_1010'
# filename = 'mcmc_OHaraRudy_wMark_INa_0324_1609'
# # filename = 'test'
# filename = 'mcmc_OHaraRudy_wMark_INa_0325_1044'
# filename = 'mcmc_OHaraRudy_wMark_INa_0325_1300'
# filename = 'mcmc_OHaraRudy_wMark_INa_0325_1518'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0325_2128'
# filename = 'mcmc_OHaraRudy_wMark_INa_0326_1753'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0326_1721'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0326_2028'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0326_2030'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0329_0817'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0326_2030'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0329_1005'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0329_1730'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0330_0906'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0330_1020'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0330_1130'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0330_1212'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0330_1428'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0331_0817'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0331_1057'
# # filename = 'mcmc_OHaraRudy_wMark_INa_0402_1513'
# #filename = 'mcmc_OHaraRudy_wMark_INa_0407_1328'
# filename = 'mcmc_OHaraRudy_wMark_INa_0408_1723'
#filename = 'mcmc_OHaraRudy_wMark_INa_0215_0722'
# filename = 'mcmc_OHaraRudy_wMark_INa_0106_1257'
# filename = 'mcmc_OHaraRudy_wMark_INa_0106_1547'
# filename = 'mcmc_OHaraRudy_wMark_INa_0107_1145'
# filename = 'mcmc_OHaraRudy_wMark_INa_0108_0941'
# filename = 'mcmc_OHaraRudy_wMark_INa_0108_1108'
# filename = 'mcmc_OHaraRudy_wMark_INa_1223_1730'
# filename = 'mcmc_OHaraRudy_wMark_INa_1228_1411'
# filename = 'mcmc_OHaraRudy_wMark_INa_1230_1217'
# filename = 'mcmc_OHaraRudy_wMark_INa_0101_1124'
# filename = 'mcmc_OHaraRudy_wMark_INa_0104_1052'
# filename = 'mcmc_OHaraRudy_wMark_INa_0105_1517'
#filename = 'mcmc_OHaraRudy_wMark_INa_1229_1140'
# filename = 'mcmc_OHaraRudy_wMark_INa_1226_1624'
#filename = 'mcmc_OHaraRudy_wMark_INa_0627_1152'
#filename = 'mcmc_OHaraRudy_wMark_INa_0626_0808'
#filename = 'mcmc_OHaraRudy_wMark_INa_0606_0047'
#filename = 'mcmc_Koval_0601_1835'
# filename = 'mcmc_OHaraRudy_wMark_INa_0603_1051'
#filename = 'mcmc_OHara_0528_1805'
# filename = 'mcmc_OHaraRudy_wMark_INa_0528_1833'
#filename = 'mcmc_Koval_0526_1728'
#filename = 'mcmc_Koval_0519_1830'
base_dir = atrial_model.fit_data_dir+'/'
with open(base_dir+'/'+filename+'.pickle','rb') as file:
db_full = pickle.load(file)
db = db_full['trace']
db_post = db.warmup_posterior#posterior#
# with open(base_dir+'/'+filename+'_metadata.pickle','rb') as file:
# model_metadata = pickle.load(file)
# with open(base_dir+model_metadata.trace_pickel_file,'rb') as file:
# db = pickle.load(file)
# if db['_state_']['sampler']['status'] == 'paused':
# current_iter = db['_state_']['sampler']['_current_iter']
# current_iter -= db['_state_']['sampler']['_burn']
# for key in db.keys():
# if key != '_state_':
# db[key][chain] = db[key][chain][:current_iter]
# if stack:
# for key in db.keys():
# if key != '_state_' and key != 'AdaptiveSDMetropolis_model_param_adaptive_scale_factor'\
# and key != 'biophys_res':
# stacked = [db[key][chain] for chain in db[key]]
# db[key] = [np.concatenate(stacked)]
key_frame = db_full['key_frame']
sim_groups = key_frame['Sim Group']
group_names = key_frame['Sim Group'].unique()
sim_names = key_frame.index
pmid2idx = {}
curr_idx = 0
for key in key_frame.index:
pubmed_id = int(key[0].split('_')[0])
if not pubmed_id in pmid2idx:
pmid2idx[pubmed_id] = curr_idx
curr_idx += 1
# group_names = []
# sim_groups = []
# sim_names = []
# for key_group in db_full['keys_all']:
# group_names.append(exp_parameters.loc[key_group[0], 'Sim Group'])
# for key in key_group:
# sim_names.append(key)
# sim_groups.append(group_names[-1])
# bounds = np.array(db_full['param_bounds'])[db_full['mp_locs'], :]
model_param_index = np.arange(start=0,stop=len(mp_locs),step=1,dtype=int)
model_param_index = np.tile(model_param_index, (len(key_frame),1))
paper_idx = {}
curr_idx = 0
sim_idx = []
for key in key_frame.index:
pubmed_id = int(key[0].split('_')[0])
if not pubmed_id in paper_idx:
paper_idx[pubmed_id] = curr_idx
curr_idx += 1
sim_idx.append(paper_idx[pubmed_id])
sim_paper_idx = np.array(sim_idx)
model_param_intercept = db_post['model_param_intercept'][0]
b_temp = db_post['b_temp'][0]
temperature_arr = np.array(key_frame['temp ( K )'], dtype=float) -290.15
paper_eff = db_post['paper_eff'][0]
mean = np.array(model_param_intercept)[:,model_param_index] +\
np.array(b_temp)[:,model_param_index]*temperature_arr[...,None] +\
np.array(paper_eff)[:,sim_paper_idx,:]
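# mean: posterior draws of each simulation's model-parameter means, combining the global intercept,
# a linear temperature effect (temperature relative to 290.15 K) and a per-paper random effect.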
model_param_sd = db_post['model_param_sd'][0]
from SaveSAP import paultcolors
c_scheme = 'muted'
legend_labels = {
'8928874': 'Feng et al',
'21647304': 'Cai et al',
'12890054': 'Lalevée et al',
'23341576': 'Wettwer et al',
'1323431': 'Sakakibara et al',
'7971163': 'Schneider et al'
}
if plot_trace:
c_by_f_type = [0,1,2,3,4,5,6,5,2,3,4,6,5,5,4,6,5,5,6,2,3,4,6,5,5]
c_by_s_type = [0,1,2,2,3,3,3,3,4,4,5,5,5,5,6,6,6,6,9,7,7,8,8,8,8]
stroke_by_s_type = ['-',
'-',
'-', '--',
'-', '--', '-.', ':',
'-', '--',
'-', '--', '-.', ':',
'-', '--', '-.', ':',
'-',
'-', '--',
'-', '--', '-.', ':']
no_stroke = ['-']*len(stroke_by_s_type)
c_by_mp = [paultcolors[c_scheme][i] for i in c_by_s_type]
strokes = no_stroke#stroke_by_s_type
trace = 'logp'
trace_data = db.sample_stats['lp'][-1]
fig = plt.figure(trace)
ax = [fig.add_subplot(1,1,1)]
ax[0].plot(trace_data[burn_till:], c=c_by_mp[0])
# trace = 'deviance'
# trace_data = db[trace][chain]
# if stack:
# trace_data_all = trace_data
# else:
# trace_data_all = [db[trace][ch] for ch in db[trace].keys()]
# trace_data_all = np.concatenate(trace_data_all)
# fig = plt.figure(trace)
# ax = [fig.add_subplot(1,1,1)]
# # ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
# ax[0].plot(trace_data_all, c=c_by_mp[0])
# # ax[1].hist(trace_data[burn_till:], orientation='horizontal', color=c_by_mp[0])
# fig = plt.figure(trace+'_zoomed')
# ax = [fig.add_subplot(1,1,1)]
# # ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
# ax[0].plot(trace_data[burn_till:], c=c_by_mp[0])
trace = 'model_param_intercept'
trace_data = db_post[trace][chain]
fig = plt.figure(trace)
ax = [fig.add_subplot(1,2,1)]
ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
ax[1].yaxis.set_visible(False)
fig.subplots_adjust(hspace=0.1, wspace=0.05)
sd = np.array(np.std(trace_data[burn_till:,:], axis=0))
sorted_params = np.arange(len(sd))[np.argsort(-sd)]
for i in sorted_params:
ax[0].plot(trace_data[burn_till:,i], label=model_param_names[i],
c=c_by_mp[i], linestyle=strokes[i])
# _,_,hist = ax[1].hist(trace_data[:,i], orientation='horizontal', label=model_param_names[i])
density = stats.gaussian_kde(trace_data[burn_till:,i])
lower = np.min(trace_data[burn_till:,i])
upper = np.max(trace_data[burn_till:,i])
values = np.linspace(lower, upper, num=100)
ax[1].plot(density(values), values, label=model_param_names[i], c=c_by_mp[i])
ax[1].set_xlim(right=20)
# color = hist[0].get_facecolor()
# ax[0].axhline(bounds[i, 0]+i/100, c=color, label=model_param_names[i]+'_lower')
# ax[0].axhline(bounds[i, 1]+i/100, c=color, label=model_param_names[i]+'_upper')
handles, labels = ax[0].get_legend_handles_labels()
# ax[1].legend(handles, labels, frameon=False)
trace = 'b_temp'
trace_data = db_post[trace][chain]
fig = plt.figure(trace)
ax = [fig.add_subplot(1,2,1)]
ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
ax[1].yaxis.set_visible(False)
fig.subplots_adjust(hspace=0.1, wspace=0.05)
sd = np.array(np.std(trace_data[burn_till:,:], axis=0))
sorted_params = np.arange(len(sd))[np.argsort(-sd)]
for i in sorted_params:
ax[0].plot(trace_data[burn_till:,i], label=model_param_names[i],
c=c_by_mp[i], linestyle=strokes[i])
density = stats.gaussian_kde(trace_data[burn_till:,i])
lower = np.min(trace_data[burn_till:,i])
upper = np.max(trace_data[burn_till:,i])
values = np.linspace(lower, upper, num=100)
ax[1].plot(density(values), values, label=model_param_names[i], c=c_by_mp[i])
ax[1].set_xlim(right=60)
#ax[1].hist(trace_data[:,i], orientation='horizontal', label=model_param_names[i])
handles, labels = ax[0].get_legend_handles_labels()
# ax[1].legend(handles, labels, frameon=False)
trace = 'paper_eff'
trace_data = db_post[trace][chain]
for pmid, paper_idx in pmid2idx.items():
fig = plt.figure(trace+' '+legend_labels[str(pmid)])
ax = [fig.add_subplot(1,2,1)]
ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
ax[1].yaxis.set_visible(False)
fig.subplots_adjust(hspace=0.1, wspace=0.05)
sd = np.array(np.std(trace_data[burn_till:,paper_idx,:], axis=0))
sorted_params = np.arange(len(sd))[np.argsort(-sd)]
for i in sorted_params:
ax[0].plot(trace_data[burn_till:,paper_idx,i], label=model_param_names[i],
c=c_by_mp[i], linestyle=strokes[i])
# _,_,hist = ax[1].hist(trace_data[:,i], orientation='horizontal', label=model_param_names[i])
density = stats.gaussian_kde(trace_data[burn_till:,paper_idx,i])
lower = np.min(trace_data[burn_till:,paper_idx,i])
upper = np.max(trace_data[burn_till:,paper_idx,i])
values = np.linspace(lower, upper, num=100)
ax[1].plot(density(values), values, label=model_param_names[i], c=c_by_mp[i])
ax[1].set_xlim(right=10)
# color = hist[0].get_facecolor()
# ax[0].axhline(bounds[i, 0]+i/100, c=color, label=model_param_names[i]+'_lower')
# ax[0].axhline(bounds[i, 1]+i/100, c=color, label=model_param_names[i]+'_upper')
handles, labels = ax[0].get_legend_handles_labels()
# ax[1].legend(handles, labels, frameon=False)
trace = 'paper_eff_sd'
trace_data = db_post[trace][chain]
fig = plt.figure(trace)
ax = [fig.add_subplot(1,2,1)]
ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
ax[1].yaxis.set_visible(False)
fig.subplots_adjust(hspace=0.1, wspace=0.05)
sd = np.array(np.std(trace_data[burn_till:,:], axis=0))
sorted_params = np.arange(len(sd))[np.argsort(-sd)]
for i in sorted_params:
ax[0].plot(trace_data[burn_till:,i], label=model_param_names[i],
c=c_by_mp[i], linestyle=strokes[i])
# _,_,hist = ax[1].hist(trace_data[:,i], orientation='horizontal', label=model_param_names[i])
density = stats.gaussian_kde(trace_data[burn_till:,i])
lower = np.min(trace_data[burn_till:,i])
upper = np.max(trace_data[burn_till:,i])
values = np.linspace(lower, upper, num=100)
ax[1].plot(density(values), values, label=model_param_names[i], c=c_by_mp[i])
ax[1].set_xlim(right=60)
# color = hist[0].get_facecolor()
# ax[0].axhline(bounds[i, 0]+i/100, c=color, label=model_param_names[i]+'_lower')
# ax[0].axhline(bounds[i, 1]+i/100, c=color, label=model_param_names[i]+'_upper')
handles, labels = ax[0].get_legend_handles_labels()
trace = 'model_param_sd'
trace_data = db_post[trace][chain]
fig = plt.figure(trace)
ax = [fig.add_subplot(1,2,1)]
ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
ax[1].yaxis.set_visible(False)
fig.subplots_adjust(hspace=0.1, wspace=0.05)
sd = np.array(np.std(trace_data[burn_till:,:], axis=0))
sorted_params = np.arange(len(sd))[np.argsort(-sd)]
for i in sorted_params:
ax[0].plot(trace_data[burn_till:,i], label=model_param_names[i],
c=c_by_mp[i], linestyle=strokes[i])
density = stats.gaussian_kde(trace_data[burn_till:,i])
lower = np.min(trace_data[burn_till:,i])
upper = np.max(trace_data[burn_till:,i])
values = np.linspace(lower, upper, num=100)
ax[1].plot(density(values), values, label=model_param_names[i], c=c_by_mp[i])
ax[1].set_xlim(right=90)
# ax[1].hist(trace_sigma, orientation='horizontal', label=model_param_names[i])
handles, labels = ax[0].get_legend_handles_labels()
# ax[1].legend(handles, labels, frameon=False)
fig = plt.figure('model_params_legend')
order = [labels.index(mp_name) for mp_name in model_param_names]
handles = [handles[i] for i in order]
ax = fig.add_subplot(1,1,1)
ax.yaxis.set_visible(False)
ax.xaxis.set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.legend(handles, model_param_names, frameon=False)
trace = 'error_sd'
trace_data = db_post[trace][chain]
fig = plt.figure(trace)
ax = [fig.add_subplot(2,2,1)]
ax.append(fig.add_subplot(2,2,2, sharey=ax[0]))
ax.append(fig.add_subplot(2,2,3, sharex=ax[0]))
ax.append(fig.add_subplot(2,2,4, sharex=ax[1], sharey=ax[2]))
ax[0].xaxis.set_visible(False)
ax[0].spines['bottom'].set_visible(False)
ax[1].xaxis.set_visible(False)
ax[1].xaxis.set_visible(False)
ax[1].spines['bottom'].set_visible(False)
ax[1].yaxis.set_visible(False)
ax[3].yaxis.set_visible(False)
fig.subplots_adjust(hspace=0.1, wspace=0.05)
for i in range(trace_data.shape[1]):
if group_names[i] == 'iv curve':
ax0 = 0
ax1 = 1
else:
ax0 = 2
ax1 = 3
color = paultcolors[c_scheme][i]
ax[ax0].plot(trace_data[burn_till:,i], label=group_names[i], c=color)
density = stats.gaussian_kde(trace_data[burn_till:,i])
lower = np.min(trace_data[burn_till:,i])
upper = np.max(trace_data[burn_till:,i])
values = np.linspace(lower, upper, num=100)
ax[ax1].plot(density(values), values, label=group_names[i], c=color)
#ax[1].hist(trace_sigma, orientation='horizontal', label=group_names[i])
handles, labels = ax[0].get_legend_handles_labels()
handles1, labels1 = ax[2].get_legend_handles_labels()
handles += handles1
labels += labels1
fig = plt.figure('sim_groups_legend')
ax = fig.add_subplot(1,1,1)
ax.yaxis.set_visible(False)
ax.xaxis.set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.legend(handles, labels, frameon=False)
#ax[1].legend(handles, labels, frameon=False)
trace = 'model_param'
trace_data = db_post[trace][chain]
for param_i in range(trace_data.shape[2]):
fig = plt.figure(model_param_names[param_i])
ax = [fig.add_subplot()]
# ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
sd = np.std(trace_data[burn_till:,:,param_i], axis=0)
sorted_sims = np.arange(len(sd))[np.argsort(-sd)]
for sim_i in sorted_sims:
label = sim_groups[sim_i]
group_i = list(group_names).index(label)
ax[0].plot(trace_data[burn_till:,sim_i,param_i], c=paultcolors[c_scheme][group_i], label=label)#, label=model_param_names[sim_i])
# ax[1].hist(1/np.sqrt(trace_data[:,i]), orientation='horizontal', label=group_names[i])
handles, labels = ax[0].get_legend_handles_labels()
by_label = dict(zip(labels, handles))
#ax[0].legend(by_label.values(), by_label.keys(), frameon=False)
trace = 'model_param_corr'
trace_data = db_post[trace][chain]
fig = plt.figure(trace)
ax = np.empty((len(mp_locs),len(mp_locs)), dtype=object)
avgtrace = np.mean(trace_data,axis=0)
for i in range(len(mp_locs)):
for j in range(len(mp_locs)):
sharex = ax[i-1,j] if i-1 >= 0 else None
sharey = ax[i,j-1] if j-1 >= 0 else None
ax[i,j] = fig.add_subplot(*ax.shape,
i*ax.shape[0]+j+1,
#sharex=sharex,
sharey=sharey)
ax[i,j].xaxis.set_visible(False)
ax[i,j].spines['bottom'].set_visible(False)
ax[i,j].yaxis.set_visible(False)
ax[i,j].spines['left'].set_visible(False)
ax[i,j].set_ylim(top=1, bottom=-1)
if i <= j:
ax[i,j].imshow([[avgtrace[i,j]]], vmin=-1, vmax=1, cmap='bwr')
#ax[i,j].plot(trace_data[burn_till:, i,j])
else:
density = stats.gaussian_kde(trace_data[burn_till:,i,j])
lower = np.min(trace_data[burn_till:,i,j])
upper = np.max(trace_data[burn_till:,i,j])
values = np.linspace(lower, upper, num=100)
ax[i,j].plot(density(values), values)
ax[i,j].axhline(c='black', alpha=0.5)
#for i in range(len(mp_locs)):
# ax[i,0].yaxis.set_visible(True)
# ax[i,0].spines['left'].set_visible(True)
#for j in range(len(mp_locs)):
# ax[-1,j].xaxis.set_visible(True)
# ax[-1,j].spines['bottom'].set_visible(True)
# trace = 'biophys_res'
# trace_data = db[trace][0]
# fig = plt.figure(trace)
# ax = [fig.add_subplot(1,2,1)]
# ax[0].plot(trace_data)
# ax.append(fig.add_subplot(1,2,2, sharey=ax[0]))
# for i in range(trace_data.shape[1]):
# ax[1].hist(trace_data[:,i], orientation='horizontal')
if plot_sim:
try:
with open(base_dir+'/plot_mcmc_cache.pickle','rb') as file:
cache = pickle.load(file)
except FileNotFoundError:
cache = {'overall':{
'model_param': {},
'results': {}},
'individual':{
'model_param': {},
'results': {}}
}
# b_temp = np.zeros_like(mp_locs, dtype=float)
# b_temp[[1,4,10,14,21]] = -0.7/10
# b_temp[[3,6,9,11,15,20,22]] = 0.4
# b_temp[[3]] = -0.4
b_temp = db_post['b_temp'][chain][max_loc]
# for i in range(db['b_temp'][chain].shape[1]):
# trace = db['b_temp'][chain][burn_till:, i]
# f_sig = np.sum(trace > 0)/len(trace)
# if not (f_sig < 0.05 or f_sig > 0.95):
# b_temp[i] = 0
# b_temp[[2]] = 0
# b_temp[[10]] = -0.2
# b_temp[0] = 0.2
intercept = db_post['model_param_intercept'][chain][max_loc]
num_sims = len(key_frame)#sum(map(len, model_metadata.keys_all))
model_params = {}
fit_keys = key_frame.index #[key for keys in model_metadata.keys_all for key in keys]
defined_keys = [key for keys in keys_all for key in keys]
good_keys = [key for key in fit_keys if key in defined_keys]
sim_fs_good = {key: sim_fs[key] for key in good_keys}
for key in good_keys:
temperature = exp_parameters.loc[key, 'temp ( K )'] -290.15
b_temp_eff = b_temp * temperature
sub_mps = np.array(intercept) + np.array(b_temp_eff)
#sub_mps[18] = 1.7
model_params[key] = sub_mps
model_params = {key: mp for key, mp in model_params.items() if key in good_keys}
model_param_mean = db_post['model_param'][chain][max_loc]
# model_param_mean[:,2] = 0
model_param_sim_mean = {key: model_param_mean[k]
for k, key in enumerate(fit_keys)
if key in good_keys}
use_cache = False
for key, mp in model_params.items():
if key in cache['overall']['model_param']:
if not np.array_equal(cache['overall']['model_param'][key], mp):
use_cache = False
break
else:
use_cache = False
break
if use_cache:
for key, mp in model_param_sim_mean.items():
if key in cache['individual']['model_param']:
if not np.array_equal(cache['individual']['model_param'][key], mp):
use_cache = False
break
else:
use_cache = False
break
if use_cache:
res_overall = cache['overall']['results']
res_indiv = cache['individual']['results']
else:
with Pool() as proc_pool:
# proc_pool = None
res_overall = calc_results(model_params, sim_funcs=sim_fs_good,\
model_parameters_full=model_params_initial,\
mp_locs=mp_locs, data=datas,error_fill=0,\
pool=proc_pool)
res_indiv = calc_results(model_param_sim_mean, sim_funcs=sim_fs_good,\
model_parameters_full=model_params_initial,\
mp_locs=mp_locs, data=datas,error_fill=0,\
pool=proc_pool)
cache['overall'] = {'model_param': model_params,
'results': res_overall}
cache['individual'] = {'model_param': model_param_sim_mean,
'results': res_indiv}
with open(base_dir+'/plot_mcmc_cache.pickle','wb') as file:
pickle.dump(cache, file)
text_leg_lab = True
handles, labels = [], []
for key in good_keys:
figname = exp_parameters.loc[key, 'Sim Group']
figname = figname if not pd.isna(figname) else 'Missing Label'
if text_leg_lab:
paper_key = key[0].split('_')[0]
color = list(legend_labels.keys()).index(paper_key)
color = paultcolors[c_scheme][color]
label = legend_labels[paper_key]
else:
label = key
color = None
if figname == 'iv curve':
plot_data = datas[key].copy()
plot_data[:,1] = np.sign(plot_data[:,1])*np.square(plot_data[:,1])
sim_overall = np.sign(res_overall[key])*np.square(res_overall[key])
sim_individ = np.sign(res_indiv[key])*np.square(res_indiv[key])
else:
plot_data = datas[key]
sim_overall = res_overall[key]
sim_individ = res_indiv[key]
fig = plt.figure(figname + " overall fit")
ax = fig.get_axes()
if len(ax) == 0:
fig.add_subplot()
ax = fig.get_axes()
ax = ax[0]
ax.plot(plot_data[:,0], sim_overall, label=label, color=color)
ax.scatter(plot_data[:,0], plot_data[:,1], color=color)
if text_leg_lab:
handles_labels = ax.get_legend_handles_labels()
handles += handles_labels[0]
labels += handles_labels[1]
else:
ax.legend(frameon=False)
fig = plt.figure(figname + " individual fit")
ax = fig.get_axes()
if len(ax) == 0:
fig.add_subplot()
ax = fig.get_axes()
ax = ax[0]
ax.plot(plot_data[:,0], sim_individ, label=label, color=color)
ax.scatter(plot_data[:,0], plot_data[:,1], color=color)
if text_leg_lab:
idx = [labels.index(name) for name in legend_labels.values()]
handles = [handles[i] for i in idx]
labels = [labels[i] for i in idx]
fig = plt.figure('sim_fits_legend')
ax = fig.add_subplot(1,1,1)
ax.yaxis.set_visible(False)
ax.xaxis.set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.legend(handles, labels, frameon=False)
if plot_regressions:
group_is = np.array([list(group_names).index(sim_grp) for sim_grp in sim_groups])
temperatures = [exp_parameters.loc[key, 'temp ( K )'] -290.15 for key in sim_names]
intercept = np.median(db['model_param_mean'][chain][burn_till:], axis=0)
b_temp = np.median(db['b_temp'][chain][burn_till:], axis=0)
import os,re,time,glob
import numpy as np
import pickle as pickle
import matplotlib.pylab as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import scipy
from scipy.signal import fftconvolve
from scipy.ndimage.filters import maximum_filter,minimum_filter,median_filter,gaussian_filter
from scipy import ndimage, stats
from skimage import morphology, restoration, measure
from skimage.segmentation import random_walker
from scipy.ndimage import gaussian_laplace
import cv2
import multiprocessing as mp
from sklearn.decomposition import PCA
from scipy.ndimage.interpolation import map_coordinates
from . import get_img_info, corrections, alignment_tools
from .External import Fitting_v3
from . import _correction_folder,_temp_folder,_distance_zxy,_sigma_zxy,_image_size, _allowed_colors
# generate common colors
# generate my colors
from matplotlib.colors import ListedColormap
# red
Red_colors = np.ones([256,4])
Red_colors[:,1] = np.linspace(1,0,256)
Red_colors[:,2] = np.linspace(1,0,256)
myReds = ListedColormap(Red_colors)
# blue
Blue_colors = np.ones([256,4])
Blue_colors[:,0] = np.linspace(1,0,256)
Blue_colors[:,1] = np.linspace(1,0,256)
myBlues = ListedColormap(Blue_colors)
# green
Green_colors = np.ones([256,4])
Green_colors[:,0] = np.linspace(1,0,256)
Green_colors[:,2] = np.linspace(1,0,256)
myGreens = ListedColormap(Green_colors)
_myCmaps = [myReds, myBlues, myGreens]
def partition_map(list_,map_, enumerate_all=False):
"""
Inputs
takes a list [e1,e2,e3,e4,e5,e6] and a map (a list of indices [0,0,1,0,1,2]). map can be a list of symbols too. ['aa','aa','bb','aa','bb','cc']
Output
returns a sorted list of lists, e.g. [[e1, e2,e4],[e3,e5],[e6]]
"""
list__=np.array(list_)
map__=np.array(map_)
if enumerate_all:
return [list(list__[map__==_i]) for _i in np.arange(0, np.max(map__)+1)]
else:
return [list(list__[map__==element]) for element in np.unique(map__)]
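# Example (from the docstring above): partition_map(['e1','e2','e3','e4','e5','e6'], [0,0,1,0,1,2])
# returns [['e1','e2','e4'], ['e3','e5'], ['e6']].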
def old_gauss_ker(sig_xyz=[2,2,2],sxyz=16,xyz_disp=[0,0,0]):
'''Create a gaussian kernel, return standard gaussian level within sxyz size and sigma 2,2,2'''
dim = len(xyz_disp)
xyz=np.indices([sxyz+1]*dim)
print(sxyz)
for i in range(len(xyz.shape)-1):
sig_xyz=np.expand_dims(sig_xyz,axis=-1)
xyz_disp=np.expand_dims(xyz_disp,axis=-1)
im_ker = np.exp(-np.sum(((xyz-xyz_disp-sxyz/2.)/sig_xyz**2)**2,axis=0)/2.)
return im_ker
def gauss_ker(sig_xyz=[2,2,2],sxyz=16,xyz_disp=[0,0,0]):
"""Faster version of gaussian kernel"""
dim = len(xyz_disp)
xyz=np.swapaxes(np.indices([sxyz+1]*dim), 0,dim)
return np.exp(-np.sum(((xyz-np.array(xyz_disp)-sxyz/2.)/np.array(sig_xyz)**2)**2,axis=dim)/2.)
def gaussian_kernel_2d(center_xy, sigma_xy=[2,2], radius=8):
"""Function to generate gaussian kernel in 2d space"""
## check inputs
if len(center_xy) != 2:
raise IndexError(f"center_xy should be length=2 list or array")
if len(sigma_xy) != 2:
raise IndexError(f"sigma_xy should be length=2 list or array")
radius = int(radius)
if radius < 3 * max(sigma_xy): # if radius is smaller than 3-sigma, expand
radius = int(np.ceil(3 * max(sigma_xy))) # keep radius an integer so np.indices receives integer sizes
xy_coords=np.swapaxes(np.indices([radius*2+1]*2), 0, 2)
return np.exp(-np.sum(((xy_coords-np.array(center_xy)-radius)/np.array(sigma_xy)**2)**2,axis=2)/2.)
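# Quick check (illustrative, values assumed): a centred kernel peaks at its middle pixel.
#   k = gaussian_kernel_2d(center_xy=[0, 0], sigma_xy=[2, 2], radius=8)
#   assert k.shape == (17, 17) and k.max() == k[8, 8]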
def add_source(im_,pos=[0,0,0],h=200,sig=[2,2,2],size_fold=10):
'''Impose a gaussian distribution with given position, height and sigma, onto an existing figure'''
im=np.array(im_,dtype=float)
pos = np.array(pos)
pos_int = np.array(pos,dtype=int)
xyz_disp = -pos_int+pos
im_ker = gauss_ker(sig, int(max(sig)*size_fold), xyz_disp)
im_ker_sz = np.array(im_ker.shape,dtype=int)
pos_min = np.array(pos_int-im_ker_sz/2, dtype=np.int)
pos_max = np.array(pos_min+im_ker_sz, dtype=np.int)
im_shape = np.array(im.shape)
def in_im(pos__):
pos_=np.array(pos__,dtype=np.int)
pos_[pos_>=im_shape]=im_shape[pos_>=im_shape]-1
pos_[pos_<0]=0
return pos_
pos_min_ = in_im(pos_min)
pos_max_ = in_im(pos_max)
pos_min_ker = pos_min_-pos_min
pos_max_ker = im_ker_sz+pos_max_-pos_max
slices_ker = tuple(slice(pm,pM) for pm,pM in zip(pos_min_ker,pos_max_ker))
slices_im = tuple(slice(pm,pM) for pm,pM in zip(pos_min_,pos_max_))
im[slices_im] += im_ker[slices_ker]*h
return im
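# Example (illustrative, values assumed): add a synthetic spot of height 200 at voxel (10, 20, 20)
# to a blank volume.
#   im0 = np.zeros((32, 64, 64))
#   im1 = add_source(im0, pos=[10, 20, 20], h=200, sig=[2, 2, 2])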
def subtract_source(im,pfit):
return add_source(im,pos=pfit[1:4],h=-pfit[0],sig=pfit[-3:])
def plus_source(im,pfit):
return add_source(im,pos=pfit[1:4],h=pfit[0],sig=pfit[-3:])
def sphere(center,radius,imshape=None):
"""Returns an int array (size: n x len(center)) with the xyz... coords of a sphere(elipsoid) of radius in imshape"""
radius_=np.array(radius,dtype=float)
if len(radius_.shape)==0:
radius_ = np.array([radius]*len(center),dtype=np.int)
xyz = np.array(np.indices(2*radius_+1),dtype=float)
radius__=np.array(radius_,dtype=float)
for i in range(len(xyz.shape)-1):
radius__=np.expand_dims(radius__,axis=-1)
xyz_keep = np.array(np.where(np.sum((xyz/radius__-1)**2,axis=0)<1))
xyz_keep = xyz_keep-np.expand_dims(np.array(radius_,dtype=int),axis=-1)+np.expand_dims(np.array(center,dtype=int),axis=-1)
xyz_keep = xyz_keep.T
if imshape is not None:
xyz_keep=xyz_keep[np.all((xyz_keep>=0)&(xyz_keep<np.expand_dims(imshape,axis=0)),axis=-1)]
return xyz_keep
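# Example (illustrative, values assumed): voxel coordinates of a radius-2 ball centred at (5, 5, 5),
# clipped to a 10x10x10 volume.
#   vox = sphere(center=[5, 5, 5], radius=2, imshape=(10, 10, 10))
#   # vox is an (n, 3) int array with all entries in [3, 7]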
def grab_block(im,center,block_sizes):
dims = im.shape
slices = []
def in_dim(c,dim):
c_ = c
if c_<0: c_=0
if c_>dim: c_=dim
return c_
for c,block,dim in zip(center,block_sizes,dims):
block_ = int(block/2)
c=int(c)
c_min,c_max = in_dim(c-block_,dim),in_dim(c+block-block_,dim)
slices.append(slice(c_min,c_max))
slices.append(Ellipsis)
return im[tuple(slices)] # index with a tuple of slices (indexing with a list is deprecated in numpy)
# fit single gaussian
def fitsinglegaussian_fixed_width(data,center,radius=10,n_approx=10,width_zxy=_sigma_zxy):
"""Returns (height, x, y,z, width_x, width_y,width_z,bk)
the gaussian parameters of a 2D distribution found by a fit"""
data_=np.array(data,dtype=float)
dims = np.array(data_.shape)
if center is not None:
center_z,center_x,center_y = center
else:
xyz = np.array(list(map(np.ravel,np.indices(data_.shape))))
data__=data_[xyz[0],xyz[1],xyz[2]]
args_high = np.argsort(data__)[-n_approx:]
center_z,center_x,center_y = np.median(xyz[:,args_high],axis=-1)
xyz = sphere([center_z,center_x,center_y],radius,imshape=dims).T
if len(xyz[0])>0:
data__=data_[xyz[0],xyz[1],xyz[2]]
sorted_data = np.sort(data__)#np.sort(np.ravel(data__))
bk = np.median(sorted_data[:n_approx])
height = (np.median(sorted_data[-n_approx:])-bk)
width_z,width_x,width_y = np.array(width_zxy)
params_ = (height,center_z,center_x,center_y,bk)
def gaussian(height,center_z, center_x, center_y,
bk=0,
width_z=width_zxy[0],
width_x=width_zxy[1],
width_y=width_zxy[2]):
"""Returns a gaussian function with the given parameters"""
width_x_ = np.abs(width_x)
width_y_ = np.abs(width_y)
width_z_ = np.abs(width_z)
height_ = np.abs(height)
bk_ = np.abs(bk)
def gauss(z,x,y):
g = bk_+height_*np.exp(
-(((center_z-z)/width_z_)**2+((center_x-x)/width_x_)**2+
((center_y-y)/width_y_)**2)/2.)
return g
return gauss
def errorfunction(p):
f=gaussian(*p)(*xyz)
g=data__
#err=np.ravel(f-g-g*np.log(f/g))
err=np.ravel(f-g)
return err
p, success = scipy.optimize.leastsq(errorfunction, params_)
p=np.abs(p)
p = np.concatenate([p,width_zxy])
#p[:1:4]+=0.5
return p,success
else:
return None,None
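# Minimal usage sketch (assumption: `im` is a 3D ndarray and `center` a rough [z, x, y] guess):
#   p, success = fitsinglegaussian_fixed_width(im, center, radius=10)
#   # p -> (height, z, x, y, background, width_z, width_x, width_y) if the fit succeeded, else None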
def fit_seed_points_base(im, centers, width_z=_sigma_zxy[0], width_xy=_sigma_zxy[1],
radius_fit=5, n_max_iter = 10, max_dist_th=0.25):
'''Basic function used for multiple gaussian fitting, given image:im, seeding_result:centers '''
print("Fitting:" +str(len(centers[0]))+" points")
z,x,y = centers # fitting kernels provided by previous seeding
if len(x)>0:
#estimate height
#gfilt_size=0.75
#filt_size=3
#im_plt = gaussian_filter(im,gfilt_size)
#max_filt = maximum_filter(im_plt,filt_size)
#min_filt = minimum_filter(im_plt,filt_size)
#h = max_filt[z,x,y]-min_filt[z,x,y]
#inds = np.argsort(h)[::-1]
#z,x,y = z[inds],x[inds],y[inds]
zxy = np.array([z,x,y],dtype=int).T
ps = []
im_subtr = np.array(im,dtype=float)
for center in zxy:
p,success = fitsinglegaussian_fixed_width(im_subtr,center,radius=radius_fit,n_approx=10,width_zxy=[width_z,width_xy,width_xy])
if p is not None: # If got any successful fitting, substract fitted profile
ps.append(p)
im_subtr = subtract_source(im_subtr,p)
im_add = np.array(im_subtr)
max_dist=np.inf
n_iter = 0
while max_dist > max_dist_th:
ps_1=np.array(ps)
ps_1=ps_1[np.argsort(ps_1[:,0])[::-1]]
ps = []
ps_1_rem=[]
for p_1 in ps_1:
center = p_1[1:4]
im_add = plus_source(im_add,p_1)
p,success = fitsinglegaussian_fixed_width(im_add,center,radius=radius_fit,n_approx=10,width_zxy=[width_z,width_xy,width_xy])
if p is not None:
ps.append(p)
ps_1_rem.append(p_1)
im_add = subtract_source(im_add,p)
ps_2=np.array(ps)
ps_1_rem=np.array(ps_1_rem)
dif = ps_1_rem[:,1:4]-ps_2[:,1:4]
max_dist = np.max(np.sum(dif**2,axis=-1))
n_iter+=1
if n_iter>n_max_iter:
break
return ps_2
else:
return np.array([])
## Fit bead centers
def get_STD_centers(im, seeds=None, th_seed=150,
dynamic=False, seed_by_per=False, th_seed_percentile=95,
min_num_seeds=1,
remove_close_pts=True, close_threshold=0.1, fit_radius=5,
sort_by_h=False, save=False, save_folder='', save_name='',
plt_val=False, force=False, verbose=False):
'''Fit beads for one image:
Inputs:
im: image, ndarray
th_seed: threshold for seeding, float (default: 150)
dynamic: whether to do dynamic seeding, bool (default: False)
th_seed_percentile: intensity percentile for seeding, float (default: 95)
remove_close_pts: whether to remove points very close to each other, bool (default: True)
close_threshold: threshold for removing duplicates within a distance, float (default: 0.1)
fit_radius
sort_by_h: whether sort fitted points by height, bool (default:False)
plt_val: whether making plot, bool (default: False)
save: whether save fitting result, bool (default: False)
save_folder: full path of save folder, str (default: None)
save_name: full name of save file, str (default: None)
force: whether to force fitting despite a saved file, bool (default: False)
verbose: say something!, bool (default: False)
Outputs:
beads: fitted spot centers [z, x, y], n by 3 array'''
import os
import pickle as pickle
if not force and os.path.exists(save_folder+os.sep+save_name) and save_name != '':
if verbose:
print("- loading file:,", save_folder+os.sep+save_name)
beads = pickle.load(open(save_folder+os.sep+save_name, 'rb'))
if verbose:
print("--", len(beads), " of beads loaded.")
return beads
else:
# seeding
if seeds is None:
seeds = get_seed_in_distance(im, center=None, dynamic=dynamic,
th_seed_percentile=th_seed_percentile,
seed_by_per=seed_by_per,
min_dynamic_seeds=min_num_seeds,
gfilt_size=0.75, filt_size=3,
th_seed=th_seed, hot_pix_th=4, verbose=verbose)
# fitting
fitter = Fitting_v3.iter_fit_seed_points(im, seeds.T, radius_fit=5)
fitter.firstfit()
pfits = fitter.ps
#pfits = visual_tools.fit_seed_points_base_fast(im,seeds.T,width_z=1.8*1.5/2,width_xy=1.,radius_fit=5,n_max_iter=3,max_dist_th=0.25,quiet=not verbose)
# get coordinates for fitted beads
if len(pfits) > 0:
if sort_by_h:
_intensity_order = np.argsort(np.array(pfits)[:,0])
beads = np.array(pfits)[np.flipud(_intensity_order), 1:4]
else:
beads = np.array(pfits)[:, 1:4]
# remove very close spots
if remove_close_pts:
remove = np.zeros(len(beads), dtype=np.bool)
for i, bead in enumerate(beads):
if np.isnan(bead).any() or np.sum(np.sum((beads-bead)**2, axis=1) < close_threshold) > 1:
remove[i] = True
if (bead < 0).any() or (bead > np.array(im.shape)).any():
remove[i] = True
beads = beads[remove==False]
else:
beads = None
if verbose:
print(f"- fitting {len(pfits)} points")
print(
f"-- {np.sum(remove)} points removed given smallest distance {close_threshold}")
# make plot if required
if plt_val:
plt.figure()
plt.imshow(np.max(im, 0), interpolation='nearest')
plt.plot(beads[:, -1], beads[:, -2], 'or')
plt.show()
# save to pickle if specified
if save:
if not os.path.exists(save_folder):
os.makedirs(save_folder)
if verbose:
print("-- saving fitted spots to",
save_folder+os.sep+save_name)
pickle.dump(beads[:,-3:], open(save_folder+os.sep+save_name, 'wb'))
return beads
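# Minimal usage sketch (assumption: `im` is a 3D ndarray of a bead image):
#   beads = get_STD_centers(im, th_seed=150, remove_close_pts=True, verbose=True)
#   # beads -> n-by-3 array of fitted [z, x, y] centres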
def get_seed_points_base(im, gfilt_size=0.75, background_gfilt_size=10, filt_size=3,
th_seed=300, hot_pix_th=0, return_h=False):
"""Base function to do seeding"""
# gaussian-filter + max-filter
if gfilt_size:
max_im = gaussian_filter(im,gfilt_size)
else:
max_im = im
# gaussian_filter (large) + min_filter
if background_gfilt_size:
min_im = gaussian_filter(im,background_gfilt_size)
else:
min_im = im
max_filt = np.array(maximum_filter(max_im,filt_size), dtype=np.int64)
min_filt = np.array(minimum_filter(min_im,filt_size), dtype=np.int64)
# get candidate seed points
im_plt2 = (max_filt==max_im) & (min_filt!=min_im) & (min_filt!=0)
z,x,y = np.where(im_plt2)
keep = (max_filt[z,x,y]-min_filt[z,x,y])>th_seed#/np.array(max_filt[z,x,y],dtype=float)>0.5
x,y,z = x[keep],y[keep],z[keep]
h = max_filt[z,x,y]-min_filt[z,x,y]
#get rid of hot pixels
if hot_pix_th>0:
xy_str = [str([x_,y_]) for x_,y_ in zip(x,y)]
xy_str_,cts_ = np.unique(xy_str,return_counts=True)
keep = np.array([xy_str__ not in xy_str_[cts_>hot_pix_th] for xy_str__ in xy_str],dtype=bool)
x,y,z = x[keep],y[keep],z[keep]
h = h[keep]
centers = np.array([z,x,y])
if return_h:
centers = np.array([z,x,y,h])
return centers
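# Minimal usage sketch (assumption: `im` is a 3D ndarray):
#   centers = get_seed_points_base(im, gfilt_size=0.75, filt_size=3, th_seed=300)
#   # centers -> 3-by-n array of candidate [z, x, y] seed positions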
def fit_seed_points_base_fast(im,centers,width_z=_sigma_zxy[0],width_xy=_sigma_zxy[1],radius_fit=5,n_max_iter = 10,max_dist_th=0.25, quiet=False):
if not quiet:
print("Fitting:" +str(len(centers[0]))+" points")
z,x,y = centers
if len(x)>0:
zxy = np.array([z,x,y],dtype=int).T
ps = []
im_subtr = np.array(im,dtype=float)
for center in zxy:
p,success = fitsinglegaussian_fixed_width(im_subtr,center,radius=radius_fit,n_approx=5,width_zxy=[width_z,width_xy,width_xy])
if p is not None:
ps.append(p)
im_add = np.array(im_subtr)
max_dist=np.inf
n_iter = 0
while max_dist>max_dist_th:
ps_1=np.array(ps)
ps_1=ps_1[np.argsort(ps_1[:,0])[::-1]]
ps = []
ps_1_rem=[]
for p_1 in ps_1:
center = p_1[1:4]
p,success = fitsinglegaussian_fixed_width(im_add,center,radius=5,n_approx=10,width_zxy=[1.8,1.,1.])
if p is not None:
ps.append(p)
ps_1_rem.append(p_1)
ps_2=np.array(ps)
ps_1_rem=np.array(ps_1_rem)
dif = ps_1_rem[:,1:4]-ps_2[:,1:4]
max_dist = np.max(np.sum(dif**2,axis=-1))
n_iter+=1
if n_iter>n_max_iter:
break
return ps_2
else:
return np.array([])
# fast alignment of fitted items which are bright and sparse (like beads)
def beads_alignment_fast(beads, ref_beads, unique_cutoff=2., check_outlier=True, outlier_sigma=1., verbose=True):
'''beads_alignment_fast, for finding pairs of beads when they are sparse
Inputs:
beads: ndarray of bead coordinates, num_beads by [z,x,y], n-by-3 numpy ndarray
ref_beads: corresponding coordinates for beads in the reference frame, n-by-3 numpy ndarray
unique_cutoff: distance threshold within which a pair is assumed to be unique, float
check_outlier: whether to use Delaunay triangulation neighbors to check for outliers
outlier_sigma: number of sigmas used as the threshold when checking outliers, positive float
verbose: whether say something during alignment, bool
Outputs:
_paired_beads: beads that find their pairs in ref frame, n-by-3 numpy array
_paired_ref_beads: ref_beads that find their pairs (sorted), n-by-3 numpy array
_shifts: 3d shift of beads (bead - ref_bead), n-by-3 numpy array
'''
# initialize
_paired_beads, _paired_ref_beads, _shifts = [], [], []
# loop through all beads in ref frame
for _rb in ref_beads:
_competing_ref_beads = ref_beads[np.sqrt(np.sum((ref_beads - _rb)**2,1)) < unique_cutoff]
if len(_competing_ref_beads) > 1: # in this case, other ref_bead exist within cutoff
continue
else:
_candidate_beads = beads[np.sqrt(np.sum((beads - _rb)**2,1)) < unique_cutoff]
if len(_candidate_beads) == 1: # if unique pairs identified
_paired_beads.append(_candidate_beads[0])
_paired_ref_beads.append(_rb)
_shifts.append(_candidate_beads[0] - _rb)
# convert to numpy array
_paired_beads = np.array(_paired_beads)
_paired_ref_beads = np.array(_paired_ref_beads)
_shifts = np.array(_shifts)
# remove suspicious shifts
for _j in range(np.shape(_shifts)[1]):
_shift_keeps = np.abs(_shifts)[:,_j] < np.mean(np.abs(_shifts)[:,_j])+outlier_sigma*np.std(np.abs(_shifts)[:,_j])
# filter beads and shifts
_paired_beads = _paired_beads[_shift_keeps]
_paired_ref_beads = _paired_ref_beads[_shift_keeps]
_shifts = _shifts[_shift_keeps]
# check outlier
if check_outlier:
from scipy.spatial import Delaunay
from mpl_toolkits.mplot3d import Axes3D
# initialize list for shifts calculated by neighboring points
_alter_shifts = []
# calculate Delaunay triangulation for ref_beads
_tri = Delaunay(_paired_ref_beads)
# loop through all beads
for _i in range(_paired_ref_beads.shape[0]):
# initialize diff, which used to judge whether keep this
_keep = True
# extract shift
_shift = _shifts[_i]
# initialize neighboring point ids
_neighbor_ids = []
# find neighbors for this point
for _simplex in _tri.simplices.copy():
if _i in _simplex:
_neighbor_ids.append(_simplex)
_neighbor_ids = np.array(np.unique(_neighbor_ids).astype(np.int))
_neighbor_ids = _neighbor_ids[_neighbor_ids != _i] # remove itself
_neighbor_ids = _neighbor_ids[_neighbor_ids != -1] # remove error
# calculate alternative shift
_neighbors = _paired_ref_beads[_neighbor_ids,:]
_neighbor_shifts = _shifts[_neighbor_ids,:]
_neighbor_weights = 1/np.sqrt(np.sum((_neighbors-_paired_ref_beads[_i])**2,1))
_alter_shift = np.dot(_neighbor_shifts.T, _neighbor_weights) / np.sum(_neighbor_weights)
_alter_shifts.append(_alter_shift)
#print _i, _alter_shift, _shift
# differences between shifts and alternative shifts
_diff = [np.linalg.norm(_shift-_alter_shift) for _shift,_alter_shift in zip(_shifts, _alter_shifts)]
# determine whether keep this:
print('-- differences in original drift and neighboring drift:', _diff, np.mean(_diff), np.std(_diff))
_keeps = np.array(_diff < np.mean(_diff)+np.std(_diff)*outlier_sigma, dtype=np.bool)
# filter beads and shifts
_paired_beads = _paired_beads[_keeps]
_paired_ref_beads = _paired_ref_beads[_keeps]
_shifts = _shifts[_keeps]
return np.array(_paired_beads), np.array(_paired_ref_beads), np.array(_shifts)
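# Minimal usage sketch (assumption: `beads` and `ref_beads` are n-by-3 [z, x, y] arrays,
# e.g. from get_STD_centers on the moving and reference frames):
#   paired, paired_ref, shifts = beads_alignment_fast(beads, ref_beads, unique_cutoff=2.)
#   drift = np.mean(shifts, axis=0)  # average [z, x, y] drift between the two frames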
class imshow_mark_3d_v2:
def master_reset(self):
#self.dic_min_max = {}
self.class_ids = []
self.draw_x,self.draw_y,self.draw_z=[],[],[]
self.coords = list(zip(self.draw_x,self.draw_y,self.draw_z))
#load vars
self.load_coords()
self.set_image()
def __init__(self,ims,fig=None,image_names=None,rescz=1.,min_max_default = [None,None], given_dic=None,save_file=None,paramaters={}):
#internalize
#seeding paramaters
self.gfilt_size = paramaters.get('gfilt_size',0.75)#first gaussian blur with radius # to avoid false local max from camera fluc
self.filt_size = paramaters.get('filt_size',3)#local maxima and minima are computed on blocks of size #
self.th_seed = paramaters.get('th_seed',300.)#keep points when difference between local minima and maxima is more than #
self.hot_pix_th = paramaters.get('hot_pix_th',0)
#fitting paramaters
self.width_z = paramaters.get('width_z',1.8*1.5)#fixed width in z # 1.8 presupposes isotropic pixel size
self.width_xy = paramaters.get('width_xy',1.)#fixed width in xy
self.radius_fit = paramaters.get('radius_fit',5)#neighbourhood used to fit each seed point
self.paramaters=paramaters
self.ims=ims
self.rescz = rescz
if image_names is None:
self.image_names = ['Image '+str(i+1) for i in range(len(ims))]
else:
self.image_names = image_names
self.save_file = save_file
#define extra vars
self.dic_min_max = {}
self.class_ids = []
self.draw_x,self.draw_y,self.draw_z=[],[],[]
self.coords = list(zip(self.draw_x,self.draw_y,self.draw_z))
self.delete_mode = False
#load vars
self.load_coords(_given_dic=given_dic)
#construct images
self.index_im = 0
self.im_ = self.ims[self.index_im]
self.im_xy = np.max(self.im_,axis=0)
self.im_z = np.max(self.im_,axis=1)
im_z_len = self.im_z.shape[0]
indz=np.array(np.round(np.arange(0,im_z_len,self.rescz)),dtype=int)
self.im_z = self.im_z[indz[indz<im_z_len],...]
#setup plots
if fig is None:
self.f=plt.figure()
else:
self.f=fig
self.ax1,self.ax2 = ImageGrid(self.f, 111, nrows_ncols=(2, 1), axes_pad=0.1)
self.lxy,=self.ax1.plot(self.draw_x, self.draw_y, 'o',
markersize=12,markeredgewidth=1,markeredgecolor='y',markerfacecolor='None')
self.lz,=self.ax2.plot(self.draw_x, self.draw_z, 'o',
markersize=12,markeredgewidth=1,markeredgecolor='y',markerfacecolor='None')
self.imshow_xy = self.ax1.imshow(self.im_xy,interpolation='nearest',cmap='gray')
self.imshow_z = self.ax2.imshow(self.im_z,interpolation='nearest',cmap='gray')
self.min_,self.max_ = min_max_default
if self.min_ is None: self.min_ = np.min(self.im_)
if self.max_ is None: self.max_ = np.max(self.im_)
self.imshow_xy.set_clim(self.min_,self.max_)
self.imshow_z.set_clim(self.min_,self.max_)
self.ax1.callbacks.connect('ylim_changed', self.xy_on_lims_change)
self.ax2.callbacks.connect('ylim_changed', self.z_on_lims_change)
self.f.suptitle(self.image_names[self.index_im])
#connect mouse and keyboard
cid = self.f.canvas.mpl_connect('button_press_event', self.onclick)
cid2 = self.f.canvas.mpl_connect('key_press_event', self.press)
cid3 = self.f.canvas.mpl_connect('key_release_event', self.release)
self.set_image()
if fig is None:
plt.show()
def onclick(self,event):
if event.button==3:
#print "click"
if event.inaxes is self.ax1:
if self.delete_mode:
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
x_,y_,z_ = list(map(np.array,[self.draw_x,self.draw_y,self.draw_z]))
#print x_min,x_max,y_min,y_max,z_min,z_max
#print x_,y_,z_
keep_in_window = (x_>y_min)&(x_<y_max)&(y_>x_min)&(y_<x_max)&(z_>z_min)&(z_<z_max)
keep_class = (np.array(self.class_ids)==self.index_im)&(np.isnan(self.draw_x)==False)
keep = keep_in_window&keep_class
if np.sum(keep)>0:
keep_ind = np.arange(len(keep))[keep]
coords_xy_class = list(zip(np.array(self.draw_x)[keep],
np.array(self.draw_y)[keep]))
difs = np.array(coords_xy_class)-np.array([[event.xdata,event.ydata]])
ind_= np.argmin(np.sum(np.abs(difs),axis=-1))
self.draw_x.pop(keep_ind[ind_])
self.draw_y.pop(keep_ind[ind_])
self.draw_z.pop(keep_ind[ind_])
self.class_ids.pop(keep_ind[ind_])
print(ind_)
else:
print('test')
else:
if event.xdata is not None and event.ydata is not None:
self.draw_x.append(event.xdata)
self.draw_y.append(event.ydata)
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.draw_z.append((z_min+z_max)/2.)
self.class_ids.append(self.index_im)
if event.inaxes is self.ax2:
if event.xdata is not None and event.ydata is not None:
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
x_,y_,z_ = list(map(np.array,[self.draw_x,self.draw_y,self.draw_z]))
keep_in_window = (x_>y_min)&(x_<y_max)&(y_>x_min)&(y_<x_max)&(z_>z_min)&(z_<z_max)
keep_class = (np.array(self.class_ids)==self.index_im)&(np.isnan(self.draw_x)==False)
keep = keep_in_window&keep_class
if np.sum(keep)>0:
keep_ind = np.arange(len(keep))[keep]
coords_x = np.array(self.draw_x)[keep]
ind_ = np.argmin(np.abs(coords_x-event.xdata))
self.draw_z[keep_ind[ind_]]=event.ydata
self.update_point_plot()
def press(self,event):
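# Keyboard bindings handled by the branches below:
#   'd' / 'a'   : show next / previous image
#   's'         : save the current views (save_ims)
#   'x'         : auto-scale contrast to the zoomed region (auto_scale)
#   't'         : detect seed points (get_seed_points)
#   'n'         : call handle_in_nucleus
#   'q'         : seed and fit every image, then return to the current one
#   'y'         : fit seed points of the current image (fit_seed_points)
#   'delete'    : remove the most recently added point
#   'shift'     : hold to enable delete mode for right-clicks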
if event.key== 'd':
self.index_im = (self.index_im+1)%len(self.ims)
self.set_image()
if event.key== 'a':
self.index_im = (self.index_im-1)%len(self.ims)
self.set_image()
if event.key=='s':
self.save_ims()
if event.key== 'x':
self.auto_scale()
if event.key== 't':
self.get_seed_points()
if event.key== 'n':
self.handle_in_nucleus()
if event.key== 'q':
prev_im = self.index_im
for self.index_im in range(len(self.ims)):
self.set_image()
self.get_seed_points()
self.fit_seed_points()
self.index_im = prev_im
self.set_image()
if event.key== 'y':
self.fit_seed_points()
if event.key == 'delete':
self.draw_x.pop(-1)
self.draw_y.pop(-1)
self.draw_z.pop(-1)
self.class_ids.pop(-1)
self.update_point_plot()
if event.key == 'shift':
self.delete_mode = True
def release(self, event):
if event.key == 'shift':
self.delete_mode = False
def populate_draw_xyz(self,flip=False):
if len(self.coords)>0:
self.draw_x,self.draw_y,self.draw_z = list(zip(*self.coords))
if flip: self.draw_x,self.draw_y,self.draw_z = list(map(list,[self.draw_y,self.draw_x,self.draw_z]))
else: self.draw_x,self.draw_y,self.draw_z = list(map(list,[self.draw_x,self.draw_y,self.draw_z]))
else:
self.draw_x,self.draw_y,self.draw_z = [],[],[]
def create_text(self):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.texts = []
i_ims = np.zeros(len(self.ims),dtype=int)
for (xyz,c_id) in zip(self.coords,self.class_ids):
i_ims[c_id]+=1
if c_id==self.index_im:
if not np.isnan(xyz[0]):
if z_min<xyz[2] and z_max>xyz[2] and y_min<xyz[0] and y_max>xyz[0] and x_min<xyz[1] and x_max>xyz[1]:
text_ = str(i_ims[c_id])
color_='r'
if hasattr(self,'dec_text'):
key_dec = tuple(list(np.array(xyz,dtype=int))+[c_id])
if key_dec in self.dec_text:
text_=self.dec_text[key_dec]['text']
color_='b'
self.texts.append(self.ax1.text(xyz[0],xyz[1],text_,color=color_))
self.texts.append(self.ax2.text(xyz[0],xyz[2],text_,color=color_))
def update_point_plot(self):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.coords = list(zip(self.draw_x,self.draw_y,self.draw_z))
x_,y_,z_ = list(map(np.array,[self.draw_x,self.draw_y,self.draw_z]))
#print x_min,x_max,y_min,y_max,z_min,z_max
#print x_,y_,z_
keep_class = np.array(self.class_ids)==self.index_im
keep_in_window = (x_>y_min)&(x_<y_max)&(y_>x_min)&(y_<x_max)&(z_>z_min)&(z_<z_max)
keep = keep_class&keep_in_window
self.lxy.set_xdata(x_[keep])
self.lxy.set_ydata(y_[keep])
self.lz.set_xdata(x_[keep])
self.lz.set_ydata(z_[keep])
self.save_coords()
self.remove_text()
self.create_text()
self.f.canvas.draw()
def remove_text(self):
if not hasattr(self,'texts'): self.texts = []
for txt in self.texts:
txt.remove()
def load_coords(self, _given_dic=None):
save_file = self.save_file
if _given_dic:
save_dic = _given_dic
elif save_file is not None and os.path.exists(save_file):
with open(save_file,'rb') as fid:
save_dic = pickle.load(fid)
else:
return False
# load information from save_dic
self.coords,self.class_ids = save_dic['coords'],save_dic['class_ids']
if 'pfits' in save_dic:
self.pfits_save = save_dic['pfits']
if 'dec_text' in save_dic:
self.dec_text=save_dic['dec_text']
self.populate_draw_xyz()#coords to plot list
def save_coords(self):
save_file = self.save_file
if save_file is not None:
if not os.path.exists(os.path.dirname(save_file)):
os.makedirs(os.path.dirname(save_file))
fid = open(save_file,'wb')
self.pfits_save = getattr(self,'pfits_save',{})
self.dec_text = getattr(self,'dec_text',{})
save_dic = {'coords':self.coords,'class_ids':self.class_ids,'pfits':self.pfits_save,'dec_text':self.dec_text}
pickle.dump(save_dic,fid)
fid.close()
def auto_scale(self):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
im_chop = self.im_[z_min:z_max,x_min:x_max,y_min:y_max,...]
min_,max_ = np.min(im_chop),np.max(im_chop)
self.imshow_xy.set_clim(min_,max_)
self.imshow_z.set_clim(min_,max_)
self.dic_min_max[self.index_im] = [min_,max_]
self.f.canvas.draw()
def del_ext(self,str_):
"Deletes extention"
if os.path.basename(str_).count('.')>0:
return '.'.join(str_.split('.')[:-1])
else:
return str_
def save_ims(self):
import scipy.misc
save_file = self.save_file
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
for index_im,im_ in enumerate(self.ims):
im_chop = im_[self.get_z_ind(),x_min:x_max,y_min:y_max,...]
im_xy = np.max(im_chop,axis=0)
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
"""
CUDA_VISIBLE_DEVICES=1 python -W ignore Genesis_Chest_CT.py \
--note genesis_chest_ct \
--arch Vnet \
--input_rows 64 \
--input_cols 64 \
--input_deps 32 \
--nb_class 1 \
--verbose 1 \
--batch_size 16 \
--scale 32 \
--data generated_cubes
"""
# In[1]:
import warnings
warnings.filterwarnings('ignore')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # or any {'0', '1', '2'}; must be set before importing tensorflow
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # must be set before importing tensorflow/keras to take effect
import keras
print("Keras = {}".format(keras.__version__))
import tensorflow as tf
import copy
import sys
import math
import random
import shutil
import numpy as np
from tqdm import tqdm
from scipy.special import comb  # scipy.misc no longer provides comb in newer SciPy
from sklearn import metrics
from unet2d import *
from keras.callbacks import LambdaCallback, TensorBoard, EarlyStopping, ModelCheckpoint, LearningRateScheduler
from keras.optimizers import SGD
from skimage.transform import resize
from optparse import OptionParser
from keras.utils import plot_model
sys.setrecursionlimit(40000)
parser = OptionParser()
parser.add_option("--arch", dest="arch", help="Vnet", default=None, type="string")
parser.add_option("--input_rows", dest="input_rows", help="input rows", default=128, type="int")
parser.add_option("--input_cols", dest="input_cols", help="input cols", default=128, type="int")
parser.add_option("--input_deps", dest="input_deps", help="input deps", default=1, type="int")
parser.add_option("--nb_class", dest="nb_class", help="number of class", default=1, type="int")
parser.add_option("--verbose", dest="verbose", help="verbose", default=0, type="int")
parser.add_option("--weights", dest="weights", help="pre-trained weights", default=None, type="string")
parser.add_option("--note", dest="note", help="notes of experiment setup", default="", type="string")
parser.add_option("--batch_size", dest="batch_size", help="batch size", default=8, type="int")
parser.add_option("--scale", dest="scale", help="the scale of pre-trained data", default=32, type="int")
parser.add_option("--optimizer", dest="optimizer", help="SGD | Adam", default="Adam", type="string")
parser.add_option("--data", dest="data", help="the address of data cube", default=None, type="string")
parser.add_option("--workers", dest="workers", help="number of CPU cores", default=8, type="int")
parser.add_option("--nonlinear_rate", dest="nonlinear_rate", help="chance to perform nonlinear", default=0.9, type="float")
parser.add_option("--paint_rate", dest="paint_rate", help="chance to perform painting", default=0.9, type="float")
parser.add_option("--outpaint_rate", dest="outpaint_rate", help="chance to perform out-painting", default=0.8, type="float")
parser.add_option("--flip_rate", dest="flip_rate", help="chance to perform flipping", default=0.9, type="float")
parser.add_option("--local_rate", dest="local_rate", help="chance to perform local shuffle pixel", default=0.1, type="float")
(options, args) = parser.parse_args()
assert options.arch in ['Vnet']
assert options.data is not None
assert os.path.exists(options.data) == True
seed = 1
random.seed(seed)
model_path = "pretrained_weights"
if not os.path.exists(model_path):
os.makedirs(model_path)
logs_path = os.path.join(model_path, "Logs")
if not os.path.exists(logs_path):
os.makedirs(logs_path)
class setup_config():
nb_epoch = 10000
patience = 50
lr = 1e-0
train_fold=[0,1,2,3,4]
valid_fold=[5,6]
test_fold=[7,8,9]
hu_max = 255
hu_min = 0
def __init__(self, model="Unet",
note="",
data_augmentation=True,
input_rows=64,
input_cols=64,
input_deps=1,
batch_size=64,
nb_class=1,
nonlinear_rate=0.95,
paint_rate=0.6,
outpaint_rate=0.8,
flip_rate=0.0,
local_rate=0.9,
verbose=1,
workers=2,
optimizer=None,
DATA_DIR=None,
):
self.model = model
self.exp_name = model + "-" + note
self.data_augmentation = data_augmentation
self.input_rows, self.input_cols = input_rows, input_cols
self.input_deps = input_deps
self.batch_size = batch_size
self.verbose = verbose
self.nonlinear_rate = nonlinear_rate
self.paint_rate = paint_rate
self.outpaint_rate = outpaint_rate
self.inpaint_rate = 1.0 - self.outpaint_rate
self.flip_rate = flip_rate
self.local_rate = local_rate
self.nb_class = nb_class
self.optimizer = optimizer
self.workers = workers
self.DATA_DIR = DATA_DIR
self.max_queue_size = self.workers * 4
if nb_class > 1:
self.activation = "softmax"
else:
self.activation = "sigmoid"
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
config = setup_config(model=options.arch,
note=options.note,
input_rows=options.input_rows,
input_cols=options.input_cols,
input_deps=options.input_deps,
batch_size=options.batch_size,
nb_class=options.nb_class,
verbose=options.verbose,
nonlinear_rate=options.nonlinear_rate,
paint_rate=options.paint_rate,
outpaint_rate=options.outpaint_rate,
flip_rate=options.flip_rate,
local_rate=options.local_rate,
optimizer=options.optimizer,
DATA_DIR=options.data,
workers=options.workers,
)
config.display()
# In[2]:
def bernstein_poly(i, n, t):
"""
The Bernstein polynomial of n, i as a function of t
"""
return comb(n, i) * ( t**(n-i) ) * (1 - t)**i
def bezier_curve(points, nTimes=1000):
"""
Given a set of control points, return the
bezier curve defined by the control points.
Control points should be a list of lists, or list of tuples
such as [ [1,1],
[2,3],
[4,5], ..[Xn, Yn] ]
nTimes is the number of time steps, defaults to 1000
See http://processingjs.nihongoresources.com/bezierinfo/
"""
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
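# Illustrative sketch (hypothetical helper, not part of the original script):
# with four control points of the kind used by nonlinear_transformation below,
# bezier_curve produces a smooth intensity remapping curve on [0, 1].
def _bezier_curve_example():
    points = [[0, 0], [0.3, 0.7], [0.6, 0.2], [1, 1]]  # assumed control points
    xvals, yvals = bezier_curve(points, nTimes=1000)
    # np.interp over (sorted) xvals/yvals is how voxel intensities get remapped
    return xvals, yvals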
def data_augmentation(x, y, prob=0.5):
# augmentation by flipping
cnt = 3
while random.random() < prob and cnt > 0:
degree = random.choice([0, 1])
x = np.flip(x, axis=degree)
y = np.flip(y, axis=degree)
cnt = cnt - 1
return x, y
def nonlinear_transformation(x, prob=0.5):
if random.random() >= prob:
return x
points = [[0, 0], [random.random(), random.random()], [random.random(), random.random()], [1, 1]]
xpoints = [p[0] for p in points]
ypoints = [p[1] for p in points]
xvals, yvals = bezier_curve(points, nTimes=100000)
if random.random() < 0.5:
# Half chance to get flip
xvals = np.sort(xvals)
else:
xvals, yvals = np.sort(xvals), np.sort(yvals)
nonlinear_x = np.interp(x, xvals, yvals)
return nonlinear_x
def local_pixel_shuffling(x, prob=0.5):
if random.random() >= prob:
return x
image_temp = copy.deepcopy(x)
orig_image = copy.deepcopy(x)
img_rows, img_cols = x.shape
num_block = 500
for _ in range(num_block):
block_noise_size_x = random.randint(1, img_rows//10)
block_noise_size_y = random.randint(1, img_cols//10)
noise_x = random.randint(0, img_rows-block_noise_size_x)
noise_y = random.randint(0, img_cols-block_noise_size_y)
window = orig_image[noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y]
window = window.flatten()
np.random.shuffle(window)
window = window.reshape((block_noise_size_x,
block_noise_size_y))
image_temp[noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y] = window
local_shuffling_x = image_temp
return local_shuffling_x
def image_in_painting(x, prob=0.9):
in_painting_x = copy.deepcopy(x)
img_rows, img_cols = x.shape
num_painting = 5
for _ in range(num_painting):
if random.random() >= prob:
continue
block_noise_size_x = random.randint(10, 20)
block_noise_size_y = random.randint(10, 20)
noise_x = random.randint(3, img_rows - block_noise_size_x - 3)
noise_y = random.randint(3, img_cols - block_noise_size_y - 3)
in_painting_x[noise_x:noise_x + block_noise_size_x,
noise_y:noise_y + block_noise_size_y] = random.random()
return in_painting_x
def image_out_painting(x):
out_painting_x = copy.deepcopy(x)
out_painting_x[:, :] = random.random()
img_rows, img_cols = x.shape
block_noise_size_x = img_rows - random.randint(20, 30)
block_noise_size_y = img_cols - random.randint(20, 30)
noise_x = random.randint(3, img_rows-block_noise_size_x-3)
noise_y = random.randint(3, img_cols-block_noise_size_y-3)
image_temp = copy.deepcopy(x)
out_painting_x[noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y] = image_temp[noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y]
return out_painting_x
def generate_pair(img, batch_size):
img_rows, img_cols = img.shape[1], img.shape[2]
while True:
index = [i for i in range(img.shape[0])]
random.shuffle(index)
y = img[index[:batch_size]]
x = copy.deepcopy(y)
for n in range(batch_size):
# Autoencoder
x[n] = copy.deepcopy(y[n])
# Flip
x[n], y[n] = data_augmentation(x[n], y[n], config.flip_rate)
# Local Shuffle Pixel
x[n] = local_pixel_shuffling(x[n], prob=config.local_rate)
# Apply non-Linear transformation with an assigned probability
x[n] = nonlinear_transformation(x[n], config.nonlinear_rate)
# Inpainting & Outpainting
if random.random() < config.paint_rate:
if random.random() < config.inpaint_rate:
# Inpainting
x[n] = image_in_painting(x[n])
else:
# Outpainting
x[n] = image_out_painting(x[n])
yield (np.expand_dims(x, axis=-1), np.expand_dims(y, axis=-1))
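# Usage sketch (assumption; the training call is not part of this excerpt):
# generate_pair is an infinite generator, so it can be fed directly to Keras'
# fit_generator, e.g.
#   model.fit_generator(generate_pair(x_train, config.batch_size),
#                       steps_per_epoch=x_train.shape[0] // config.batch_size)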
# learning rate schedule
# source: https://machinelearningmastery.com/using-learning-rate-schedules-deep-learning-models-python-keras/
def step_decay(epoch):
initial_lrate = config.lr
drop = 0.5
epochs_drop = int(config.patience * 0.8)
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
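# Assumed usage (the callback wiring is not shown in this excerpt): step_decay
# is meant to be wrapped in a Keras LearningRateScheduler callback, e.g.
#   lrate_cb = LearningRateScheduler(step_decay, verbose=1)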
x_train = []
for i,fold in enumerate(tqdm(config.train_fold)):
s = np.load(os.path.join(config.DATA_DIR, "bat_"+str(options.scale)+"_"+
str(options.input_cols)+"x"+str(options.input_rows)+"_"+str(fold)+".npy"))
x_train.extend(s)
# x_train = np.expand_dims(np.array(x_train), axis=-1)
x_train = np.array(x_train)
print("x_train: {} | {:.2f} ~ {:.2f}".format(x_train.shape, np.min(x_train),
|
np.max(x_train)
|
numpy.max
|
# This file is part of NEORL.
# Copyright (c) 2021 Exelon Corporation and MIT Nuclear Science and Engineering
# NEORL is free software: you can redistribute it and/or modify
# it under the terms of the MIT LICENSE
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -*- coding: utf-8 -*-
#"""
#Created on Sun Jun 28 18:21:05 2020
#
#@author: <NAME>
#"""
from neorl.hybrid.pesacore.er import ExperienceReplay
from neorl.hybrid.pesacore.sa import SAMod
from neorl.hybrid.pesacore.es import ESMod
from neorl.hybrid.pesacore.pso import PSOMod
from copy import deepcopy
from multiprocessing import Process, Queue
import random
import numpy as np
from collections import defaultdict
import time
import sys
import uuid
from neorl.evolu.discrete import encode_grid_to_discrete, decode_discrete_to_grid
from neorl.utils.seeding import set_neorl_seed
#multiprocessing trick to parallelize nested functions in python (un-picklable objects!)
def globalize(func):
def result(*args, **kwargs):
return -func(*args, **kwargs)
result.__name__ = result.__qualname__ = uuid.uuid4().hex
setattr(sys.modules[result.__module__], result.__name__, result)
return result
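# Note on globalize: besides giving the wrapped lambda a unique module-level
# (and therefore picklable) name for multiprocessing, it also negates the
# objective, so PESA can treat a 'min' problem internally as maximization
# (see the mode handling in PESA.__init__ below).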
class PESA(ExperienceReplay):
"""
*PESA Major Parameters*
:param mode: (str) problem type, either "min" for minimization problem or "max" for maximization
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param fit: (function) the fitness function
:param npop: (int) total number of individuals in each group. So for ES, PSO, and SA, full population is ``npop*3``.
:param mu: (int) number of individuals to survive to the next generation.
Also, ``mu`` equals the number of individuals to sample from the memory. If None, ``mu=int(npop/2)``.
So 1/2 of the PESA population comes from the previous generation, and 1/2 comes from the replay memory (See **Notes** below for more info)
:param memory_size: (int) max size of the replay memory (if None, ``memory_size`` is built to accommodate all samples during search)
:param alpha_init: (float) initial value of the prioritized replay coefficient (See **Notes** below)
:param alpha_end: (float) final value of the prioritized replay coefficient (See **Notes** below)
:param alpha_backdoor: (float) backdoor greedy replay rate/probability to sample from the memory for SA instead of random-walk (See **Notes** below)
*PESA Auxiliary Parameters (for the internal algorithms)*
:param cxpb: (float) for **ES**, population crossover probability between [0,1]
:param mutpb: (float) for **ES**, population mutation probability between [0,1]
:param c1: (float) for **PSO**, cognitive speed constant
:param c2: (float) for **PSO**, social speed constant
:param speed_mech: (str) for **PSO**, type of speed mechanism used to update particle velocity; choose between ``constric``, ``timew``, ``globw``.
:param Tmax: (float) for **SA**, initial/max temperature to start the annealing process
:param chi: (float) for **SA**, probability to perturb an attribute during SA annealing (occurs when ``rand(0,1) < chi``).
*PESA Misc. Parameters*
:param ncores: (int) number of parallel processors
:param seed: (int) random seed for sampling
"""
def __init__ (self, mode, bounds, fit, npop, mu=None, #general parameters
memory_size=None, alpha_init=0.1, alpha_end=1, alpha_backdoor=0.1, #replay parameters
Tmax=10000, chi=0.1, #SA parameters
cxpb=0.7, mutpb=0.1, #ES parameters
c1=2.05, c2=2.05, speed_mech='constric', #PSO parameters
ncores=1, seed=None): #misc parameters
#--------------------
#General Parameters
#--------------------
set_neorl_seed(seed)
self.bounds=bounds
#--mir
self.mode=mode
if mode == 'max':
self.FIT=fit
elif mode == 'min':
self.FIT = globalize(lambda x: fit(x)) #use the function globalize to serialize the nested fit
else:
raise ValueError('--error: The mode entered by user is invalid, use either `min` or `max`')
self.ncores=ncores
self.NPOP=npop
self.pso_flag=True
self.ncores=ncores
if ncores <= 3:
self.NCORES=1
self.PROC=False
else:
self.PROC=True
if self.pso_flag:
self.NCORES=int(ncores/3)
else:
self.NCORES=int(ncores/2)
# option for first-level parallelism
#self.PROC=True
self.SEED=seed
#--------------------
#Experience Replay
#--------------------
self.MODE='prior'; self.ALPHA0=alpha_init; self.ALPHA1=alpha_end
#--------------------
# SA hyperparameters
#--------------------
self.TMAX=Tmax; self.CHI=chi; self.REPLAY_RATE=alpha_backdoor
#--------------------
# ES HyperParameters
#--------------------
if mu:
assert mu < npop, '--error: The value of mu ({}) MUST be less than npop ({})'.format(mu, npop)
self.MU=mu
else:
self.MU=int(npop/2)
self.CXPB=cxpb; self.MUTPB=mutpb; self.INDPB=1.0
#--------------------
# PSO HyperParameters
#--------------------
self.C1=c1; self.C2=c2; self.SPEED_MECH=speed_mech
#-------------------------------
#Memory Supply for each method
#-------------------------------
self.ES_MEMORY=self.MU
self.SA_MEMORY=self.NCORES
self.PSO_MEMORY=self.NPOP-self.MU
#--------------------
# Fixed/Derived parameters
#--------------------
self.nx=len(bounds) #all
self.memory_size=memory_size
self.COOLING='fast' #SA
self.TMIN=1 #SA
self.LAMBDA=self.NPOP #ES
self.NPAR=self.NPOP #PSO
self.SMIN = 1/self.nx #ES
self.SMAX = 0.5 #ES
self.v0=0.1 #constant to initialize PSO speed, not very important
#infer variable types
self.datatype = np.array([bounds[item][0] for item in bounds])
#mir-grid
if "grid" in self.datatype:
self.grid_flag=True
self.orig_bounds=bounds #keep original bounds for decoding
#print('--debug: grid parameter type is found in the space')
self.bounds, self.bounds_map=encode_grid_to_discrete(self.bounds) #encoding grid to int
#define var_types again by converting grid to int
self.datatype = np.array([self.bounds[item][0] for item in self.bounds])
else:
self.grid_flag=False
self.bounds = bounds
self.lb = np.array([self.bounds[item][1] for item in self.bounds])
self.ub = np.array([self.bounds[item][2] for item in self.bounds])
def fit_worker(self, x):
#"""
#Evaluates fitness of an individual.
#"""
#mir-grid
if self.grid_flag:
#decode the individual back to the int/float/grid mixed space
x=decode_discrete_to_grid(x,self.orig_bounds,self.bounds_map)
fitness = self.FIT(x)
return fitness
def evolute(self, ngen, x0=None, warmup=100, verbose=0):
"""
This function evolutes the PESA algorithm for number of generations.
:param ngen: (int) number of generations to evolute
:param x0: (list of lists) initial samples to start the replay memory (``len(x0)`` must be equal or more than ``npop``)
:param warmup: (int) number of random warmup samples to initialize the replay memory and must be equal or more than ``npop`` (only used if ``x0=None``)
:param verbose: (int) print statistics to screen, 0: no print, 1: PESA print, 2: detailed print
:return: (tuple) (best individual, best fitness, and a list of fitness history)
"""
"""
self.verbose=verbose
self.NGEN=ngen
self.STEPS=self.NGEN*self.NPOP #all
if self.memory_size:
self.MEMORY_SIZE=self.memory_size
else:
self.MEMORY_SIZE=self.STEPS*3+1 #PESA
#-------------------------------------------------------
# Check if initial pop is provided as initial guess
#-------------------------------------------------------
if x0:
# use provided initial guess
warm=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.ncores)
x0size=len(x0)
assert x0size >= self.NPOP, 'the number of lists in x0 ({}) must be greater than or equal to npop ({})'.format(x0size, self.NPOP)
self.pop0=warm.init_pop(warmup=x0size, x_known=x0) #initial population for ES
else:
#create initial guess
assert warmup > self.NPOP, 'the number of warmup samples ({}) must be more than npop ({})'.format(warmup, self.NPOP)
warm=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.ncores)
self.pop0=warm.init_pop(warmup=warmup) #initial population for ES
self.partime={}
self.partime['pesa']=[]
self.partime['es']=[]
self.partime['pso']=[]
self.partime['sa']=[]
self.fit_hist=[]
#------------------------------
# Step 1: Initialize the memory
#------------------------------
self.mymemory=ExperienceReplay(size=self.MEMORY_SIZE) #memory object
xvec0, obj0=[self.pop0[item][0] for item in self.pop0], [self.pop0[item][2] for item in self.pop0] #parse the initial samples
self.mymemory.add(xvec=xvec0, obj=obj0, method=['na']*len(xvec0)) # add initial samples to the replay memory
#--------------------------------
# Step 2: Initialize all methods
#--------------------------------
# Obtain initial population for all methods
espop0, swarm0, swm_pos0, swm_fit0, local_pos, local_fit, x0, E0=self.init_guess(pop0=self.pop0)
# Initialize ES class
es=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.NCORES, indpb=self.INDPB,
cxpb=self.CXPB, mutpb=self.MUTPB, smin=self.SMIN, smax=self.SMAX)
# Initialize SA class
sa=SAMod(bounds=self.bounds, memory=self.mymemory, fit=self.fit_worker, steps=self.STEPS, ncores=self.NCORES,
chi=self.CHI, replay_rate=self.REPLAY_RATE, cooling=self.COOLING, Tmax=self.TMAX, Tmin=self.TMIN)
# Initialize PSO class (if USED)
if self.pso_flag:
pso=PSOMod(bounds=self.bounds, fit=self.fit_worker, npar=self.NPAR, swm0=[swm_pos0,swm_fit0],
ncores=self.NCORES, c1=self.C1, c2=self.C2, speed_mech=self.SPEED_MECH)
#--------------------------------
# Step 3: Initialize PESA engine
#--------------------------------
#Use initial samples as first guess for SA, ES, and PSO
self.pop_next=deepcopy(espop0) # x0 for ES
self.x_next, self.E_next=deepcopy(x0), deepcopy(E0) # x0 for SA
if self.pso_flag:
self.swm_next, self.local_pos_next, self.local_fit_next=deepcopy(swarm0), deepcopy(local_pos), deepcopy(local_fit) # x0 for PSO (if used)
self.STEP0=1 #step counter
self.ALPHA=self.ALPHA0 #set alpha to alpha0
#--------------------------------
# Step 4: PESA evolution
#--------------------------------
for gen in range(1,self.NGEN+1):
caseids=['es_gen{}_ind{}'.format(gen,ind+1) for ind in range(self.LAMBDA)] # save caseids for ES
if self.pso_flag:
pso_caseids=['pso_gen{}_par{}'.format(gen+1,ind+1) for ind in range(self.NPAR)] # save caseids for PSO
#-------------------------------------------------------------------------------------------------------------------
# Step 5: evolute all methods for 1 generation
#-------------------------------------------------------------------------------------------------------------------
#**********************************
#--Step 5A: Complete PARALLEL calcs
# via multiprocessing.Process
#*********************************
if self.PROC:
t0=time.time()
QSA = Queue(); QES=Queue(); QPSO=Queue()
def sa_worker():
random.seed(self.SEED)
x_new, E_new, self.T, self.acc, self.rej, self.imp, x_best, E_best, sa_partime= sa.anneal(ngen=1,npop=self.NPOP, x0=self.x_next,
E0=self.E_next, step0=self.STEP0)
QSA.put((x_new, E_new, self.T, self.acc, self.rej, self.imp, x_best, E_best, sa_partime))
def es_worker():
random.seed(self.SEED)
pop_new, es_partime=es.evolute(population=self.pop_next,ngen=1,caseids=caseids)
QES.put((pop_new, es_partime))
def pso_worker():
random.seed(self.SEED)
if gen > 1:
swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next, local_fit=self.local_fit_next,
swm_best=[self.swm_pos, self.swm_fit], mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS,
caseids=pso_caseids, verbose=0)
else:
swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next,
local_fit=self.local_fit_next, mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS,
caseids=pso_caseids, verbose=0)
QPSO.put((swm_new, swm_pos_new, swm_fit_new, pso_partime))
Process(target=sa_worker).start()
Process(target=es_worker).start()
if self.pso_flag:
Process(target=pso_worker).start()
self.swm_next, self.swm_pos, self.swm_fit, pso_partime=QPSO.get()
self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
self.x_next, self.E_next, self.T, self.acc, self.rej, self.imp, self.x_best, self.E_best, sa_partime=QSA.get()
self.pop_next, es_partime=QES.get()
#self.partime.append(time.time()-t0)
self.partime['pesa'].append(time.time()-t0)
self.partime['pso'].append(pso_partime)
self.partime['es'].append(es_partime)
self.partime['sa'].append(sa_partime)
#*********************************
#--Step 5B: Complete Serial calcs
#*********************************
else:
self.pop_next, _ =es.evolute(population=self.pop_next,ngen=1,caseids=caseids) #ES serial
self.x_next, self.E_next, self.T, self.acc, self.rej, self.imp, self.x_best, self.E_best, _ = sa.anneal(ngen=1,npop=self.NPOP, x0=self.x_next,
E0=self.E_next, step0=self.STEP0) #SA serial
if self.pso_flag:
self.swm_next, self.swm_pos, self.swm_fit, _ =pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next,
local_fit=self.local_fit_next, exstep=self.STEP0, exsteps=self.STEPS,
caseids=pso_caseids, mu=self.MU, verbose=0)
self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
#*********************************************************
# Step 5C: Obtain relevant statistics for this generation
#*********************************************************
self.STEP0=self.STEP0+self.NPOP #update step counter
self.inds, self.rwd=[self.pop_next[i][0] for i in self.pop_next], [self.pop_next[i][2] for i in self.pop_next] #ES statistics
self.mean_strategy=[np.mean(self.pop_next[i][1]) for i in self.pop_next] #ES statistics
if self.pso_flag:
self.pars, self.fits=[self.swm_next[i][0] for i in self.swm_next], [self.swm_next[i][2] for i in self.swm_next] #PSO statistics
self.mean_speed=[np.mean(self.swm_next[i][1]) for i in self.swm_next]
if self.verbose==2:
self.printout(mode=1, gen=gen)
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
#-----------------------------
# Step 6: Update the memory
#-----------------------------
self.memory_update()
#-----------------------------------------------------------------
# Step 7: Sample from the memory and prepare for next Generation
#-----------------------------------------------------------------
self.resample()
#--------------------------------------------------------
# Step 8: Anneal Alpha if prioritized replay is used
#--------------------------------------------------------
if self.MODE=='prior': #anneal alpha between alpha0 (lower) and alpha1 (upper)
self.ALPHA=self.linear_anneal(step=self.STEP0, total_steps=self.STEPS, a0=self.ALPHA0, a1=self.ALPHA1)
#--------------------------------------------------------
# Step 9: Calculate the memory best and print PESA summary
#--------------------------------------------------------
self.pesa_best=self.mymemory.sample(batch_size=1,mode='greedy')[0] #`greedy` will sample the best in memory
self.fit_hist.append(self.pesa_best[1])
self.memory_size=len(self.mymemory.storage) #memory size so far
#--mir
if self.mode=='min':
self.fitness_best=-self.pesa_best[1]
else:
self.fitness_best=self.pesa_best[1]
#mir-grid
if self.grid_flag:
self.xbest_correct=decode_discrete_to_grid(self.pesa_best[0],self.orig_bounds,self.bounds_map)
else:
self.xbest_correct=self.pesa_best[0]
if self.verbose: #print summary data to screen
self.printout(mode=2, gen=gen)
if self.verbose:
print('------------------------ PESA Summary --------------------------')
print('Best fitness (y) found:', self.fitness_best)
print('Best individual (x) found:', self.xbest_correct)
print('--------------------------------------------------------------')
#--mir
if self.mode=='min':
self.fit_hist=[-item for item in self.fit_hist]
return self.xbest_correct, self.fitness_best, self.fit_hist
def linear_anneal(self, step, total_steps, a0, a1):
#"""
#Anneal parameter between a0 and a1
#:param step: current time step
#:param total_steps: total number of time steps
#:param a0: lower bound of alpha/parameter
#:param a1: upper bound of alpha/parameter
#:return
# - annealed value of alpha/parameter
#"""
fraction = min(float(step) / total_steps, 1.0)
return a0 + fraction * (a1 - a0)
def memory_update(self):
#"""
#This function updates the replay memory with the samples of SA, ES, and PSO (if used)
#then remove the duplicates from the memory
#"""
self.mymemory.add(xvec=tuple(self.x_next), obj=self.E_next, method=['sanext']*len(self.x_next))
self.mymemory.add(xvec=tuple(self.x_best), obj=self.E_best, method=['sabest']*len(self.x_best))
self.mymemory.add(xvec=tuple(self.inds), obj=self.rwd, method=['es']*len(self.inds))
if self.pso_flag:
self.mymemory.add(xvec=tuple(self.pars), obj=self.fits, method=['pso']*len(self.pars))
#self.mymemory.remove_duplicates() #remove all duplicated samples in memory to avoid biased sampling
def resample(self):
#"""
#This function samples data from the memory and prepares the chains for SA
#the population for ES, and the swarm for PSO for the next generation
#    -SA: initial guesses for the parallel chains are sampled from the memory
# -ES: a total of ES_MEMORY (or MU) individuals are sampled from the memory and appended to ES population
# -PSO: a total of PSO_MEMORY (or MU) particles are sampled from the memory and appended to PSO swarm
#For SA: x_next and E_next participate in next generation
#For PSO: swm_next, local_pos_next, and local_fit_next participate in next generation
#For ES: pop_next participates in next generation
#"""
es_replay=self.mymemory.sample(batch_size=self.ES_MEMORY,mode=self.MODE,alpha=self.ALPHA)
index=self.MU
for sample in range(self.ES_MEMORY):
self.pop_next[index].append(es_replay[sample][0])
self.pop_next[index].append([random.uniform(self.SMIN,self.SMAX) for _ in range(self.nx)])
self.pop_next[index].append(es_replay[sample][1])
index+=1
if self.pso_flag:
pso_replay=self.mymemory.sample(batch_size=self.PSO_MEMORY,mode=self.MODE,alpha=self.ALPHA)
for key in self.swm_next:
del self.swm_next[key][3:]
index=self.MU
for sample in range(self.PSO_MEMORY):
self.swm_next[index].append(pso_replay[sample][0])
#self.swm_next[index].append([random.uniform(self.SPMIN,self.SPMAX) for _ in range(self.nx)])
self.swm_next[index].append(list(self.v0*np.array(pso_replay[sample][0])))
self.swm_next[index].append(pso_replay[sample][1])
self.local_pos_next.append(pso_replay[sample][0])
self.local_fit_next.append(pso_replay[sample][1])
index+=1
sa_replay=self.mymemory.sample(batch_size=self.SA_MEMORY,mode=self.MODE,alpha=self.ALPHA)
self.x_next, self.E_next=[item[0] for item in sa_replay], [item[1] for item in sa_replay]
def init_guess(self, pop0):
#"""
#This function takes initial guess pop0 and returns initial guesses for SA, PSO, and ES
#to start PESA evolution
#inputs:
# pop0 (dict): dictionary contains initial population to start with for all methods
#returns:
# espop0 (dict): initial population for ES
# swarm0 (dict): initial swarm for PSO
# swm_pos (list), swm_fit (float): initial guess for swarm best position and fitness for PSO
# local_pos (list of lists), local_fit (list): initial guesses for local best position of each particle and their fitness for PSO
# x0 (list of lists), E0 (list): initial input vectors and their initial fitness for SA
#"""
pop0=list(pop0.items())
pop0.sort(key=lambda e: e[1][2], reverse=True)
sorted_sa=dict(pop0[:self.NCORES])
#sorted_dict=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.NCORES]) # sort the initial samples for SA
x0, E0=[sorted_sa[key][0] for key in sorted_sa], [sorted_sa[key][2] for key in sorted_sa] # initial guess for SA
#sorted_pso=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.NPAR]) # sort the initial samples for PSO
#sorted_es=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.LAMBDA]) # sort the initial samples for ES
sorted_pso=dict(pop0[:self.NPAR])
sorted_es=dict(pop0[:self.LAMBDA])
swarm0=defaultdict(list)
espop0=defaultdict(list)
local_pos=[]
local_fit=[]
index=0
for key in sorted_pso:
swarm0[index].append(sorted_pso[key][0])
swarm0[index].append(list(self.v0*np.array(sorted_pso[key][0])))
swarm0[index].append(sorted_pso[key][2])
local_pos.append(sorted_pso[key][0])
local_fit.append(sorted_pso[key][2])
index+=1
swm_pos=swarm0[0][0]
swm_fit=swarm0[0][2]
index=0
for key in sorted_es:
espop0[index].append(sorted_es[key][0])
espop0[index].append(sorted_es[key][1])
espop0[index].append(sorted_es[key][2])
index+=1
return espop0, swarm0, swm_pos, swm_fit, local_pos, local_fit, x0, E0
def printout(self, mode, gen):
#"""
#Print statistics to screen
#inputs:
#    mode (int): 1 to print for individual algorithms and 2 to print for PESA
# gen (int): current generation number
#"""
if mode == 1:
print('***********************************************************************************************')
print('############################################################')
print('ES step {}/{}, CX={}, MUT={}, MU={}, LAMBDA={}'.format(self.STEP0-1,self.STEPS, np.round(self.CXPB,2), np.round(self.MUTPB,2), self.MU, self.LAMBDA))
print('############################################################')
print('Statistics for generation {}'.format(gen))
print('Best Fitness:', np.round(np.max(self.rwd)))
"""
Run CGLE example using specified config file.
"""
import int.cgle as cint
import tests
import lpde
import os
import pickle
import shutil
import configparser
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
import utils_cgle
from scipy.spatial.distance import cdist
torch.set_default_dtype(torch.float32)
POINTS_W = 397.48499
plt.set_cmap('plasma')
def integrate_system(config, n, path, verbose=False, n_min=0):
"""Integrate complex Ginzburg-Landau equation."""
pars = {}
pars["c1"] = float(config["c1"])
pars["c2"] = float(config["c2"])
pars["c3"] = float(config["c3"])
pars["mu"] = float(config["mu"])
pars["L"] = float(config["L"])
data_dict = cint.integrate(pars=pars,
dt=float(config["dt"]), N=int(config["N_int"]), T=int(config["T"]),
tmin=float(config["tmin"]), tmax=float(config["tmax"]),
append_init=True)
if verbose:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data_dict["xx"], data_dict["data"][-1].real, label='real')
ax.plot(data_dict["xx"], data_dict["data"][-1].imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('snapshot')
plt.legend()
plt.show()
for i in range(n_min, n):
for p in [0, -1, 1]:
data_perturbed = cint.integrate(pars=pars,
dt=data_dict["dt"], N=data_dict["N"], T=data_dict["T"],
tmin=0, tmax=data_dict["tmax"]-data_dict["tmin"],
ic='manual',
Ainit=data_dict["data"][int(i*int(config["T_off"]))] +
p*float(config["eps"]) *
data_dict["data"][int(i*int(config["T_off"]))],
append_init=True)
data_perturbed["data"] = data_perturbed["data"][:, ::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["xx"] = data_perturbed["xx"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["N"] = int(config["N"])
output = open(path + 'run'+str(i)+'_p_'+str(p)+'.pkl', 'wb')
pickle.dump(data_perturbed, output)
output.close()
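# Invocation sketch (assumption: the section name "SYSTEM" and the config file
# name are hypothetical; only "GENERAL" and "TRAINING" appear later in this script):
#   config = configparser.ConfigParser()
#   config.read('config.cfg')
#   integrate_system(config["SYSTEM"], n=int(config["TRAINING"]["n_train"]),
#                    path=config["GENERAL"]["save_dir"]+'/dat/', verbose=True)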
def make_plot_paper(config):
"""Plot CGLE simulation results."""
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(0)+'.pkl', 'rb')
data_dict = pickle.load(pkl_file)
pkl_file.close()
# t_off = 2000
t_off = 0
idxs = np.arange(data_dict["N"])
np.random.shuffle(idxs)
fig = plt.figure(figsize=(POINTS_W/72, 0.9*POINTS_W/72))
ax1 = fig.add_subplot(321)
pl1 = ax1.pcolor(data_dict["xx"], data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax1.set_xlabel('$x$', labelpad=-2)
ax1.set_ylabel('$t$', labelpad=0)
ax1.set_xlim((0, data_dict["L"]))
ax1.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar1 = plt.colorbar(pl1)
cbar1.set_label('Re $W$', labelpad=-3)
ax2 = fig.add_subplot(322)
pl2 = ax2.pcolor(np.arange(data_dict["N"]), data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10, idxs].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax2.set_xlabel('$i$', labelpad=-2)
ax2.set_ylabel('$t$', labelpad=0)
ax2.set_xlim((0, data_dict["N"]))
ax2.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar2 = plt.colorbar(pl2)
cbar2.set_label('Re $W$', labelpad=-3)
ax3 = fig.add_subplot(323)
v_scaled = np.load(config["GENERAL"]["save_dir"]+'/v_scaled.npy')
pl3 = ax3.scatter(np.arange(data_dict["N"]), v_scaled[idxs], s=2, c=data_dict["xx"][idxs],
cmap='plasma')
ax3.set_xlabel('$i$', labelpad=-2)
ax3.set_xlim((0, data_dict["N"]))
ax3.set_ylabel(r'$\phi_1$', labelpad=-3)
cbar3 = plt.colorbar(pl3)
cbar3.set_label('$x$', labelpad=0)
ax4 = fig.add_subplot(324)
pl4 = ax4.pcolor(v_scaled, data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax4.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
ax4.set_xlabel(r'$\phi_1$', labelpad=0)
ax4.set_xlim((-1, 1))
ax4.set_ylabel(r'$t$', labelpad=0)
cbar4 = plt.colorbar(pl4)
cbar4.set_label('Re $W$', labelpad=-3)
dataset_train = utils_cgle.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataset_test = utils_cgle.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataloader_train = torch.utils.data.DataLoader(
dataset_train, batch_size=int(config["TRAINING"]['batch_size']), shuffle=True,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
dataloader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=int(config["TRAINING"]['batch_size']), shuffle=False,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
network = lpde.network.Network(config["MODEL"], n_vars=2)
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
model.load_network('test.model')
num_pars = sum(p.numel() for p in model.net.parameters() if p.requires_grad)
print(num_pars)
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(0)+'.pkl', 'rb')
data_unperturbed = pickle.load(pkl_file)
pkl_file.close()
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(-1)+'.pkl', 'rb')
data_perturbed_neg = pickle.load(pkl_file)
pkl_file.close()
prediction = model.integrate_svd(dataset_test, dataset_train.svd, 0, data_unperturbed["T"])
print("Calculating closest distances....")
dists_neg = cdist(np.append(data_perturbed_neg["data"].real, data_perturbed_neg["data"].imag,
axis=1), np.append(
data_unperturbed["data"].real, data_unperturbed["data"].imag, axis=1))
dists_learned = cdist(np.append(prediction[:, 0], prediction[:, 1], axis=1), np.append(
data_unperturbed["data"].real, data_unperturbed["data"].imag, axis=1))
phi_arr = np.linspace(-1, 1, data_unperturbed["N"])
t_off = 0
ax5 = fig.add_subplot(325)
pl5 = ax5.pcolor(phi_arr, data_unperturbed["tt"][::10]+t_off,
prediction[1::10, 0], vmin=-1, vmax=1,
rasterized=True)
ax5.axvline(x=(phi_arr[3]+phi_arr[4])/2, ymin=0, ymax=1, color='white', lw=1)
ax5.axvline(x=(phi_arr[-4]+phi_arr[-5])/2, ymin=0, ymax=1, color='white', lw=1)
ax5.set_xlabel(r'$\phi_1$', labelpad=0)
ax5.set_ylabel(r'$t$', labelpad=0)
ax5.set_xlim((-1, 1))
ax5.set_ylim((data_unperturbed["tmin"]+t_off, data_unperturbed["tmax"]+t_off))
cbar5 = plt.colorbar(pl5)
cbar5.set_label('Re $W$', labelpad=-3)
ax6 = fig.add_subplot(326)
ax6.plot(data_unperturbed["tt"]+t_off, np.min(dists_neg, axis=1)[:-1], label='$d$ true')
ax6.plot(data_unperturbed["tt"]+t_off, np.min(dists_learned, axis=1))
# -*- coding: utf-8 -*-
"""
chebpy.etdrk4
=============
ETDRK4 class and related methods.
"""
import numpy as np
from scipy.linalg import expm, expm2, expm3, inv
from scipy.fftpack import dst, fft, ifft, fft2, ifft2, fftn, ifftn
from scipy.io import loadmat, savemat
from chebpy import BC, DIRICHLET, NEUMANN, ROBIN
from chebpy import cheb_D2_mat_dirichlet_dirichlet, cheb_D2_mat_robin_robin
from chebpy import cheb_D2_mat_dirichlet_robin, cheb_D2_mat_robin_dirichlet
__all__ = ['ETDRK4', # ETDRK4 class
'ETDRK4FxCy', # ETDRK4 Fourier x and Chebyshev y
'ETDRK4FxyCz', # ETDRK4 Fourier x, y, and Chebyshev z
'ETDRK4Polar', # ETDRK4 in polar coordinates, Fourier theta and Chebyshev r
'ETDRK4Cylind', # ETDRK4 in cylindrical coordinates, Fourier theta and z and Chebyshev r
'etdrk4_coeff_nondiag', # complex contour integration
'phi_contour_hyperbolic',
'etdrk4_coeff_contour_hyperbolic',
'etdrk4_coeff_scale_square', # scale and square
'etdrk4_scheme_coxmatthews',
'etdrk4_scheme_krogstad',
'etdrk4_coeff_nondiag_krogstad',
'etdrk4_coeff_contour_hyperbolic_krogstad',
'etdrk4_coeff_scale_square_krogstad',
]
class ETDRK4(object):
def __init__(self, Lx, N, Ns, h=None, c=1.0,
lbc=BC(), rbc=BC(), algo=1, scheme=1):
'''
The PDE is
du/dt = cLu - wu
Here c is a constant, L is a linear operator, and w is a function.
The defaut left BC and right BC are DBCs.
Test: PASSED, 2012, 2013
:param:Lx: physical size of the 1D spatial grid.
:param:Ns: number of grid points in time.
:param:lbc: left boundary condition.
:type:lbc: class BC
:param:rbc: right boundary condition.
:type:rbc: class BC
:param:h: time step.
:param:save_all: whether to save the solution at every time step.
:param:algo: algorithm for calculation of RK4 coefficients.
:param:scheme: RK4 scheme.
'''
self.Lx = Lx
self.N = N
self.Ns = Ns
self.lbc = lbc
self.rbc = rbc
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.c = c
self.algo = algo
self.scheme = scheme
self.update()
def update(self):
self._calc_operator()
self._calc_RK4_coeff()
def _calc_operator(self):
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
D1, L, x = cheb_D2_mat_dirichlet_dirichlet(self.N)
else:
D1, L, x = cheb_D2_mat_dirichlet_robin(self.N,
self.rbc.beta)
else:
if self.rbc.kind == DIRICHLET:
D1, L, x = cheb_D2_mat_robin_dirichlet(self.N,
self.lbc.beta)
else:
D1, L, x = cheb_D2_mat_robin_robin(self.N,
self.lbc.beta,
self.rbc.beta)
self.x = .5 * (x + 1) * self.Lx
self.L = (4. / self.Lx**2) * L # map [0, Lx] onto [-1, 1]
def _calc_RK4_coeff(self):
L = self.c * self.L # the actual operator
h = self.h
c = 1.0
M = 16; R = 15.;
if self.scheme == 0:
if self.algo == 0:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_nondiag(L, h, M, R)
elif self.algo == 1:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_contour_hyperbolic(L, h, M)
elif self.algo == 2:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_scale_square(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
f4 = None; f5 = None; f6 = None
elif self.scheme == 1:
if self.algo == 0:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_nondiag_krogstad(L, h, M, R)
elif self.algo == 1:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_contour_hyperbolic_krogstad(L, h, c, M)
elif self.algo == 2:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_scale_square_krogstad(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
Q = None
else:
raise ValueError('No such ETDRK4 scheme!')
self.E = E
self.E2 = E2
self.Q = Q
self.f1 = f1
self.f2 = f2
self.f3 = f3
self.f4 = f4
self.f5 = f5
self.f6 = f6
def solve(self, w, u0, q=None):
'''
dq/dt = Dq + Wq = Dq - wq
'''
u = u0.copy(); u.shape = (u.size, 1)
E = self.E; E2 = self.E2; Q = self.Q
f1 = self.f1; f2 = self.f2; f3 = self.f3
f4 = self.f4; f5 = self.f5; f6 = self.f6
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
v = u[1:-1]
W = -w[1:-1]; W.shape = (W.size, 1)
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,1:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,1:-1])
else:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[1:-1] = v
else:
v = u[:-1]
W = -w[:-1]; W.shape = (W.size, 1)
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:-1])
else:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:-1] = v
else:
if self.rbc.kind == DIRICHLET:
v = u[1:]
W = -w[1:]; W.shape = (W.size, 1)
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,1:])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,1:])
else:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[1:] = v
else:
v = u
W = -w; W.shape = (W.size, 1)
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3, q)
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q)
else:
v = etdrk4_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u = v
return (u, self.x)
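# Minimal usage sketch for ETDRK4 (illustration under assumptions: default
# Dirichlet BCs on both ends, zero potential w, i.e. pure diffusion; the helper
# name is hypothetical and not used elsewhere in this module).
def _etdrk4_example(Lx=10.0, N=32, Ns=101):
    solver = ETDRK4(Lx, N, Ns)
    x = solver.x.ravel()              # Chebyshev points mapped onto [0, Lx]
    w = np.zeros_like(x)              # w(x) = 0
    u0 = np.sin(np.pi * x / Lx)       # initial condition compatible with DBCs
    u, x_out = solver.solve(w, u0)
    return u, x_out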
class ETDRK4FxCy(object):
def __init__(self, Lx, Ly, Nx, Ny, Ns, h=None, c=1.0,
lbc=BC(), rbc=BC(), algo=1, scheme=1):
'''
The PDE is in 2D,
du/dt = cLu - wu
where u=u(x,y), L=d^2/dx^2 + d^2/dy^2, w=w(x,y), c is a constant.
First, do a FFT in x direction to obtain
du(kx,y)/dt = c L u(kx,y) - Fx[w(x,y)u(x,y)]
where L = D^2 - kx^2, with D^2 the Chebyshev 2-nd order differential matrix,
and kx^2 the d^2/dx^2 in Fourier space, see detail in
the Notebook (page 2013.8.2).
The default left BC and right BC are DBCs.
Test: PASSED 2013.08.09.
Note: Cox-Matthews scheme not tested.
:param:Lx: physical size of the spatial grid in x.
:param:Ly: physical size of the spatial grid in y.
:param:Ns: number of grid points in time.
:param:lbc: left boundary condition.
:type:lbc: class BC
:param:rbc: right boundary condition.
:type:rbc: class BC
:param:h: time step.
:param:save_all: whether to save the solution at every time step.
:param:algo: algorithm for calculation of RK4 coefficients.
:param:scheme: RK4 scheme.
'''
self.Lx = Lx
self.Ly = Ly
self.Nx = Nx
self.Ny = Ny
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.c = c
self.lbc = lbc
self.rbc = rbc
self.algo = algo
self.scheme = scheme
self.update()
def update(self):
Nx = self.Nx
L = self._calc_operator() # the shape of coeff depends on BC
N, N = L.shape
I = np.eye(N)
dim = [Nx, N, N]
self.E = np.zeros(dim)
self.E2 = np.zeros(dim)
self.Q = np.zeros(dim)
self.f1 = np.zeros(dim)
self.f2 = np.zeros(dim)
self.f3 = np.zeros(dim)
self.f4 = np.zeros(dim)
self.f5 = np.zeros(dim)
self.f6 = np.zeros(dim)
for i in xrange(Nx):
if i < Nx/2+1:
kx = i * (2 * np.pi / self.Lx)
else:
kx = (i - Nx) * (2 * np.pi / self.Lx)
k2 = kx**2
#L = self._calc_operator(k2)
self._calc_RK4_coeff(i, L-k2*I)
def _calc_operator(self):
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
D1, L, y = cheb_D2_mat_dirichlet_dirichlet(self.Ny)
else:
D1, L, y = cheb_D2_mat_dirichlet_robin(self.Ny,
self.rbc.beta)
else:
if self.rbc.kind == DIRICHLET:
D1, L, y = cheb_D2_mat_robin_dirichlet(self.Ny,
self.lbc.beta)
else:
D1, L, y = cheb_D2_mat_robin_robin(self.Ny,
self.lbc.beta,
self.rbc.beta)
self.y = .5 * (y + 1) * self.Ly
L = (4. / self.Ly**2) * L # map [0, Lx] onto [-1, 1]
return L
def _calc_RK4_coeff(self, i, L):
L = self.c * L # the actual operator
h = self.h
c = 1.0
M = 32; R = 15.;
if self.scheme == 0:
if self.algo == 0:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_nondiag(L, h, M, R)
elif self.algo == 1:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_contour_hyperbolic(L, h, M)
elif self.algo == 2:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_scale_square(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i] = E[:,:]
self.E2[i] = E2[:,:]
self.Q[i] = Q[:,:]
self.f1[i] = f1[:,:]
self.f2[i] = f2[:,:]
self.f3[i] = f3[:,:]
f4 = None; f5 = None; f6 = None
elif self.scheme == 1:
if self.algo == 0:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_nondiag_krogstad(L, h, M, R)
elif self.algo == 1:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_contour_hyperbolic_krogstad(L, h, c, M)
elif self.algo == 2:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_scale_square_krogstad(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i] = E[:,:]
self.E2[i] = E2[:,:]
Q = None
self.f1[i] = f1[:,:]
self.f2[i] = f2[:,:]
self.f3[i] = f3[:,:]
self.f4[i] = f4[:,:]
self.f5[i] = f5[:,:]
self.f6[i] = f6[:,:]
else:
raise ValueError('No such ETDRK4 scheme!')
def solve(self, w, u0, q=None):
'''
w = w(x,y)
u0 = q(x,y,t=0)
q = q(x,y,t)
'''
u = u0.copy()
E = self.E; E2 = self.E2; Q = self.Q
f1 = self.f1; f2 = self.f2; f3 = self.f3
f4 = self.f4; f5 = self.f5; f6 = self.f6
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
v = u[:,1:-1]
W = -w[:,1:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,1:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,1:-1])
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,1:-1] = v
else:
v = u[:,:-1]
W = -w[:,:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,:-1])
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:-1] = v
else:
if self.rbc.kind == DIRICHLET:
v = u[:,1:]
W = -w[:,1:]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,1:])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,1:])
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,1:] = v
else:
v = u
W = -w
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3, q)
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q)
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u = v
return u
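# Shape convention implied by the slicing in ETDRK4FxCy.solve above: w and u0
# are 2D arrays indexed as [x, y], with Nx uniform (Fourier) grid points along
# the first axis and the Chebyshev grid self.y along the second; Dirichlet
# boundary columns are stripped internally before time stepping.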
class ETDRK4FxyCz(object):
def __init__(self, Lx, Ly, Lz, Nx, Ny, Nz, Ns, h=None, c=1.0,
lbc=BC(), rbc=BC(), algo=1, scheme=1):
'''
The PDE is in 3D,
du/dt = cLu - wu
where u=u(t,x,y,z), L=d^2/dx^2 + d^2/dy^2 + d^2/dz^2, w=w(x,y,z), c is a constant.
First, do a FFT in x and y direction to obtain
du(kx,ky,z)/dt = c L u(kx,ky,z) - Fxy[w(x,y,z)u(t,x,y,z)]
where L = D^2 - (kx^2 + ky^2), with D^2 the Chebyshev 2-nd order
differential matrix with appropriate boundary conditions,
and -kx^2 and -ky^2 are d^2/dx^2 and d^2/dy^2 in Fourier space, see
detail in the Notebook (page 2013.8.2).
The defaut left BC and right BC are DBCs.
Test: None
:param:Lx: physical size of the spatial grid in the x direction.
:param:Ly: physical size of the spatial grid in the y direction.
:param:Lz: physical size of the spatial grid in the z direction.
:param:Ns: number of grid points in time.
:param:lbc: left boundary condition.
:type:lbc: class BC
:param:rbc: right boundary condition.
:type:rbc: class BC
:param:h: time step.
:param:algo: algorithm for calculation of RK4 coefficients.
:param:scheme: RK4 scheme.
'''
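# A minimal usage sketch (hedged: the grid sizes, field shapes, and the call
# sequence below are illustrative assumptions, not values taken from this
# module):
#     solver = ETDRK4FxyCz(Lx=2*np.pi, Ly=2*np.pi, Lz=1.0,
#                          Nx=32, Ny=32, Nz=32, Ns=101)
#     w = np.zeros((32, 32, 33))   # w(x,y,z) on the Nx x Ny x (Nz+1) grid
#     q0 = np.ones((32, 32, 33))   # q(x,y,z,t=0)
#     q1 = solver.solve(w, q0)     # solution propagated to t = 1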
self.Lx = Lx
self.Ly = Ly
self.Lz = Lz
self.Nx = Nx
self.Ny = Ny
self.Nz = Nz
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.c = c
self.lbc = lbc
self.rbc = rbc
self.algo = algo
self.scheme = scheme
self.update()
def update(self):
Nx = self.Nx
Ny = self.Ny
L = self._calc_operator() # the shape of coeff depends on BC
N, N = L.shape # N may be different than Nz+1 because of DBC
I = np.eye(N)
dim = [Nx, Ny, N, N]
self.E = np.zeros(dim)
self.E2 = np.zeros(dim)
self.Q = np.zeros(dim)
self.f1 = np.zeros(dim)
self.f2 = np.zeros(dim)
self.f3 = np.zeros(dim)
self.f4 = np.zeros(dim)
self.f5 = np.zeros(dim)
self.f6 = np.zeros(dim)
for i in xrange(Nx):
for j in xrange(Ny):
if i < Nx/2+1:
kx = i * (2 * np.pi / self.Lx)
else:
kx = (i - Nx) * (2 * np.pi / self.Lx)
if j < Ny/2+1:
ky = j * (2 * np.pi / self.Ly)
else:
ky = (j - Ny) * (2 * np.pi / self.Ly)
k2 = kx**2 + ky**2
#L = self._calc_operator(k2)
self._calc_RK4_coeff(i, j, L-k2*I)
def _calc_operator(self):
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
D1, L, z = cheb_D2_mat_dirichlet_dirichlet(self.Nz)
else:
D1, L, z = cheb_D2_mat_dirichlet_robin(self.Nz,
self.rbc.beta)
else:
if self.rbc.kind == DIRICHLET:
D1, L, z = cheb_D2_mat_robin_dirichlet(self.Nz,
self.lbc.beta)
else:
D1, L, z = cheb_D2_mat_robin_robin(self.Nz,
self.lbc.beta,
self.rbc.beta)
self.z = .5 * (z + 1) * self.Lz
L = (4. / self.Lz**2) * L # map [0, Lz] onto [-1, 1]
return L
def _calc_RK4_coeff(self, i, j, L):
L = self.c * L # the actual operator
h = self.h
c = 1.0
M = 32; R = 15.;
if self.scheme == 0:
if self.algo == 0:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_nondiag(L, h, M, R)
elif self.algo == 1:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_contour_hyperbolic(L, h, M)
elif self.algo == 2:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_scale_square(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i,j] = E[:,:]
self.E2[i,j] = E2[:,:]
self.Q[i,j] = Q[:,:]
self.f1[i,j] = f1[:,:]
self.f2[i,j] = f2[:,:]
self.f3[i,j] = f3[:,:]
f4 = None; f5 = None; f6 = None
elif self.scheme == 1:
if self.algo == 0:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_nondiag_krogstad(L, h, M, R)
elif self.algo == 1:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_contour_hyperbolic_krogstad(L, h, c, M)
elif self.algo == 2:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_scale_square_krogstad(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i,j] = E[:,:]
self.E2[i,j] = E2[:,:]
Q = None
self.f1[i,j] = f1[:,:]
self.f2[i,j] = f2[:,:]
self.f3[i,j] = f3[:,:]
self.f4[i,j] = f4[:,:]
self.f5[i,j] = f5[:,:]
self.f6[i,j] = f6[:,:]
else:
raise ValueError('No such ETDRK4 scheme!')
def solve(self, w, u0, q=None):
'''
w = w(x,y,z)
u0 = q(t=0,x,y,z)
q = q(t,x,y,z)
'''
u = u0.copy()
E = self.E; E2 = self.E2; Q = self.Q
f1 = self.f1; f2 = self.f2; f3 = self.f3
f4 = self.f4; f5 = self.f5; f6 = self.f6
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
v = u[:,:,1:-1]
W = -w[:,:,1:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,:,1:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,:,1:-1])
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:,1:-1] = v
else:
v = u[:,:,:-1]
W = -w[:,:,:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,:,:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,:,:-1])
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:,:-1] = v
else:
if self.rbc.kind == DIRICHLET:
v = u[:,:,1:]
W = -w[:,:,1:]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,:,1:])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,:,1:])
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:,1:] = v
else:
v = u
W = -w
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3, q)
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q)
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u = v
return u
class ETDRK4Polar(object):
def __init__(self, R, Nr, Nt, Ns, h=None, c=1.0,
lbc=BC(), rbc=BC(), algo=1, scheme=1):
'''
The PDE is in polar coordinates,
du/dt = cLu - wu
where u=u(r,theta), L=d^2/dr^2 + (1/r)d/dr + (1/r^2)d^2/dtheta^2,
w=w(r,theta), c is a constant. Domain is
theta [0, 2*pi]
r [0, R]
First, do an FFT along the theta axis to obtain
du(r,kt)/dt = c L u(r,kt) - Ft[w(r,theta)u(r,theta)]
where L = d^2/dr^2 + (1/r)d/dr - (1/r^2)kt^2*I
See details in the Notebook (page 2013.8.15).
The default left BC and right BC are RBCs.
Test: PASSED 2013.8.15.
:param:R: physical size of the disk.
:param:Nr: r axis discretization, 0, 1, 2, ..., Nr. Nr must be ODD.
:param:Nt: theta axis discretization, 0, 1, 2, ..., Nt-1
:param:Ns: number of grid points in time.
:param:lbc: left boundary condition.
:type:lbc: class BC
:param:rbc: right boundary condition.
:type:rbc: class BC
:param:h: time step.
:param:algo: algorithm for calculation of RK4 coefficients.
:param:scheme: RK4 scheme.
'''
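# A minimal usage sketch (hedged: the sizes and field shapes below are
# illustrative; see the solve() docstring for the (theta, r) ordering and the
# half radial grid of (Nr+1)/2 points):
#     disk = ETDRK4Polar(R=1.0, Nr=31, Nt=64, Ns=101)
#     w = np.zeros((64, 16))   # w(theta, r), 16 = (Nr+1)/2 radial points
#     q0 = np.ones((64, 16))
#     q1 = disk.solve(w, q0)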
self.R = R
self.Nr = Nr
self.Nt = Nt
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.c = c
self.lbc = lbc
self.rbc = rbc
self.algo = algo
self.scheme = scheme
self.update()
def update(self):
Nt = self.Nt
L = self._calc_operator() # the shape of coeff depends on BC
N, N = L.shape
dim = [Nt, N, N]
self.E = np.zeros(dim)
self.E2 = np.zeros(dim)
self.Q = np.zeros(dim)
self.f1 = np.zeros(dim)
self.f2 = np.zeros(dim)
self.f3 = np.zeros(dim)
self.f4 = np.zeros(dim)
self.f5 = np.zeros(dim)
self.f6 = np.zeros(dim)
for i in xrange(Nt):
if i < Nt/2+1:
kt = i
else:
kt = i - Nt
# R**(-2) for mapping from [0,R] to [0,1]
Lk = (L - np.diag((kt/self.r)**2)) / self.R**2
self._calc_RK4_coeff(i, Lk)
def _calc_operator(self):
'''
Currently, only symmetric boundary conditions are allowed, that is
DBC-DBC
RBC-RBC (including the special case NBC)
'''
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
D1t, D2t, r = cheb_D2_mat_dirichlet_dirichlet(self.Nr)
r = r[1:-1]
else:
D1t, D2t, r = cheb_D2_mat_dirichlet_robin(self.Nr,
self.rbc.beta)
r = r[:-1]
else:
if self.rbc.kind == DIRICHLET:
D1t, D2t, r = cheb_D2_mat_robin_dirichlet(self.Nr,
self.lbc.beta)
r = r[1:]
else:
D1t, D2t, r = cheb_D2_mat_robin_robin(self.Nr,
self.lbc.beta,
self.rbc.beta)
N, N = D2t.shape # N should be either Nr+1 or Nr-1
self.r = r[:N/2].reshape(N/2) # reshape to vector
D1 = D2t[:N/2,:N/2]
D2 = D2t[:N/2,N-1:N/2-1:-1]
E1 = D1t[:N/2,:N/2]
E2 = D1t[:N/2,N-1:N/2-1:-1]
MR = np.diag(1/self.r)
#print self.r.shape, D1.shape, D2.shape, E1.shape, E2.shape, MR.shape
L = (D1 + D2) + (np.dot(MR,E1) + np.dot(MR,E2))
return L
def _calc_RK4_coeff(self, i, L):
L = self.c * L # the actual operator
h = self.h
c = 1.0
M = 32; R = 15.;
if self.scheme == 0:
if self.algo == 0:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_nondiag(L, h, M, R)
elif self.algo == 1:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_contour_hyperbolic(L, h, M)
elif self.algo == 2:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_scale_square(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i] = E[:,:]
self.E2[i] = E2[:,:]
self.Q[i] = Q[:,:]
self.f1[i] = f1[:,:]
self.f2[i] = f2[:,:]
self.f3[i] = f3[:,:]
f4 = None; f5 = None; f6 = None
elif self.scheme == 1:
if self.algo == 0:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_nondiag_krogstad(L, h, M, R)
elif self.algo == 1:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_contour_hyperbolic_krogstad(L, h, c, M)
elif self.algo == 2:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_scale_square_krogstad(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i] = E[:,:]
self.E2[i] = E2[:,:]
Q = None
self.f1[i] = f1[:,:]
self.f2[i] = f2[:,:]
self.f3[i] = f3[:,:]
self.f4[i] = f4[:,:]
self.f5[i] = f5[:,:]
self.f6[i] = f6[:,:]
else:
raise ValueError('No such ETDRK4 scheme!')
def solve(self, w, u0, q=None):
'''
w = w(theta, r)
u0 = q(theta, r, t=0)
q = q(theta, r, t)
for r in (0, R] and t in [0,1].
Discretization form:
w(i,j), u0(i,j), q(i,j,t)
i in [0, Nt-1]
j in [0, (Nr+1)/2]
t in [0, Ns]
The particular order of theta, r is chosen to be compatible with
etdrk4fxcy_scheme_krogstad
which performs the FFT along the first dimension.
'''
u = u0.copy()
E = self.E; E2 = self.E2; Q = self.Q
f1 = self.f1; f2 = self.f2; f3 = self.f3
f4 = self.f4; f5 = self.f5; f6 = self.f6
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
v = u[:,1:-1]
W = -w[:,1:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,1:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,1:-1])
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,1:-1] = v
else: # not allowed in current implementation.
v = u[:,:-1]
W = -w[:,:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,:-1])
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:-1] = v
else: # not allowed in current implementation.
if self.rbc.kind == DIRICHLET:
v = u[:,1:]
W = -w[:,1:]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,1:])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,1:])
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,1:] = v
else:
v = u
W = -w
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3, q)
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q)
else:
v = etdrk4fxcy_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u = v
return u
class ETDRK4Cylind(object):
def __init__(self, R, Lz, Nr, Nt, Nz, Ns, h=None, c=1.0,
lbc=BC(), rbc=BC(), algo=1, scheme=1):
'''
The PDE is in cylindrical coordinates,
du/dt = cLu - wu
where u=u(r,theta, z), L=d^2/dr^2 + (1/r)d/dr + (1/r^2)d^2/dtheta^2 +
d^2/dz^2,
w=w(r,theta, z), c is a constant. Domain is
theta [0, 2*pi]
r [0, R]
z [0, Lz]
First, do FFTs along the theta and z axes to obtain
du(r,kt, kz)/dt = c L u(r,kt, kz) - Ftz[w(r,theta, z)u(r,theta, z)]
where L = d^2/dr^2 + (1/r)d/dr - (1/r^2)kt^2 - kz^2
See details in the Notebook (page 2013.8.16).
The default left BC and right BC are RBCs.
Test: None.
:param:R: physical size of the radius of the cylinder.
:param:Lz: physical size of the length of the cylinder.
:param:Nr: r axis discretization, 0, 1, 2, ..., Nr. Nr must be ODD.
:param:Nt: theta axis discretization, 0, 1, 2, ..., Nt-1
:param:Nz: z axis discretization, 0, 1, 2, ..., Nz-1
:param:Ns: number of grid points in time.
:param:lbc: left boundary condition.
:type:lbc: class BC
:param:rbc: right boundary condition.
:type:rbc: class BC
:param:h: time step.
:param:algo: algorithm for calculation of RK4 coefficients.
:param:scheme: RK4 scheme.
'''
self.R = R
self.Lz = Lz
self.Nr = Nr
self.Nt = Nt
self.Nz = Nz
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.c = c
self.lbc = lbc
self.rbc = rbc
self.algo = algo
self.scheme = scheme
self.update()
def update(self):
Nt = self.Nt
Nz = self.Nz
L = self._calc_operator() # the shape of coeff depends on BC
N, N = L.shape
I = np.eye(N)
dim = [Nt, Nz, N, N]
self.E = np.zeros(dim)
self.E2 = np.zeros(dim)
self.Q = np.zeros(dim)
self.f1 = np.zeros(dim)
self.f2 = np.zeros(dim)
self.f3 = np.zeros(dim)
self.f4 = np.zeros(dim)
self.f5 = np.zeros(dim)
self.f6 = np.zeros(dim)
for i in xrange(Nt):
for j in xrange(Nz):
if i < Nt/2+1:
kt = i
else:
kt = i - Nt
if j < Nz/2+1:
kz = j
else:
kz = j - Nz
# R**(-2) for mapping from [0,R] to [0,1]
Lk = (L-np.diag((kt/self.r)**2))/self.R**2
Lk -= I*kz**2/self.Lz**2
self._calc_RK4_coeff(i, j, Lk)
def _calc_operator(self):
'''
Currently, only symmetric boundary conditions are allowed, that is
DBC-DBC
RBC-RBC (including the special case NBC)
'''
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
D1t, D2t, r = cheb_D2_mat_dirichlet_dirichlet(self.Nr)
r = r[1:-1]
else:
D1t, D2t, r = cheb_D2_mat_dirichlet_robin(self.Nr,
self.rbc.beta)
r = r[:-1]
else:
if self.rbc.kind == DIRICHLET:
D1t, D2t, r = cheb_D2_mat_robin_dirichlet(self.Nr,
self.lbc.beta)
r = r[1:]
else:
D1t, D2t, r = cheb_D2_mat_robin_robin(self.Nr,
self.lbc.beta,
self.rbc.beta)
N, N = D2t.shape # N should be either Nr+1 or Nr-1
self.r = r[:N/2].reshape(N/2) # reshape to vector
D1 = D2t[:N/2,:N/2]
D2 = D2t[:N/2,N-1:N/2-1:-1]
E1 = D1t[:N/2,:N/2]
E2 = D1t[:N/2,N-1:N/2-1:-1]
MR = np.diag(1/self.r)
#print self.r.shape, D1.shape, D2.shape, E1.shape, E2.shape, MR.shape
L = (D1 + D2) + (np.dot(MR,E1) + np.dot(MR,E2))
return L
def _calc_RK4_coeff(self, i, j, L):
L = self.c * L # the actual operator
h = self.h
c = 1.0
M = 32; R = 15.;
if self.scheme == 0:
if self.algo == 0:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_nondiag(L, h, M, R)
elif self.algo == 1:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_contour_hyperbolic(L, h, M)
elif self.algo == 2:
E, E2, Q, f1, f2, f3 = \
etdrk4_coeff_scale_square(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i,j] = E[:,:]
self.E2[i,j] = E2[:,:]
self.Q[i,j] = Q[:,:]
self.f1[i,j] = f1[:,:]
self.f2[i,j] = f2[:,:]
self.f3[i,j] = f3[:,:]
f4 = None; f5 = None; f6 = None
elif self.scheme == 1:
if self.algo == 0:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_nondiag_krogstad(L, h, M, R)
elif self.algo == 1:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_contour_hyperbolic_krogstad(L, h, c, M)
elif self.algo == 2:
E, E2, f1, f2, f3, f4, f5, f6 = \
etdrk4_coeff_scale_square_krogstad(L, h)
else:
raise ValueError('No such ETDRK4 coefficient algorithm!')
self.E[i,j] = E[:,:]
self.E2[i,j] = E2[:,:]
Q = None
self.f1[i,j] = f1[:,:]
self.f2[i,j] = f2[:,:]
self.f3[i,j] = f3[:,:]
self.f4[i,j] = f4[:,:]
self.f5[i,j] = f5[:,:]
self.f6[i,j] = f6[:,:]
else:
raise ValueError('No such ETDRK4 scheme!')
def solve(self, w, u0, q=None):
'''
w = w(theta, z, r)
u0 = q(theta, z, r, t=0)
q = q(theta, z, r, t)
for r in (0, R], z in [0, Lz], theta in [0, 2pi], and t in [0,1].
Discretization form:
w(i,j, k), u0(i,j, k), q(i,j,k,t)
i in [0, Nt-1]
j in [0, Nz-1]
k in [0, (Nr+1)/2]
t in [0, Ns]
The particular order of theta, z, r is chosen to be compatible with
etdrk4fxycz_scheme_krogstad
which performs the FFT along the first two dimensions.
'''
u = u0.copy()
E = self.E; E2 = self.E2; Q = self.Q
f1 = self.f1; f2 = self.f2; f3 = self.f3
f4 = self.f4; f5 = self.f5; f6 = self.f6
if self.lbc.kind == DIRICHLET:
if self.rbc.kind == DIRICHLET:
v = u[:,:,1:-1]
W = -w[:,:,1:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,1:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,1:-1])
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:,1:-1] = v
else: # not allowed in current implementation.
v = u[:,:,:-1]
W = -w[:,:,:-1]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,:-1])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,:-1])
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:,:-1] = v
else: # not allowed in current implementation.
if self.rbc.kind == DIRICHLET:
v = u[:,:,1:]
W = -w[:,:,1:]
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v, E, E2,
Q, f1, f2, f3, q[:,:,1:])
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q[:,:,1:])
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u[:,:,1:] = v
else:
v = u
W = -w
if self.scheme == 0:
if q is not None:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3, q)
else:
v = etdrk4_scheme_coxmatthews(self.Ns, W, v,
E, E2, Q, f1, f2, f3)
else:
if q is not None:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6, q)
else:
v = etdrk4fxycz_scheme_krogstad(self.Ns, W, v,
E, E2, f1, f2, f3,
f4, f5, f6)
u = v
return u
def etdrk4_coeff_nondiag(L, h, M=32, R=1.0):
'''
Evaluate the coefficients E, E2, Q, f1, f2, f3 of ETDRK4 for the
non-diagonal (full matrix) case via contour integration.
'''
A = h * L
N, N = L.shape
I = np.eye(N)
E = expm(A)
E2 = expm(A/2)
theta = np.linspace(.5/M, 1-.5/M, M) * np.pi
r = R * np.exp(1j * theta)
Z = 1j * np.zeros((N, N))
f1 = Z.copy(); f2 = Z.copy(); f3 = Z.copy(); Q = Z.copy()
for j in xrange(M):
z = r[j]
zIA = inv(z * I - A)
zIAz2 = zIA / z**2
Q += zIA * (np.exp(z/2) - 1)
f1 += zIAz2 * (-4 - z + np.exp(z) * (4 - 3*z + z**2))
f2 += zIAz2 * (2 + z + np.exp(z) * (z - 2))
f3 += zIAz2 * (-4 - 3*z - z*z + np.exp(z) * (4 - z))
f1 = (h/M) * np.real(f1)
f2 = 2 * (h/M) * np.real(f2)
f3 = (h/M) * np.real(f3)
Q = (h/M) * np.real(Q)
return (E, E2, Q, f1, f2, f3)
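# Interpretation of the returned quantities (hedged; this follows the standard
# Cox-Matthews ETDRK4 construction, which the integrands above appear to
# implement): E = exp(h*L) and E2 = exp(h*L/2) are the full- and half-step
# propagators, Q = L^{-1}(exp(h*L/2) - 1) is the weight applied to the
# nonlinear term in the two half steps, and f1, f2, f3 are the three
# stage-combination weights of the final update. Each is obtained by averaging
# the resolvent (z*I - h*L)^{-1}, weighted by the corresponding scalar
# function, over M points on the upper half of a circle of radius R; taking
# the real part exploits conjugate symmetry, so this is effectively a
# full-circle trapezoidal rule for the Cauchy integral.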
def etdrk4_coeff_nondiag_krogstad(L, h, M=32, R=1.0):
pass
def phi_contour_hyperbolic_old(z, l=0, M=32):
'''
Evaluate phi_l(h*L) using a complex contour integral method with a hyperbolic contour.
phi_l(z) = [phi_{l-1}(z) - phi_{l-1}(0)] / z, with
phi_0(z) = exp(z)
For example:
phi_1(z) = [exp(z) - 1] / z
phi_2(z) = [exp(z) - z - 1] / z^2
phi_3(z) = [exp(z) - z^2/2 - z - 1] / z^3
'''
N, N = z.shape
I = np.eye(N)
phi = 1j * np.zeros((N,N))
#theta = np.pi * (2. * np.arange(M+1) / M - 1.)
theta = np.pi * ((2. * np.arange(M) + 1) / M - 1.)
u = 1.0818 * theta / np.pi
mu = 0.5 * 4.4921 * M
alpha = 1.1721
s = mu * (1 - np.sin(alpha - u*1j))
v = np.cos(alpha - u*1j)
if l == 0:
c = np.exp(s) * v
else:
c = np.exp(s) * v / (s)**l
for k in np.arange(M):
sIA = inv(s[k] * I - z)
phi += c[k] * sIA
return np.real((0.5 * 4.4921 * 1.0818 / np.pi) * phi)
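# The recursion quoted in the docstring above also admits a direct scalar
# evaluation, which is handy for spot-checking the contour quadrature on
# scalar arguments. This helper is a minimal illustrative sketch (not used by
# the solvers above); it assumes z != 0 and uses phi_{l-1}(0) = 1/(l-1)!.
def phi_scalar_reference(z, l=0):
    '''Scalar phi_l(z) from phi_0(z) = exp(z) and
    phi_l(z) = [phi_{l-1}(z) - phi_{l-1}(0)] / z.'''
    from math import factorial
    phi = np.exp(z)
    for k in range(1, l + 1):
        phi = (phi - 1.0 / factorial(k - 1)) / z
    return phi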
def phi_contour_hyperbolic(A, t=1, l=0, M=16):
'''
Evaluate \phi_l(tA) using a complex contour integral method with a hyperbolic contour.
See my Notebook page 2013.07.05.
phi_l(z) = [phi_{l-1}(z) - phi_{l-1}(0)] / z, with
phi_0(z) = exp(z)
For example:
phi_1(z) = [exp(z) - 1] / z
phi_2(z) = [exp(z) - z - 1] / z^2
phi_3(z) = [exp(z) - z^2/2 - z - 1] / z^3
Note:
M = 16 appears to be optimal here; larger M makes the error saturate or
even increase (2014.5.26, from experimental studies and REF 1 & 2).
REF:
1. <NAME>.; <NAME>. Electronic Transactions on Numerical
Analysis, 2007, 29, 1-18.
2. <NAME>.; <NAME>. Mathematics of Computation,
2007, 76, 1341-1356.
'''
N, N = A.shape
I = np.eye(N)  # api: numpy.eye
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import ClassyVirtualReferencePoint as ClassyVirtualReferencePoint
import ransac
#imports from AoisDefiner
import tkinter as tk
from tkinter import Label,Tk
from PIL import Image, ImageTk
import PIL.Image
import json
from tkinter import messagebox
from tkinter import filedialog
from tkinter import *
import csv
from pyModelChecking.LTL import *
from pyModelChecking import *
import time
# set doTraining = False to display debug graphics:
# You should do this first. There should be a green line from your
# forehead to one pupil; the end of the line is the estimate of pupil position. The blue
# circles should generally track your pupils, though less reliably than the green line.
# If performance is bad, you can tweak the "TUNABLE PARAMETER" lines. (This is a big
# area where improvement is needed; probably some learning of parameters.)
# Set True to run the main program:
# You click where you're looking, and, after around 10-20 such clicks,
# the program will learn the correspondence and start drawing a blue blur
# where you look. It's important to keep your head still (in position AND angle)
# or it won't work.
doTraining = False
aois = []
def featureCenter(f):
return (.5*(f.mExtents[0]+f.mExtents[1]),.5*(f.mExtents[2]+f.mExtents[3]) )
# returns center in form (y,x)
def featureCenterXY(rect):
#eyes are arrays of the form [minX, minY, maxX, maxY]
return (.5*(rect[0]+rect[2]), .5*(rect[1]+rect[3]))
def centeredBox(feature1, feature2, boxWidth, boxHeight, yOffsetToAdd = 0):
f1 = np.array(featureCenterXY(feature1))
f2 = np.array(featureCenterXY(feature2))
center = (f1[:]+f2[:])/2
center[1] += yOffsetToAdd
offset = np.array([boxWidth/2,boxHeight/2])
return np.concatenate( (center-offset, center+offset) )
def contains(outerFeature, innerFeature):
p = featureCenterXY(innerFeature)
#eyes are arrays of the form [minX, minY, maxX, maxY]
return p[0] > outerFeature[0] and p[0] < outerFeature[2] and p[1] > outerFeature[1] and p[1] < outerFeature[3]
def containsPoint(outerFeature, p):
#eyes are arrays of the form [minX, minY, maxX, maxY]
return p[0] > outerFeature[0] and p[0] < outerFeature[2] and p[1] > outerFeature[1] and p[1] < outerFeature[3]
# Takes an ndarray of face rects, and an ndarray of eye rects.
# Returns the first eyes that are inside the face but not inside each other.
# Eyes are returned as the tuple (leftEye, rightEye)
def getLeftAndRightEyes(faces, eyes):
#loop through detected faces. We'll do our processing on the first valid one.
if len(eyes)==0:
return ()
for face in faces:
for i in range(eyes.shape[0]):
for j in range(i+1,eyes.shape[0]):
leftEye = eyes[i] #by left I mean camera left
rightEye = eyes[j]
#eyes are arrays of the form [minX, minY, maxX, maxY]
if (leftEye[0]+leftEye[2]) > (rightEye[0]+rightEye[2]): #leftCenter is > rightCenter
rightEye, leftEye = leftEye, rightEye #swap
if contains(leftEye,rightEye) or contains(rightEye, leftEye):#they overlap. One eye containing another is due to a double detection; ignore it
debugPrint('rejecting double eye')
continue
if leftEye[3] < rightEye[1] or rightEye[3] < leftEye[1]:#top of one is below (>) bottom of the other. One is likely a mouth or something, not an eye.
debugPrint('rejecting non-level eyes')
continue
## if leftEye.minY()>face.coordinates()[1] or rightEye.minY()>face.coordinates()[1]: #top of eyes in top 1/2 of face
## continue;
if not (contains(face,leftEye) and contains(face,rightEye)):#face contains the eyes. This is our standard of humanity, so capture the face.
debugPrint("face doesn't contain both eyes")
continue
return (leftEye, rightEye)
return ()
verbose=True
def debugPrint(s):
if verbose:
print(s)
showMainImg=True;
def debugImg(arr):
global showMainImg
showMainImg=False;
toShow = cv2.resize((arr-arr.min())*(1.0/(arr.max()-arr.min())),(0,0), fx=8,fy=8,interpolation=cv2.INTER_NEAREST)
cv2.imshow(WINDOW_NAME,toShow)
# displays data that is stored in a sparse format. Uses the coords to draw the corresponding
# element of the vector, on a blank image of dimensions shapeToCopy
def debugImgOfVectors(vectorToShow, gradXcoords, gradYcoords, shapeToCopy):
img = np.zeros(shapeToCopy)
for i,gradXcoord in enumerate(gradXcoords):
img[gradYcoords[i]][gradXcoord] = vectorToShow[i]
debugImg(img)
BLOWUP_FACTOR = 1 # Resizes image before doing the algorithm. Changing to 2 makes things really slow. So nevermind on this.
RELEVANT_DIST_FOR_CORNER_GRADIENTS = 8*BLOWUP_FACTOR
dilationWidth = 1+2*BLOWUP_FACTOR #must be an odd number
dilationHeight = 1+2*BLOWUP_FACTOR #must be an odd number
dilationKernel = np.ones((dilationHeight,dilationWidth),'uint8')
writeEyeDebugImages = False #enable to export image files showing pupil center probability
eyeCounter = 0
# Returns (cy,cx) of the pupil center, where y is down and x is right. You should pass in a grayscale Cv2 image which
# is closely cropped around the center of the eye (using the Haar cascade eye detector)
def getPupilCenter(gray, getRawProbabilityImage=False):
## (scleraY, scleraX) = np.unravel_index(gray.argmax(),gray.shape)
## scleraColor = colors[scleraY,scleraX,:]
## img[scleraX,scleraY] = (255,0,0)
## img.colorDistance(skinColor[:]).save(disp)
## img.edges().save(disp)
## print skinColor, scleraColor
gray = gray.astype('float32')
if BLOWUP_FACTOR != 1:
gray = cv2.resize(gray, (0,0), fx=BLOWUP_FACTOR, fy=BLOWUP_FACTOR, interpolation=cv2.INTER_LINEAR)
IRIS_RADIUS = gray.shape[0]*.75/2 #conservative-large estimate of iris radius TODO: make this a tracked parameter--pass a prior-probability of radius based on last few iris detections. TUNABLE PARAMETER
#debugImg(gray)
dxn = cv2.Sobel(gray,cv2.CV_32F,1,0,ksize=3) #optimization opportunity: blur the image once, then just subtract 2 pixels in x and 2 in y. Should be equivalent.
dyn = cv2.Sobel(gray,cv2.CV_32F,0,1,ksize=3)
magnitudeSquared = np.square(dxn)+np.square(dyn)
# ########### Pupil finding
magThreshold = magnitudeSquared.mean()*.6 #only retain high-magnitude gradients. <-- VITAL TUNABLE PARAMETER
# The value of this threshold is critical for good performance.
# todo: adjust this threshold using more images. Maybe should train our tuned parameters.
# form a bool array, unrolled columnwise, which can index into the image.
# we will only use gradients whose magnitude is above the threshold, and
# (optionally) where the gradient direction meets characteristics such as being more horizontal than vertical.
gradsTouse = (magnitudeSquared>magThreshold) & (np.abs(4*dxn)>np.abs(dyn))
lengths = np.sqrt(magnitudeSquared[gradsTouse]) #this converts us to double format
gradDX = np.divide(dxn[gradsTouse],lengths) #unrolled columnwise
gradDY = np.divide(dyn[gradsTouse],lengths)
## debugImg(gradsTouse*255)
## ksize = 7 #kernel size = x width and y height of the filter
## sigma = 4
## blurredGray = cv2.GaussianBlur(gray, (ksize,ksize), sigma, borderType=cv2.BORDER_REPLICATE)
## debugImg(gray)
## blurredGray = cv2.blur(gray, (ksize,ksize)) #x width and y height. TODO: try alternately growing and eroding black instead of blurring?
#isDark = blurredGray < blurredGray.mean()
isDark = gray< (gray.mean()*.8) #<-- TUNABLE PARAMETER
global dilationKernel
isDark = cv2.dilate(isDark.astype('uint8'), dilationKernel) #dilate so reflection goes dark too
## isDark = cv2.erode(isDark.astype('uint8'), dilationKernel)
## debugImg(isDark*255)
gradXcoords =np.tile( np.arange(dxn.shape[1]), [dxn.shape[0], 1])[gradsTouse] # build arrays holding the original x,y position of each gradient in the list.
gradYcoords =np.tile( np.arange(dxn.shape[0]), [dxn.shape[1], 1]).T[gradsTouse] # These lines are probably an optimization target for later.
minXForPupil = 0 #int(dxn.shape[1]*.3)
## #original method
## centers = np.array([[phi(cx,cy,gradDX,gradDY,gradXcoords,gradYcoords) if isDark[cy][cx] else 0 for cx in range(dxn.shape[1])] for cy in range(dxn.shape[0])])
#histogram method
centers = np.array([[phiWithHist(cx,cy,gradDX,gradDY,gradXcoords,gradYcoords, IRIS_RADIUS) if isDark[cy][cx] else 0 for cx in range(minXForPupil,dxn.shape[1])] for cy in range(dxn.shape[0])]).astype('float32')
# display outputs for debugging
## centers = np.array([[phiTest(cx,cy,gradDX,gradDY,gradXcoords,gradYcoords) for cx in range(dxn.shape[1])] for cy in range(dxn.shape[0])])
## debugImg(centers)
maxInd = centers.argmax()
(pupilCy,pupilCx) = np.unravel_index(maxInd, centers.shape)
pupilCx += minXForPupil
pupilCy /= BLOWUP_FACTOR
pupilCx /= BLOWUP_FACTOR
if writeEyeDebugImages:
global eyeCounter
eyeCounter = (eyeCounter+1)%5 #write debug image every 5th frame
if eyeCounter == 1:
cv2.imwrite( "eyeGray.png", gray/gray.max()*255) #write probability images for our report
cv2.imwrite( "eyeIsDark.png", isDark*255)
cv2.imwrite( "eyeCenters.png", centers/centers.max()*255)
if getRawProbabilityImage:
return (pupilCy, pupilCx, centers)
else:
return (pupilCy, pupilCx)
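# A minimal usage sketch for getPupilCenter (hedged: the crop variables below
# are illustrative; eyeRect is assumed to be [minX, minY, maxX, maxY] from the
# Haar eye detector, as used elsewhere in this file):
#     eyeGray = frameGray[eyeRect[1]:eyeRect[3], eyeRect[0]:eyeRect[2]]
#     cy, cx = getPupilCenter(eyeGray)
# The returned (cy, cx) are in the cropped image's coordinates; add the crop
# offset to map them back into the full frame.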
lastCornerProb = np.ones([1,1])
#This was a failed attempt to find eye corners, not used in final version.
# Returns (cy,cx) of the eye corner, where y is down and x is right. You should pass in a grayscale Cv2 image which
# is closely cropped around the corner of the eye (using the Haar cascade eye detector)
def getEyeCorner(gray):
## (scleraY, scleraX) = np.unravel_index(gray.argmax(),gray.shape)
## scleraColor = colors[scleraY,scleraX,:]
## img[scleraX,scleraY] = (255,0,0)
## img.colorDistance(skinColor[:]).save(disp)
## img.edges().save(disp)
## print skinColor, scleraColor
if BLOWUP_FACTOR != 1:
gray = cv2.resize(gray, (0,0), fx=BLOWUP_FACTOR, fy=BLOWUP_FACTOR, interpolation=cv2.INTER_LINEAR)
gray = gray.astype('float32')
#debugImg(gray)
dxn = cv2.Sobel(gray,cv2.CV_32F,1,0,ksize=3) #optimization opportunity: blur the image once, then just subtract 2 pixels in x and 2 in y. Should be equivalent.
dyn = cv2.Sobel(gray,cv2.CV_32F,0,1,ksize=3)
magnitudeSquared = np.square(dxn)+np.square(dyn)
## debugImg(np.sqrt(magnitudeSquared))
# ########### Eye corner finding. TODO: limit gradients to search area
rangeOfXForCorner = int(dxn.shape[1]/2)
magThreshold = magnitudeSquared.mean()*.5 #only retain high-magnitude gradients. todo: adjust this threshold using more images. Maybe should train our tuned parameters.
# form a bool array, unrolled columnwise, which can index into the image.
# we will only use gradients whose magnitude is above the threshold, and
# (optionally) where the gradient direction meets characteristics such as being more horizontal than vertical.
gradsTouse = (magnitudeSquared>magThreshold) & (np.abs(2*dyn)  # api: numpy.abs (the rest of this line is not present in the source)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import string
def plot_mean(mean, labels, start_index=1):
fig, ax = plt.subplots(figsize=(9,6))
x_dim = list(range(start_index,start_index+mean.shape[0]))
for i in range(mean.shape[1]):
ax.plot(x_dim, mean[:,i] , label=labels[i])
title = "Log marginal likelihood vs. rank in trigram data"
ax.set_title(title, size=15)
ax.set_ylabel("Log marginal likelihood",size=13)
ax.set_xlabel("Rank",size=12)
ax.legend()
fig.savefig(title.replace(" ", "_").lower()+".pdf", format="pdf")
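# A minimal usage sketch (hedged: the array and method labels below are
# illustrative; plot_mean expects one row per rank and one column per method):
#     mean = np.random.randn(21, 4).cumsum(axis=0)
#     plot_mean(mean, labels=["SIS", "ARS", "PARS", "VB"], start_index=1)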
def plot_variance(mean, variance):
fig, ax = plt.subplots(figsize=(9,6))
x_dim = list(range(1,102,5))
ax.plot(x_dim, 2*variance[:,0] - 2*mean[:,0] , label="SIS")
ax.plot(x_dim, 2*variance[:,1] - 2*mean[:,1] , label="ARS")
ax.plot(x_dim, 2*variance[:,2] - 2*mean[:,2], label="PARS")
ax.plot(x_dim, 2*variance[:,3] - 2*mean[:,3], label="VB")
ax.set_title("Relative variance vs. Rank")
ax.set_ylabel("Relative variance")
ax.set_xlabel("Rank")
ax.legend()
def plot_mean_with_variance_around(mean, variance):
fig, ax = plt.subplots(figsize=(9,6))
x_dim = list(range(1,102,5))
ax.plot(x_dim, mean[:,0] , label="SIS")
upper_0 = - 2 * variance[:,0] + 3*mean[:,0]
lower_0 = + 2*variance[:,0] - mean[:,0]
ax.fill_between(x_dim, upper_0, lower_0, alpha=0.15)
ax.plot(x_dim, mean[:,1] , label="APF")
upper_1 = - 2 * variance[:,1] + 3*mean[:,1]
lower_1 = + 2*variance[:,1] - mean[:,1]
ax.fill_between(x_dim, upper_1, lower_1, alpha=0.15)
ax.plot(x_dim, mean[:,2], label="Patient APF")
upper_2 = - 2 * variance[:,2] + 3*mean[:,2]
lower_2 = + 2*variance[:,2] - mean[:,2]
ax.fill_between(x_dim, upper_2, lower_2, alpha=0.15)
ax.plot(x_dim, mean[:,3], label="VB")
upper_3 = - 2 * variance[:,3] + 3*mean[:,3]
lower_3 = + 2*variance[:,3] - mean[:,3]
ax.fill_between(x_dim, upper_3, lower_3, alpha=0.15)
ax.set_title("Log marginal likelihood vs. Rank")
ax.set_ylabel("Log marginal likelihood")
ax.set_xlabel("Rank")
ax.legend()
def plot_ess(ess):
fig, ax = plt.subplots(figsize=(9,6))
x_dim = list(range(1,102,5))
ax.plot(x_dim, ess[:,0], label="SIS")
ax.plot(x_dim, ess[:,1], label="ARS")
ax.plot(x_dim, ess[:,2], label="PARS")
ax.set_title("ESS vs. Rank")
ax.set_ylabel("ESS")
ax.set_xlabel("Rank")
ax.legend()
desired_letter_order = 'aeioubcdfghjklmnpqrstvwxyz'
def convert_to_desired_order(S_IR, S_JR, S_KR):
ir = pd.DataFrame(S_IR, index=list(string.ascii_lowercase)).loc[list(desired_letter_order)]
jr = pd.DataFrame(S_JR, index=list(string.ascii_lowercase)).loc[list(desired_letter_order)]
kr = pd.DataFrame(S_KR, index=list(string.ascii_lowercase)).loc[list(desired_letter_order)]
return ir, jr, kr
def plot_rank(S, save=False, title=""):
S_R, S_IR, S_JR, S_KR = S
ir, jr, kr = convert_to_desired_order(S_IR, S_JR, S_KR)
r = S_IR.shape[1]
fig, axes = plt.subplots(1,3, figsize=(10,8));
axes[0].imshow(ir)
axes[1].imshow(jr)
axes[2].imshow(kr)
axes[0].set_yticks(range(26))
axes[0].set_yticklabels(desired_letter_order)
axes[1].set_yticks(range(26))
axes[1].set_yticklabels(desired_letter_order)
axes[2].set_yticks(range(26))
axes[2].set_yticklabels(desired_letter_order)
axes[0].set_xticks(range(r))
axes[0].set_xticklabels(range(1,r+1))
axes[1].set_xticks(range(r))
axes[1].set_xticklabels(range(1,r+1))
axes[2].set_xticks(range(r))
axes[2].set_xticklabels(range(1,r+1))
axes[0].set_title("$X_1$")
axes[1].set_title("$X_2$")
axes[2].set_title("$X_3$")
fig.tight_layout(pad=3, rect=[0.12, .0, .88, 1.])
if title != "":
fig.suptitle(title, size=14)
if save==True:
fig.savefig(title.replace(" ", "_").lower()+".pdf", format="pdf", bbox_inches='tight')
return S_R, S_IR, S_JR, S_KR
#test this
def cumsum_max(array, limit=0.95):
array = array / array.sum()
arg_indices_sorted = np.argsort(-array)
cumsums = np.cumsum(array[arg_indices_sorted])
for i in range(len(array)):
if cumsums[i] > limit:
return np.sort(arg_indices_sorted[:i+1])  # api: numpy.sort
""" module for the sample random forest model """
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
def data_preprocess():
""" function for data pre-processing"""
train_data = pd.read_csv("train.csv")
test_data = pd.read_csv("test.csv")
train_data.insert(
loc=0,
column="CabinNull",
value=np.array(train_data.isnull()["Cabin"], dtype=int)
)
train_data.insert(
loc=0,
column="AgeNull",
value=np.array(train_data.isnull()["Age"], dtype=int)
)
train_data.insert(
loc=0,
column="FamilyName",
value=train_data["Name"].str.split(pat=",", expand=True)[0]
)
train_data["Name"] = train_data["Name"].str.split(pat=",", expand=True)[1]
train_data.insert(
loc=0,
column="Title",
value=train_data["Name"].str.split(pat=".", expand=True)[0]
)
train_data["Name"] = train_data["Name"].str.split(pat=".", expand=True)[1]
family_size = train_data.groupby("FamilyName", as_index=False).agg({"PassengerId": np.size})
family_size = family_size.rename(columns={"PassengerId": "FamilySize"})
train_data = pd.merge(train_data, family_size, on='FamilyName')
t1 = np.zeros((train_data.shape[0],))
t2 = []
ticket_split = train_data["Ticket"].str.split(pat=" ")
for i in range(len(ticket_split)):
if ticket_split[i][0] == "LINE":
t1[i] = 1
t2.append(0)
elif len(ticket_split[i]) >= 2:
t1[i] = 1
t2.append(float(int(ticket_split[i][len(ticket_split[i]) - 1])))
else:
t2.append(float(int(ticket_split[i][0])))
train_data.insert(loc=0, column="ExtraTicketInfo", value=t1)
train_data.insert(loc=0, column="TicketNum", value=t2)
t1 = np.zeros((test_data.shape[0],))
t2 = []
ticket_split = test_data["Ticket"].str.split(pat=" ")
for i in range(len(ticket_split)):
if ticket_split[i][0] == "LINE":
t1[i] = 1
t2.append(0)
elif len(ticket_split[i]) >= 2:
t1[i] = 1
t2.append(float(int(ticket_split[i][len(ticket_split[i]) - 1])))
else:
t2.append(float(int(ticket_split[i][0])))
test_data.insert(loc=0, column="ExtraTicketInfo", value=t1)
test_data.insert(loc=0, column="TicketNum", value=t2)
ticket_bucket = []
for i in range(len(train_data["TicketNum"])):
if train_data["TicketNum"][i] < 310131.7:
ticket_bucket.append(2)
elif train_data["TicketNum"][i] >= 310131.7 and train_data["TicketNum"][i] <= 620263.4:
ticket_bucket.append(0)
else:
ticket_bucket.append(1)
train_data.insert(loc=0, column="TicketCat", value=ticket_bucket)
le = preprocessing.LabelEncoder()
le.fit(["female", "male"])
train_data["Sex"] = le.fit_transform(train_data["Sex"])
train_data.head(n=5)
title_map = dict({
" Capt": 0,
" Don": 1,
" Dona": 1,
" Jonkheer": 2,
" Rev": 3,
" Mr": 4,
" Dr": 5,
" Col": 6,
" Major": 7,
" Master": 8,
" Miss": 9,
" Mrs": 10,
" Mme": 11,
" Sir": 12,
" Ms": 13,
" Lady": 14,
" Mlle": 15,
" the Countess": 16
})
title_encode = []
for i in train_data["Title"]:
title_encode.append(title_map[i])
train_data["Title"] = title_encode
train_data.head(n=5)
train_data = pd.get_dummies(train_data, columns=["Embarked"], dummy_na=True)
train_data = train_data.fillna(-999)
train_age_full = train_data[train_data["Age"] >= 0][["PassengerId", "Age"]]
pridict_age_full = train_data[train_data["Age"] < 0][["PassengerId", "Age"]]
train_age = np.array(train_data[train_data["Age"] >= 0]["Age"])
train_sibsp = np.array(train_data[train_data["Age"] >= 0]["SibSp"])
train_sibsp = np.reshape(train_sibsp, (train_sibsp.shape[0], 1))
predict_sibsp = np.array(train_data[train_data["Age"] < 0]["SibSp"])
predict_sibsp = np.reshape(predict_sibsp, (predict_sibsp.shape[0], 1))
neigh = KNeighborsRegressor(n_neighbors=5)
neigh.fit(train_sibsp, train_age)
predict_age = neigh.predict(predict_sibsp)
pridict_age_full["Age"] = predict_age
new_age = pd.concat([train_age_full, pridict_age_full], axis=0)
new_age = new_age.rename(columns={"Age": "NewAge"})
train_data = pd.merge(train_data, new_age, on="PassengerId")
y_train = train_data["Survived"]
train_data = train_data.drop(["Age", "FamilyName", "PassengerId", "Survived", "Name", "Cabin", "Ticket"], axis=1)
y_train = np.array(y_train)
X_train = np.array(train_data)
y_train = np.reshape(y_train, (y_train.shape[0],))
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
return X_train, y_train
def model(n_estimator, criterion, max_features, max_depth):
""" train and get the score of the random forest model """
rf = RandomForestClassifier(
# integer 100
n_estimators=int(n_estimator),
# "gini" or "entropy"
criterion=criterion,
# float 0.4
max_features=float(max_features),
# integer 7
max_depth=int(max_depth),
# random_state=4,
)
# rf.fit(Xtrain, ytrain)
X_train, y_train = data_preprocess()
scores = cross_val_score(rf, X_train, y_train, cv=10)
return np.mean(scores)  # api: numpy.mean
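# A minimal usage sketch (hedged: the hyperparameter values are the ones quoted
# in the comments above, and train.csv/test.csv must be present for
# data_preprocess() to load):
#     mean_cv_accuracy = model(100, "gini", 0.4, 7)
#     print(mean_cv_accuracy)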
from graphik.utils.utils import flatten
from urdfpy import URDF
from liegroups import SE3, SO3
import numpy as np
import trimesh
import pyrender
# from graphik.robots.revolute import Revolute3dChain, Revolute3dTree
from graphik.robots.robot_base import RobotRevolute
import graphik
from operator import itemgetter
class RobotURDF(object):
def __init__(self, fname):
self.fname = fname
self.urdf = URDF.load(fname)
self.urdf_ind_to_q, self.q_to_urdf_ind = self.joint_map()
self.n_q_joints = len(self.q_to_urdf_ind)
self.n_urdf_joints = len(self.urdf_ind_to_q)
self.ee_joints = None
self.T_zero = self.extract_T_zero_from_URDF(frame="joint")
self.scene = None
self.parents = None
# self.parents = self.get_parents()
def joint_map(self):
urdf_ind_to_q = {}
q_to_urdf_ind = {}
q_to_names = {}
q_ind = 1
label = "p{0}"
# for j, joint in enumerate(self.urdf.joints):
for joint in self.urdf.actuated_joints:
j = self.urdf.joints.index(joint)
urdf_ind_to_q[j] = label.format(q_ind)
q_to_urdf_ind[label.format(q_ind)] = j
# urdf_ind_to_q[j] = q_ind
# q_to_urdf_ind[q_ind] = j
q_ind += 1
return urdf_ind_to_q, q_to_urdf_ind
def find_first_joint(self):
"""
Finds the first joint whose parent link is the 'world' link. ASSUMES the URDF has a link named 'world'!
"""
world_link = self.find_link_by_name("world")
joint = self.find_actuated_joints_with_parent_link(world_link)
return joint[0]
def find_actuated_joints_with_parent_link(self, link):
parent_joints = []
for joint in self.urdf.joints:
if joint.parent == link.name:
if not (joint in self.urdf.actuated_joints):
joints = self.find_actuated_joints_with_parent_link(
self.find_link_by_name(joint.child)
)
parent_joints.extend(joints)
else:
parent_joints.append(joint)
return parent_joints
def find_joints_with_parent_link(self, link):
parent_joints = []
for joint in self.urdf.joints:
if joint.parent == link.name:
parent_joints.append(joint)
return parent_joints
def find_joints_actuated_child_joints(self, joint):
child_link = self.find_link_by_name(joint.child)
children_joints = self.find_actuated_joints_with_parent_link(child_link)
return children_joints
def find_joints_child_joints_from_list(self, joint, joints):
child_link = self.find_link_by_name(joint.child)
children_joints = []
for j in joints:
parent_link = self.find_link_by_name(j.parent)
if child_link == parent_link:
children_joints.append(j)
return children_joints
def get_parents(self, joints):
base_joint = self.find_first_joint()
if not (base_joint in joints):
raise ("Base joint not in joints")
label_base = "p{0}"
# parents = {'p0': [label_base.format(joints.index(base_joint))]}
parents = {}
for joint in joints:
children = self.find_joints_child_joints_from_list(joint, joints)
if children == []:
child_labels = []
else:
child_labels = [label_base.format(joints.index(cj)) for cj in children]
parent_label = label_base.format(joints.index(joint))
parents[parent_label] = child_labels
self.parents = parents
def actuated_joint_index(self, joint):
try:
return self.urdf.actuated_joints.index(joint)
except ValueError:
raise ("joint not an actuated joint")
def find_link_by_name(self, name):
for link in self.urdf.links:
if link.name == name:
return link
return None
def find_joint_by_name(self, name):
for joint in self.urdf.joints:
if joint.name == name:
return joint
return None
def extract_T_zero_from_URDF(self, q=None, frame="joint"):
"""
T is located at the joint's origin, with the rotation chosen such that
z_hat points along the joint axis.
"""
if q is not None:
urdf_q = self.map_to_urdf_ind(q)
cfg = urdf_q
else:
cfg = {}
fk = self.urdf.link_fk(cfg=cfg)
T = {}
for joint in self.urdf.actuated_joints:
# get child link frame
child_link = self.find_link_by_name(joint.child)
T_link = SE3.from_matrix(fk[child_link])
if frame == "joint":
# URDF frames are aligned with the links
# An additional rotation needs to be applied
# to align the Z axis with the joint frame
joint_axis = joint.axis
T_joint_axis = get_T_from_joint_axis(joint_axis)
T_joint = np.dot(T_link.as_matrix(), T_joint_axis.inv().as_matrix())
T[joint] = SE3.from_matrix(T_joint)
else:
T[joint] = T_link
ee_joints = self.find_end_effector_joints()
for ee_joint in ee_joints:
ee_link = self.find_link_by_name(ee_joint.child)
T[ee_joint] = SE3.from_matrix(fk[ee_link])
return T
def find_end_effector_joints(self):
"""
Finds end-effector joints. Assumes that the end effector frame has
a fixed joint.
Returns
-------
ee_joints : list
List of urdfpy joints that correspond to the End Effectors
"""
ee_joints = []
for joint in self.urdf.joints:
child_joints = self.find_joints_actuated_child_joints(joint)
if child_joints == []:
# child_link = self.find_link_by_name(joint.child)
# ee_joint = self.find_joints_with_parent_link(child_link)
# if ee_joint == []:
# # No fixed joint for ee
# raise("There is an end effector joint that isn't a fixed frame")
# ee_joints.extend(ee_joint)
ee_joints.append(joint)
self.ee_joints = ee_joints
return ee_joints
def map_to_urdf_ind(self, q):
"""
maps a dictionary so the keys (joint ind) in q map to the correct
joint indices in URDF representation
"""
q_keys = list(q.keys())
urdf_ind = itemgetter(*q_keys)(self.q_to_urdf_ind)
names = [self.urdf.joints[i].name for i in urdf_ind]
# urdf_q = dict(zip(urdf_ind, list(q.values())))
urdf_q = dict(zip(names, list(q.values())))
return urdf_q
def visualize(
self,
q=None,
with_frames=True,
with_balls=True,
with_robot=True,
transparency=None,
):
self.make_scene(
q=q,
with_frames=with_frames,
with_balls=with_balls,
with_robot=with_robot,
transparency=transparency,
)
v = pyrender.Viewer(self.scene, use_raymond_lighting=True)
def make_scene(
self,
q=None,
with_frames=True,
with_balls=True,
with_robot=True,
transparency=None,
):
if q is not None:
urdf_q = self.map_to_urdf_ind(q)
# cfg = list(urdf_q.values())
cfg = urdf_q
else:
cfg = {}
if with_robot:
robot_scene = self.urdf.show(
cfg=cfg, return_scene=True, transparency=transparency
)
if self.scene is None:
self.scene = robot_scene
else:
for node in robot_scene.get_nodes():
self.scene.add_node(node)
Ts_dict = self.extract_T_zero_from_URDF(q=q)
Ts = []
for T in Ts_dict:
T_zero = Ts_dict[T]
Ts.append(T_zero)
if with_frames:
path = graphik.robots.__path__[0] + "/urdfs/meshes/frame.dae"
self.scene = view_dae(path, Ts, scene=self.scene, return_scene_only=True)
if with_balls:
path = graphik.robots.__path__[0] + "/urdfs/meshes/redball.dae"
self.scene = view_dae(path, Ts, scene=self.scene, return_scene_only=True)
def joint_limits(self):
ub = {}
lb = {}
for joint in self.urdf.actuated_joints:
ubi = np.clip(joint.limit.upper, -np.pi, np.pi)
lbi = np.clip(joint.limit.lower, -np.pi, np.pi)
label = "p{0}"
joint_label = label.format(self.actuated_joint_index(joint))
ub[joint_label] = ubi
lb[joint_label] = lbi
return ub, lb
def get_graphik_labels(self, joints):
"""
Assigns joint labels of the form p{i}, where i is the joint
index. The first joint has label p0.
Parameters
----------
joints : list
list of urdfpy joints
Returns
-------
labels : list
list of the labels
"""
n = len(joints)
label = "p{0}"
labels = [label.format(i) for i in range(n)]
return labels
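# For example, with three joints this returns the labels ['p0', 'p1', 'p2'].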
def make_Revolute3d(self, ub, lb):
# if all the child lists have len 1, then chain, otherwise tree
params = {}
# assign parents
joints = list(self.T_zero.keys())
self.get_parents(joints)
params["parents"] = self.parents
# Assign Transforms
T_labels = self.get_graphik_labels(joints)
T_zero = dict(zip(T_labels, self.T_zero.values()))
T0 = T_zero["p0"]
for key, val in T_zero.items():
T_zero[key] = T0.inv().dot(val)
# T_zero['root'] = SE3.identity()
params["T_zero"] = T_zero
l = 0
for cl in self.parents.values():
l += len(cl)
if l == len(self.parents.keys()) - 1:
# number of children == number of joints
# ub, lb = self.joint_limits()
params["ub"] = ub
params["lb"] = lb
# return Revolute3dChain(params)
return RobotRevolute(params)
else:
# return Revolute3dTree(params)
return RobotRevolute(params)
def view_dae(dae: str, T_zero: list, scene=None, return_scene_only=False, colour=None):
if scene is None:
scene = pyrender.Scene()
frame_tm = trimesh.load(dae)
material = None
if colour is not None:
for tm in frame_tm.dump():
colors, texcoords, material = pyrender.Mesh._get_trimesh_props(tm)
if colour == "red":
material.baseColorFactor = np.array([1.0, 0.0, 0.0, 1.0])
elif colour == "green":
material.baseColorFactor = np.array([0.0, 1.0, 0.0, 1.0])
elif colour == "blue":
material.baseColorFactor = np.array([0.0, 0.0, 1.0, 1.0])
else:
raise ("colour not implemented")
# frame_tm = trimesh.load('graphik/robots/urdfs/meshes/frame.dae')
meshes = pyrender.Mesh.from_trimesh(frame_tm.dump(), material=material)
for T in T_zero:
scene.add(meshes, pose=T.as_matrix())
if return_scene_only:
return scene
else:
v = pyrender.Viewer(scene, use_raymond_lighting=True)
return scene
def skew(x):
x = flatten(x)
X = np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
return X
def plot_balls_from_points(
points: np.array, scene=None, return_scene_only=False, colour=None
):
"""
Plot red balls at each point in the nx3 array points
Parameters
----------
points : np.array
nx3 array of points to plot the balls
scene : pyrender.Scene
The scene to add the balls to. If scene=None, then a new scene will be
created
return_scene_only : bool
If True, will only return the scene and not plot the points. If False,
will plot the points and return the scene.
Returns
-------
scene
"""
dae = graphik.robots.__path__[0] + "/urdfs/meshes/redball.dae"
n, _ = points.shape
T = []
for i in range(n):
T_id = np.eye(4)
T_id[0:3, 3] = points[i, :]
T_zero = SE3.from_matrix(T_id)
T.append(T_zero)
scene = view_dae(
dae, T, scene=scene, return_scene_only=return_scene_only, colour=colour
)
return scene
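# A minimal usage sketch (hedged: the point cloud below is illustrative):
#     pts = np.random.rand(10, 3)
#     plot_balls_from_points(pts, colour="red")  # opens a pyrender viewer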
def get_T_from_joint_axis(axis: str, switch=False):
"""
Take in the axis string from urdf "X X X" and return the rotation matrix
associated with that axis
"""
norm = np.linalg.norm
z_hat = np.array([0, 0, 1])
if switch:
sgn = -1.0
else:
sgn = 1.0
if all(np.isclose(axis, -z_hat)):
R = SO3.rotx(np.pi).as_matrix()
elif not all(np.isclose(axis, z_hat)):
rot_axis = np.cross(axis, z_hat)
# rot_axis = np.cross(z_hat, axis)
ang = -np.arcsin(norm(rot_axis) / (norm(axis) * norm(z_hat)))
rot_axis = normalize(rot_axis)
rot_axis = rot_axis.reshape(3, 1)
R = (
np.eye(3) * np.cos(ang)
+ (1 - np.cos(ang)) * np.dot(rot_axis, rot_axis.transpose())
- np.sin(ang) * skew(rot_axis)
)
else:
R = np.eye(3)
T = np.eye(4)
T[0:3, 0:3] = R
T[0:3, 3] = np.zeros(3)
T = SE3.from_matrix(T)
return T
def normalize(x):
return x / np.linalg.norm(x)  # api: numpy.linalg.norm
"""
Handles sampling data from an on-disk file at runtime.
"""
from typing import List, Dict, Any, Optional
from collections import defaultdict
from os.path import expanduser as expand
import time
import pickle
import gzip
import itertools
import numpy as np
import torch
from ulfs.params import Params
from texrel.texturizer import Texturizer
class TexRelDataset(object):
def __init__(
self,
ds_filepath_templ: str,
ds_refs: List[str],
ds_seed: int,
ds_texture_size: int,
ds_background_noise: float,
ds_mean: float,
ds_mean_std: float,
ds_val_refs: Optional[List[str]] = None,
):
"""
ds_filepath_templ: str
filepath for each data file, templatized with `{ds_ref}`, e.g.
"~/data/texrel/{ds_ref}.dat"
ds_refs: List[str]
List of ds_refs of the datafiles we want to load
ds_seed: int
seed used for initializing the textures and colors
ds_texture_size: int
how big to make each texture, each texture will have
its width and height set to ds_texture_size
ds_background_noise: float
how much gaussian noise to add to the background colors of each
generated image. this noise is applied per-pixel
ds_mean: float
the mean brightness of the background, [0-1]
ds_mean_std: float
standard deviation of noise. this is applied over entire images
(cf ds_background_noise, which is per-pixel)
ds_val_refs: Optional[List[str]] = None
List of ds_refs to use for validation and test sets. If None,
then the ones in `ds_refs` will be used for validation and test
"""
print('ds-refs', ds_refs)
print('ds-val-refs', ds_val_refs)
texture_size = ds_texture_size
background_noise = ds_background_noise
self.ds_mean_std = ds_mean_std
self.background_noise = background_noise
self.metas: List[Any] = []
dsref_name2id: Dict[str, int] = {}
self.datas_by_dsref = {} # eg {'ds63': {'train': {'N': ..., 'input_shapes': ..., ...}}}
self.datas: Dict[str, Dict[str, Any]] = {} # {'train': {'N': ..., 'input_shapes': ..., ...}}
if ds_val_refs is None:
ds_val_refs = list(ds_refs)
all_ds_refs = list(set(ds_refs) | set(ds_val_refs))
for ds_ref in all_ds_refs:
"""
ds_ref, eg dsref64
"""
dsref_name2id[ds_ref] = len(dsref_name2id)
print(f'loading {ds_ref} ...', end='', flush=True)
start_time = time.time()
filepath = ds_filepath_templ.format(ds_ref=ds_ref)
with gzip.open(expand(filepath), 'rb') as f:
d = pickle.load(f) # type: ignore
_meta = d['meta']
load_time = time.time() - start_time
print(f' done in {load_time:.1f}s')
version = _meta.get('version', 'v1')
print(' ', 'data format version', version)
self.metas.append(Params(d['meta']))
self.datas_by_dsref[ds_ref] = d['data']
# split_name is eg 'train', 'holdout'
for split_name, data in d['data'].items():
_ds_refs = ds_refs if split_name == 'train' else ds_val_refs
if ds_ref not in _ds_refs:
continue
print(' ', split_name, ds_ref, end='', flush=True)
_N = data['inner_train_labels'].shape[1]
print(' N', _N)
if split_name not in self.datas:
self.datas[split_name] = defaultdict(list)
for k2, v in data.items():
"""
k2, eg 'N', 'input_labels', 'input_shapes', ...
"""
self.datas[split_name][k2].append(v)
dsrefs_t = torch.full((_N, ), fill_value=dsref_name2id[ds_ref], dtype=torch.int64)
self.datas[split_name]['dsrefs_t'].append(dsrefs_t)
datas_new: Dict[str, Dict[str, Any]] = {}
for split_name, data in self.datas.items():
datas_new[split_name] = {}
d = datas_new[split_name]
d['N'] = np.sum(data['N'])  # api: numpy.sum
import numpy as np
import matplotlib.pyplot as plt
from random import randint, random
from numpy.core.fromnumeric import diagonal
'''
Load data from file
'''
def load_data(filename):
file = open(filename, 'r')
tmp_str = file.readline()
tmp_arr = tmp_str[:-1].split(' ')
N = int(tmp_arr[0])
n_row = int(tmp_arr[1])
n_col = int(tmp_arr[2])
print('N=%d, row=%d, col=%d' %(N,n_row,n_col))
data = np.zeros([N, n_row * n_col + 1])
for n in range(N):
tmp_str = file.readline()
tmp_arr = tmp_str[:-1].split(' ')
for i in range(n_row * n_col + 1):
data[n][i] = int(tmp_arr[i])
file.close()
return N, n_row, n_col, data
'''
Sigmoid function
'''
def sigmoid(s):
large=30
if s<-large: s=-large
if s>large: s=large
return (1 / (1 + np.exp(-s)))
'''
Cost/Loss function
'''
def cost(X, Y, N, W, b, v, c):
epsi = 1.e-12
sum = 0
for n in range(N):
prediction = predict(X[n], J, W, b, v, c)
# verify if the value of the prediction is in [1e-12, 1-1e-12]
if prediction < epsi:
prediction = epsi
if prediction > 1 - epsi:
prediction = 1 - epsi
sum += Y[n] * np.log(prediction) + (1 - Y[n]) * np.log(1 - prediction)
E = - sum / N
return E
'''
Calculate h
'''
def get_h(x, W, b):
# x, w, b -> b + w * x -> s
s = b + np.dot(W, x)  # api: numpy.dot
import testing as tm
import pytest
import numpy as np
import xgboost as xgb
import json
from pathlib import Path
dpath = Path('demo/data')
def test_aft_survival_toy_data():
# See demo/aft_survival/aft_survival_viz_demo.py
X = np.array([1, 2, 3, 4, 5])  # api: numpy.array
#
# Created by: <NAME>, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
# generates a random matrix of desired data type of shape
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
        sigmas = np.array([4., 3., 2., 0])  # completion API: numpy.array
# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import os
import numpy as np
import numpy.testing as nt
from .mpi import MPITestCase
from ..tod import sim_focalplane as sfp
def generate_hex(npix, width, poltype, fwhm):
if poltype == "qu":
pol_a = sfp.hex_pol_angles_qu(npix)
pol_b = sfp.hex_pol_angles_qu(npix, offset=90.0)
elif poltype == "radial":
pol_a = sfp.hex_pol_angles_radial(npix)
pol_b = sfp.hex_pol_angles_radial(npix, offset=90.0)
dets_a = sfp.hex_layout(npix, width, "", "A", pol_a)
dets_b = sfp.hex_layout(npix, width, "", "B", pol_b)
dets = dict()
dets.update(dets_a)
dets.update(dets_b)
# Pol color different for A/B detectors
detpolcolor = dict()
detpolcolor.update({x: "red" for x in dets_a.keys()})
detpolcolor.update({x: "blue" for x in dets_b.keys()})
# set the label to just the detector name
detlabels = {x: x for x in dets.keys()}
# fwhm and face color the same
detfwhm = {x: fwhm for x in dets.keys()}
# cycle through some colors just for fun
pclr = [
(1.0, 0.0, 0.0, 0.1),
(1.0, 0.5, 0.0, 0.1),
(0.25, 0.5, 1.0, 0.1),
(0.0, 0.75, 0.0, 0.1),
]
detcolor = {y: pclr[(x // 2) % 4] for x, y in enumerate(sorted(dets.keys()))}
# split out quaternions for plotting
detquats = {x: dets[x]["quat"] for x in dets.keys()}
return dets, detquats, detfwhm, detcolor, detpolcolor, detlabels
def generate_rhombus(npix, width, fwhm, prefix, center):
pol_a = sfp.rhomb_pol_angles_qu(npix)
pol_b = sfp.rhomb_pol_angles_qu(npix, offset=90.0)
dets_a = sfp.rhombus_layout(npix, width, prefix, "A", pol_a, center=center)
dets_b = sfp.rhombus_layout(npix, width, prefix, "B", pol_b, center=center)
dets = dict()
dets.update(dets_a)
dets.update(dets_b)
# Pol color different for A/B detectors
detpolcolor = dict()
detpolcolor.update({x: "red" for x in dets_a.keys()})
detpolcolor.update({x: "blue" for x in dets_b.keys()})
# set the label to just the detector name
detlabels = {x: x for x in dets.keys()}
# fwhm and face color the same
detfwhm = {x: fwhm for x in dets.keys()}
# cycle through some colors just for fun
pclr = [
(1.0, 0.0, 0.0, 0.1),
(1.0, 0.5, 0.0, 0.1),
(0.25, 0.5, 1.0, 0.1),
(0.0, 0.75, 0.0, 0.1),
]
detcolor = {y: pclr[(x // 2) % 4] for x, y in enumerate(sorted(dets.keys()))}
# split out quaternions for plotting
detquats = {x: dets[x]["quat"] for x in dets.keys()}
return dets, detquats, detfwhm, detcolor, detpolcolor, detlabels
class SimFocalplaneTest(MPITestCase):
def setUp(self):
self.outdir = "toast_test_output"
self.rank = 0
if self.comm is not None:
self.rank = self.comm.rank
if self.rank == 0:
if not os.path.isdir(self.outdir):
os.mkdir(self.outdir)
def test_cart_quat(self):
xincr = np.linspace(-5.0, 5.0, num=10, endpoint=True)
yincr = np.linspace(-5.0, 5.0, num=10, endpoint=True)
offsets = list()
for x in xincr:
for y in yincr:
ang = 3.6 * (x - xincr[0]) * (y - yincr[0])
offsets.append([x, y, ang])
quats = sfp.cartesian_to_quat(offsets)
detquats = {"{}".format(x): y for x, y in enumerate(quats)}
fwhm = {x: 30.0 for x in detquats.keys()}
outfile = os.path.join(self.outdir, "out_test_cart2quat.png")
if self.rank == 0:
sfp.plot_focalplane(detquats, 12.0, 12.0, outfile, fwhm=fwhm)
return
def test_hex_nring(self):
result = {
1: 1,
7: 2,
19: 3,
37: 4,
61: 5,
91: 6,
127: 7,
169: 8,
217: 9,
271: 10,
331: 11,
397: 12,
}
for npix, check in result.items():
test = sfp.hex_nring(npix)
nt.assert_equal(test, check)
return
def test_vis_hex_small(self):
dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = generate_hex(
7, 5.0, "qu", 15.0
)
outfile = os.path.join(self.outdir, "out_test_vis_hex_small.png")
if self.rank == 0:
sfp.plot_focalplane(
detquats,
6.0,
6.0,
outfile,
fwhm=detfwhm,
facecolor=detcolor,
polcolor=detpolcolor,
labels=detlabels,
)
return
def test_vis_hex_small_rad(self):
dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = generate_hex(
7, 5.0, "radial", 15.0
)
outfile = os.path.join(self.outdir, "out_test_vis_hex_small_rad.png")
if self.rank == 0:
sfp.plot_focalplane(
detquats,
6.0,
6.0,
outfile,
fwhm=detfwhm,
facecolor=detcolor,
polcolor=detpolcolor,
labels=detlabels,
)
return
def test_vis_hex_medium(self):
dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = generate_hex(
91, 5.0, "qu", 10.0
)
outfile = os.path.join(self.outdir, "out_test_vis_hex_medium.png")
if self.rank == 0:
sfp.plot_focalplane(
detquats,
6.0,
6.0,
outfile,
fwhm=detfwhm,
facecolor=detcolor,
polcolor=detpolcolor,
labels=detlabels,
)
return
def test_vis_hex_large(self):
dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = generate_hex(
217, 5.0, "qu", 5.0
)
outfile = os.path.join(self.outdir, "out_test_vis_hex_large.png")
if self.rank == 0:
sfp.plot_focalplane(
detquats,
6.0,
6.0,
outfile,
fwhm=detfwhm,
facecolor=detcolor,
polcolor=detpolcolor,
labels=detlabels,
)
return
def test_vis_rhombus(self):
sixty = np.pi / 3.0
thirty = np.pi / 6.0
rtthree = np.sqrt(3.0)
rdim = 8
rpix = rdim ** 2
hexwidth = 5.0
rwidth = hexwidth / rtthree
# angular separation of rhombi
margin = 0.60 * hexwidth
        centers = [np.array([0.5 * margin, 0.0, 0.0])  # completion API: numpy.array
import unittest
from delo import DistributionUtilities
import numpy as np
import inspect
import unittest
def getVerbosity():
"""Return the verbosity setting of the currently running unittest
program, or 0 if none is running.
    When `python -m unittest` is called, this function should return 1; with `python -m unittest -v`, it returns 2.
"""
frame = inspect.currentframe()
while frame:
self = frame.f_locals.get('self')
if isinstance(self, unittest.TestProgram):
return self.verbosity
frame = frame.f_back
return 0
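# Example: under `python -m unittest -v` the walk up the call stack above finds the
# running unittest.TestProgram and getVerbosity() returns 2, which enables the seed
# printouts in the setUp methods below; a plain `python -m unittest` run returns 1.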
class TestChoppedNormal(unittest.TestCase):
def setUp(self):
rng_for_seed = np.random.default_rng() # should work for any seed
seed = rng_for_seed.integers(1, 100000)
if getVerbosity() > 1:
print(f"Test ChoppedNormal on seed = {seed}")
self.seed = seed
self.size = 1000
def test_in_bounds(self):
rng = np.random.default_rng(self.seed)
drawned = DistributionUtilities.chopped_normal(rng, self.size, np.ones(self.size))
self.assertTrue(np.all(drawned <= 1))
self.assertTrue(np.all(drawned >= 0))
drawned = DistributionUtilities.chopped_normal(rng, self.size, np.zeros(self.size))
self.assertTrue(np.all(drawned <= 1))
self.assertTrue(np.all(drawned >= 0))
def test_reproducibility(self):
rng1 = np.random.default_rng(self.seed)
rng2 = np.random.default_rng(self.seed)
drawned1 = DistributionUtilities.chopped_normal(rng1, self.size)
drawned2 = DistributionUtilities.chopped_normal(rng2, self.size)
self.assertTrue(np.all(drawned1 == drawned2))
def test_zero_variation(self):
rng = np.random.default_rng(self.seed)
np.testing.assert_array_equal(DistributionUtilities.chopped_normal(rng, self.size, variation=0),
0.5 * np.ones(self.size))
class TestChoppedCauchy(unittest.TestCase):
def setUp(self):
rng_for_seed = np.random.default_rng() # should work for any seed
seed = rng_for_seed.integers(1, 100000)
if getVerbosity() > 1:
print(f"Test ChoppedCauchy on seed = {seed}")
self.seed = seed
self.size = 1000
def test_in_bounds(self):
rng = np.random.default_rng(self.seed)
drawned = DistributionUtilities.chopped_cauchy(rng, self.size, np.ones(self.size))
        self.assertTrue(np.all(drawned <= 1))  # completion API: numpy.all
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import unittest
import os
import numpy as np
from auspex.analysis import fits, qubit_fits, resonator_fits
import matplotlib.pyplot as plt
class FitAssertion(object):
def assertFitInterval(self, p0, name, fit, tol=5):
low = fit.fit_params[name] - tol*fit.fit_errors[name]
high = fit.fit_params[name] + tol*fit.fit_errors[name]
test = (low < p0 < high)
if not test:
raise AssertionError(f"Fit parameter {name}: {p0} is outside of interval ({low}, {high}).")
class TestResonatorFit(unittest.TestCase, FitAssertion):
def test_CircleFit(self):
#[tau, a, alpha, fr, phi0, Ql, Qc, Qi]
Qi = 6.23e5
Qc = 2e5
Ql = 1/(1/Qi + np.real(1/Qc))
f0 = 6.86
kappa = f0/Ql
p0 = [(1/f0)*0.9734, 0.8, np.pi*0.09, f0, np.pi*0.123, Ql, Qc]
x = np.linspace(f0 - 8*kappa, f0+7*kappa, 1601)
y = resonator_fits.ResonatorCircleFit._model(x, *p0)
noise = 1.0 + np.random.randn(y.size) * np.median(y)/20
y *= noise
fit = resonator_fits.ResonatorCircleFit(x, y, make_plots=False)
#print(fit.fit_params)
#print(fit.fit_errors)
try:
self.assertFitInterval(f0, "fr", fit, tol=10)
self.assertFitInterval(Qi, "Qi", fit, tol=2)
self.assertFitInterval(Ql, "Ql", fit, tol=2)
except AssertionError as e:
print("Resonator fit tests failed. Perhaps re-run?")
print(str(e))
except:
pass
class TestFitMethods(unittest.TestCase, FitAssertion):
def test_LorentzFit(self):
p0 = [3, 0.25, 0.4, 1.0]
x = np.linspace(-1, 1, 201)
y = fits.LorentzFit._model(x, *p0)
noise = np.random.randn(y.size) * np.max(y)/10
y += noise
fit = fits.LorentzFit(x, y, make_plots=False)
self.assertFitInterval(p0[0], "A", fit)
self.assertFitInterval(p0[1], "b", fit)
self.assertFitInterval(p0[2], "c", fit)
def test_GaussianFit(self):
p0 = [0.23, 3.1, 0.54, 0.89]
x = np.linspace(-4, 4, 201)
y = fits.GaussianFit._model(x, *p0)
        noise = np.random.randn(y.size)  # completion API: numpy.random.randn
"""Tests the methods in ddnn.py."""
# pylint: disable=import-error
import numpy as np
import torch
from pysyrenn import ReluLayer, FullyConnectedLayer, ArgMaxLayer
from pysyrenn import HardTanhLayer, MaxPoolLayer, StridedWindowData
try:
from external.bazel_python.pytest_helper import main
IN_BAZEL = True
except ImportError:
IN_BAZEL = False
from prdnn.ddnn import DDNN
def test_compute():
"""Tests that it works for a simple example."""
activation_layers = [
        FullyConnectedLayer(np.eye(2)  # completion API: numpy.eye
"""
Finite-difference solver for Laplace equation:
u_xx + u_yy = 0.
Boundary conditions:
u(x, y) = bound(x, y).
"""
import numpy as np
from scipy import linalg
from pdepy import steady, utils
@utils.validate_method(valid_methods=["ic"])
def solve(axis, conds, method="ic"):
"""
Methods
-------
* ic: implicit central
Parameters
----------
axis : array_like
Axis 'x' and 'y'; [x, y], each element should be an array_like.
conds : array_like
Boundary conditions; [bound_x0, bound_xf, bound_y0, bound_yf],
each element should be a scalar or an array_like of size 'x.size'
for 'bound_y' and 'y.size' for 'bound_x'.
method : string | optional
Finite-difference method.
Returns
-------
u : ndarray
A 2-D ndarray; u[x, y].
"""
u = steady.set_u(*axis, *conds)
consts = _cal_constants(*axis)
_implicit(u, *axis, *consts)
return u
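# Minimal usage sketch (illustrative axis sizes and boundary values, not from this file):
#   x = np.linspace(0.0, 1.0, 11)
#   y = np.linspace(0.0, 1.0, 11)
#   u = solve([x, y], [0.0, 0.0, 100.0, 0.0], method="ic")   # u[x, y] on the grid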
def _implicit(u, x, y, 𝛂, β):
"""Métodos de diferenças finitas implícitos."""
xn, yn = x.size, y.size
mat = _set_mat(𝛂, β, xn - 1, yn - 1)
vec = _set_vec(𝛂, β, u)
x = linalg.solve(mat, vec)
u[1:-1, 1:-1] = np.reshape(x, (xn - 2, yn - 2), "F")
def _set_mat(𝛂, β, xn, yn):
"""Monta a matriz do sistema em '_implicit()'."""
n = (xn - 1) * (yn - 1)
main = np.full(n, -2 * (𝛂 + β))
sub1 = np.full(n - 1, β)
sub2 = np.full(n - xn + 1, 𝛂)
sub1[xn - 2 : -1 : xn - 1] = 0
return (
np.diag(main)
+ np.diag(sub1, 1)
        + np.diag(sub1, -1)  # completion API: numpy.diag
import numpy as np
from scipy.stats import norm
from scipy.sparse import diags
from numba import njit, prange
###################
# 1. construction #
###################
def find_lin_prob(xi,x):
""" takes in xi, and finds two points in either side of xi and
return the indices of them, y, and associated probabilites p """
# a. pre-allocate output containers
p = np.zeros(2)
y = np.zeros(2)
    n = x.shape[0]
# b. calculate indices and probabilities
if n == 1:
p[0] = 1
p[1] = 0
y[0] = 0
y[1] = 0
LocL = np.argmax(x[xi>x])
if xi <= x[0]:
y[0] = 1
y[1] = 2
p[0] = 1
p[1] = 0
elif LocL >= n:
LocL = n-2
y[0] = n-2
y[1] = n-1
p[0] = 0
p[1] = 1
elif x[LocL+1] == x[LocL]:
y[0] = LocL
y[1] = LocL+1
p[0] = 0.5
p[1] = 0.5
else:
y[0] = LocL
y[1] = LocL+1
p[1] = (xi - x[LocL])/np.real(x[LocL+1]-x[LocL])
p[0] = 1-p[1]
return y,p
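# Usage sketch (illustrative grid, not from this file):
#   y, p = find_lin_prob(0.3, np.array([0.0, 0.25, 0.5, 1.0]))
#   # y = [1., 2.] (indices of the points bracketing 0.3) and p ≈ [0.8, 0.2]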
def symmetric_power_grid(n,k,width,center):
""" gives a grid spaced between center-width and center+width based on the interval [-1,1]
with a function x^(1/k) on either side k = 1 is linear, k = 0 is L-shaped """
# a. pre-allocate solution vectors
x = np.linspace(-1,1,n)
z = np.zeros(n)
# b. generate grid
if n < 2:
print('n must be at least 2 to make grids')
return
if n == 2:
y = [center-width,center+width]
        return y
for i in range(n):
if x[i] > 0:
z[i] = x[i]**(1.0/k)
elif x[i] == 0:
z[i] = 0.0
elif x[i] < 0:
z[i] = -((-x[i])**(1.0/k))
y = center + width*z
return y
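# Example (illustrative parameters): symmetric_power_grid(5, 1.0, 1.0, 0.0) returns the
# evenly spaced grid [-1, -0.5, 0, 0.5, 1]; smaller k keeps the endpoints at center±width
# but concentrates the interior points near the center.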
def income_grid(Nz,k,z_width):
zgrid = symmetric_power_grid(Nz,k,z_width/2,0)
dzgrid = np.diff(zgrid)
return zgrid, dzgrid
def jump_matrix(Nz,zgrid,dzgrid,lambda_z,rho,sigma):
# a. pre-allocate solution containers
jump = np.zeros((Nz,Nz))
eye = np.identity(Nz)
# b. insert into jump matrix
for i in range(Nz):
for j in range(Nz):
if j==0:
jump[i,j] = norm.cdf(zgrid[j]+0.5*dzgrid[j],rho*zgrid[i],sigma)
elif j > 0 and j < Nz-1:
jump[i,j] = norm.cdf(zgrid[j]+0.5*dzgrid[j],rho*zgrid[i],sigma) - norm.cdf(zgrid[j]-0.5*dzgrid[j-1],rho*zgrid[i],sigma)
elif j == Nz-1:
jump[i,j] = 1.0 - norm.cdf(zgrid[j]-0.5*dzgrid[j-1],rho*zgrid[i],sigma)
jump[i,:] = jump[i,:]/np.sum(jump[i,:])
jump = lambda_z*(jump-eye)
return jump
def drift_matrix(Nz,zgrid,beta,delta,DriftPointsVersion=2):
# a. pre-allocate container for solution
drift = np.zeros((Nz,Nz))
# b. insert into drift matrix
for i in range(Nz):
if zgrid[i] != 0:
ii,_ = find_lin_prob((1.0-beta*delta)*zgrid[i],zgrid)
drift[i,i] = -1
if zgrid[i] < 0:
drift[i,i] = drift[i,i] + (zgrid[int(ii[1])] - (1.0-beta)*zgrid[i])/(zgrid[int(ii[1])]-zgrid[i])
drift[i,int(ii[1])] = (-zgrid[i] + (1.0-beta)*zgrid[i])/(zgrid[int(ii[1])]-zgrid[i])
elif zgrid[i] > 0.0:
drift[i,int(ii[0])] = (zgrid[i] - (1.0-beta)*zgrid[i])/(zgrid[i]-zgrid[int(ii[0])])
drift[i,i] = drift[i,i] + (-zgrid[int(ii[0])] + (1.0-beta)*zgrid[i])/(zgrid[i]-zgrid[int(ii[0])])
return drift
def ergodic_dist(Nz,z_markov,delta=1):
""" find the ergodic distribution of a single income process component """
# a. allocate containers
zdist = np.zeros(Nz)
zdist[int((Nz+1)/2)-1] = 1
eye = np.eye(Nz)
# b. prepare solution matrices
mat = eye - delta*z_markov
matinv = np.linalg.inv(mat)
# c. find ergodic distribution by iteration
it = 1
lerr = 1
while lerr > 1e-14 and it < 1000:
zdist_update = np.matmul(zdist,matinv)
        zdist_update[abs(zdist_update) < 1.0e-20] = 0.0
zdist_update = zdist_update/np.sum(zdist_update)
lerr = np.amax(abs(zdist_update-zdist))
zdist = zdist_update
it = it + 1
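    # The loop above is an implicit power iteration: repeatedly applying (I - delta*Q)^(-1)
    # to the distribution and renormalizing converges to the stationary distribution of the
    # Markov generator z_markov.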
return zdist
def combined_process(Nz1,Nz2,z1grid,z2grid,z1dist,z2dist,z1_markov,z2_markov,dt,beta=100):
# a. allocate containers for solution
eye1 = np.identity(Nz1)
eye2 = np.identity(Nz2)
eye = np.identity(Nz1*Nz2)
lzgrid_combined = np.zeros(Nz1*Nz2)
lztrans_dt_combined = np.zeros((Nz1*Nz2,Nz1*Nz2))
lzmarkov_combined = np.zeros((Nz1*Nz2,Nz1*Nz2))
lzdist_combined = np.zeros(Nz1*Nz2)
zgrid_combined = np.zeros(Nz1*Nz2)
ztrans_dt_combined = np.zeros((Nz1*Nz2,Nz1*Nz2))
zmarkov_combined = np.zeros((Nz1*Nz2,Nz1*Nz2))
zdist_combined = np.zeros(Nz1*Nz2)
# b. combined process function
z1trans = dt*z1_markov + eye1
z2trans = dt*z2_markov + eye2
# c. transition matrix corresponding to dt (e.g. dt = 0.25 yields a quarterly matrix)
# i. pre-allocate solution
z1trans_dt = z1trans.copy()
z2trans_dt = z2trans.copy()
# ii. normalize trans_dt according to dt
for i in range(0,int(np.floor(1.0/dt))-1):
z1trans_dt = np.matmul(z1trans_dt,z1trans)
z2trans_dt = np.matmul(z2trans_dt,z2trans)
# d. get combined grid and transition matrix
i = -1
for i1 in range(Nz1):
for i2 in range(Nz2):
i = i + 1
lzgrid_combined[i] = z1grid[i1] + z2grid[i2]
lzdist_combined[i] = z1dist[i1]*z2dist[i2]
j = -1
for j1 in range(Nz1):
for j2 in range(Nz2):
j = j + 1
lztrans_dt_combined[i,j] = z1trans_dt[i1,j1]*z2trans_dt[i2,j2]
if i1==j1 and i2==j2: lzmarkov_combined[i,j] = z1_markov[i1,j1] + z2_markov[i2,j2]
if i1==j1 and i2!=j2: lzmarkov_combined[i,j] = z2_markov[i2,j2]
if i1!=j1 and i2==j2: lzmarkov_combined[i,j] = z1_markov[i1,j1]
if i1!=j1 and i2!=j2: lzmarkov_combined[i,j] = 0
# e. sort into ascending order
# i. generate sorted grid indices
iorder = np.argsort(lzgrid_combined)
# ii. sort combined grid, ytrans_dt_combined and combined markov matrix
for i in range(Nz1*Nz2):
zgrid_combined[i] = lzgrid_combined[iorder[i]]
zdist_combined[i] = lzdist_combined[iorder[i]]
for j in range(Nz1*Nz2):
ztrans_dt_combined[i,j] = lztrans_dt_combined[iorder[i],iorder[j]]
zmarkov_combined[i,j] = lzmarkov_combined[iorder[i],iorder[j]]
# f. fix up rounding in markov matrix
zmarkov_combined = zmarkov_combined - np.diag(np.sum(zmarkov_combined,axis=1))
# g. find ergodic distribution by iteration
# i. prepare solution matrices
z1dist_ = z1dist.copy()
z1dist_[0] = 0.1
ii = ((Nz1+1)/2-1)*Nz2 + (Nz2+1)/2
zdist_combined[int(ii)] = 0.9
mat = eye - beta*zmarkov_combined
matinv = np.linalg.inv(mat)
# ii. compute ergodic distribution
it = 1
err = 1
while err>1e-15 and it<1000:
zdist_combined_update = np.matmul(zdist_combined,matinv)
        zdist_combined_update[abs(zdist_combined_update) < 1.0e-20] = 0.0
zdist_combined_update = zdist_combined_update/np.sum(zdist_combined_update)
err = np.amax(abs(zdist_combined_update-zdist_combined))
zdist_combined = zdist_combined_update
it = it + 1
# iii. fix up rounding in ergodic distribution
zdist_combined = zdist_combined/np.sum(zdist_combined)
return zgrid_combined, ztrans_dt_combined, zmarkov_combined, zdist_combined
def construct_jump_drift(par):
# a. income process component grids
par.grid_z1, dz1grid = income_grid(par.Nz1,par.kz_1,par.z1_width)
par.grid_z2, dz2grid = income_grid(par.Nz2,par.kz_2,par.z2_width)
# b. jump matrices
z1_jump = jump_matrix(par.Nz1,par.grid_z1,dz1grid,par.lambda1,par.rho1,par.sigma1)
z2_jump = jump_matrix(par.Nz2,par.grid_z2,dz2grid,par.lambda2,par.rho2,par.sigma2)
# c. drift (decay) matrices
z1_drift = drift_matrix(par.Nz1,par.grid_z1,par.beta1,par.DeltaIncome)
z2_drift = drift_matrix(par.Nz2,par.grid_z2,par.beta2,par.DeltaIncome)
# d. add jumps and drift
par.z1_markov = z1_jump + z1_drift
par.z2_markov = z2_jump + z2_drift
# e. ergodic distributions
par.z1dist = ergodic_dist(par.Nz1,par.z1_markov)
par.z2dist = ergodic_dist(par.Nz2,par.z2_markov)
# f. combined process
zgrid_combined, ztrans_dt_combined, zmarkov_combined, zdist_combined = combined_process(
par.Nz1,par.Nz2,
par.grid_z1,par.grid_z2,
par.z1dist,par.z2dist,
par.z1_markov,par.z2_markov,
par.dt)
return zgrid_combined, ztrans_dt_combined, zmarkov_combined, zdist_combined
def split_markov_matrix(par,zmarkov_combined):
# a. get center diagonal
switch_diag = np.diag(zmarkov_combined,0)
# b. get off-diagonals
switch_off_ = zmarkov_combined.copy()
np.fill_diagonal(switch_off_, 0)
# c. create off diagonal matrix
# i. preallocate diagonal and offset lists
diagonals = []
offsets = []
# ii. get upper diagonals
it = par.Nab
for iz in range(1,par.Nz):
diagonals.append(np.repeat(np.diag(zmarkov_combined,iz),par.Nab))
offsets.append(it)
it = it + par.Nab
# iii. get lower diagonals
it = par.Nab
for iz in range(1,par.Nz):
diagonals.append(np.repeat(np.diag(zmarkov_combined,-iz),par.Nab))
offsets.append(-it)
it = it + par.Nab
# iv. generate sparse matrix for off diagonals
switch_off = diags(diagonals = diagonals,
offsets = offsets,
shape = (par.Nzab,par.Nzab),format='csc')
return switch_diag, switch_off, switch_off_
def stretch_markov_matrix(Nab,Nzab,Nz,zmarkov_combined):
# a. preallocate diagonal lists
diagonals = []
offsets = []
# b. get diagonals and offsets
# i. center diagonal
diagonals.append(np.repeat(np.diag(zmarkov_combined,0),Nab))
offsets.append(0)
# ii. upper diagonals
it = Nab
for iz in range(1,Nz): # parallel
diagonals.append(np.repeat(np.diag(zmarkov_combined,iz),Nab))
offsets.append(it)
it = it + Nab
# iii. lower diagonals
it = Nab
for iz in range(1,Nz): # parallel
diagonals.append(np.repeat(np.diag(zmarkov_combined,-iz),Nab))
offsets.append(-it)
it = it + Nab
# c. generate sparse switch matrix
switch = diags(diagonals = diagonals,
offsets = offsets,
shape = (Nzab,Nzab),format='csc')
return switch
#################
# 2. simulation #
#################
@njit
def choice(p,r):
i = 0
while r > p[i]:
i = i + 1
return i
@njit(parallel=True,fastmath=True)
def sim_est(par,nsim,seed=2019):
np.random.seed(seed) # set seed
# a. define simulation parameters and pre-allocate solution containers
Tburn = np.floor(100.0/par.dt)+1
Tsim = int(Tburn + np.floor(24.0/par.dt)+1) # 24 quarters
Tann = int((Tsim-Tburn)*par.dt/4)
z1rand = np.random.normal(0,1,size=(nsim,Tsim))
z2rand = np.random.normal(0,1,size=(nsim,Tsim))
z1jumpI = np.random.poisson(par.lambda1*par.dt,size=(nsim,Tsim))
z2jumpI = np.random.poisson(par.lambda2*par.dt,size=(nsim,Tsim))
z1sim = np.zeros((nsim,Tsim))
z2sim = np.zeros((nsim,Tsim))
zsim = np.zeros((nsim,Tsim))
zannsim = np.zeros((nsim,Tann))
zlevsim = np.zeros((nsim,Tsim))
zannlevsim = np.zeros((nsim,Tann))
# b. get variance of each process for initial distribution
if par.rho1 != 1.0:
if par.beta1 == 0.0: lssvar1 = (par.sigma1**2) / (1.0 - par.rho1**2)
if par.beta1 != 0.0: lssvar1 = par.lambda1*(par.sigma1**2) / (2.0*par.beta1 + par.lambda1*(1.0 - par.rho1**2))
elif par.rho1 == 1.0:
lssvar1 = (par.sigma1**2) / (1.0 - 0.99**2)
if par.beta2 == 0.0: lssvar2 = (par.sigma2**2) / (1.0 - par.rho2**2)
if par.beta2 != 0.0: lssvar2 = par.lambda2*(par.sigma2**2) / (2.0*par.beta2 + par.lambda2*(1.0 - par.rho2**2))
# c. simulate n income paths in dt increments
for i_n in prange(nsim):
# i. draw initial distribution from normal distribution with same mean and variance
z1sim[i_n,0] = np.sqrt(lssvar1)*z1rand[i_n,0]
z2sim[i_n,0] = np.sqrt(lssvar2)*z2rand[i_n,0]
zsim[i_n,0] = z1sim[i_n,0] + z2sim[i_n,0]
# ii. loop over time
for i_t in range(Tsim-1):
if z1jumpI[i_n,i_t] == 1:
z1sim[i_n,i_t+1] = par.rho1*z1sim[i_n,i_t] + par.sigma1*z1rand[i_n,i_t+1]
else:
z1sim[i_n,i_t+1] = (1 - par.dt*par.beta1)*z1sim[i_n,i_t]
if z2jumpI[i_n,i_t] == 1:
z2sim[i_n,i_t+1] = par.rho2*z2sim[i_n,i_t] + par.sigma2*z2rand[i_n,i_t+1]
else:
z2sim[i_n,i_t+1] = (1 - par.dt*par.beta2)*z2sim[i_n,i_t]
zsim[i_n,i_t+1] = z1sim[i_n,i_t+1] + z2sim[i_n,i_t+1]
zlevsim[i_n,:] = np.exp(zsim[i_n,:])
# iii. aggregate to annual income
for i_t in range(Tann):
step = np.floor(4.0/par.dt)
it1 = int(Tburn + step*i_t)
itN = int(it1 + step)
zannlevsim[i_n,i_t] = np.sum(zlevsim[i_n,it1:itN+1])
zannsim[i_n,:] = np.log(zannlevsim[i_n,:])
return zannsim, zannlevsim
@njit(parallel=True,fastmath=True)
def sim_disc(par,nsim,seed=2019):
np.random.seed(seed) # set seed
# a. define simulation parameters and pre-allocate solution containers
Tsim = int(np.floor(24.0/par.dt))+1 # 24 quarters
Tann = int(Tsim*par.dt/4)
zsim = np.zeros((nsim,Tsim))
zlevsim = np.zeros((nsim,Tsim))
zannsim = np.zeros((nsim,Tann))
zannlevsim = np.zeros((nsim,Tann))
z1simI = np.zeros((nsim,Tsim))
z2simI = np.zeros((nsim,Tsim))
eye1 = np.identity(np.int64(par.Nz1))
eye2 = np.identity(np.int64(par.Nz2))
z1trans = par.dt*par.z1_markov + eye1
z2trans = par.dt*par.z2_markov + eye2
z1distcum = np.cumsum(par.z1dist)
z2distcum = np.cumsum(par.z2dist)
    z1transcum = np.zeros(z1trans.shape)  # completion API: numpy.zeros
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Post-rule to transform anomaly scores to follow a standard normal distribution.
"""
import logging
from typing import List, Tuple
import numpy as np
from scipy.stats import norm
from scipy.interpolate import PchipInterpolator
from merlion.post_process.base import PostRuleBase
from merlion.utils import TimeSeries, UnivariateTimeSeries
logger = logging.getLogger(__name__)
class AnomScoreCalibrator(PostRuleBase):
"""
Learns a monotone function which reshapes an input sequence of anomaly scores,
to follow a standard normal distribution. This makes the anomaly scores from
many diverse models interpretable as z-scores.
"""
def __init__(self, max_score: float, abs_score: bool = True, anchors: List[Tuple[float, float]] = None):
"""
:param max_score: the maximum possible uncalibrated score
:param abs_score: whether to consider the absolute values of the
anomaly scores, rather than the raw value.
:param anchors: a sequence of (x, y) pairs mapping an uncalibrated
anomaly score to a calibrated anomaly score. Optional, as this
will be set by `AnomScoreCalibrator.train`.
"""
self.max_score = max_score
self.abs_score = abs_score
self.anchors = anchors
@property
def anchors(self):
return self._anchors
@anchors.setter
def anchors(self, anchors):
"""
:return: a sequence of (x, y) pairs mapping an uncalibrated
anomaly score to a calibrated anomaly score.
"""
if anchors is None or len(anchors) < 2:
self._anchors = None
self.interpolator = None
else:
self._anchors = anchors
self.interpolator = PchipInterpolator(*zip(*anchors))
def train(self, anomaly_scores: TimeSeries, retrain_calibrator=False) -> TimeSeries:
"""
:param anomaly_scores: `TimeSeries` of raw anomaly scores that we will use
to train the calibrator.
:param retrain_calibrator: Whether to re-train the calibrator on a new
sequence of anomaly scores, if it has already been trained once.
In practice, we find better results if this is ``False``.
"""
if self.interpolator is not None and not retrain_calibrator:
return self(anomaly_scores)
x = anomaly_scores.to_pd().values[:, 0]
if self.abs_score:
x = np.abs(x)
targets = [0, 0, 0.5, 1, 1.5, 2]
inputs = np.quantile(x, 2 * norm.cdf(targets) - 1).tolist()
# ub is an upper bound on E[max(X_1, ..., X_n)], for X_i ~ N(0, 1)
ub = self.expected_max(len(x), ub=True)
x_max = x.max()
if self.max_score < x_max:
logger.warning(
f"Obtained max score of {x_max:.2f}, but self.max_score "
f"is only {self.max_score:.2f}. Updating self.max_score "
f"to {x_max * 2:.2f}."
)
self.max_score = x_max * 2
if ub > 4:
targets.append(ub)
inputs.append(x.max())
targets.append(ub + 1)
inputs.append(min(self.max_score, 2 * x_max))
else:
targets.append(5)
inputs.append(min(self.max_score, 2 * x_max))
targets = np.asarray(targets)
inputs = np.asarray(inputs)
valid = np.concatenate(([True], np.abs(inputs[1:] - inputs[:-1]) > 1e-8))
self.anchors = list(zip(inputs[valid], targets[valid]))
return self(anomaly_scores)
@staticmethod
def expected_max(n, ub=False):
"""
:meta private:
"""
if ub:
return np.sqrt(2 * np.log(n))
g = np.euler_gamma
return (1 - g) * norm.ppf(1 - 1 / n) + g * norm.ppf(1 - 1 / np.e / n)
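    # With ub=True this is the sqrt(2*log(n)) upper bound on E[max of n standard normals];
    # otherwise it uses the Gumbel-type approximation
    #   E[max] ≈ (1 - γ) * Φ^(-1)(1 - 1/n) + γ * Φ^(-1)(1 - 1/(e*n)),
    # with γ the Euler-Mascheroni constant (np.euler_gamma).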
def __call__(self, anomaly_scores: TimeSeries) -> TimeSeries:
if self.interpolator is None:
return anomaly_scores
x = anomaly_scores.to_pd().values[:, 0]
b = self.anchors[-1][0]
m = self.interpolator.derivative()(self.anchors[-1][0])
if self.abs_score:
vals = np.maximum(self.interpolator(np.abs(x)), 0) * np.sign(x)
            idx = np.abs(x)  # completion API: numpy.abs
import numpy as np
import pickle
import sys
from copy import copy
from keras.models import load_model
np.random.seed(50)
eps = 1E-5
class NeuralNetworkStruct(object):
def __init__(self ,layers_sizes=[], load_weights = False, input_bounds = None):
# num_lasers includes the output layer
if(len(layers_sizes) == 0):
return
self.num_layers = len(layers_sizes)
self.image_size = layers_sizes[0]
self.output_size = layers_sizes[-1]
self.num_hidden_neurons = sum(layers_sizes[1:-1])
self.layers_sizes = layers_sizes
self.input_min = np.zeros(self.image_size)
self.input_max = np.zeros(self.image_size)
self.input_mean = np.zeros(self.image_size)
self.input_range = np.zeros(self.image_size)
self.out_mean = 0
self.out_range = 0
self.input_bound = input_bounds
self.nonlin_relus = []
self.active_relus = []
self.inactive_relus = []
if(input_bounds is None):
self.input_bound = np.ones((self.layers_sizes[0]+1,2))
self.input_bound[:-1,0] = -1E10
self.input_bound[:-1,1] = 1E10
self.layers = [None]*self.num_layers
if(load_weights):
self.model = load_model("model/my_model.h5")
#input layer
in_bound = self.input_bound[:-1,:]
self.layers[0] = {'idx':0, 'num_nodes':self.image_size, 'weights': [], 'type':'input','lb':in_bound[:,0].reshape((-1,1)),
'ub':in_bound[:,1].reshape((-1,1)),
'Relu_lb': in_bound[:,0].reshape((-1,1)), 'Relu_ub': in_bound[:,1].reshape((-1,1))}
for index in range(self.num_layers):
if(index == 0):
continue
self.layers[index] = {'idx':index, 'num_nodes': layers_sizes[index], 'weights': []}
self.layers[index]['type'] = 'hidden'
if load_weights:
self.layers[index]['weights'] = self.model.get_weights()[2*index].T
self.layers[index]['bias'] = self.model.get_weights()[2*index + 1]
else:
self.layers[index]['weights'] = np.random.normal(scale=2.0, size=(layers_sizes[index], layers_sizes[index-1]))
self.layers[index]['bias'] = np.random.normal(scale=0.5, size=(layers_sizes[index],1))
# self.__compute_IA_bounds()
# self.__compute_sym_bounds()
self.layers[self.num_layers-1]['type'] = 'output'
def recompute_bounds(self,layers_mask):
self.nonlin_relus = []
self.active_relus = []
self.inactive_relus = []
I = np.zeros((self.image_size ,self.image_size+ 1))
np.fill_diagonal(I,1)
layer_sym = SymbolicInterval(I,I,self.input_bound)
for layer_idx in range(1,len(self.layers)):
layer = self.layers[layer_idx]
weights = (layer['weights'],layer['bias'])
layer_sym = layer_sym.forward_linear(weights)
layer['in_sym'] = layer_sym
layer['in_lb'] = layer_sym.concrete_Mlower_bound(layer_sym.lower,layer_sym.interval)
layer['in_ub'] = layer_sym.concrete_Mupper_bound(layer_sym.upper,layer_sym.interval)
if(layer['type'] == 'hidden'):
active_neurons = []
inactive_neurons = []
if(layers_mask is not None):
layer_mask = layers_mask[layer_idx-1]
active_neurons = np.where(layer_mask == 1)[0]
inactive_neurons = np.where(layer_mask == 0)[0]
self.active_relus += [[layer_idx,idx] for idx in active_neurons]
self.inactive_relus += [[layer_idx,idx] for idx in inactive_neurons]
layer_sym,error_vec = layer_sym.forward_relu(layer = layer_idx,nonlin_relus = self.nonlin_relus,inact_relus=self.inactive_relus,act_relus= self.active_relus)
# layer_sym.lower[active_neurons] = layer_sym.upper[active_neurons] = layer['in_sym'].upper[active_neurons]
layer_sym.lower[active_neurons] = copy(layer['in_sym'].upper[active_neurons])
layer_sym.upper[active_neurons] = copy(layer['in_sym'].upper[active_neurons])
layer_sym.upper[inactive_neurons] = 0
layer_sym.lower[inactive_neurons] = 0
layer['conc_lb'] = np.maximum(0,layer_sym.concrete_Mlower_bound(layer_sym.lower,layer_sym.interval))
layer['conc_ub'] = np.maximum(0,layer_sym.concrete_Mupper_bound(layer_sym.upper,layer_sym.interval))
else:
layer['conc_lb'] = layer['in_lb']
layer['conc_ub'] = layer['in_ub']
layer['Relu_sym'] = layer_sym
def __compute_sym_bounds(self):
#first layer Symbolic interval
self.nonlin_relus = []
self.active_relus = []
self.inactive_relus = []
W = self.layers[1]['weights']
b = self.layers[1]['bias'].reshape((-1,1))
input_bounds = np.hstack((self.layers[0]['lb'],self.layers[0]['ub']))
input_bounds = np.vstack((input_bounds,np.ones(2)))
input_sym = SymbolicInterval(np.hstack((W,b)),np.hstack((W,b)),input_bounds)
self.layers[1]['in_sym'] = input_sym
self.layers[1]['in_lb'] = input_sym.concrete_Mlower_bound(input_sym.lower,input_sym.interval)
self.layers[1]['in_ub'] = input_sym.concrete_Mupper_bound(input_sym.upper,input_sym.interval)
# self.layers[1]['Relu_sym'] = input_sym
input_sym,error_vec = input_sym.forward_relu(layer = 1,nonlin_relus = self.nonlin_relus,inact_relus=self.inactive_relus,act_relus= self.active_relus)
self.layers[1]['conc_lb'] = input_sym.concrete_Mlower_bound(input_sym.lower,input_sym.interval)
self.layers[1]['conc_ub'] = input_sym.concrete_Mupper_bound(input_sym.upper,input_sym.interval)
self.layers[1]['Relu_sym'] = input_sym
for layer_idx,layer in enumerate(self.layers):
if(layer_idx < 2):
continue
weights = (layer['weights'],layer['bias'])
input_sym = input_sym.forward_linear(weights)
layer['in_lb'] = input_sym.concrete_Mlower_bound(input_sym.lower,input_sym.interval)
layer['in_ub'] = input_sym.concrete_Mupper_bound(input_sym.upper,input_sym.interval)
layer['in_sym'] = input_sym
if(layer['type'] == 'hidden'):
input_sym,error_vec = input_sym.forward_relu(layer = layer_idx,nonlin_relus = self.nonlin_relus, inact_relus=self.inactive_relus,act_relus= self.active_relus)
layer['Relu_sym'] = input_sym
layer['conc_lb'] = input_sym.concrete_Mlower_bound(input_sym.lower,input_sym.interval)
layer['conc_ub'] = input_sym.concrete_Mupper_bound(input_sym.upper,input_sym.interval)
        self.nonlin_relus.sort()
def update_bounds(self,layer_idx,neuron_idx,bounds,layers_mask = None):
input_sym = self.layers[layer_idx]['Relu_sym']
if(np.all(bounds[0] - input_sym.lower <= eps) and np.all(bounds[1] - input_sym.upper <= eps)):
return
input_sym.lower[neuron_idx] = bounds[0]
input_sym.upper[neuron_idx] = bounds[1]
self.layers[layer_idx]['conc_lb'][neuron_idx] = input_sym.concrete_lower_bound(input_sym.lower[neuron_idx],input_sym.interval)
self.layers[layer_idx]['conc_ub'][neuron_idx] = input_sym.concrete_upper_bound(input_sym.upper[neuron_idx],input_sym.interval)
        for idx, layer in enumerate(self.layers):
if(idx < layer_idx + 1):
continue
if(layers_mask is None):
mask = 1
else:
mask = layers_mask[idx-1]
weights = (layer['weights'],layer['bias'])
input_sym = input_sym.forward_linear(weights)
layer['in_lb'] = input_sym.concrete_Mlower_bound(input_sym.lower,input_sym.interval)
layer['in_ub'] = input_sym.concrete_Mupper_bound(input_sym.upper,input_sym.interval)
if(layer['type'] == 'hidden'):
                input_sym,error_vec = input_sym.forward_relu(layer = idx,nonlin_relus = self.nonlin_relus,inact_relus=self.inactive_relus,act_relus= self.active_relus)
input_sym.lower *= mask
input_sym.upper *= mask
layer['Relu_sym'] = input_sym
layer['conc_lb'] = input_sym.concrete_Mlower_bound(input_sym.lower,input_sym.interval)
layer['conc_ub'] = input_sym.concrete_Mupper_bound(input_sym.upper,input_sym.interval)
def __compute_IA_bounds(self):
for index in range(self.num_layers):
if(self.layers[index]['type'] != 'input'):
W = self.layers[index]['weights']
b = self.layers[index]['bias']
prev_lb = self.layers[index-1]['Relu_lb']
prev_ub = self.layers[index-1]['Relu_ub']
self.layers[index]['lb'] = (np.maximum(0,W).dot(prev_lb) + np.minimum(0,W).dot(prev_ub) + b).reshape((-1,1))
self.layers[index]['ub'] = (np.maximum(0,W).dot(prev_ub) + np.minimum(0,W).dot(prev_lb) + b).reshape((-1,1))
            if(self.layers[index]['type'] != 'output'):
self.layers[index]['Relu_lb'] = np.maximum(0,self.layers[index]['lb']).reshape((-1,1))
self.layers[index]['Relu_ub'] = np.maximum(0,self.layers[index]['ub']).reshape((-1,1))
def set_weights(self,Weights,biases):
for index in range(self.num_layers):
if(index == 0):
continue
self.layers[index]['weights'] = Weights[index - 1]
self.layers[index]['bias'] = biases[index - 1].reshape((-1,1))
self.__compute_IA_bounds()
self.__compute_sym_bounds()
def __set_stats(self,stats):
self.input_min = np.array(stats['min'])
self.input_max = np.array(stats['max'])
self.input_mean = np.array(stats['mean'][:-1])
self.input_range = np.array(stats['range'][:-1])
self.out_mean = stats['mean'][-1]
self.out_range = stats['range'][-1]
def set_target(self,target):
last_layer_W = self.layers[self.num_layers-1]['weights']
last_layer_b = self.layers[self.num_layers-1]['bias']
target_w = copy(last_layer_W[target])
target_b = copy(last_layer_b[target])
for idx in range(len(last_layer_W)):
last_layer_W[idx] -= target_w
last_layer_b[idx] -= target_b
def set_bounds(self,input_bounds):
self.input_bound = input_bounds
self.layers[0]['lb'] = input_bounds[:,0].reshape((-1,1))
self.layers[0]['ub'] = input_bounds[:,1].reshape((-1,1))
self.layers[0]['Relu_lb'] = input_bounds[:,0].reshape((-1,1))
self.layers[0]['Relu_ub'] = input_bounds[:,1].reshape((-1,1))
self.__compute_IA_bounds()
self.__compute_sym_bounds()
def get_phases(self, input):
#input shapes N*D where N is the batch size and D is the dim of input point
phases = []
prev = input
for index in range(self.num_layers):
if(index == 0):
continue
W = self.layers[index]['weights']
b = self.layers[index]['bias']
net = prev @ W.T + b.T
phases.append(net > 1E-5)
if(self.layers[index]['type'] == 'output'):
prev = net
else:
prev = np.maximum(0,net)
return phases, prev
def eval_and_update_Lip(self, input):
#input shapes N*D where N is the batch size and D is the dim of input point
# phases = []
prev = input
max_diff = (self.input_bound[:,1] - self.input_bound[:,0]).flatten()
vol = np.prod(max_diff)
dims = self.image_size
radius = 0.5 * (dims**0.5) * (vol/len(input))**(1/dims)
L = np.eye(self.layers[1]['weights'].shape[1])
for index in range(self.num_layers):
if(index == 0):
continue
W = self.layers[index]['weights']
b = self.layers[index]['bias']
W_ = copy(W)
relu_ub = self.layers[index]['conc_ub']
in_active = np.where(relu_ub <= 0)[0]
W_[in_active] = 0
L = np.matmul(W_,L)
net = prev @ W.T + b.T
# phases.append(net > 1E-5)
if(self.layers[index]['type'] == 'output'):
prev = net
else:
prev = np.maximum(0,net)
f_max = np.max(net,axis = 0)
f_min = np.min(net,axis = 0)
L_LB = np.linalg.norm(L,ord = 2)
self.layers[index]['L_ub'] = f_max + L_LB * radius
self.layers[index]['L_lb'] = f_min - L_LB * radius
return prev
def evaluate(self,input):
prev = input
for index in range(self.num_layers):
if(index == 0):
continue
W = self.layers[index]['weights']
b = self.layers[index]['bias']
net = W.dot(prev) + b
if(self.layers[index]['type'] == 'output'):
prev = net
else:
prev = np.maximum(0,net)
return prev
def normalize_input(self,val):
ret = np.zeros_like(val)
for inputIndex in range(len(val)):
in_min = self.input_min[inputIndex]
in_max = self.input_max[inputIndex]
in_mean = self.input_mean[inputIndex]
in_range = self.input_range[inputIndex]
if ( val[inputIndex] < in_min ):
val[inputIndex] = in_min
elif ( val[inputIndex] > in_max ):
val[inputIndex] = in_max
ret[inputIndex] = ( val[inputIndex] - in_mean ) / in_range
return ret
def normalize_output(self,val):
ret = np.zeros_like(val)
out_mean = self.out_mean
out_range = self.out_range
ret = ( val - out_mean ) / out_range
return ret
def unnormalize_input(self,inputIndex, val):
in_mean = self.input_mean[inputIndex]
in_range = self.input_range[inputIndex]
return (val * in_range) + in_mean
def parse_network(self, model_file,type = 'Acas'):
with open(model_file,'r') as f:
start_idx = 4
if(type == 'mnist'):
start_idx = 2
model_fmt_file = f.readlines()
layers_sizes = list(map(int,model_fmt_file[start_idx][:-2].split(',')))
f.close()
W = []
biases =[]
start_idx = 10
if(type == 'mnist'):
start_idx = 3
for idx in range(1, len(layers_sizes)):
source = layers_sizes[idx-1]
target = layers_sizes[idx]
layer_weights = np.zeros((target,source))
layer_bias = np.zeros(target)
for row in range(target):
weights = np.array(list(map(float,model_fmt_file[start_idx].split(',')[:-1])))
layer_weights[row] = weights
start_idx +=1
for row in range(target):
bias = float(model_fmt_file[start_idx].split(',')[0])
layer_bias[row] = bias
start_idx +=1
W.append(layer_weights)
biases.append(layer_bias)
#Read min and max for inputs
mins = list(map(float,model_fmt_file[6].split(',')[:-1]))
maxs = list(map(float,model_fmt_file[7].split(',')[:-1]))
means = list(map(float,model_fmt_file[8].split(',')[:-1]))
ranges = list(map(float,model_fmt_file[9].split(',')[:-1]))
stats = {'min' :mins, 'max':maxs,'mean':means,'range':ranges}
self.__init__(layers_sizes)
self.set_weights(W,biases)
self.__set_stats(stats)
# return layers_sizes,W,biases,stats
def compute_L_LB(self):
norm = copy(self.layers[1]['weights'])
relu_ub = self.layers[1]['conc_ub']
in_active = np.where(relu_ub <= 0)[0]
norm[in_active] = 0
for i in range(2,self.num_layers):
relu_ub = self.layers[i]['conc_ub']
in_active = np.where(relu_ub <=0)[0]
W = copy(self.layers[i]['weights'])
W[in_active] = 0
norm = np.matmul(W,norm)
L = np.linalg.norm(norm,ord = 2)
return L
def compute_L_UB(self):
W = np.copy(self.layers[1]['weights'])
relu_ub = self.layers[1]['conc_ub']
in_active = np.where(relu_ub <= 0)[0]
W[in_active] = 0
L = np.linalg.norm(W,ord = 2)
for i in range(2,self.num_layers):
relu_ub = self.layers[i]['conc_ub']
in_active = np.where(relu_ub <=0)[0]
W = np.copy(self.layers[i]['weights'])
W[in_active] = 0
L = L * np.linalg.norm(W,ord = 2)
return L
class SymbolicInterval(object):
def __init__(self, low, upp, interval = None):
self.lower = low
self.upper = upp
if(interval is not None):
self.interval = interval
else:
self.interval = np.zeros((self.lower.shape[1]-1,2))
def forward_linear(self, weights):
W,b = weights
out_upp = np.atleast_2d(np.matmul(np.maximum(W,0),self.upper) + np.matmul(np.minimum(W,0),self.lower))
out_low = np.atleast_2d(np.matmul(np.maximum(W,0),self.lower) + np.matmul(np.minimum(W,0),self.upper))
out_upp[:,-1] += b.flatten()
out_low[:,-1]+= b.flatten()
return SymbolicInterval(out_low,out_upp,self.interval)
def forward_relu(self,layer = -1,nonlin_relus = [],inact_relus = [],act_relus = []):
relu_lower_equtions = copy(self.lower)
relu_upper_equations = copy(self.upper)
error_vec = np.zeros(len(relu_lower_equtions))
for row in range(relu_lower_equtions.shape[0]):
relu_lower_eq = relu_lower_equtions[row]
relu_upper_eq = relu_upper_equations[row]
lower_lb = self.concrete_lower_bound(relu_lower_eq, self.interval)
lower_ub = self.concrete_upper_bound(relu_lower_eq, self.interval)
upper_lb = self.concrete_lower_bound(relu_upper_eq, self.interval)
upper_ub = self.concrete_upper_bound(relu_upper_eq, self.interval)
if(lower_lb >= 0):
act_relus.append([layer,row])
elif(upper_ub <= 0):
relu_lower_eq[:] = 0
relu_upper_eq[:] = 0
inact_relus.append([layer,row])
else:
nonlin_relus.append([layer,row])
if(abs(lower_lb) > abs(upper_ub) or lower_ub <= eps):
relu_lower_eq[:] = 0
elif(lower_ub > eps):
relu_lower_eq[:] = lower_ub * (relu_lower_eq) / (lower_ub - lower_lb)
else:
relu_lower_eq[:] = 0
if(upper_lb < eps):
relu_upper_eq[:] = upper_ub * (relu_upper_eq) / (upper_ub - upper_lb)
relu_upper_eq[-1] -= upper_ub* upper_lb / (upper_ub - upper_lb)
error_vec[row] -= upper_ub* upper_lb / (upper_ub - upper_lb)
return SymbolicInterval(relu_lower_equtions,relu_upper_equations, self.interval),np.diagflat(error_vec)
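    # forward_relu applies the standard symbolic-interval ReLU relaxation: stable neurons keep
    # (or zero out) their equations, while an unstable neuron (lower bound < 0 < upper bound)
    # gets the scaled upper relaxation u*(x - l)/(u - l) and a scaled-or-zeroed lower equation.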
def concrete_lower_bound(self, equation, interval):
#Get indices of coeff >0
p_idx = np.where(equation[:-1] > 0)[0]
n_idx = np.where(equation[:-1] <= 0)[0]
lb = equation[p_idx].dot(interval[p_idx,0]) + equation[n_idx].dot(interval[n_idx,1]) + equation[-1]
return lb
def concrete_upper_bound(self, equation, interval):
p_idx = np.where(equation[:-1] > 0)[0]
n_idx = np.where(equation[:-1] <= 0)[0]
ub = equation[p_idx].dot(interval[p_idx,1]) + equation[n_idx].dot(interval[n_idx,0]) + equation[-1]
return ub
def concrete_Mlower_bound(self, equations, interval):
lb = []
for equation in equations:
lb.append(self.concrete_lower_bound(equation,interval))
        return np.array(lb)  # completion API: numpy.array
import pytest
from numba import vectorize, cuda
import numpy as np
from copy import deepcopy
import sgp4.sgp4_g as si
import sgp4.propagation as prop
from sgp4.earth_gravity import wgs72
@pytest.fixture(scope='function')
def client():
class Client:
whichconst = np.array([
wgs72.tumin, # tumin
wgs72.mu, # mu
wgs72.radiusearthkm, # radiusearthkm
wgs72.xke,
wgs72.j2,
wgs72.j3,
wgs72.j4,
wgs72.j3oj2
],
dtype=np.float64
)
return Client()
def test_gstime():
""" test the gstime function """
@vectorize(['float64(float64)'], target='cuda')
def timer(jdut1):
return si.gstime(jdut1)
jdut1 = np.linspace(2450545.0, 2458603, 20, dtype=np.float64)
result = timer(jdut1)
expected = [prop.gstime(jd1) for jd1 in jdut1]
assert np.allclose(result, expected)
def test_dscom(client):
""" test the dscome function """
@cuda.jit('void(float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:, :])')
def sample_function(epoch, ep, argpp, tc, inclp, nodep, np_, out):
idx = cuda.grid(1)
stride = cuda.gridsize(1)
for i in range(idx, epoch.shape[0], stride):
si._dscom(
epoch[i],
ep[i],
argpp[i],
tc[i],
inclp[i],
nodep[i],
np_[i],
out[i, :]
)
# some sample input data
epoch = 20630.332154440228
ep = 0.6877146
argpp = 4.621022739372039
tc = 0.0
inclp = 1.119778813470034
nodep = 4.87072001413786
np_ = 0.008748547019630239
n = 50
m = 81
# the input arrays
epoch_array = np.ones((n, ), dtype=np.float64) * epoch
ep_array = np.ones((n, ), dtype=np.float64) * ep
argpp_array = np.ones((n, ), dtype=np.float64) * argpp
tc_array = np.ones((n, ), dtype=np.float64) * tc
inclp_array = np.ones((n, ), dtype=np.float64) * inclp
nodep_array = np.ones((n, ), dtype=np.float64) * nodep
np_array = np.ones((n, ), dtype=np.float64) * np_
# the data output array
out = np.zeros((n, m), dtype=np.float64)
sample_function(
epoch_array,
ep_array,
argpp_array,
tc_array,
inclp_array,
nodep_array,
np_array,
out
)
expected = np.array(
prop._dscom(epoch, ep, argpp, tc, inclp, nodep, np_, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
dtype=np.float64
)
assert np.allclose(out, expected)
def test_dpper():
""" test the _dpper function """
@cuda.jit('void(float64[:, :], float64[:, :])')
def sample_function(satrec, out):
idx = cuda.grid(1)
stride = cuda.gridsize(1)
for i in range(idx, satrec.shape[0], stride):
si._dpper(
satrec[i, :],
out[i, :]
)
# the data for creating the dscom data
epoch = 20630.332154440228
# ep = 0.6877146
# argpp = 4.621022739372039
tc = 0.0
# inclp = 1.119778813470034
# nodep = 4.87072001413786
np_ = 0.008748547019630239
# for the case when init is False
inclo = 0.16573297511087753
init = 0
ep = 0.0270971
inclp = 0.16573297511087753
nodep = 5.465934884933242
argpp = 5.716345999363128
mp = 0.537730706551697
afspc_mode = 0
t_ = 1844345.0
n = 20
m = 5
dscom_array = np.array(
[
prop._dscom(epoch, ep, argpp, tc, inclp, nodep, np_, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
for _ in range(n)]
)
class Satrec:
e3 = deepcopy(dscom_array[0, 7])
ee2 = deepcopy(dscom_array[0, 8])
peo = deepcopy(dscom_array[0, 12])
pgho = deepcopy(dscom_array[0, 13])
pho = deepcopy(dscom_array[0, 14])
pinco = deepcopy(dscom_array[0, 15])
plo = deepcopy(dscom_array[0, 16])
se2 = deepcopy(dscom_array[0, 18])
se3 = deepcopy(dscom_array[0, 19])
sgh2 = deepcopy(dscom_array[0, 20])
sgh3 = deepcopy(dscom_array[0, 21])
sgh4 = deepcopy(dscom_array[0, 22])
sh2 = deepcopy(dscom_array[0, 23])
sh3 = deepcopy(dscom_array[0, 24])
si2 = deepcopy(dscom_array[0, 25])
si3 = deepcopy(dscom_array[0, 26])
sl2 = deepcopy(dscom_array[0, 27])
sl3 = deepcopy(dscom_array[0, 28])
sl4 = deepcopy(dscom_array[0, 29])
t = deepcopy(t_)
xgh2 = deepcopy(dscom_array[0, 56])
xgh3 = deepcopy(dscom_array[0, 57])
xgh4 = deepcopy(dscom_array[0, 58])
xh2 = deepcopy(dscom_array[0, 59])
xh3 = deepcopy(dscom_array[0, 60])
xi2 = deepcopy(dscom_array[0, 61])
xi3 = deepcopy(dscom_array[0, 62])
xl2 = deepcopy(dscom_array[0, 63])
xl3 = deepcopy(dscom_array[0, 64])
xl4 = deepcopy(dscom_array[0, 65])
zmol = deepcopy(dscom_array[0, 79])
zmos = deepcopy(dscom_array[0, 80])
@staticmethod
def satrec_array():
satrec_array = np.zeros((100,))
satrec_array[46] = dscom_array[0, 7] # e3
satrec_array[47] = dscom_array[0, 8] # ee2
satrec_array[48] = dscom_array[0, 12] # peo
satrec_array[49] = dscom_array[0, 13] # pgho
satrec_array[50] = dscom_array[0, 14] # pho
satrec_array[51] = dscom_array[0, 15] # pinco
satrec_array[52] = dscom_array[0, 16] # plo
satrec_array[53] = dscom_array[0, 18] # se2
satrec_array[54] = dscom_array[0, 19] # se3
satrec_array[55] = dscom_array[0, 20] # sgh2
satrec_array[56] = dscom_array[0, 21] # sgh3
satrec_array[57] = dscom_array[0, 22] # sgh4
satrec_array[58] = dscom_array[0, 23] # sh2
satrec_array[59] = dscom_array[0, 24] # sh3
satrec_array[60] = dscom_array[0, 25] # si2
satrec_array[61] = dscom_array[0, 26] # si3
satrec_array[62] = dscom_array[0, 27] # sl2
satrec_array[63] = dscom_array[0, 28] # sl3
satrec_array[64] = dscom_array[0, 29] # sl4
satrec_array[67] = dscom_array[0, 56] # xgh2
satrec_array[68] = dscom_array[0, 57] # xgh3
satrec_array[69] = dscom_array[0, 58] # xgh4
satrec_array[70] = dscom_array[0, 59] # xh2
satrec_array[71] = dscom_array[0, 60] # xh3
satrec_array[72] = dscom_array[0, 61] # xi2
satrec_array[73] = dscom_array[0, 62] # xi3
satrec_array[74] = dscom_array[0, 63] # xl2
satrec_array[75] = dscom_array[0, 64] # xl3
satrec_array[76] = dscom_array[0, 65] # xl4
satrec_array[79] = dscom_array[0, 79] # zmol
satrec_array[80] = dscom_array[0, 80] # zmos
satrec_array[15] = t_ # t
satrec_array[91] = afspc_mode # afspc_mode
satrec_array[93] = init # init
return satrec_array
satrec = Satrec()
satrec_array = np.array([Satrec.satrec_array() for _ in range(n)], dtype=np.float64)
out_ref = np.array([[ep, inclp, nodep, argpp, mp] for _ in range(n)], dtype=np.float64)
out = deepcopy(out_ref)
sample_function(satrec_array, out)
expected = prop._dpper(satrec, inclo, 'n', ep, inclp, nodep, argpp, mp, False)
assert np.allclose(expected, out)
# test a case when the init and afspc_mode is True
satrec_array1 = deepcopy(satrec_array)
satrec_array1[:, 91] = 1
satrec_array1[:, 93] = 1
out1 = deepcopy(out_ref)
sample_function(satrec_array1, out1)
expected1 = prop._dpper(satrec, inclo, 'y', ep, inclp, nodep, argpp, mp, True)
assert np.allclose(expected1, out1)
def test_initl(client):
""" test the initl function """
@cuda.jit('void(float64[:, :], float64[:], float64[:], float64[:], int8[:], float64[:, :])')
def sample_function(which_const, ecco, epoch, inclo, afspc_mode, out):
idx = cuda.grid(1)
stride = cuda.gridsize(1)
for i in range(idx, ecco.shape[0], stride):
si._initl(
which_const[i, :],
ecco[i],
epoch[i],
inclo[i],
afspc_mode[i],
out[i, :]
)
ecco = 0.1
epoch = 18441.78495062003
inclo = 0.5980929187319208
afspc_mode = 0
no = 0.04722944544077857
method = 0
n = 20
m = 15
ecco_array = np.ones((n,), dtype=np.float64) * ecco
epoch_array = np.ones((n,), dtype=np.float64) * epoch
inclo_array = np.ones((n,), dtype=np.float64) * inclo
afspc_mode_array = np.ones((n,), dtype=np.int8) * afspc_mode
whichconst_array = np.array([client.whichconst for _ in range(n)], dtype=np.float64)
out = np.zeros((n, m), dtype=np.float64)
out[:, 0] = no
out[:, 1] = method
sample_function(whichconst_array, ecco_array, epoch_array, inclo_array, afspc_mode_array, out)
whichconst_ = wgs72
expected = list(prop._initl(0, whichconst_, ecco, epoch, inclo, no, 'n', False))
expected[1] = 0 if expected[1] == 'n' else 1
assert np.allclose(expected, out)
# test the case when the flags are True
afspc_mode_array1 = np.ones((n,), dtype=np.int8)  # must match the int8[:] kernel signature
out1 = np.zeros_like(out, dtype=np.float64)
out1[:, 0] = no
out1[:, 1] = method
sample_function(whichconst_array, ecco_array, epoch_array, inclo_array, afspc_mode_array1, out1)
expected1 = list(prop._initl(0, whichconst_, ecco, epoch, inclo, no, 'y', True))
expected1[1] = 0 if expected1[1] == 'n' else 1
assert np.allclose(expected1, out1)
def test_dsinit(client):
"""
[
cosim 0
emsq 1
argpo 2
s1 3
s2 4
s3 5
s4 6
s5 7
sinim 8
ss1 9
ss2 10
ss3 11
ss4 12
ss5 13
sz1 14
sz3 15
sz11 16
sz13 17
sz21 18
sz23 19
sz31 20
sz33 21
t 22
tc 23
gsto 24
mo 25
mdot 26
no 27
nodeo 28
nodedot 29
xpidot 30
z1 31
z3 32
z11 33
z13 34
z21 35
z23 36
z31 37
z33 38
ecco 39
eccsq 40
em 41
argpm 42
inclm 43
mm 44
nm 45
nodem 46
irez 47
atime 48
d2201 49
d2211 50
d3210 51
d3222 52
d4410 53
d4422 54
d5220 55
d5232 56
d5421 57
d5433 58
dedt 59
didt 60
dmdt 61
dnodt 62
domdt 63
del1 64
del2 65
del3 66
xfact 67
xlamo 68
xli 69
xni 70
]
[
em, 0
argpm, 1
inclm, 2
mm, 3
nm, 4
nodem, 5
irez, 6
atime, 7
d2201, 8
d2211, 9
d3210, 10
d3222, 11
d4410, 12
d4422, 13
d5220, 14
d5232, 15
d5421, 16
d5433, 17
dedt, 18
didt, 19
dmdt, 20
dndt, 21
dnodt, 22
domdt, 23
del1, 24
del2, 25
del3, 26
xfact, 27
xlamo, 28
xli, 29
xni, 30
]
"""
@cuda.jit('void(float64[:, :], float64[:, :], float64[:, :])')
def sample_function(which_const, dsinit_in, out):
idx = cuda.grid(1)
stride = cuda.gridsize(1)
for i in range(idx, dsinit_in.shape[0], stride):
si._dsinit(
which_const[i, :],
dsinit_in[i, :],
out[i, :]
)
n = 20
m_in = 41
m_out = 31
argpm = 0.0
argpo = 3.623303527140228
atime = 0.0
cosim = 0.9800539401920249
d2201 = 0.0
d2211 = 0.0
d3210 = 0.0
d3222 = 0.0
d4410 = 0.0
d4422 = 0.0
d5220 = 0.0
d5232 = 0.0
d5421 = 0.0
d5433 = 0.0
dedt = 0.0
del1 = 0.0
del2 = 0.0
del3 = 0.0
didt = 0.0
dmdt = 0.0
dnodt = 0.0
domdt = 0.0
ecco = 0.1450506
eccsq = 0.02103967656036
em = 0.1450506
emsq = 0.02103967656036
gsto = 1.7160270840712997
inclm = 0.200063601497606
irez = 0
mdot = 0.005246109831442361
mm = 0.0
mo = 2.5121396588580382
nm = 0.005245868658927085
no = 0.005245868658927085
nodedot = -2.5397114508943806e-07
nodem = 0.0
nodeo = 4.766670465450965
s1 = -0.00019684669188710442
s2 = -4.6208539892862366e-05
s3 = 9.143969877776298e-05
s4 = 9.047265431838011e-05
s5 = 0.010150047372442628
sinim = 0.1987316640952998
ss1 = -0.0012255625682065598
ss2 = -0.00028769321079904646
ss3 = 0.0005693012719481262
ss4 = 0.0005632804773904462
ss5 = 0.017590824941402683
sz1 = 13.139035148723423
sz11 = 2.3891150762100812
sz13 = -0.06532930148080213
sz21 = -0.25766266752885947
sz23 = -0.7103439559618776
sz3 = 4.721576706991561
sz31 = 7.986387467635543
sz33 = -1.1664757543866426
t = 0.0
tc = 0.0
xfact = 0.0
xlamo = 0.0
xli = 0.0
xni = 0.0
xpidot = 2.38732468766333e-07
z1 = 6.6680010237276335
z11 = 1.630805643792089
z13 = 0.9259764788331608
z21 = 0.828684206949739
z23 = -2.05015105615014
z3 = 9.879517039032041
z31 = 1.2489443628199304
z33 = 4.701299585848115
dndt = 0.0
dsinit_in = np.array([
cosim, emsq, argpo, s1, s2, s3, s4, s5, sinim, ss1, ss2, ss3, ss4, ss5, sz1, sz3, sz11, sz13, sz21, sz23,
sz31, sz33, t, tc, gsto, mo, mdot, no, nodeo, nodedot, xpidot, z1, z3, z11, z13, z21, z23, z31, z33, ecco,
eccsq
],
dtype=np.float64
)
out = np.array([
em, argpm, inclm, mm, nm, nodem, irez, atime, d2201, d2211, d3210, d3222, d4410, d4422, d5220,
d5232, d5421, d5433, dedt, didt, dmdt, dndt, dnodt, domdt, del1, del2, del3, xfact, xlamo, xli, xni,
],
dtype=np.float64
)
dsinit_in_array = np.array([dsinit_in for _ in range(n)], dtype=np.float64)
out_array = np.array([out for _ in range(n)], dtype=np.float64)
which_const_array = np.array([client.whichconst for _ in range(n)], dtype=np.float64)
sample_function(which_const_array, dsinit_in_array, out_array)
expected = prop._dsinit(
wgs72, cosim, emsq, argpo, s1, s2, s3, s4, s5, sinim, ss1, ss2, ss3, ss4, ss5, sz1, sz3, sz11, sz13, sz21, sz23,
sz31, sz33, t, tc, gsto, mo, mdot, no, nodeo, nodedot, xpidot, z1, z3, z11, z13, z21, z23, z31, z33, ecco,
eccsq, em, argpm, inclm, mm, nm, nodem, irez, atime, d2201, d2211, d3210, d3222, d4410, d4422, d5220, d5232,
d5421, d5433, dedt, didt, dmdt, dnodt, domdt, del1, del2, del3, xfact, xlamo, xli, xni
)
assert np.allclose(expected, out_array)
def test_dspace():
""" test the _dspace function """
@cuda.jit('void(float64[:, :], float64[:], float64[:, :])')
def sample_function(satrec, tc, out):
idx = cuda.grid(1)
stride = cuda.gridsize(1)
for i in range(idx, tc.shape[0], stride):
si._dspace(
satrec[i, :],
tc[i],
out[i, :]
)
argpm = 0.0
argpo = 3.623303527140228
atime = 0.0
d2201 = 0.0
d2211 = 0.0
d3210 = 0.0
d3222 = 0.0
d4410 = 0.0
d4422 = 0.0
d5220 = 0.0
d5232 = 0.0
d5421 = 0.0
d5433 = 0.0
dedt = 0.0
del1 = 0.0
del2 = 0.0
del3 = 0.0
didt = 0.0
dmdt = 0.0
dnodt = 0.0
domdt = 0.0
em = 0.1450506
gsto = 1.7160270840712997
inclm = 0.200063601497606
irez = 0
mm = 0.0
nm = 0.005245868658927085
no = 0.005245868658927085
nodem = 0.0
t = 0.0
tc = 0.0
xfact = 0.0
xlamo = 0.0
xli = 0.0
xni = 0.0
argpdot = 0.001
# define the satrec input array
satrec = np.zeros((100,), dtype=np.float64)
satrec[27] = irez
satrec[28] = d2201
satrec[29] = d2211
satrec[30] = d3210
satrec[31] = d3222
satrec[32] = d4410
satrec[33] = d4422
satrec[34] = d5220
satrec[35] = d5232
satrec[36] = d5421
satrec[37] = d5433
satrec[38] = dedt
satrec[39] = del1
satrec[40] = del2
satrec[41] = del3
satrec[42] = didt
satrec[43] = dmdt
satrec[44] = dnodt
satrec[45] = domdt
satrec[86] = argpo
satrec[12] = argpdot
satrec[65] = gsto
satrec[66] = xfact
satrec[77] = xlamo
satrec[89] = no
satrec[15] = t
# define the output array
out = np.zeros((10,), dtype=np.float64)
out[0] = atime
out[1] = em
out[2] = argpm
out[3] = inclm
out[4] = xli
out[5] = mm
out[6] = xni
out[7] = nodem
out[9] = nm
n = 20
satrec_array = np.array([satrec for _ in range(n)], dtype=np.float64)
out_array = np.array([out for _ in range(n)], dtype=np.float64)
tc_array = np.array([tc for _ in range(n)], dtype=np.float64)
sample_function(satrec_array, tc_array, out_array)
result = np.array(prop._dspace(
irez, d2201, d2211, d3210, d3222, d4410, d4422, d5220, d5232, d5421, d5433, dedt, del1, del2, del3, didt,
dmdt, dnodt, domdt, argpo, argpdot, t, tc, gsto, xfact, xlamo, no, atime, em, argpm, inclm, xli, mm, xni,
nodem, nm
), dtype=np.float64)
assert np.allclose(result, out_array)
def test_sgp4_init(client):
"""
isimp 0
method 1
aycof 2
con41 3
cc1 4
cc4 5
cc5 6
d2 7
d3 8
d4 9
delmo 10
eta 11
argpdot 12
omgcof 13
sinmao 14
t 15
t2cof 16
t3cof 17
t4cof 18
t5cof 19
x1mth2 20
x7thm1 21
mdot 22
nodedot 23
xlcof 24
xmcof 25
nodecf 26
irez 27
d2201 28
d2211 29
d3210 30
d3222 31
d4410 32
d4422 33
d5220 34
d5232 35
d5421 36
d5433 37
dedt 38
del1 39
del2 40
del3 41
didt 42
dmdt 43
dnodt 44
domdt 45
e3 46
ee2 47
peo 48
pgho 49
pho 50
pinco 51
plo 52
se2 53
se3 54
sgh2 55
sgh3 56
sgh4 57
sh2 58
sh3 59
si2 60
si3 61
sl2 62
sl3 63
sl4 64
gsto 65
xfact 66
xgh2 67
xgh3 68
xgh4 69
xh2 70
xh3 71
xi2 72
xi3 73
xl2 74
xl3 75
xl4 76
xlamo 77
xlmth2 78
zmol 79
zmos 80
atime 81
xli 82
xni 83
bstar 84
ecco 85
argpo 86
inclo 87
mo 88
no 89
nodeo 90
afspc_mode 91
error 92
init 93
x 94
y 95
z 96
u 97
v 98
w 99
"""
# @cuda.jit('void(float64[:, :], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:, :])')
# def sample_function(whichconst, afspc_mode, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno, xnodeo, out):
# idx = cuda.grid(1)
# stride = cuda.gridsize(1)
# for i in range(idx, afspc_mode.shape[0], stride):
# si.sgp4_init_g(
# whichconst[i, :], afspc_mode[i], epoch[i], xbstar[i], xecco[i], xargpo[i], xinclo[i], xmo[i], xno[i],
# xnodeo[i], out[i, :]
# )
n = 25
m = 100
afspc_mode = 1
epoch = 18441.78495062003
satn = 5
satrec = np.zeros((n, m), dtype=np.float64)
xargpo = 5.790416027488515
xbstar = 2.8098e-05
xecco = 0.1859667
xinclo = 0.5980929187319208
xmo = 0.3373093125574321
xno = 0.04722944544077857
xnodeo = 6.08638547138321
which_const_array = np.array([client.whichconst for _ in range(n)], dtype=np.float64)
afspc_mode_a = np.ones((n,)) * afspc_mode
epoch_a = np.ones_like(afspc_mode_a) * epoch
satn_a = np.ones_like(afspc_mode_a) * satn
xargpo_a = np.ones_like(afspc_mode_a) * xargpo
xbstar_a = np.ones_like(afspc_mode_a) * xbstar
xecco_a = np.ones_like(afspc_mode_a) * xecco
xinclo_a = np.ones_like(afspc_mode_a) * xinclo
xmo_a = np.ones_like(afspc_mode_a) * xmo
xno_a = np.ones_like(afspc_mode_a) * xno
xnodeo_a = np.ones_like(afspc_mode_a) * xnodeo
si.sgp4_init(
which_const_array,
afspc_mode_a,
epoch_a,
xbstar_a,
xecco_a,
xargpo_a,
xinclo_a,
xmo_a,
xno_a,
xnodeo_a,
satrec
)
from sgp4.model import Satellite
expected = Satellite()
expected.whichconst = client.whichconst
prop.sgp4init(client.whichconst, afspc_mode, satn, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno,
xnodeo, expected)
assert np.allclose(satrec[:, 0], expected.isimp)
assert np.allclose(satrec[:, 1], 1 if expected.method == 'd' else 0) # method
assert np.allclose(satrec[:, 2], expected.aycof)
assert np.allclose(satrec[:, 3], expected.con41)
assert np.allclose(satrec[:, 4], expected.cc1)
assert np.allclose(satrec[:, 5], expected.cc4)
assert np.allclose(satrec[:, 6], expected.cc5)
assert np.allclose(satrec[:, 7], expected.d2)
assert np.allclose(satrec[:, 8], expected.d3)
assert np.allclose(satrec[:, 9], expected.d4)
assert np.allclose(satrec[:, 10], expected.delmo)
assert np.allclose(satrec[:, 11], expected.eta)
assert np.allclose(satrec[:, 12], expected.argpdot)
assert np.allclose(satrec[:, 13], expected.omgcof)
assert np.allclose(satrec[:, 14], expected.sinmao)
assert np.allclose(satrec[:, 15], expected.t)
assert np.allclose(satrec[:, 16], expected.t2cof)
assert np.allclose(satrec[:, 17], expected.t3cof)
assert np.allclose(satrec[:, 18], expected.t4cof)
assert np.allclose(satrec[:, 19], expected.t5cof)
assert np.allclose(satrec[:, 20], expected.x1mth2)
assert np.allclose(satrec[:, 21], expected.x7thm1)
assert np.allclose(satrec[:, 22], expected.mdot)
assert np.allclose(satrec[:, 23], expected.nodedot)
assert np.allclose(satrec[:, 24], expected.xlcof)
assert np.allclose(satrec[:, 25], expected.xmcof)
assert np.allclose(satrec[:, 26], expected.nodecf)
assert np.allclose(satrec[:, 27], expected.irez)
assert np.allclose(satrec[:, 28], expected.d2201)
assert np.allclose(satrec[:, 29], expected.d2211)
assert np.allclose(satrec[:, 30], expected.d3210)
assert np.allclose(satrec[:, 31], expected.d3222)
assert np.allclose(satrec[:, 32], expected.d4410)
assert np.allclose(satrec[:, 33], expected.d4422)
assert np.allclose(satrec[:, 34], expected.d5220)
assert np.allclose(satrec[:, 35], expected.d5232)
assert np.allclose(satrec[:, 36], expected.d5421)
assert np.allclose(satrec[:, 37], expected.d5433)
assert np.allclose(satrec[:, 38], expected.dedt)
assert np.allclose(satrec[:, 39], expected.del1)
assert np.allclose(satrec[:, 40], expected.del2)
assert np.allclose(satrec[:, 41], expected.del3)
assert np.allclose(satrec[:, 42], expected.didt)
assert np.allclose(satrec[:, 43], expected.dmdt)
assert np.allclose(satrec[:, 44], expected.dnodt)
assert np.allclose(satrec[:, 45], expected.domdt)
assert np.allclose(satrec[:, 46], expected.e3)
assert np.allclose(satrec[:, 47], expected.ee2)
assert np.allclose(satrec[:, 48], expected.peo)
assert np.allclose(satrec[:, 49], expected.pgho)
assert np.allclose(satrec[:, 50], expected.pho)
assert np.allclose(satrec[:, 51], expected.pinco)
assert np.allclose(satrec[:, 52], expected.plo)
assert np.allclose(satrec[:, 53], expected.se2)
assert np.allclose(satrec[:, 54], expected.se3)
import numpy as np
import math
from scipy.special import gamma
import scipy
import scipy.ndimage
def paired_product(new_im):
shift1 = np.roll(new_im.copy(), 1, axis=1)
shift2 = np.roll(new_im.copy(), 1, axis=0)
shift3 = np.roll(np.roll(new_im.copy(), 1, axis=0), 1, axis=1)
shift4 = np.roll(np.roll(new_im.copy(), 1, axis=0), -1, axis=1)
H_img = shift1 * new_im
V_img = shift2 * new_im
D1_img = shift3 * new_im
D2_img = shift4 * new_im
return (H_img, V_img, D1_img, D2_img)
def gen_gauss_window(lw, sigma):
sd = np.float32(sigma)
lw = int(lw)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd *= sd
for ii in range(1, lw + 1):
tmp = np.exp(-0.5 * np.float32(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
return weights
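# Illustrative sanity check (hypothetical helper, not part of the original
# feature code): the window returned by gen_gauss_window is symmetric and
# normalized to sum to 1, which the MSCN computation below relies on.
def _gen_gauss_window_demo():
    w = gen_gauss_window(3, 7.0 / 6.0)
    assert len(w) == 7
    assert abs(sum(w) - 1.0) < 1e-5
    assert abs(w[0] - w[-1]) < 1e-12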
def estimateggdparam(vec):
gam = np.asarray([x / 1000.0 for x in range(200, 10000, 1)])
r_gam = (gamma(1.0/gam)*gamma(3.0/gam))/((gamma(2.0/gam))**2)
# print(np.mean(vec))
sigma_sq = np.mean(vec**2) #-(np.mean(vec))**2
sigma = np.sqrt(sigma_sq)
E = np.mean(np.abs(vec))
rho = sigma_sq / (E**2 + 1e-6)
array_position = (np.abs(rho - r_gam)).argmin()
alphaparam = gam[array_position]
return alphaparam, sigma
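# Illustrative sanity check (hypothetical helper): for zero-mean Gaussian
# samples the moment-matched GGD shape parameter should come out close to the
# Gaussian special case, alpha ~= 2 with sigma ~= 1.
def _estimateggdparam_demo():
    rng = np.random.RandomState(0)
    alpha, sigma = estimateggdparam(rng.randn(100000))
    assert 1.8 < alpha < 2.2
    assert abs(sigma - 1.0) < 0.05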
def compute_image_mscn_transform(image, C=1, avg_window=None, extend_mode='constant'):
if avg_window is None:
avg_window = gen_gauss_window(3, 7.0/6.0)
assert len(np.shape(image)) == 2
h, w = np.shape(image)
mu_image = np.zeros((h, w), dtype=np.float32)
var_image = np.zeros((h, w), dtype=np.float32)
image = np.array(image).astype('float32')
scipy.ndimage.correlate1d(image, avg_window, 0, mu_image, mode=extend_mode)
scipy.ndimage.correlate1d(mu_image, avg_window, 1, mu_image, mode=extend_mode)
scipy.ndimage.correlate1d(image**2, avg_window, 0, var_image, mode=extend_mode)
scipy.ndimage.correlate1d(var_image, avg_window, 1, var_image, mode=extend_mode)
var_image = np.sqrt(np.abs(var_image - mu_image**2))
return (image - mu_image)/(var_image + C)
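# Illustrative usage sketch (hypothetical helper): MSCN coefficients keep the
# input shape and are roughly zero-mean once the local mean is divided out.
def _mscn_transform_demo():
    rng = np.random.RandomState(0)
    img = rng.rand(64, 64) * 255.0
    mscn = compute_image_mscn_transform(img)
    assert mscn.shape == img.shape
    assert abs(float(np.mean(mscn))) < 0.5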
def extract_subband_feats(mscncoefs):
# alpha_m, = extract_ggd_features(mscncoefs)
alpha_m, sigma = estimateggdparam(mscncoefs)
pps1, pps2, pps3, pps4 = paired_product(mscncoefs)
alpha1, N1, bl1, br1, lsq1, rsq1 = aggd_features(pps1)
alpha2, N2, bl2, br2, lsq2, rsq2 = aggd_features(pps2)
alpha3, N3, bl3, br3, lsq3, rsq3 = aggd_features(pps3)
alpha4, N4, bl4, br4, lsq4, rsq4 = aggd_features(pps4)
return np.array([
alpha_m, sigma,
alpha1, N1, lsq1**2, rsq1**2, # (V)
alpha2, N2, lsq2**2, rsq2**2, # (H)
alpha3, N3, lsq3**2, rsq3**2, # (D1)
alpha4, N4, lsq4**2, rsq4**2, # (D2)
])
def aggd_features(imdata):
# Flatten imdata
imdata.shape = (len(imdata.flat),)
imdata2 = imdata*imdata
left_data = imdata2[imdata < 0]
right_data = imdata2[imdata >= 0]
left_mean_sqrt = 0
right_mean_sqrt = 0
if len(left_data) > 0:
left_mean_sqrt = np.sqrt(np.average(left_data))
if len(right_data) > 0:
right_mean_sqrt = np.sqrt(np.average(right_data))
if right_mean_sqrt != 0:
gamma_hat = left_mean_sqrt/right_mean_sqrt
else:
gamma_hat = np.inf
# Solve r-hat norm
imdata2_mean = np.mean(imdata2)
#!/usr/bin/env python -u
'''
pDMET: Density Matrix Embedding theory for Periodic Systems
Copyright (C) 2018 <NAME>. All Rights Reserved.
A few functions in pDMET are modifed from QC-DMET Copyright (C) 2015 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
import numpy as np
def get_occ_r(nelec, mo_energy_kpts):
''' Get occupation numbers at k-point for a KRHF wf, modified from pbc/scf/krhf.py'''
nkpts = len(mo_energy_kpts)
nocc = nelec // 2
mo_energy = np.sort(np.hstack(mo_energy_kpts))
fermi = mo_energy[nocc-1]
mo_occ_kpts = []
for mo_e in mo_energy_kpts:
mo_occ_kpts.append((mo_e <= fermi).astype(np.double) * 2)
return np.asarray(mo_occ_kpts)
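# Illustrative sketch (hypothetical helper), assuming nelec counts electrons
# summed over all k-points, as the integer division above suggests: with 8
# electrons and 2 k-points, the two lowest bands at each k-point are doubly
# occupied and everything above the Fermi level stays empty.
def _get_occ_r_demo():
    mo_energy_kpts = [np.array([-1.0, -0.5, 0.3, 0.9]),
                      np.array([-0.9, -0.4, 0.4, 1.0])]
    occ = get_occ_r(8, mo_energy_kpts)
    assert occ.shape == (2, 4)
    assert np.allclose(occ, [[2., 2., 0., 0.], [2., 2., 0., 0.]])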
def get_occ_ro(nelec, mo_energy_kpts):
''' Get occupation numbers at k-point for a KROHF wf, modified from pbc/scf/krohf.py'''
if hasattr(mo_energy_kpts[0], 'mo_ea'):
mo_ea_kpts = [x.mo_ea for x in mo_energy_kpts]
mo_eb_kpts = [x.mo_eb for x in mo_energy_kpts]
else:
mo_ea_kpts = mo_eb_kpts = mo_energy_kpts
nkpts = len(mo_energy_kpts)
nocc_a = nelec[0] * nkpts
nocc_b = nelec[1] * nkpts
mo_energy_kpts1 = np.hstack(mo_energy_kpts)
mo_energy = np.sort(mo_energy_kpts1)
if nocc_b > 0:
core_level = mo_energy[nocc_b-1]
else:
core_level = -1e9
if nocc_a == nocc_b:
fermi = core_level
else:
mo_ea_kpts1 = np.hstack(mo_ea_kpts)
mo_ea = np.sort(mo_ea_kpts1[mo_energy_kpts1 > core_level])
fermi = mo_ea[nocc_a - nocc_b - 1]
mo_occ_kpts = []
for k, mo_e in enumerate(mo_energy_kpts):
occ = np.zeros_like(mo_e)
from obspy.core import read
import glob
import matplotlib.pyplot as plt
from scipy.misc import electrocardiogram
from scipy.signal import find_peaks
import numpy as np
import math
from time import process_time
import ntpath
st = process_time()
from rf import RFStream, read_rf, IterMultipleComponents, get_profile_boxes
plt.style.use('ggplot')
def calc_h_kappa(vp = 6.3,p = 0.06,w1=0.75,w2 = 0.25,outfile = "h-kappa-values.txt",data_dir_loc = "../results/dataRF", outloc="./"):
f= open(outloc+outfile,'w')
data_files = glob.glob(data_dir_loc+"/*-rf_profile_rfs.h5")
for data in data_files:
network = ntpath.basename(data).split('-')[0]
station = ntpath.basename(data).split('-')[1]
st = read_rf(data)
st = st.select(component="L")
len_trace_list=[]
for tr in st:
lentr=tr.stats.npts
len_trace_list.append(lentr)
if len(set(len_trace_list))> 1:
continue
st = st.stack()
for index,trace in enumerate(st):
errorphase = False
nbphase = 0
xpeaks, ypeaks = [], []
t3 = None  # only set below when a fourth peak is found
trace.filter('bandpass', freqmin=0.005, freqmax=2)
t = trace.stats.starttime
pps = trace.stats.sampling_rate
trace.trim(t+24, t+44)
xpeaks, ypeaks = find_peaks(trace, height=0.02, distance=50)
if len(xpeaks) > 2:
if len(xpeaks) < 5:
# print('nb of peaks =',len(xpeaks))
plt.plot(trace)
plt.plot(xpeaks, trace[xpeaks], "x")
plt.plot(np.zeros_like(trace), "--", color="gray")
t0 = xpeaks[0]/pps
t1 = xpeaks[1]/pps
t2 = xpeaks[2]/pps
if len(xpeaks) > 3:
t3 = xpeaks[3]/pps
if t0 < 2.5 and t0 > 0:
t0 = t0
else:
t0 = np.NaN
errorphase = True
if t1 < 7.0 and t1 > 2.6:
t1 = t1
else:
t1 = np.NaN
errorphase = True
if t2 < 14.0 and t2 > 7.5:
t2 = t2
else:
if t3 and t3 < 14.0 and t2 > 7.5:
t2 = t3
else:
t2 = np.NaN
errorphase = True
try:
if w1+w2 != 1:
raise ValueError('Weights are not properly defined')
except ValueError as e:
exit(str(e))
# Measure the difference between theory and data:
if not errorphase:
numpoints = 1000
hs = np.linspace(20, 40, numpoints)
from typing import Union, List
import numpy as np
from neighborhood_analysis import get_point_neighbors
from ._utils import get_pair_count, types2int, get_pair
def leibovici(points: Union[List[float], np.ndarray],
types: Union[List[str], np.ndarray],
d: Union[int, float, None] = None,
order: bool = True,
base: Union[int, float, None] = None
) -> float:
"""Leibovici entropy
Args:
points: array, 2d array
types: array, the length should correspond to points
d: int or float, cut-off distance, default is 10
order: bool, if True, (x1, x2) and (x2, x1) is not the same
base: int or float, the log base, default is e
Returns:
float
"""
if len(points) != len(types):
raise ValueError("Array of points and types should have same length")
if base is None:
base = np.e
if d is None:
d = 10
elif isinstance(d, (int, float)):
pass
else:
raise TypeError("d should be a number.")
points = [tuple(i) for i in points]
if isinstance(types[0], str):
types = types2int(types)
neighbors = get_point_neighbors(points, r=d, labels=types)
pair = get_pair(types, neighbors)
pair_count = get_pair_count(pair, order)
v = pair_count.values()
# clean all elements that equal to zero to prevent divide by zero error
v = np.array([i for i in v if i != 0])
v = v / v.sum()
v = v * np.log(1 / v)
import numpy as np
import torch
from . import forward_kinematics
def readCSVasFloat(filename, with_key=False):
"""
Borrowed from SRNN code. Reads a csv and returns a float matrix.
https://github.com/asheshjain399/NeuralModels/blob/master/neuralmodels/utils.py#L34
Args
filename: string. Path to the csv file
Returns
returnArray: the read data in a float32 matrix
"""
returnArray = []
lines = open(filename).readlines()
if with_key: # skip first line
lines = lines[1:]
for line in lines:
line = line.strip().split(',')
if len(line) > 0:
returnArray.append(np.array([np.float32(x) for x in line]))
returnArray = np.array(returnArray)
return returnArray
def writeCSVasFloat(filename, data_array, with_key=False, key=None):
"""
Args
filename: string. Path to the csv file
data_array: data
"""
with open(filename, 'w') as f:
if with_key:
f.write(key)
for vec in data_array:
line_to_write = ','.join(map(str, vec)) + '\n'
f.write(line_to_write)
def expmap2xyz_torch(expmap):
"""
convert expmaps to joint locations
:param expmap: N*99
:return: N*32*3
"""
parent, offset, rotInd, expmapInd = forward_kinematics._some_variables()
xyz = forward_kinematics.fkl_torch(expmap, parent, offset, rotInd, expmapInd)
return xyz
def expmap2rotmat(r):
"""
Converts an exponential map angle to a rotation matrix
Matlab port to python for evaluation purposes
I believe this is also called Rodrigues' formula
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/expmap2rotmat.m
Args
r: 1x3 exponential map
Returns
R: 3x3 rotation matrix
"""
theta = np.linalg.norm(r)
r0 = np.divide(r, theta + np.finfo(np.float32).eps)
r0x = np.array([0, -r0[2], r0[1], 0, 0, -r0[0], 0, 0, 0]).reshape(3, 3)
r0x = r0x - r0x.T
R = np.eye(3, 3) + np.sin(theta) * r0x + (1 - np.cos(theta)) * r0x.dot(r0x)
return R
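# Illustrative worked example (hypothetical helper): an exponential map of
# pi/2 about the z-axis should reproduce the standard 90-degree rotation
# matrix under the convention used by expmap2rotmat above.
def _expmap2rotmat_demo():
    R = expmap2rotmat(np.array([0.0, 0.0, np.pi / 2]))
    Rz90 = np.array([[0.0, -1.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 0.0, 1.0]])
    assert np.allclose(R, Rz90, atol=1e-6)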
def expmap2rotmat_torch(r):
"""
Converts expmap matrix to rotation
batch pytorch version ported from the corresponding method above
:param r: N*3
:return: N*3*3
"""
theta = torch.norm(r, 2, 1)
r0 = torch.div(r, theta.unsqueeze(1).repeat(1, 3) + 0.0000001)
r1 = torch.zeros_like(r0).repeat(1, 3)
r1[:, 1] = -r0[:, 2]
r1[:, 2] = r0[:, 1]
r1[:, 5] = -r0[:, 0]
r1 = r1.view(-1, 3, 3)
r1 = r1 - r1.transpose(1, 2)
n = r1.data.shape[0]
R = torch.eye(3, 3).repeat(n, 1, 1).float().cuda() + torch.mul(
torch.sin(theta).unsqueeze(1).repeat(1, 9).view(-1, 3, 3), r1) + torch.mul(
(1 - torch.cos(theta).unsqueeze(1).repeat(1, 9).view(-1, 3, 3)), torch.matmul(r1, r1))
return R
def rotmat2quat(R):
"""
Converts a rotation matrix to a quaternion
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/rotmat2quat.m#L4
Args
R: 3x3 rotation matrix
Returns
q: 1x4 quaternion
"""
rotdiff = R - R.T
r = np.zeros(3)
r[0] = -rotdiff[1, 2]
r[1] = rotdiff[0, 2]
r[2] = -rotdiff[0, 1]
sintheta = np.linalg.norm(r) / 2
r0 = np.divide(r, np.linalg.norm(r) + np.finfo(np.float32).eps)
costheta = (np.trace(R) - 1) / 2
theta = np.arctan2(sintheta, costheta)
q = np.zeros(4)
q[0] = np.cos(theta / 2)
q[1:] = r0 * np.sin(theta / 2)
return q
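# Illustrative sketch (hypothetical helper): the quaternion is returned in
# scalar-first order, so the identity rotation maps to [1, 0, 0, 0].
def _rotmat2quat_demo():
    q = rotmat2quat(np.eye(3))
    assert np.allclose(q, [1.0, 0.0, 0.0, 0.0])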
def quat2expmap(q):
"""
Converts a quaternion to an exponential map
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/quat2expmap.m#L1
Args
q: 1x4 quaternion
Returns
r: 1x3 exponential map
Raises
ValueError if the l2 norm of the quaternion is not close to 1
"""
if (np.abs(np.linalg.norm(q) - 1) > 1e-3):
raise ValueError("quat2expmap: input quaternion is not norm 1")
sinhalftheta = np.linalg.norm(q[1:])
coshalftheta = q[0]
r0 = np.divide(q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps))
theta = 2 * np.arctan2(sinhalftheta, coshalftheta)
theta = np.mod(theta + 2 * np.pi, 2 * np.pi)
if theta > np.pi:
theta = 2 * np.pi - theta
r0 = -r0
r = r0 * theta
return r
def rotmat2expmap(R):
return quat2expmap(rotmat2quat(R))
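# Illustrative round-trip check (hypothetical helper): for rotation angles
# below pi, expmap -> rotation matrix -> quaternion -> expmap should recover
# the original vector.
def _expmap_roundtrip_demo():
    r = np.array([0.1, -0.2, 0.3])
    assert np.allclose(rotmat2expmap(expmap2rotmat(r)), r, atol=1e-6)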
def find_indices_srnn(frame_num1, frame_num2, seq_len, input_n=10):
"""
Adapted from https://github.com/una-dinosauria/human-motion-prediction/blob/master/src/seq2seq_model.py#L478
which originally comes from
In order to find the same action indices as in SRNN.
https://github.com/asheshjain399/RNNexp/blob/master/structural_rnn/CRFProblems/H3.6m/processdata.py#L325
"""
# Used a fixed dummy seed, following
# https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/forecastTrajectories.py#L29
SEED = 1234567890
rng = np.random.RandomState(SEED)
T1 = frame_num1 - 150
T2 = frame_num2 - 150 # seq_len
idxo1 = None
idxo2 = None
for _ in np.arange(0, 4):
idx_ran1 = rng.randint(16, T1)
idx_ran2 = rng.randint(16, T2)
# print("subact1 {}".format(idx_ran1))
# print("subact2 {}".format(idx_ran2))
idxs1 = np.arange(idx_ran1 + 50 - input_n, idx_ran1 + 50 - input_n + seq_len)
idxs2 = np.arange(idx_ran2 + 50 - input_n, idx_ran2 + 50 - input_n + seq_len)
import os
from time import time
import numpy as np
import pandas as pd
import scipy.io  # needed for scipy.io.savemat below
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from data_gen_utils import gen_data_P1_P2_P3_Elzouka
#%% ERROR ANALYSIS ==========================================================================
# calculating relative error for spectrum
def error_integ_by_spectrum_integ(y_test, y_pred, x = []):
"""This function calculates relative error for either a spectrum or scalar target
The result is the ratio between:
the absolute error between test&pred, integrated by np.trapz
------------------------------------------------------------
the value of y, integrated by np.trapz
if the input is 1D array, the integral is omitted
"""
if len(y_test.shape) == 2: # if y_test is 2D "i.e., spectral emissivity"
if len(x) == 0:
y_test_integ = np.trapz(y_test, axis=1)
else:
y_test_integ = np.trapz(y_test, x, axis=1)
else: # if y_test is 1D "i.e., scalar emissivity"
y_test_integ = y_test
error_abs = np.abs(y_test - y_pred)
if len(y_test.shape) == 2: # if y_test is 2D "i.e., spectral emissivity"
if len(x) == 0:
error_abs_integ = np.trapz(error_abs, axis=1)
else:
error_abs_integ = np.trapz(error_abs, x, axis=1)
else: # if y_test is 1D "i.e., scalar emissivity"
error_abs_integ = error_abs
error_rel_integ = error_abs_integ/y_test_integ
return error_rel_integ,np.mean(error_rel_integ)
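# Illustrative worked example (hypothetical helper): a constant 10% offset on a
# flat spectrum gives an integrated relative error of about 0.1 per sample,
# since both the absolute error and the reference are integrated with np.trapz.
def _error_integ_demo():
    x = np.linspace(0.0, 1.0, 11)
    y_test = np.ones((3, 11))
    y_pred = 1.1 * y_test
    err_all, err_mean = error_integ_by_spectrum_integ(y_test, y_pred, x)
    assert np.allclose(err_all, 0.1)
    assert np.isclose(err_mean, 0.1)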
def RMSE(y_actual,y_pred):
return np.sqrt(mean_squared_error(y_actual,y_pred))
def calc_RMSE_MAE_MSE_Erel(y_test,y_pred, my_x, printing=True):
"""
calculate the errors; averaged and for all test-pred elements
my_x: the frequency points, required only for spectral emissivity
"""
# error metrics, averaged
r2 = r2_score(y_test,y_pred)
mae = mean_absolute_error(y_test,y_pred)
mse = mean_squared_error(y_test,y_pred)
Erel_all,Erel = error_integ_by_spectrum_integ(y_test,y_pred, my_x)
# error metrics, for all elements of test-pred
r2_all = r2_score(y_test,y_pred, multioutput='raw_values')
mae_all = mean_absolute_error(y_test,y_pred, multioutput='raw_values')
mse_all = mean_squared_error(y_test,y_pred, multioutput='raw_values')
if printing:
print("R2: {0:.8f}".format(r2))
print("MAE: {0:.8f}".format(mae))
print("MSE: {0:.8f}".format(mse))
print("Erel: {0:.8f}".format(Erel))
return r2,mae,mse,Erel, r2_all,mae_all,mse_all, Erel_all
def spectra_prediction_corrector(y):
"""To replace any negative emissivity values with 'ZEROS'"""
assert isinstance(y,np.ndarray)
y_copy = np.copy(y)
y_copy[y_copy<0] = 0
return y_copy
def z_RF_DT_DTGEN_error_folds(X_reduced,y_reduced, feature_set, feature_set_dimensions, feature_set_geom_mat, data_featurized, my_x, \
num_folds=20, test_size=0.2, n_estimators=200, n_cpus = 1, keep_spheres = True, optional_title_folders='', \
use_log_emissivity=True, display_txt_out = True, RF_or_DT__ = ['RF'], PlotTitle_extra = '', \
n_gen_to_data_ratio=150):
'''
INPUTS that is required only for DTGEN
data_featurized: all the data, with all the columns. Required only for DTGEN
n_gen_to_data_ratio : ratio between the amount of data generated for DTGEN and the training data
'''
#determine_spectral_or_scalar
if len(y_reduced.shape) == 2: # if y is 2D "i.e., spectral emissivity"
spectral_or_scalar_calc = 'spectral'
else: # if y is 1D "i.e., scalar emissivity"
spectral_or_scalar_calc = 'scalar'
index_data_here = np.array(X_reduced.index)
##get errors w.r.t material/geometry intersectionality, also get runtime
mae,rmse,r2,mse,Erel = [],[],[],[],[]
mae_matgeom,rmse_matgeom,r2_matgeom,mse_matgeom,Erel_matgeom,ntest_matgeom,ntrain_matgeom={},{},{},{},{},{},{}
mats = ['Material_SiO2','Material_Au','Material_SiN']
mats_colors = ['b','r','m']
geoms = ['Geometry_TriangPrismIsosc','Geometry_parallelepiped','Geometry_wire','Geometry_sphere']
train_time_feat,pred_time_feat = [],[]
metric_list = ['mae', 'r2', 'mse', 'rmse', 'Erel', 'ntest', 'ntrain']
All_errors = {}
pred_time = {}
train_time = {}
for predictor in ['RF', 'DT', 'DTGEN'] :
pred_time[predictor] = []
train_time[predictor] = []
All_errors[predictor] = {}
for metric in metric_list:
All_errors[predictor][metric + '_matgeom'] = {}
for i in range(num_folds):
X_train,X_test,y_train,y_test = train_test_split(X_reduced,y_reduced,test_size=test_size,stratify=X_reduced[feature_set_geom_mat])
if use_log_emissivity:
y_train = np.log(y_train)
y_train[y_train<-25] = -25
for RF_or_DT in RF_or_DT__:
print('Analyzing the error for {0}, running training for {1}th time out of {2} times =========='.format(RF_or_DT, i, num_folds))
if RF_or_DT == 'RF' or RF_or_DT == 'DTGEN':
estimator = RandomForestRegressor(n_estimators,n_jobs=n_cpus, criterion='mse')
estimator_type='RF'
elif RF_or_DT == 'DT':
estimator = DecisionTreeRegressor()
estimator_type='DT'
start_time = time()
estimator.fit(X_train,y_train)
end_time = time()
train_time_feat.append((10**6*(end_time-start_time))/float(len(X_train)))
train_time[estimator_type].append((end_time-start_time))
start_time = time()
if use_log_emissivity:
y_pred = np.exp(estimator.predict(X_test))
else:
y_pred = spectra_prediction_corrector(estimator.predict(X_test))
end_time = time()
pred_time_feat.append((10**6*(end_time-start_time))/float(len(X_test)))
pred_time[estimator_type].append((end_time-start_time))
r2_here = r2_score(y_test,y_pred)
mae_here = mean_absolute_error(y_test,y_pred)
mse_here = mean_squared_error(y_test,y_pred)
rmse_here = RMSE(y_test,y_pred)
_,Erel_here = error_integ_by_spectrum_integ(y_test, y_pred, my_x)
r2.append(r2_here)
mae.append(mae_here)
mse.append(mse_here)
rmse.append(rmse_here)
Erel.append(Erel_here)
error_dict = All_errors[estimator_type]
# errors broken by material and geometry
for m in mats:
for g in geoms:
formal_material = m.split('_')[1]
formal_geom = g.split('_')[1]
key_str = formal_material + ' ' + formal_geom
idx = (X_test[m]==1)&(X_test[g]==1)
ntest = sum(idx) # number of test data
idx_train = (X_train[m]==1)&(X_train[g]==1)
ntrain = sum(idx_train) # number of training data
if i == 0:
for metric in metric_list:
error_dict[metric+'_matgeom'][key_str] = []
if sum(idx)!=0:
error_dict['mae_matgeom'][key_str].append(
mean_absolute_error(y_test[idx],y_pred[idx]))
error_dict['r2_matgeom'][key_str].append(
r2_score(y_test[idx],y_pred[idx]))
error_dict['mse_matgeom'][key_str].append(
mean_squared_error(y_test[idx],y_pred[idx]))
error_dict['rmse_matgeom'][key_str].append(
RMSE(y_test[idx],y_pred[idx]))
_, Erel_here = error_integ_by_spectrum_integ(y_test[idx], y_pred[idx], my_x)
error_dict['Erel_matgeom'][key_str].append(Erel_here)
error_dict['ntest_matgeom'][key_str].append(ntest)
error_dict['ntrain_matgeom'][key_str].append(ntrain)
# DTGEN
if RF_or_DT == 'DTGEN' and estimator_type == 'RF':
start_time_DTGEN_train = time()
X_train_all_columns = data_featurized.loc[X_train.index,:]
start_time = time()
n_gen = int(len(X_train_all_columns) * n_gen_to_data_ratio)
X_gen = gen_data_P1_P2_P3_Elzouka(X_train_all_columns,n_gen);
X_gen = pd.DataFrame(X_gen,columns=X_train.columns).astype(np.float64)
X_gen = X_gen[feature_set]
end_time = time()
print('done generating input features for DTGEN in {0} seconds'.format(end_time-start_time))
time_DTGEN_feature_creation = end_time - start_time
# predicting emissivity using RF for the generated data ------------------
start_time = time()
if use_log_emissivity:
y_gen = np.exp(estimator.predict(X_gen))
else:
y_gen = spectra_prediction_corrector(estimator.predict(X_gen))
end_time = time()
print('done predicting emissivity using the input features using RF in {0} seconds'.format(end_time-start_time))
time_DTGEN_label_creation = end_time - start_time
# adding the generated emissivity to original training emissivity ------------------
if use_log_emissivity:
X_new_train,y_new_train = pd.concat([X_gen,X_train]),np.concatenate([np.log(y_gen),y_train])
else:
X_new_train,y_new_train = pd.concat([X_gen,X_train]),np.concatenate([y_gen,y_train])
# creating a single decision tree trained on generated and original training emissivity
dt_gen = DecisionTreeRegressor(min_samples_leaf=3)
start_time = time()
dt_gen.fit(X_new_train,y_new_train)
end_time_DTGEN_train = time()
train_time['DTGEN'].append((end_time_DTGEN_train-start_time_DTGEN_train))
start_time = time()
if use_log_emissivity:
y_pred_dtgen = np.exp(dt_gen.predict(X_test))
y_new_train = np.exp(y_new_train)
else:
y_pred_dtgen = dt_gen.predict(X_test)
end_time = time()
pred_time['DTGEN'].append((end_time-start_time))
#print("DTGEN error analysis")
#dt_gen_r2,dt_gen_mae,dt_gen_mse,dt_gen_Erel, dt_gen_r2_all,dt_gen_mae_all,dt_gen_mse_all,dt_gen_Erel_all = calc_RMSE_MAE_MSE_Erel(y_test,y_pred_dtgen, my_x)
y_pred = y_pred_dtgen
# errors broken by material and geometry
estimator_type = 'DTGEN'
for m in mats:
for g in geoms:
formal_material = m.split('_')[1]
formal_geom = g.split('_')[1]
idx = (X_test[m]==1)&(X_test[g]==1) ; ntest = sum(idx) # number of test data
idx_train = (X_train[m]==1)&(X_train[g]==1) ; ntrain = sum(idx_train) # number of training data
if i == 0:
All_errors[estimator_type]['mae_matgeom'][formal_material+' '+formal_geom] = []
All_errors[estimator_type]['r2_matgeom'][formal_material+' '+formal_geom] = []
All_errors[estimator_type]['mse_matgeom'][formal_material+' '+formal_geom] = []
All_errors[estimator_type]['rmse_matgeom'][formal_material+' '+formal_geom] =[]
All_errors[estimator_type]['Erel_matgeom'][formal_material+' '+formal_geom] =[]
All_errors[estimator_type]['ntest_matgeom'][formal_material+' '+formal_geom] = []
All_errors[estimator_type]['ntrain_matgeom'][formal_material+' '+formal_geom] =[]
if sum(idx)!=0:
All_errors[estimator_type]['mae_matgeom'][formal_material+' '+formal_geom].append(mean_absolute_error(y_test[idx],y_pred[idx]))
All_errors[estimator_type]['r2_matgeom'][formal_material+' '+formal_geom].append(r2_score(y_test[idx],y_pred[idx]))
All_errors[estimator_type]['mse_matgeom'][formal_material+' '+formal_geom].append(mean_squared_error(y_test[idx],y_pred[idx]))
All_errors[estimator_type]['rmse_matgeom'][formal_material+' '+formal_geom].append(RMSE(y_test[idx],y_pred[idx]))
_,Erel_here=error_integ_by_spectrum_integ(y_test[idx], y_pred[idx], my_x)
All_errors[estimator_type]['Erel_matgeom'][formal_material+' '+formal_geom].append(Erel_here)
All_errors[estimator_type]['ntest_matgeom'][formal_material+' '+formal_geom].append(ntest)
All_errors[estimator_type]['ntrain_matgeom'][formal_material+' '+formal_geom].append(ntrain)
## saving the errors
############# Saving the data #####################################
savefolder = optional_title_folders+'/'
filename_mat = 'inference_'+spectral_or_scalar_calc+'_'+RF_or_DT+'_errors_averaged_over_{0}_runs'.format(num_folds)
os.makedirs(savefolder, exist_ok=True)
# Save in Matlab format
dict_to_save = {}
#variable_name_list = ['mae','mse','r2','rmse','Erel','mae_matgeom','rmse_matgeom','r2_matgeom','mse_matgeom','Erel_matgeom','matlab_data_path', 'feature_set', 'num_folds', 'index_data_here', 'ntrain_matgeom', 'ntest_matgeom']
variable_name_list = ['All_errors', 'train_time', 'pred_time']
for variable_name in variable_name_list:
if variable_name in locals():
dict_to_save[variable_name] = locals()[variable_name] # here, we use "LOCALS()", rather than "GLOBALS()"
elif variable_name in globals():
dict_to_save[variable_name] = globals()[variable_name] # fall back to "GLOBALS()" when the name is not local
else:
print("{0} does not exist in local or global variables".format(variable_name))
scipy.io.savemat(savefolder+filename_mat+'.mat', dict_to_save)
if display_txt_out:
print(feature_set_dimensions)
print('R^2: {0:.6f} pm {1:.6f}'.format(np.average(r2), np.std(r2)))  # mean pm std across folds; the std term is an assumed completion
"""Tests for module bregman on OT with bregman projections """
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
from itertools import product
import numpy as np
import pytest
import ot
from ot.backend import torch
@pytest.mark.parametrize("verbose, warn", product([True, False], [True, False]))
def test_sinkhorn(verbose, warn):
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10, verbose=verbose, warn=warn)
# check constraints
np.testing.assert_allclose(
u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
np.testing.assert_allclose(
u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
with pytest.warns(UserWarning):
ot.sinkhorn(u, u, M, 1, stopThr=0, numItermax=1)
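# Hedged usage sketch (hypothetical helper, mirroring the constraint checked in
# test_sinkhorn above): the entropic transport plan returned by ot.sinkhorn has
# row and column sums matching the input marginals up to the solver tolerance.
def _sinkhorn_marginals_demo():
    n = 20
    rng = np.random.RandomState(1)
    x = rng.randn(n, 2)
    u = ot.utils.unif(n)
    M = ot.dist(x, x)
    G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10)
    assert np.allclose(G.sum(1), u, atol=1e-5)
    assert np.allclose(G.sum(0), u, atol=1e-5)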
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized",
"sinkhorn_epsilon_scaling",
"greenkhorn",
"sinkhorn_log"])
def test_convergence_warning(method):
# test sinkhorn
n = 100
a1 = ot.datasets.make_1D_gauss(n, m=30, s=10)
a2 = ot.datasets.make_1D_gauss(n, m=40, s=10)
A = np.asarray([a1, a2]).T
M = ot.utils.dist0(n)
with pytest.warns(UserWarning):
ot.sinkhorn(a1, a2, M, 1., method=method, stopThr=0, numItermax=1)
if method in ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"]:
with pytest.warns(UserWarning):
ot.barycenter(A, M, 1, method=method, stopThr=0, numItermax=1)
with pytest.warns(UserWarning):
ot.sinkhorn2(a1, a2, M, 1, method=method, stopThr=0, numItermax=1)
def test_not_impemented_method():
# test sinkhorn
w = 10
n = w ** 2
rng = np.random.RandomState(42)
A_img = rng.rand(2, w, w)
A_flat = A_img.reshape(n, 2)
a1, a2 = A_flat.T
M_flat = ot.utils.dist0(n)
not_implemented = "new_method"
reg = 0.01
with pytest.raises(ValueError):
ot.sinkhorn(a1, a2, M_flat, reg, method=not_implemented)
with pytest.raises(ValueError):
ot.sinkhorn2(a1, a2, M_flat, reg, method=not_implemented)
with pytest.raises(ValueError):
ot.barycenter(A_flat, M_flat, reg, method=not_implemented)
with pytest.raises(ValueError):
ot.bregman.barycenter_debiased(A_flat, M_flat, reg,
method=not_implemented)
with pytest.raises(ValueError):
ot.bregman.convolutional_barycenter2d(A_img, reg,
method=not_implemented)
with pytest.raises(ValueError):
ot.bregman.convolutional_barycenter2d_debiased(A_img, reg,
method=not_implemented)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"])
def test_nan_warning(method):
# test sinkhorn
n = 100
a1 = ot.datasets.make_1D_gauss(n, m=30, s=10)
a2 = ot.datasets.make_1D_gauss(n, m=40, s=10)
M = ot.utils.dist0(n)
reg = 0
with pytest.warns(UserWarning):
# warn set to False to avoid catching a convergence warning instead
ot.sinkhorn(a1, a2, M, reg, method=method, warn=False)
def test_sinkhorn_stabilization():
# test sinkhorn
n = 100
a1 = ot.datasets.make_1D_gauss(n, m=30, s=10)
a2 = ot.datasets.make_1D_gauss(n, m=40, s=10)
M = ot.utils.dist0(n)
reg = 1e-5
loss1 = ot.sinkhorn2(a1, a2, M, reg, method="sinkhorn_log")
loss2 = ot.sinkhorn2(a1, a2, M, reg, tau=1, method="sinkhorn_stabilized")
np.testing.assert_allclose(
loss1, loss2, atol=1e-06) # cf convergence sinkhorn
@pytest.mark.parametrize("method, verbose, warn",
product(["sinkhorn", "sinkhorn_stabilized",
"sinkhorn_log"],
[True, False], [True, False]))
def test_sinkhorn_multi_b(method, verbose, warn):
# test sinkhorn
n = 10
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
b = rng.rand(n, 3)
b = b / np.sum(b, 0, keepdims=True)
M = ot.dist(x, x)
loss0, log = ot.sinkhorn(u, b, M, .1, method=method, stopThr=1e-10,
log=True)
loss = [ot.sinkhorn2(u, b[:, k], M, .1, method=method, stopThr=1e-10,
verbose=verbose, warn=warn) for k in range(3)]
# check constraints
np.testing.assert_allclose(
loss0, loss, atol=1e-4) # cf convergence sinkhorn
def test_sinkhorn_backends(nx):
n_samples = 100
n_features = 2
rng = np.random.RandomState(0)
x = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_features)
a = ot.utils.unif(n_samples)
M = ot.dist(x, y)
G = ot.sinkhorn(a, a, M, 1)
ab = nx.from_numpy(a)
M_nx = nx.from_numpy(M)
Gb = ot.sinkhorn(ab, ab, M_nx, 1)
np.allclose(G, nx.to_numpy(Gb))
def test_sinkhorn2_backends(nx):
n_samples = 100
n_features = 2
rng = np.random.RandomState(0)
x = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_features)
a = ot.utils.unif(n_samples)
M = ot.dist(x, y)
G = ot.sinkhorn(a, a, M, 1)
ab = nx.from_numpy(a)
M_nx = nx.from_numpy(M)
Gb = ot.sinkhorn2(ab, ab, M_nx, 1)
np.allclose(G, nx.to_numpy(Gb))
def test_sinkhorn2_gradients():
n_samples = 100
n_features = 2
rng = np.random.RandomState(0)
x = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_features)
a = ot.utils.unif(n_samples)
M = ot.dist(x, y)
if torch:
a1 = torch.tensor(a, requires_grad=True)
b1 = torch.tensor(a, requires_grad=True)
M1 = torch.tensor(M, requires_grad=True)
val = ot.sinkhorn2(a1, b1, M1, 1)
val.backward()
assert a1.shape == a1.grad.shape
assert b1.shape == b1.grad.shape
assert M1.shape == M1.grad.shape
def test_sinkhorn_empty():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, method="sinkhorn_log",
verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10,
method='sinkhorn_stabilized', verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn(
[], [], M, 1, stopThr=1e-10, method='sinkhorn_epsilon_scaling',
verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
# test empty weights greenkhorn
ot.sinkhorn([], [], M, 1, method='greenkhorn', stopThr=1e-10, log=True)
@pytest.skip_backend("jax")
def test_sinkhorn_variants(nx):
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
ub = nx.from_numpy(u)
M_nx = nx.from_numpy(M)
G = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10)
Gl = nx.to_numpy(ot.sinkhorn(ub, ub, M_nx, 1, method='sinkhorn_log', stopThr=1e-10))
G0 = nx.to_numpy(ot.sinkhorn(ub, ub, M_nx, 1, method='sinkhorn', stopThr=1e-10))
Gs = nx.to_numpy(ot.sinkhorn(ub, ub, M_nx, 1, method='sinkhorn_stabilized', stopThr=1e-10))
Ges = nx.to_numpy(ot.sinkhorn(
ub, ub, M_nx, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10))
G_green = nx.to_numpy(ot.sinkhorn(ub, ub, M_nx, 1, method='greenkhorn', stopThr=1e-10))
# check values
np.testing.assert_allclose(G, G0, atol=1e-05)
np.testing.assert_allclose(G, Gl, atol=1e-05)
np.testing.assert_allclose(G0, Gs, atol=1e-05)
np.testing.assert_allclose(G0, Ges, atol=1e-05)
np.testing.assert_allclose(G0, G_green, atol=1e-5)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized",
"sinkhorn_epsilon_scaling",
"greenkhorn",
"sinkhorn_log"])
@pytest.skip_arg(("nx", "method"), ("jax", "sinkhorn_epsilon_scaling"), reason="jax does not support sinkhorn_epsilon_scaling", getter=str)
@pytest.skip_arg(("nx", "method"), ("jax", "greenkhorn"), reason="jax does not support greenkhorn", getter=str)
def test_sinkhorn_variants_dtype_device(nx, method):
n = 100
x = np.random.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
ub = nx.from_numpy(u, type_as=tp)
Mb = nx.from_numpy(M, type_as=tp)
Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
nx.assert_same_dtype_device(Mb, Gb)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"])
def test_sinkhorn2_variants_dtype_device(nx, method):
n = 100
x = np.random.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
ub = nx.from_numpy(u, type_as=tp)
Mb = nx.from_numpy(M, type_as=tp)
lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
nx.assert_same_dtype_device(Mb, lossb)
@pytest.skip_backend("jax")
def test_sinkhorn_variants_multi_b(nx):
# test sinkhorn
n = 50
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
b = rng.rand(n, 3)
b = b / np.sum(b, 0, keepdims=True)
M = ot.dist(x, x)
ub = nx.from_numpy(u)
bb = nx.from_numpy(b)
M_nx = nx.from_numpy(M)
G = ot.sinkhorn(u, b, M, 1, method='sinkhorn', stopThr=1e-10)
Gl = nx.to_numpy(ot.sinkhorn(ub, bb, M_nx, 1, method='sinkhorn_log', stopThr=1e-10))
G0 = nx.to_numpy(ot.sinkhorn(ub, bb, M_nx, 1, method='sinkhorn', stopThr=1e-10))
Gs = nx.to_numpy(ot.sinkhorn(ub, bb, M_nx, 1, method='sinkhorn_stabilized', stopThr=1e-10))
# check values
np.testing.assert_allclose(G, G0, atol=1e-05)
np.testing.assert_allclose(G, Gl, atol=1e-05)
np.testing.assert_allclose(G0, Gs, atol=1e-05)
@pytest.skip_backend("jax")
def test_sinkhorn2_variants_multi_b(nx):
# test sinkhorn
n = 50
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
b = rng.rand(n, 3)
b = b / np.sum(b, 0, keepdims=True)
M = ot.dist(x, x)
ub = nx.from_numpy(u)
bb = nx.from_numpy(b)
M_nx = nx.from_numpy(M)
G = ot.sinkhorn2(u, b, M, 1, method='sinkhorn', stopThr=1e-10)
Gl = nx.to_numpy(ot.sinkhorn2(ub, bb, M_nx, 1, method='sinkhorn_log', stopThr=1e-10))
G0 = nx.to_numpy(ot.sinkhorn2(ub, bb, M_nx, 1, method='sinkhorn', stopThr=1e-10))
Gs = nx.to_numpy(ot.sinkhorn2(ub, bb, M_nx, 1, method='sinkhorn_stabilized', stopThr=1e-10))
# check values
np.testing.assert_allclose(G, G0, atol=1e-05)
np.testing.assert_allclose(G, Gl, atol=1e-05)
np.testing.assert_allclose(G0, Gs, atol=1e-05)
def test_sinkhorn_variants_log():
# test sinkhorn
n = 50
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G0, log0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10, log=True)
Gl, logl = ot.sinkhorn(u, u, M, 1, method='sinkhorn_log', stopThr=1e-10, log=True)
Gs, logs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True)
Ges, loges = ot.sinkhorn(
u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10, log=True,)
G_green, loggreen = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10, log=True)
# check values
np.testing.assert_allclose(G0, Gs, atol=1e-05)
np.testing.assert_allclose(G0, Gl, atol=1e-05)
np.testing.assert_allclose(G0, Ges, atol=1e-05)
np.testing.assert_allclose(G0, G_green, atol=1e-5)
@pytest.mark.parametrize("verbose, warn", product([True, False], [True, False]))
def test_sinkhorn_variants_log_multib(verbose, warn):
# test sinkhorn
n = 50
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
b = rng.rand(n, 3)
b = b / np.sum(b, 0, keepdims=True)
M = ot.dist(x, x)
G0, log0 = ot.sinkhorn(u, b, M, 1, method='sinkhorn', stopThr=1e-10, log=True)
Gl, logl = ot.sinkhorn(u, b, M, 1, method='sinkhorn_log', stopThr=1e-10, log=True,
verbose=verbose, warn=warn)
Gs, logs = ot.sinkhorn(u, b, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True,
verbose=verbose, warn=warn)
# check values
np.testing.assert_allclose(G0, Gs, atol=1e-05)
np.testing.assert_allclose(G0, Gl, atol=1e-05)
@pytest.mark.parametrize("method, verbose, warn",
product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"],
[True, False], [True, False]))
def test_barycenter(nx, method, verbose, warn):
n_bins = 100 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
A_nx = nx.from_numpy(A)
M_nx = nx.from_numpy(M)
weights_nx = nx.from_numpy(weights)
reg = 1e-2
if nx.__name__ == "jax" and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.barycenter(A_nx, M_nx, reg, weights, method=method)
else:
# wasserstein
bary_wass_np = ot.bregman.barycenter(A, M, reg, weights, method=method, verbose=verbose, warn=warn)
bary_wass, _ = ot.bregman.barycenter(A_nx, M_nx, reg, weights_nx, method=method, log=True)
bary_wass = nx.to_numpy(bary_wass)
np.testing.assert_allclose(1, np.sum(bary_wass))
np.testing.assert_allclose(bary_wass, bary_wass_np)
ot.bregman.barycenter(A_nx, M_nx, reg, log=True)
@pytest.mark.parametrize("method, verbose, warn",
product(["sinkhorn", "sinkhorn_log"],
[True, False], [True, False]))
def test_barycenter_debiased(nx, method, verbose, warn):
n_bins = 100 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
A_nx = nx.from_numpy(A)
M_nx = nx.from_numpy(M)
weights_nx = nx.from_numpy(weights)
# wasserstein
reg = 1e-2
if nx.__name__ == "jax" and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.barycenter_debiased(A_nx, M_nx, reg, weights, method=method)
else:
bary_wass_np = ot.bregman.barycenter_debiased(A, M, reg, weights, method=method,
verbose=verbose, warn=warn)
bary_wass, _ = ot.bregman.barycenter_debiased(A_nx, M_nx, reg, weights_nx, method=method, log=True)
bary_wass = nx.to_numpy(bary_wass)
np.testing.assert_allclose(1, np.sum(bary_wass), atol=1e-3)
np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-5)
ot.bregman.barycenter_debiased(A_nx, M_nx, reg, log=True, verbose=False)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"])
def test_convergence_warning_barycenters(method):
w = 10
n_bins = w ** 2 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
A_img = A.reshape(2, w, w)
A_img /= A_img.sum((1, 2))[:, None, None]
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
reg = 0.1
with pytest.warns(UserWarning):
ot.bregman.barycenter_debiased(A, M, reg, weights, method=method, numItermax=1)
with pytest.warns(UserWarning):
ot.bregman.barycenter(A, M, reg, weights, method=method, numItermax=1)
with pytest.warns(UserWarning):
ot.bregman.convolutional_barycenter2d(A_img, reg, weights,
method=method, numItermax=1)
with pytest.warns(UserWarning):
ot.bregman.convolutional_barycenter2d_debiased(A_img, reg, weights,
method=method, numItermax=1)
def test_barycenter_stabilization(nx):
n_bins = 100 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
A_nx = nx.from_numpy(A)
M_nx = nx.from_numpy(M)
weights_b = nx.from_numpy(weights)
# wasserstein
reg = 1e-2
bar_np = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn", stopThr=1e-8, verbose=True)
bar_stable = nx.to_numpy(ot.bregman.barycenter(
A_nx, M_nx, reg, weights_b, method="sinkhorn_stabilized",
stopThr=1e-8, verbose=True
))
bar = nx.to_numpy(ot.bregman.barycenter(
A_nx, M_nx, reg, weights_b, method="sinkhorn",
stopThr=1e-8, verbose=True
))
np.testing.assert_allclose(bar, bar_stable)
np.testing.assert_allclose(bar, bar_np)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"])
def test_wasserstein_bary_2d(nx, method):
size = 20 # size of a square image
a1 = np.random.rand(size, size)
a1 += a1.min()
a1 = a1 / np.sum(a1)
a2 = np.random.rand(size, size)
a2 += a2.min()
a2 = a2 / np.sum(a2)
# creating matrix A containing all distributions
A = np.zeros((2, size, size))
A[0, :, :] = a1
A[1, :, :] = a2
A_nx = nx.from_numpy(A)
# wasserstein
reg = 1e-2
if nx.__name__ == "jax" and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.convolutional_barycenter2d(A_nx, reg, method=method)
else:
bary_wass_np = ot.bregman.convolutional_barycenter2d(A, reg, method=method)
bary_wass = nx.to_numpy(ot.bregman.convolutional_barycenter2d(A_nx, reg, method=method))
np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3)
np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3)
# help in checking if log and verbose do not bug the function
ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"])
def test_wasserstein_bary_2d_debiased(nx, method):
size = 20 # size of a square image
a1 = np.random.rand(size, size)
a1 += a1.min()
a1 = a1 / np.sum(a1)
a2 = np.random.rand(size, size)
a2 += a2.min()
a2 = a2 / np.sum(a2)
# creating matrix A containing all distributions
A = np.zeros((2, size, size))
A[0, :, :] = a1
A[1, :, :] = a2
A_nx = nx.from_numpy(A)
# wasserstein
reg = 1e-2
if nx.__name__ == "jax" and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.convolutional_barycenter2d_debiased(A_nx, reg, method=method)
else:
bary_wass_np = ot.bregman.convolutional_barycenter2d_debiased(A, reg, method=method)
bary_wass = nx.to_numpy(ot.bregman.convolutional_barycenter2d_debiased(A_nx, reg, method=method))
np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3)
np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3)
    # check that the log and verbose options do not break the function
    ot.bregman.convolutional_barycenter2d_debiased(A, reg, log=True, verbose=True)
def test_unmix(nx):
n_bins = 50 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
a = ot.datasets.make_1D_gauss(n_bins, m=30, s=10)
# creating matrix A containing all distributions
    D = np.vstack((a1, a2))
#!/usr/bin/env python
# Copyright 2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
ddCOSMO TDA, TDHF, TDDFT gradients
The implementations are based on the modules
pyscf.grad.tdrhf
pyscf.grad.tdrks
pyscf.grad.tduhf
pyscf.grad.tduks
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import gto
from pyscf import scf
from pyscf import dft
from pyscf import df
from pyscf.dft import numint
from pyscf.solvent import ddcosmo
from pyscf.solvent import ddcosmo_grad
from pyscf.solvent._attach_solvent import _Solvation
from pyscf.grad import rks as rks_grad
from pyscf.grad import tdrks as tdrks_grad
from pyscf.grad import tduks as tduks_grad
from pyscf.scf import cphf, ucphf
def make_grad_object(grad_method):
'''For grad_method in vacuum, add nuclear gradients of solvent pcmobj'''
# Zeroth order method object must be a solvation-enabled method
assert isinstance(grad_method.base, _Solvation)
if grad_method.base.with_solvent.frozen:
        raise RuntimeError('Frozen solvent model is not available for energy gradients')
grad_method_class = grad_method.__class__
class WithSolventGrad(grad_method_class):
def __init__(self, grad_method):
self.__dict__.update(grad_method.__dict__)
self.de_solvent = None
self.de_solute = None
self._keys = self._keys.union(['de_solvent', 'de_solute'])
def grad_elec(self, xy, singlet, atmlst=None):
if isinstance(self.base._scf, dft.uks.UKS):
return tduks_grad_elec(self, xy, atmlst, self.max_memory, self.verbose)
elif isinstance(self.base._scf, dft.rks.RKS):
return tdrks_grad_elec(self, xy, singlet, atmlst, self.max_memory, self.verbose)
elif isinstance(self.base._scf, scf.uhf.UHF):
return tduhf_grad_elec(self, xy, atmlst, self.max_memory, self.verbose)
elif isinstance(self.base._scf, scf.hf.RHF):
return tdrhf_grad_elec(self, xy, singlet, atmlst, self.max_memory, self.verbose)
# TODO: if moving to python3, change signature to
# def kernel(self, *args, dm=None, atmlst=None, **kwargs):
def kernel(self, *args, **kwargs):
dm = kwargs.pop('dm', None)
if dm is None:
dm = self.base._scf.make_rdm1(ao_repr=True)
self.de_solvent = ddcosmo_grad.kernel(self.base.with_solvent, dm)
self.de_solute = grad_method_class.kernel(self, *args, **kwargs)
self.de = self.de_solute + self.de_solvent
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s (+%s) gradients ---------------',
self.base.__class__.__name__,
self.base.with_solvent.__class__.__name__)
self._write(self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
return self.de
def _finalize(self):
# disable _finalize. It is called in grad_method.kernel method
# where self.de was not yet initialized.
pass
return WithSolventGrad(grad_method)
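# A minimal usage sketch (assumed workflow, not part of this module; the exact
# solvent-enabled TD gradient entry points can differ between PySCF versions):
#
#   from pyscf import gto, scf, tdscf, solvent
#   mol = gto.M(atom='H 0 0 0; F 0 0 1.1', basis='6-31g')
#   mf = solvent.ddCOSMO(scf.RHF(mol)).run()
#   td = tdscf.TDA(mf).run()
#   grad = make_grad_object(td.nuc_grad_method())
#   grad.kernel()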
def tdrhf_grad_elec(td_grad, x_y, singlet=True, atmlst=None,
max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tdrhf.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
nao, nmo = mo_coeff.shape
nocc = (mo_occ>0).sum()
nvir = nmo - nocc
x, y = x_y
xpy = (x+y).reshape(nocc,nvir).T
xmy = (x-y).reshape(nocc,nvir).T
orbv = mo_coeff[:,nocc:]
orbo = mo_coeff[:,:nocc]
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
dvv = numpy.einsum('ai,bi->ab', xpy, xpy) + numpy.einsum('ai,bi->ab', xmy, xmy)
doo =-numpy.einsum('ai,aj->ij', xpy, xpy) - numpy.einsum('ai,aj->ij', xmy, xmy)
dmxpy = reduce(numpy.dot, (orbv, xpy, orbo.T))
dmxmy = reduce(numpy.dot, (orbv, xmy, orbo.T))
dmzoo = reduce(numpy.dot, (orbo, doo, orbo.T))
dmzoo+= reduce(numpy.dot, (orbv, dvv, orbv.T))
vj, vk = mf.get_jk(mol, (dmzoo, dmxpy+dmxpy.T, dmxmy-dmxmy.T), hermi=0)
if with_solvent.equilibrium_solvation:
vj[:2] += mf.with_solvent._B_dot_x((dmzoo, dmxpy+dmxpy.T))
else:
vj[0] += mf.with_solvent._B_dot_x(dmzoo)
veff0doo = vj[0] * 2 - vk[0]
wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
if singlet:
veff = vj[1] * 2 - vk[1]
else:
veff = -vk[1]
veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
veff = -vk[2]
veff0mom = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mom[nocc:,nocc:], xmy) * 2
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
# set singlet=None, generate function for CPHF type response kernel
vresp = mf.gen_response(singlet=None, hermi=1)
def fvind(x): # For singlet, closed shell ground state
dm = reduce(numpy.dot, (orbv, x.reshape(nvir,nocc)*2, orbo.T))
v1ao = vresp(dm+dm.T)
return reduce(numpy.dot, (orbv.T, v1ao, orbo)).ravel()
z1 = cphf.solve(fvind, mo_energy, mo_occ, wvo,
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
z1 = z1.reshape(nvir,nocc)
time1 = log.timer('Z-vector using CPHF solver', *time0)
z1ao = reduce(numpy.dot, (orbv, z1, orbo.T))
veff = vresp(z1ao+z1ao.T)
im0 = numpy.zeros((nmo,nmo))
im0[:nocc,:nocc] = reduce(numpy.dot, (orbo.T, veff0doo+veff, orbo))
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mop[nocc:,:nocc], xpy)
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,nocc:] = numpy.einsum('ci,ai->ac', veff0mop[nocc:,:nocc], xpy)
im0[nocc:,nocc:]+= numpy.einsum('ci,ai->ac', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,:nocc] = numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy)*2
im0[nocc:,:nocc]+= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy)*2
zeta = lib.direct_sum('i+j->ij', mo_energy, mo_energy) * .5
zeta[nocc:,:nocc] = mo_energy[:nocc]
zeta[:nocc,nocc:] = mo_energy[nocc:]
dm1 = numpy.zeros((nmo,nmo))
dm1[:nocc,:nocc] = doo
dm1[nocc:,nocc:] = dvv
dm1[nocc:,:nocc] = z1
dm1[:nocc,:nocc] += numpy.eye(nocc)*2 # for ground state
im0 = reduce(numpy.dot, (mo_coeff, im0+zeta*dm1, mo_coeff.T))
# Initialize hcore_deriv with the underlying SCF object because some
    # extensions (e.g. QM/MM, solvent) modify the SCF object only.
mf_grad = td_grad.base._scf.nuc_grad_method()
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dmz1doo = z1ao + dmzoo
oo0 = reduce(numpy.dot, (orbo, orbo.T))
vj, vk = td_grad.get_jk(mol, (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T,
dmxmy-dmxmy.T))
vj = vj.reshape(-1,3,nao,nao)
vk = vk.reshape(-1,3,nao,nao)
if singlet:
vhf1 = vj * 2 - vk
else:
vhf1 = numpy.vstack((vj[:2]*2-vk[:2], -vk[2:]))
time1 = log.timer('2e AO integral derivatives', *time1)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# Ground state gradients
h1ao = hcore_deriv(ia)
h1ao[:,p0:p1] += vhf1[0,:,p0:p1]
h1ao[:,:,p0:p1] += vhf1[0,:,p0:p1].transpose(0,2,1)
# oo0*2 for doubly occupied orbitals
de[k] = numpy.einsum('xpq,pq->x', h1ao, oo0) * 2
de[k] += numpy.einsum('xpq,pq->x', h1ao, dmz1doo)
de[k] -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
de[k] -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])
de[k] += numpy.einsum('xij,ij->x', vhf1[1,:,p0:p1], oo0[p0:p1])
de[k] += numpy.einsum('xij,ij->x', vhf1[2,:,p0:p1], dmxpy[p0:p1,:]) * 2
de[k] += numpy.einsum('xij,ij->x', vhf1[3,:,p0:p1], dmxmy[p0:p1,:]) * 2
de[k] += numpy.einsum('xji,ij->x', vhf1[2,:,p0:p1], dmxpy[:,p0:p1]) * 2
de[k] -= numpy.einsum('xji,ij->x', vhf1[3,:,p0:p1], dmxmy[:,p0:p1]) * 2
de += _grad_solvent(with_solvent, oo0*2, dmz1doo, dmxpy*2, singlet)
log.timer('TDHF nuclear gradients', *time0)
return de
def tdrks_grad_elec(td_grad, x_y, singlet=True, atmlst=None,
max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tdrks.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
nao, nmo = mo_coeff.shape
nocc = (mo_occ>0).sum()
nvir = nmo - nocc
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
x, y = x_y
xpy = (x+y).reshape(nocc,nvir).T
xmy = (x-y).reshape(nocc,nvir).T
orbv = mo_coeff[:,nocc:]
orbo = mo_coeff[:,:nocc]
dvv = numpy.einsum('ai,bi->ab', xpy, xpy) + numpy.einsum('ai,bi->ab', xmy, xmy)
doo =-numpy.einsum('ai,aj->ij', xpy, xpy) - numpy.einsum('ai,aj->ij', xmy, xmy)
dmxpy = reduce(numpy.dot, (orbv, xpy, orbo.T))
dmxmy = reduce(numpy.dot, (orbv, xmy, orbo.T))
dmzoo = reduce(numpy.dot, (orbo, doo, orbo.T))
dmzoo+= reduce(numpy.dot, (orbv, dvv, orbv.T))
mem_now = lib.current_memory()[0]
max_memory = max(2000, td_grad.max_memory*.9-mem_now)
ni = mf._numint
ni.libxc.test_deriv_order(mf.xc, 3, raise_error=True)
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)
# dm0 = mf.make_rdm1(mo_coeff, mo_occ), but it is not used when computing
# fxc since rho0 is passed to fxc function.
rho0, vxc, fxc = ni.cache_xc_kernel(mf.mol, mf.grids, mf.xc,
[mo_coeff]*2, [mo_occ*.5]*2, spin=1)
f1vo, f1oo, vxc1, k1ao = \
tdrks_grad._contract_xc_kernel(td_grad, mf.xc, dmxpy,
dmzoo, True, True, singlet, max_memory)
if abs(hyb) > 1e-10:
dm = (dmzoo, dmxpy+dmxpy.T, dmxmy-dmxmy.T)
vj, vk = mf.get_jk(mol, dm, hermi=0)
if with_solvent.equilibrium_solvation:
vj[:2] += mf.with_solvent._B_dot_x((dmzoo, dmxpy+dmxpy.T))
else:
vj[0] += mf.with_solvent._B_dot_x(dmzoo)
vk *= hyb
if abs(omega) > 1e-10:
vk += mf.get_k(mol, dm, hermi=0, omega=omega) * (alpha-hyb)
veff0doo = vj[0] * 2 - vk[0] + f1oo[0] + k1ao[0] * 2
wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
if singlet:
veff = vj[1] * 2 - vk[1] + f1vo[0] * 2
else:
veff = -vk[1] + f1vo[0] * 2
veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
veff = -vk[2]
veff0mom = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mom[nocc:,nocc:], xmy) * 2
else:
vj = mf.get_j(mol, (dmzoo, dmxpy+dmxpy.T), hermi=1)
if with_solvent.equilibrium_solvation:
vj[:2] += mf.with_solvent._B_dot_x((dmzoo, dmxpy+dmxpy.T))
else:
vj[0] += mf.with_solvent._B_dot_x(dmzoo)
veff0doo = vj[0] * 2 + f1oo[0] + k1ao[0] * 2
wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
if singlet:
veff = vj[1] * 2 + f1vo[0] * 2
else:
veff = f1vo[0] * 2
veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
veff0mom = numpy.zeros((nmo,nmo))
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
# set singlet=None, generate function for CPHF type response kernel
vresp = mf.gen_response(singlet=None, hermi=1)
def fvind(x):
dm = reduce(numpy.dot, (orbv, x.reshape(nvir,nocc)*2, orbo.T))
v1ao = vresp(dm+dm.T)
return reduce(numpy.dot, (orbv.T, v1ao, orbo)).ravel()
z1 = cphf.solve(fvind, mo_energy, mo_occ, wvo,
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
z1 = z1.reshape(nvir,nocc)
time1 = log.timer('Z-vector using CPHF solver', *time0)
z1ao = reduce(numpy.dot, (orbv, z1, orbo.T))
veff = vresp(z1ao+z1ao.T)
im0 = numpy.zeros((nmo,nmo))
im0[:nocc,:nocc] = reduce(numpy.dot, (orbo.T, veff0doo+veff, orbo))
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mop[nocc:,:nocc], xpy)
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,nocc:] = numpy.einsum('ci,ai->ac', veff0mop[nocc:,:nocc], xpy)
im0[nocc:,nocc:]+= numpy.einsum('ci,ai->ac', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,:nocc] = numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy)*2
im0[nocc:,:nocc]+= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy)*2
zeta = lib.direct_sum('i+j->ij', mo_energy, mo_energy) * .5
zeta[nocc:,:nocc] = mo_energy[:nocc]
zeta[:nocc,nocc:] = mo_energy[nocc:]
dm1 = numpy.zeros((nmo,nmo))
dm1[:nocc,:nocc] = doo
dm1[nocc:,nocc:] = dvv
dm1[nocc:,:nocc] = z1
dm1[:nocc,:nocc] += numpy.eye(nocc)*2 # for ground state
im0 = reduce(numpy.dot, (mo_coeff, im0+zeta*dm1, mo_coeff.T))
# Initialize hcore_deriv with the underlying SCF object because some
    # extensions (e.g. QM/MM, solvent) modify the SCF object only.
mf_grad = td_grad.base._scf.nuc_grad_method()
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dmz1doo = z1ao + dmzoo
oo0 = reduce(numpy.dot, (orbo, orbo.T))
if abs(hyb) > 1e-10:
dm = (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T, dmxmy-dmxmy.T)
vj, vk = td_grad.get_jk(mol, dm)
vk *= hyb
if abs(omega) > 1e-10:
with mol.with_range_coulomb(omega):
vk += td_grad.get_k(mol, dm) * (alpha-hyb)
vj = vj.reshape(-1,3,nao,nao)
vk = vk.reshape(-1,3,nao,nao)
if singlet:
veff1 = vj * 2 - vk
else:
veff1 = numpy.vstack((vj[:2]*2-vk[:2], -vk[2:]))
else:
vj = td_grad.get_j(mol, (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T))
vj = vj.reshape(-1,3,nao,nao)
veff1 = numpy.zeros((4,3,nao,nao))
if singlet:
veff1[:3] = vj * 2
else:
veff1[:2] = vj[:2] * 2
fxcz1 = tdrks_grad._contract_xc_kernel(td_grad, mf.xc, z1ao, None,
False, False, True, max_memory)[0]
veff1[0] += vxc1[1:]
veff1[1] +=(f1oo[1:] + fxcz1[1:] + k1ao[1:]*2)*2 # *2 for dmz1doo+dmz1oo.T
veff1[2] += f1vo[1:] * 2
time1 = log.timer('2e AO integral derivatives', *time1)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# Ground state gradients
h1ao = hcore_deriv(ia)
h1ao[:,p0:p1] += veff1[0,:,p0:p1]
h1ao[:,:,p0:p1] += veff1[0,:,p0:p1].transpose(0,2,1)
# oo0*2 for doubly occupied orbitals
e1 = numpy.einsum('xpq,pq->x', h1ao, oo0) * 2
e1 += numpy.einsum('xpq,pq->x', h1ao, dmz1doo)
e1 -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
e1 -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])
e1 += numpy.einsum('xij,ij->x', veff1[1,:,p0:p1], oo0[p0:p1])
e1 += numpy.einsum('xij,ij->x', veff1[2,:,p0:p1], dmxpy[p0:p1,:]) * 2
e1 += numpy.einsum('xij,ij->x', veff1[3,:,p0:p1], dmxmy[p0:p1,:]) * 2
e1 += numpy.einsum('xji,ij->x', veff1[2,:,p0:p1], dmxpy[:,p0:p1]) * 2
e1 -= numpy.einsum('xji,ij->x', veff1[3,:,p0:p1], dmxmy[:,p0:p1]) * 2
de[k] = e1
de += _grad_solvent(with_solvent, oo0*2, dmz1doo, dmxpy*2, singlet)
log.timer('TDDFT nuclear gradients', *time0)
return de
def tduhf_grad_elec(td_grad, x_y, atmlst=None, max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tduhf.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
occidxa = numpy.where(mo_occ[0]>0)[0]
occidxb = numpy.where(mo_occ[1]>0)[0]
viridxa = numpy.where(mo_occ[0]==0)[0]
viridxb = numpy.where(mo_occ[1]==0)[0]
nocca = len(occidxa)
noccb = len(occidxb)
nvira = len(viridxa)
nvirb = len(viridxb)
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,viridxa]
orbvb = mo_coeff[1][:,viridxb]
nao = mo_coeff[0].shape[0]
nmoa = nocca + nvira
nmob = noccb + nvirb
(xa, xb), (ya, yb) = x_y
xpya = (xa+ya).reshape(nocca,nvira).T
xpyb = (xb+yb).reshape(noccb,nvirb).T
xmya = (xa-ya).reshape(nocca,nvira).T
xmyb = (xb-yb).reshape(noccb,nvirb).T
dvva = numpy.einsum('ai,bi->ab', xpya, xpya) + numpy.einsum('ai,bi->ab', xmya, xmya)
dvvb = numpy.einsum('ai,bi->ab', xpyb, xpyb) + numpy.einsum('ai,bi->ab', xmyb, xmyb)
dooa =-numpy.einsum('ai,aj->ij', xpya, xpya) - numpy.einsum('ai,aj->ij', xmya, xmya)
doob =-numpy.einsum('ai,aj->ij', xpyb, xpyb) - numpy.einsum('ai,aj->ij', xmyb, xmyb)
dmxpya = reduce(numpy.dot, (orbva, xpya, orboa.T))
dmxpyb = reduce(numpy.dot, (orbvb, xpyb, orbob.T))
dmxmya = reduce(numpy.dot, (orbva, xmya, orboa.T))
dmxmyb = reduce(numpy.dot, (orbvb, xmyb, orbob.T))
dmzooa = reduce(numpy.dot, (orboa, dooa, orboa.T))
dmzoob = reduce(numpy.dot, (orbob, doob, orbob.T))
dmzooa+= reduce(numpy.dot, (orbva, dvva, orbva.T))
dmzoob+= reduce(numpy.dot, (orbvb, dvvb, orbvb.T))
vj, vk = mf.get_jk(mol, (dmzooa, dmxpya+dmxpya.T, dmxmya-dmxmya.T,
dmzoob, dmxpyb+dmxpyb.T, dmxmyb-dmxmyb.T), hermi=0)
vj = vj.reshape(2,3,nao,nao)
vk = vk.reshape(2,3,nao,nao)
if with_solvent.equilibrium_solvation:
dmxpy = dmxpya + dmxpyb
vj[0,:2] += mf.with_solvent._B_dot_x((dmzooa+dmzoob, dmxpy+dmxpy.T))
else:
vj[0,0] += mf.with_solvent._B_dot_x(dmzooa+dmzoob)
veff0doo = vj[0,0]+vj[1,0] - vk[:,0]
wvoa = reduce(numpy.dot, (orbva.T, veff0doo[0], orboa)) * 2
wvob = reduce(numpy.dot, (orbvb.T, veff0doo[1], orbob)) * 2
veff = vj[0,1]+vj[1,1] - vk[:,1]
veff0mopa = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0mopb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0mopa[nocca:,nocca:], xpya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0mopb[noccb:,noccb:], xpyb) * 2
veff = -vk[:,2]
veff0moma = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0momb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0moma[nocca:,nocca:], xmya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0momb[noccb:,noccb:], xmyb) * 2
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
vresp = mf.gen_response(hermi=1)
def fvind(x):
dm1 = numpy.empty((2,nao,nao))
xa = x[0,:nvira*nocca].reshape(nvira,nocca)
xb = x[0,nvira*nocca:].reshape(nvirb,noccb)
dma = reduce(numpy.dot, (orbva, xa, orboa.T))
dmb = reduce(numpy.dot, (orbvb, xb, orbob.T))
dm1[0] = dma + dma.T
dm1[1] = dmb + dmb.T
v1 = vresp(dm1)
v1a = reduce(numpy.dot, (orbva.T, v1[0], orboa))
v1b = reduce(numpy.dot, (orbvb.T, v1[1], orbob))
return numpy.hstack((v1a.ravel(), v1b.ravel()))
z1a, z1b = ucphf.solve(fvind, mo_energy, mo_occ, (wvoa,wvob),
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
time1 = log.timer('Z-vector using UCPHF solver', *time0)
z1ao = numpy.empty((2,nao,nao))
z1ao[0] = reduce(numpy.dot, (orbva, z1a, orboa.T))
z1ao[1] = reduce(numpy.dot, (orbvb, z1b, orbob.T))
veff = vresp((z1ao+z1ao.transpose(0,2,1)) * .5)
im0a = numpy.zeros((nmoa,nmoa))
im0b = numpy.zeros((nmob,nmob))
im0a[:nocca,:nocca] = reduce(numpy.dot, (orboa.T, veff0doo[0]+veff[0], orboa)) * .5
im0b[:noccb,:noccb] = reduce(numpy.dot, (orbob.T, veff0doo[1]+veff[1], orbob)) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0moma[nocca:,:nocca], xmya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,nocca:] = numpy.einsum('ci,ai->ac', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[noccb:,noccb:] = numpy.einsum('ci,ai->ac', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[nocca:,nocca:]+= numpy.einsum('ci,ai->ac', veff0moma[nocca:,:nocca], xmya) * .5
im0b[noccb:,noccb:]+= numpy.einsum('ci,ai->ac', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,:nocca] = numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya)
im0b[noccb:,:noccb] = numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb)
im0a[nocca:,:nocca]+= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya)
im0b[noccb:,:noccb]+= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb)
zeta_a = (mo_energy[0][:,None] + mo_energy[0]) * .5
zeta_b = (mo_energy[1][:,None] + mo_energy[1]) * .5
zeta_a[nocca:,:nocca] = mo_energy[0][:nocca]
zeta_b[noccb:,:noccb] = mo_energy[1][:noccb]
zeta_a[:nocca,nocca:] = mo_energy[0][nocca:]
zeta_b[:noccb,noccb:] = mo_energy[1][noccb:]
dm1a = numpy.zeros((nmoa,nmoa))
dm1b = numpy.zeros((nmob,nmob))
dm1a[:nocca,:nocca] = dooa * .5
dm1b[:noccb,:noccb] = doob * .5
dm1a[nocca:,nocca:] = dvva * .5
dm1b[noccb:,noccb:] = dvvb * .5
dm1a[nocca:,:nocca] = z1a * .5
dm1b[noccb:,:noccb] = z1b * .5
dm1a[:nocca,:nocca] += numpy.eye(nocca) # for ground state
dm1b[:noccb,:noccb] += numpy.eye(noccb)
im0a = reduce(numpy.dot, (mo_coeff[0], im0a+zeta_a*dm1a, mo_coeff[0].T))
im0b = reduce(numpy.dot, (mo_coeff[1], im0b+zeta_b*dm1b, mo_coeff[1].T))
im0 = im0a + im0b
# Initialize hcore_deriv with the underlying SCF object because some
    # extensions (e.g. QM/MM, solvent) modify the SCF object only.
mf_grad = td_grad.base._scf.nuc_grad_method()
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dmz1dooa = z1ao[0] + dmzooa
dmz1doob = z1ao[1] + dmzoob
oo0a = reduce(numpy.dot, (orboa, orboa.T))
oo0b = reduce(numpy.dot, (orbob, orbob.T))
as_dm1 = oo0a + oo0b + (dmz1dooa + dmz1doob) * .5
vj, vk = td_grad.get_jk(mol, (oo0a, dmz1dooa+dmz1dooa.T, dmxpya+dmxpya.T, dmxmya-dmxmya.T,
oo0b, dmz1doob+dmz1doob.T, dmxpyb+dmxpyb.T, dmxmyb-dmxmyb.T))
vj = vj.reshape(2,4,3,nao,nao)
vk = vk.reshape(2,4,3,nao,nao)
vhf1a, vhf1b = vj[0] + vj[1] - vk
time1 = log.timer('2e AO integral derivatives', *time1)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# Ground state gradients
h1ao = hcore_deriv(ia)
de[k] = numpy.einsum('xpq,pq->x', h1ao, as_dm1)
de[k] += numpy.einsum('xpq,pq->x', vhf1a[0,:,p0:p1], oo0a[p0:p1])
de[k] += numpy.einsum('xpq,pq->x', vhf1b[0,:,p0:p1], oo0b[p0:p1])
de[k] += numpy.einsum('xpq,qp->x', vhf1a[0,:,p0:p1], oo0a[:,p0:p1])
de[k] += numpy.einsum('xpq,qp->x', vhf1b[0,:,p0:p1], oo0b[:,p0:p1])
de[k] += numpy.einsum('xpq,pq->x', vhf1a[0,:,p0:p1], dmz1dooa[p0:p1]) * .5
de[k] += numpy.einsum('xpq,pq->x', vhf1b[0,:,p0:p1], dmz1doob[p0:p1]) * .5
de[k] += numpy.einsum('xpq,qp->x', vhf1a[0,:,p0:p1], dmz1dooa[:,p0:p1]) * .5
de[k] += numpy.einsum('xpq,qp->x', vhf1b[0,:,p0:p1], dmz1doob[:,p0:p1]) * .5
de[k] -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
de[k] -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])
de[k] += numpy.einsum('xij,ij->x', vhf1a[1,:,p0:p1], oo0a[p0:p1]) * .5
de[k] += numpy.einsum('xij,ij->x', vhf1b[1,:,p0:p1], oo0b[p0:p1]) * .5
de[k] += numpy.einsum('xij,ij->x', vhf1a[2,:,p0:p1], dmxpya[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', vhf1b[2,:,p0:p1], dmxpyb[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', vhf1a[3,:,p0:p1], dmxmya[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', vhf1b[3,:,p0:p1], dmxmyb[p0:p1,:])
de[k] += numpy.einsum('xji,ij->x', vhf1a[2,:,p0:p1], dmxpya[:,p0:p1])
de[k] += numpy.einsum('xji,ij->x', vhf1b[2,:,p0:p1], dmxpyb[:,p0:p1])
de[k] -= numpy.einsum('xji,ij->x', vhf1a[3,:,p0:p1], dmxmya[:,p0:p1])
de[k] -= numpy.einsum('xji,ij->x', vhf1b[3,:,p0:p1], dmxmyb[:,p0:p1])
dm0 = oo0a + oo0b
dmz1doo = (dmz1dooa + dmz1doob) * .5
dmxpy = dmxpya + dmxpyb
de += _grad_solvent(with_solvent, dm0, dmz1doo, dmxpy)
log.timer('TDUHF nuclear gradients', *time0)
return de
def tduks_grad_elec(td_grad, x_y, atmlst=None, max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tduks.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
occidxa = numpy.where(mo_occ[0]>0)[0]
occidxb = numpy.where(mo_occ[1]>0)[0]
viridxa = numpy.where(mo_occ[0]==0)[0]
viridxb = numpy.where(mo_occ[1]==0)[0]
nocca = len(occidxa)
noccb = len(occidxb)
nvira = len(viridxa)
nvirb = len(viridxb)
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,viridxa]
orbvb = mo_coeff[1][:,viridxb]
nao = mo_coeff[0].shape[0]
nmoa = nocca + nvira
nmob = noccb + nvirb
(xa, xb), (ya, yb) = x_y
xpya = (xa+ya).reshape(nocca,nvira).T
xpyb = (xb+yb).reshape(noccb,nvirb).T
xmya = (xa-ya).reshape(nocca,nvira).T
xmyb = (xb-yb).reshape(noccb,nvirb).T
dvva = numpy.einsum('ai,bi->ab', xpya, xpya) + numpy.einsum('ai,bi->ab', xmya, xmya)
dvvb = numpy.einsum('ai,bi->ab', xpyb, xpyb) + numpy.einsum('ai,bi->ab', xmyb, xmyb)
dooa =-numpy.einsum('ai,aj->ij', xpya, xpya) - numpy.einsum('ai,aj->ij', xmya, xmya)
doob =-numpy.einsum('ai,aj->ij', xpyb, xpyb) - numpy.einsum('ai,aj->ij', xmyb, xmyb)
dmxpya = reduce(numpy.dot, (orbva, xpya, orboa.T))
dmxpyb = reduce(numpy.dot, (orbvb, xpyb, orbob.T))
dmxmya = reduce(numpy.dot, (orbva, xmya, orboa.T))
dmxmyb = reduce(numpy.dot, (orbvb, xmyb, orbob.T))
dmzooa = reduce(numpy.dot, (orboa, dooa, orboa.T))
dmzoob = reduce(numpy.dot, (orbob, doob, orbob.T))
dmzooa+= reduce(numpy.dot, (orbva, dvva, orbva.T))
dmzoob+= reduce(numpy.dot, (orbvb, dvvb, orbvb.T))
ni = mf._numint
ni.libxc.test_deriv_order(mf.xc, 3, raise_error=True)
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)
# dm0 = mf.make_rdm1(mo_coeff, mo_occ), but it is not used when computing
# fxc since rho0 is passed to fxc function.
dm0 = None
rho0, vxc, fxc = ni.cache_xc_kernel(mf.mol, mf.grids, mf.xc,
mo_coeff, mo_occ, spin=1)
f1vo, f1oo, vxc1, k1ao = \
tduks_grad._contract_xc_kernel(td_grad, mf.xc, (dmxpya,dmxpyb),
(dmzooa,dmzoob), True, True, max_memory)
if abs(hyb) > 1e-10:
dm = (dmzooa, dmxpya+dmxpya.T, dmxmya-dmxmya.T,
dmzoob, dmxpyb+dmxpyb.T, dmxmyb-dmxmyb.T)
vj, vk = mf.get_jk(mol, dm, hermi=0)
vk *= hyb
if abs(omega) > 1e-10:
vk += mf.get_k(mol, dm, hermi=0, omega=omega) * (alpha-hyb)
vj = vj.reshape(2,3,nao,nao)
vk = vk.reshape(2,3,nao,nao)
if with_solvent.equilibrium_solvation:
dmxpy = dmxpya + dmxpyb
vj[0,:2] += mf.with_solvent._B_dot_x((dmzooa+dmzoob, dmxpy+dmxpy.T))
else:
vj[0,0] += mf.with_solvent._B_dot_x(dmzooa+dmzoob)
veff0doo = vj[0,0]+vj[1,0] - vk[:,0] + f1oo[:,0] + k1ao[:,0] * 2
wvoa = reduce(numpy.dot, (orbva.T, veff0doo[0], orboa)) * 2
wvob = reduce(numpy.dot, (orbvb.T, veff0doo[1], orbob)) * 2
veff = vj[0,1]+vj[1,1] - vk[:,1] + f1vo[:,0] * 2
veff0mopa = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0mopb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0mopa[nocca:,nocca:], xpya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0mopb[noccb:,noccb:], xpyb) * 2
veff = -vk[:,2]
veff0moma = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0momb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0moma[nocca:,nocca:], xmya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0momb[noccb:,noccb:], xmyb) * 2
else:
dm = (dmzooa, dmxpya+dmxpya.T,
dmzoob, dmxpyb+dmxpyb.T)
vj = mf.get_j(mol, dm, hermi=1).reshape(2,2,nao,nao)
if with_solvent.equilibrium_solvation:
dmxpy = dmxpya + dmxpyb
vj[0,:2] += mf.with_solvent._B_dot_x((dmzooa+dmzoob, dmxpy+dmxpy.T))
else:
vj[0,0] += mf.with_solvent._B_dot_x(dmzooa+dmzoob)
veff0doo = vj[0,0]+vj[1,0] + f1oo[:,0] + k1ao[:,0] * 2
wvoa = reduce(numpy.dot, (orbva.T, veff0doo[0], orboa)) * 2
wvob = reduce(numpy.dot, (orbvb.T, veff0doo[1], orbob)) * 2
veff = vj[0,1]+vj[1,1] + f1vo[:,0] * 2
veff0mopa = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0mopb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0mopa[nocca:,nocca:], xpya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0mopb[noccb:,noccb:], xpyb) * 2
veff0moma = numpy.zeros((nmoa,nmoa))
veff0momb = numpy.zeros((nmob,nmob))
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
vresp = mf.gen_response(hermi=1)
def fvind(x):
dm1 = numpy.empty((2,nao,nao))
xa = x[0,:nvira*nocca].reshape(nvira,nocca)
xb = x[0,nvira*nocca:].reshape(nvirb,noccb)
dma = reduce(numpy.dot, (orbva, xa, orboa.T))
dmb = reduce(numpy.dot, (orbvb, xb, orbob.T))
dm1[0] = dma + dma.T
dm1[1] = dmb + dmb.T
v1 = vresp(dm1)
v1a = reduce(numpy.dot, (orbva.T, v1[0], orboa))
v1b = reduce(numpy.dot, (orbvb.T, v1[1], orbob))
return numpy.hstack((v1a.ravel(), v1b.ravel()))
z1a, z1b = ucphf.solve(fvind, mo_energy, mo_occ, (wvoa,wvob),
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
time1 = log.timer('Z-vector using UCPHF solver', *time0)
z1ao = numpy.empty((2,nao,nao))
z1ao[0] = reduce(numpy.dot, (orbva, z1a, orboa.T))
z1ao[1] = reduce(numpy.dot, (orbvb, z1b, orbob.T))
veff = vresp((z1ao+z1ao.transpose(0,2,1)) * .5)
im0a = numpy.zeros((nmoa,nmoa))
im0b = numpy.zeros((nmob,nmob))
im0a[:nocca,:nocca] = reduce(numpy.dot, (orboa.T, veff0doo[0]+veff[0], orboa)) * .5
im0b[:noccb,:noccb] = reduce(numpy.dot, (orbob.T, veff0doo[1]+veff[1], orbob)) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0moma[nocca:,:nocca], xmya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,nocca:] = numpy.einsum('ci,ai->ac', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[noccb:,noccb:] = numpy.einsum('ci,ai->ac', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[nocca:,nocca:]+= numpy.einsum('ci,ai->ac', veff0moma[nocca:,:nocca], xmya) * .5
im0b[noccb:,noccb:]+= numpy.einsum('ci,ai->ac', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,:nocca] = numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya)
im0b[noccb:,:noccb] = numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb)
im0a[nocca:,:nocca]+= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya)
im0b[noccb:,:noccb]+= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb)
zeta_a = (mo_energy[0][:,None] + mo_energy[0]) * .5
zeta_b = (mo_energy[1][:,None] + mo_energy[1]) * .5
zeta_a[nocca:,:nocca] = mo_energy[0][:nocca]
zeta_b[noccb:,:noccb] = mo_energy[1][:noccb]
zeta_a[:nocca,nocca:] = mo_energy[0][nocca:]
zeta_b[:noccb,noccb:] = mo_energy[1][noccb:]
dm1a = numpy.zeros((nmoa,nmoa))
dm1b = numpy.zeros((nmob,nmob))
dm1a[:nocca,:nocca] = dooa * .5
dm1b[:noccb,:noccb] = doob * .5
dm1a[nocca:,nocca:] = dvva * .5
dm1b[noccb:,noccb:] = dvvb * .5
dm1a[nocca:,:nocca] = z1a * .5
dm1b[noccb:,:noccb] = z1b * .5
dm1a[:nocca,:nocca] += numpy.eye(nocca) # for ground state
    dm1b[:noccb,:noccb] += numpy.eye(noccb)
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from collections import OrderedDict
import os
import warnings
from astropy.modeling import models
import ccdproc
from astropy.extern import six
from astropy.stats import median_absolute_deviation
import numpy as np
from . import gui
import ipywidgets as widgets
from traitlets import Any, link
__all__ = [
'Reduction',
'Combiner',
'CosmicRaySettings',
'Slice',
'CalibrationStep',
'BiasSubtract',
'DarkSubtract',
'FlatCorrect',
'Overscan',
'Trim'
]
DEFAULT_IMAGE_UNIT = "adu"
# The dictionary below is used to map the dtype of the image being
# reduced to the dtype of the output. The assumption is that the output
# is typically some kind of floating point, but that there is no need
# for very high precision output given relatively low resolution
# input.
REDUCE_IMAGE_DTYPE_MAPPING = {
'uint8': 'float32',
'int8': 'float32',
'uint16': 'float32',
'int16': 'float32',
'float32': 'float32',
'uint32': 'float64',
'int32': 'float64',
'float64': 'float64'
}
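# For example, a raw 'uint16' frame is written back out as 'float32':
#
#   REDUCE_IMAGE_DTYPE_MAPPING[str(np.dtype('uint16'))]   # -> 'float32'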
# The limit below is used by the combining function to decide whether or
# not the image should be broken up into chunks.
DEFAULT_MEMORY_LIMIT = 4e9 # roughly 4GB
class ReducerBase(gui.ToggleGo):
"""
Base class for reduction and combination widgets that provides a couple
of properties common to both.
Parameters
----------
apply_to : dict
Key-value pair(s) that select images that will be acted on by the
widget.
destination : str
Directory in which reduced images will be stored.
"""
def __init__(self, *arg, **kwd):
self._apply_to = kwd.pop('apply_to', None)
self._destination = kwd.pop('destination', None)
super(ReducerBase, self).__init__(*arg, **kwd)
@property
def destination(self):
return self._destination
@property
def apply_to(self):
return self._apply_to
class Reduction(ReducerBase):
"""
Primary widget for performing a logical reduction step (e.g. dark
subtraction or flat correction).
"""
def __init__(self, *arg, **kwd):
allow_flat = kwd.pop('allow_flat', True)
allow_dark = kwd.pop('allow_dark', True)
allow_bias = kwd.pop('allow_bias', True)
allow_cosmic_ray = kwd.pop('allow_cosmic_ray', False)
allow_copy = kwd.pop('allow_copy_only', True)
self.image_collection = kwd.pop('input_image_collection', None)
self._master_source = kwd.pop('master_source', None)
super(Reduction, self).__init__(*arg, **kwd)
self._overscan = Overscan(description='Subtract overscan?')
self._trim = Trim(description='Trim (specify region to keep)?')
self._cosmic_ray = CosmicRaySettings()
self._bias_calib = BiasSubtract(master_source=self._master_source)
self._dark_calib = DarkSubtract(master_source=self._master_source)
self._flat_calib = FlatCorrect(master_source=self._master_source)
if allow_copy:
self._copy_only = CopyFiles()
self.add_child(self._copy_only)
else:
self._copy_only = None
self.add_child(self._overscan)
self.add_child(self._trim)
if allow_bias:
self.add_child(self._bias_calib)
if allow_dark:
self.add_child(self._dark_calib)
if allow_flat:
self.add_child(self._flat_calib)
if allow_cosmic_ray:
self.add_child(self._cosmic_ray)
if self._copy_only:
self._copy_only._state_monitor.on_trait_change(
self._disable_all_others(),
str('value')
)
def action(self):
if not self.image_collection:
raise ValueError("No images to reduce")
self.progress_bar.visible = True
        self.progress_bar.layout.visibility = 'visible'
self.progress_bar.layout.display = 'flex'
# Refresh in case files have been added since the widget was created.
self.image_collection.refresh()
# Only refresh the master_source if it exists. No need to error check
# the main image_collection because a sensible error is raised if it
# does not exist.
if self._master_source:
self._master_source.refresh()
# Suppress warnings that come up here...mostly about HIERARCH keywords
warnings.filterwarnings('ignore')
try:
n_files = \
len(self.image_collection.files_filtered(**self.apply_to))
current_file = 0
for hdu, fname in self.image_collection.hdus(return_fname=True,
save_location=self.destination,
**self.apply_to):
current_file += 1
try:
unit = hdu.header['BUNIT']
except KeyError:
unit = DEFAULT_IMAGE_UNIT
ccd = ccdproc.CCDData(hdu.data, meta=hdu.header, unit=unit)
for child in self.container.children:
if not child.toggle.value:
# Nothing to do for this child, so keep going.
continue
ccd = child.action(ccd)
input_dtype = hdu.data.dtype.name
hdu_tmp = ccd.to_hdu()[0]
hdu.header = hdu_tmp.header
hdu.data = hdu_tmp.data
desired_dtype = REDUCE_IMAGE_DTYPE_MAPPING[str(input_dtype)]
if desired_dtype != hdu.data.dtype:
hdu.data = hdu.data.astype(desired_dtype)
# Workaround to ensure uint16 images are handled properly.
if 'bzero' in hdu.header:
# Check for the unsigned int16 case, and if our data type
# is no longer uint16, delete BZERO and BSCALE
header_unsigned_int = ((hdu.header['bscale'] == 1) and
(hdu.header['bzero'] == 32768))
if (header_unsigned_int and
(hdu.data.dtype != np.dtype('uint16'))):
del hdu.header['bzero'], hdu.header['bscale']
self.progress_bar.description = \
("Processed file {} of {}".format(current_file, n_files))
self.progress_bar.value = current_file/n_files
except IOError:
print("One or more of the reduced images already exists. Delete "
"those files and try again. This notebook will NOT "
"overwrite existing files.")
finally:
self.progress_bar.visible = False
self.progress_bar.layout.display = 'none'
def _disable_all_others(self):
if not self._copy_only:
return None
def handler():
all_but_copy = [c for c in self.container.children
if c is not self._copy_only]
if self._copy_only._state_monitor.value:
for child in all_but_copy:
print(child.description)
child.disabled = True
else:
for child in all_but_copy:
child.disabled = False
return handler
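# A minimal notebook usage sketch (argument names other than those defined in
# this module are assumptions; adjust to the actual gui.ToggleGo signature):
#
#   from ccdproc import ImageFileCollection
#   raw = ImageFileCollection('raw', keywords='*')
#   calibrated = ImageFileCollection('calibrated', keywords='*')
#   reduce_science = Reduction(description='Reduce science frames',
#                              apply_to={'imagetyp': 'light'},
#                              destination='reduced',
#                              input_image_collection=raw,
#                              master_source=calibrated)
#   reduce_science   # display the widget in a notebook cell, then run it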
class Clipping(gui.ToggleContainer):
"""docstring for Clipping"""
def __init__(self, *args, **kwd):
super(Clipping, self).__init__(*args, **kwd)
self._min_max = gui.ToggleMinMax(description="Clip by min/max?")
self._sigma_clip = gui.ToggleMinMax(description="Sigma clip?")
self.add_child(self._min_max)
self.add_child(self._sigma_clip)
self.format()
@property
def min_max(self):
if self._min_max.toggle.value:
return self._min_max
else:
return False
@property
def sigma_clip(self):
if self._sigma_clip.toggle.value:
return self._sigma_clip
else:
return False
@property
def is_sane(self):
# If not selected, sanity state does not matter...
if not self.toggle.value:
return None
# It makes no sense to have selected clipping but not a clipping
# method....
sanity = (self._min_max.toggle.value or
self._sigma_clip.toggle.value)
# For min_max clipping, maximum must be greater than minimum.
if self._min_max.toggle.value:
sanity = sanity and (self._min_max.max > self._min_max.min)
# For sigma clipping there is no relationship between maximum
# and minimum because both are number of deviations above/below
# central value, but values of 0 make no sense
if self._sigma_clip.toggle.value:
sanity = (sanity and
self._sigma_clip.min != 0 and
self._sigma_clip.max != 0)
return sanity
def format(self):
super(Clipping, self).format()
self._sigma_clip.format()
self._min_max.format()
def override_str_factory(obj):
"""
    Factory to create a new class for an IPython widget in which the
    ``__str__`` method is overridden with the widget's description and
    value.
Parameters
----------
obj : object
An IPython widget instance
Returns
-------
new_object : IPython widget with string method overridden
"""
from copy import copy
def new_str_method(self):
return ": ".join([str(self.description), str(self.value)])
new_instance = copy(obj)
original_class = type(obj)
new_class = type(original_class.__name__,
(original_class,),
{'__str__': new_str_method})
new_instance.__class__ = new_class
return new_instance
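# For example (illustrative):
#
#   box = override_str_factory(widgets.FloatText(description='Sigma', value=3.0))
#   str(box)   # -> 'Sigma: 3.0'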
class Combine(gui.ToggleContainer):
"""
Represent combine choices and actions.
"""
def __init__(self, *args, **kwd):
super(Combine, self).__init__(*args, **kwd)
self._combine_option = override_str_factory(
widgets.ToggleButtons(description="Combination method:",
options=['Average', 'Median'],
style={'description_width': 'initial'})
)
self.add_child(self._combine_option)
self._scaling = gui.ToggleContainer(description="Scale before combining?")
        scal_desc = "Which value should images be scaled to match?"
self._scale_by = override_str_factory(
widgets.RadioButtons(description=scal_desc,
options=['mean', 'median'],
style={'description_width': 'initial'})
)
self._scaling.add_child(self._scale_by)
self.add_child(self._scaling)
@property
def method(self):
return self._combine_option.value
@property
def scaling_func(self):
if not self._scaling.toggle.value:
return None
if self._scale_by.value == 'mean':
return lambda arr: 1/np.ma.average(arr)
elif self._scale_by.value == 'median':
            return lambda arr: 1/np.ma.median(arr)
#pylint: disable=too-many-lines
"""
This module defines the data structures used as input and output to the analysis module.
The required input data can be provided by constructing a Receiver object in Python or by
loading an HDF5 data file, which will populate the Python class hierarchy.
"""
import itertools
import numpy as np
import scipy.interpolate as inter
import h5py
from srlife import writers
class Receiver:
""" Basic definition of the tubular receiver geometry.
A receiver is a collection of panels linked together by
an elastic spring stiffness. This stiffness can be a real number,
"rigid" or "disconnect"
Panels can be labeled by strings. By default the names
are sequential numbers.
In addition this object stores some required metadata:
1) The daily cycle period (which can be less than 24 hours
if the analysis neglects some of the night period)
2) The number of days (see #1) explicitly represented in the
analysis results.
Args:
period (float): single daily cycle period
days (int): number of daily cycles explicitly represented
panel_stiffness (float or string): panel stiffness (float) or "rigid" or "disconnect"
"""
def __init__(self, period, days, panel_stiffness):
""" Initialize a Receiver object
"""
self.period = period
self.days = days
self.panels = {}
self.stiffness = panel_stiffness
def write_vtk(self, basename):
""" Write out the receiver as individual panels with names basename_panelname
The VTK format is mostly used for additional postprocessing. The VTK
format cannot be used for input.
Args:
basename (str): base file name
"""
for n, panel in self.panels.items():
panel.write_vtk(basename + "_" + n)
def close(self, other):
""" Check to see if two objects are nearly equal.
Primarily used for testing
Args:
other (Receiver): the object to compare against
Returns:
bool: True if the receivers are similar.
"""
base = (
np.isclose(self.period, other.period)
and np.isclose(self.days, other.days)
and np.isclose(self.stiffness, other.stiffness)
)
for name, panel in self.panels.items():
if name not in other.panels:
return False
base = (base and panel.close(other.panels[name]))
return base
@property
def tubes(self):
""" Shortcut iterator over all tubes
Returns:
iterator over panels
"""
return itertools.chain(*(panel.tubes.values()
for panel in self.panels.values()))
def set_paging(self, page):
""" Tell tubes to store results on or off disk
Args:
page (bool): if true, page results to disk
"""
for i,tube in enumerate(self.tubes):
tube.set_paging(page, i)
@property
def ntubes(self):
""" Shortcut for total number of tubes
Returns:
int: Number of tubes in all panels
"""
return len(list(self.tubes))
@property
def npanels(self):
""" Number of panels in the receiver
Returns:
int: Number of panels
"""
return len(self.panels)
def add_panel(self, panel, name = None):
""" Add a panel object to the receiver
Args:
panel (Panel): panel object
name (Optional[str]): panel name, by default follows fixed scheme
"""
if not name:
name = next_name(self.panels.keys())
self.panels[name] = panel
def save(self, fobj):
""" Save to an HDF5 file
This saves a Receiver object to the HDF5 format.
Args:
fobj (str): either a h5py file object or a filename
"""
if isinstance(fobj, str):
fobj = h5py.File(fobj, 'w')
fobj.attrs['period'] = self.period
fobj.attrs['days'] = self.days
fobj.attrs['stiffness'] = self.stiffness
grp = fobj.create_group("panels")
for name, panel in self.panels.items():
sgrp = grp.create_group(name)
panel.save(sgrp)
@classmethod
def load(cls, fobj):
""" Load a Receiver from an HDF5 file
A full description of the HDF format is included in the module documentation
Args:
fobj (string): either a h5py file object or a filename
Returns:
Receiver: The constructed receiver object.
"""
if isinstance(fobj, str):
fobj = h5py.File(fobj, 'r')
res = cls(fobj.attrs['period'], fobj.attrs['days'], fobj.attrs['stiffness'])
grp = fobj["panels"]
for name in grp:
res.add_panel(Panel.load(grp[name]), name)
return res
class Panel:
""" Basic definition of a panel in a tubular receiver.
    A panel is a collection of Tube objects linked together by
an elastic spring stiffness. This stiffness can be a real number,
a string "disconnect" or a string "rigid"
Tubes in the panel can be labeled by strings. By default the
names are sequential numbers.
Args:
stiffness: manifold spring stiffness
"""
def __init__(self, stiffness):
""" Initialize the panel
"""
self.tubes = {}
self.stiffness = stiffness
def write_vtk(self, basename):
""" Write out the panels as individual tubes with names basename_tubename
Args:
basename (string): base file name
"""
for n, tube in self.tubes.items():
tube.write_vtk(basename + "_" + n)
def close(self, other):
""" Check to see if two objects are nearly equal.
Primarily used for testing
Args:
other (Panel): the object to compare against
Returns:
bool: true if the panels are sufficiently similar
"""
base = np.isclose(self.stiffness, other.stiffness)
for name, tube in self.tubes.items():
if name not in other.tubes:
return False
base = (base and tube.close(other.tubes[name]))
return base
@property
def ntubes(self):
""" Number of tubes in the panel
Returns:
int: number of tubes in the panel
"""
return len(self.tubes)
def add_tube(self, tube, name = None):
""" Add a tube object to the panel
Args:
tube (Tube): tube object
name (Optional[str]): Tube name, defaults to fixed scheme.
"""
if not name:
name = next_name(self.tubes.keys())
self.tubes[name] = tube
def save(self, fobj):
""" Save to an HDF5 file
Args:
fobj (h5py.Group): h5py group
"""
fobj.attrs['stiffness'] = self.stiffness
grp = fobj.create_group("tubes")
for name, tube in self.tubes.items():
sgrp = grp.create_group(name)
tube.save(sgrp)
@classmethod
def load(cls, fobj):
""" Load from an HDF5 file
Args:
fobj (h5py.Group): h5py group containing the panel
"""
res = cls(fobj.attrs['stiffness'])
grp = fobj["tubes"]
for name in grp:
res.add_tube(Tube.load(grp[name]), name)
return res
def next_name(names):
""" Determine the next numeric string name based on a list
Args:
names (list): list of current names (string)
"""
curr_ints = []
for name in names:
try:
curr_ints.append(int(name))
except ValueError:
continue
if len(curr_ints) == 0:
return str(0)
return str(max(curr_ints) + 1)
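# For example: next_name([]) -> '0', next_name(['0', '1']) -> '2'; non-numeric
# names such as 'left' are simply skipped when picking the next number.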
class Tube:
""" Geometry, boundary conditions, and results for a single tube.
The basic tube geometry is defined by an outer radius, thickness, and
height.
Results are given at fixed times and
on a regular polar grid defined by a number of
r, theta, and z increments. The grid points are then deduced by
linear subdivision in r between the outer radius and the
outer radius - t, 0 to 2 pi, and 0 to the tube height.
Result fields are general and provided by a list of names.
The receiver package uses the metal temperatures, stresses,
mechanical strains, and inelastic strains.
Analysis results can be provided over the full 3D grid (default),
a single 2D plane (identified by a height), or a single 1D line
(identified by a height and a theta position)
Boundary conditions may be provided in two ways, either
as fluid conditions or net heat fluxes. These are defined
in the HeatFluxBC or ConvectionBC objects below.
Args:
outer_radius (float): tube outer radius
thickness (float): tube thickness
height (float): tube height
nr (int): number of radial increments
nt (int): number of circumferential increments
nz (int): number of axial increments
T0 (Optional[float]): initial temperature
page (Optional[bool]): store results on disk if True
"""
def __init__(self, outer_radius, thickness, height, nr, nt, nz,
T0 = 0.0, page = False):
""" Initialize the tube
"""
self.r = outer_radius
self.t = thickness
self.h = height
self.nr = nr
self.nt = nt
self.nz = nz
self.abstraction = "3D"
self.times = []
self.results = {}
self.quadrature_results = {}
self.outer_bc = None
self.inner_bc = None
self.pressure_bc = None
self.T0 = T0
self.page = page
self.page_prefix = ""
def copy_results(self, other):
""" Copy the results fields from one tube to another
Parameters:
other: other tube object
"""
self.results = other.results
self.quadrature_results = other.quadrature_results
def set_paging(self, page, i):
""" Set the value of the page parameter
Parameters:
page: if true store results on disk
i: tube number to use
"""
self.page = page
self.page_prefix = str(i) + "_"
@property
def ndim(self):
""" Number of problem dimensions
Returns:
int: tube dimension
"""
if self.abstraction == "3D":
return 3
elif self.abstraction == "2D":
return 2
elif self.abstraction == "1D":
return 1
else:
raise ValueError("Tube abstraction unknown!")
@property
def dim(self):
""" Actual problem discretization
Returns:
tuple(int): tuple giving the fixed grid discretization
"""
if self.abstraction == "3D":
return (self.nr, self.nt, self.nz)
elif self.abstraction == "2D":
return (self.nr, self.nt, 1)
elif self.abstraction == "1D":
return (self.nr, 1, 1)
else:
raise ValueError("Tube abstraction unknown!")
@property
def mesh(self):
""" Calculate the problem mesh (should only be needed for I/O)
Returns:
Results of np.meshgrid over the problem discretization
"""
r = np.linspace(self.r-self.t, self.r, self.nr)
if self.ndim > 1:
t = np.linspace(0, 2*np.pi, self.nt+1)[:self.nt]
else:
t = [self.angle]
if self.ndim > 2:
z = np.linspace(0, self.h, self.nz)
else:
z = [self.plane]
return np.meshgrid(*[r,t,z], indexing = 'ij')
def element_volumes(self):
""" Calculate the element volumes
Returns:
np.array with each element volume
"""
if self.ndim == 1:
return self._volume1d()
elif self.ndim == 2:
return self._volume2d()
elif self.ndim == 3:
return self._volume3d()
else:
raise ValueError("Internal error: tube dimension is %i" % self.ndim)
def _volume1d(self):
"""
1D volume calculator
"""
r = np.linspace(self.r-self.t, self.r, self.nr)
return np.pi * (r[1:]**2.0 - r[:-1]**2.0) * self.h
def _volume2d(self):
"""
      2D volume calculator
"""
r = np.linspace(self.r-self.t, self.r, self.nr)
t = np.linspace(0, 2*np.pi, self.nt+1)
theta = np.diff(t)
a = np.outer(2*r[:-1], np.sin(theta/2))
b = np.outer(2*r[1:], np.sin(theta/2))
edge = r[1:] - r[:-1]
h = np.sqrt(edge[:,None]**2.0 - ((b-a)/2)**2.0)
base = 0.5*(a+b)*h
return (base * self.h).flatten()
def _volume3d(self):
"""
3D volume calculator
"""
r = np.linspace(self.r-self.t, self.r, self.nr)
t = np.linspace(0, 2*np.pi, self.nt+1)
z = np.linspace(0, self.h, self.nz)
theta = np.diff(t)
a = np.outer(2*r[:-1], np.sin(theta/2))
b = np.outer(2*r[1:], np.sin(theta/2))
edge = r[1:] - r[:-1]
h = np.sqrt(edge[:,None]**2.0 - ((b-a)/2)**2.0)
base = 0.5*(a+b)*h
heights = np.diff(z)
return np.einsum('k,ij', heights, base).flatten()
def write_vtk(self, fname):
""" Write to a VTK file
The tube VTK files are only used for output and
      postprocessing
Args:
fname (string): base filename
"""
writer = writers.VTKWriter(self, fname)
writer.write()
def make_2D(self, height):
""" Abstract the tube as 2D
Reduce to a 2D abstraction by slicing the tube at the
indicated height
Args:
height (float): the height at which to slice
"""
if height < 0.0 or height > self.h:
raise ValueError("2D slice height must be within the tube height")
self.abstraction = "2D"
self.plane = height
def make_1D(self, height, angle):
""" Abstract the tube as 1D
Reduce to a 1D abstraction along a ray given by the provided
height and angle.
Args:
height (float): the height of the ray
angle (float): the angle, in radians
"""
if height < 0.0 or height > self.h:
raise ValueError("Ray height must be within the tube height")
self.abstraction = "1D"
self.plane = height
self.angle = angle
def close(self, other):
""" Check to see if two objects are nearly equal.
Primarily used for testing
Args:
other (Tube): the object to compare against
Returns:
bool: true if the tubes are similar
"""
base = (
np.isclose(self.r, other.r)
and np.isclose(self.t, other.t)
and np.isclose(self.h, other.h)
and (self.nr == other.nr)
and (self.nt == other.nt)
and (self.nz == other.nz)
and (np.allclose(self.times, other.times)))
for name, data in self.results.items():
if name not in other.results:
return False
base = (base and np.allclose(data, other.results[name]))
if self.outer_bc:
if not other.outer_bc:
return False
base = (base and self.outer_bc.close(other.outer_bc))
if self.inner_bc:
if not other.inner_bc:
return False
base = (base and self.inner_bc.close(other.inner_bc))
if self.pressure_bc:
if not other.pressure_bc:
return False
base = (base and self.pressure_bc.close(other.pressure_bc))
base = (base and self.abstraction == other.abstraction)
if self.abstraction == "2D" or self.abstraction == "1D":
base = (base and np.isclose(self.plane, other.plane))
if self.abstraction == "1D":
base = (base and np.isclose(self.angle, other.angle))
base = (base and np.isclose(self.T0, other.T0))
return base
@property
def ntime(self):
""" Number of time steps
Returns:
int: number of time steps
"""
return len(self.times)
def set_times(self, times):
""" Set the times at which data is provided
All results arrays must provide data at these
discrete times.
Args:
times (np.array): time values
"""
for _,res in self.results.items():
if res.shape[0] != len(times):
raise ValueError("Cannot change times to provided values, will be"
" incompatible with existing results")
self.times = times
def add_results(self, name, data):
""" Add a node point result field
Args:
name (str): parameter set name
data (np.array): actual results data
"""
self._check_rdim(data.shape)
self.results[name] = self._setup_memmap(name + "_node", data.shape)
self.results[name][:] = data[:]
def add_blank_results(self, name, shape):
""" Add a blank node point result field
Args:
name (str): parameter set name
shape (tuple): required shape
"""
self._check_rdim(shape)
self.results[name] = self._setup_memmap(name + "_node", shape)
def add_quadrature_results(self, name, data):
""" Add a result at the quadrature points
Args:
name (str): parameter set name
data (np.array): actual results data
"""
if data.shape[0] != self.ntime:
raise ValueError("Quadrature data must have time axis first!")
self.quadrature_results[name] = self._setup_memmap(name + "_quad", data.shape)
self.quadrature_results[name][:] = data[:]
def add_blank_quadrature_results(self, name, shape):
""" Add a blank quadrature point result field
Args:
name (str): parameter set name
shape (tuple): required shape
"""
if shape[0] != self.ntime:
raise ValueError("Quadrature data must have time axis first!")
self.quadrature_results[name] = self._setup_memmap(name + "_quad", shape)
def _setup_memmap(self, name, shape):
""" Map array to disk if required
Args:
name: field name
shape: required shape
"""
if self.page:
return np.memmap(self.page_prefix + name + ".dat", dtype = np.float64,
mode = 'w+', shape = shape)
else:
return np.zeros(shape)
def _check_rdim(self, shape):
""" Verify the dimensions of a results array
Make sure the results array aligns with the correct dimension for the
abstraction
Args:
shape (tuple): input shape
Raises:
ValueError: If the data array shape is not correct for the problem dimensions
"""
if self.abstraction == "3D":
if shape != (self.ntime, self.nr, self.nt, self.nz):
raise ValueError("Data array shape must equal ntime x nr x nt x nz!")
elif self.abstraction == "2D":
if shape != (self.ntime, self.nr, self.nt):
raise ValueError("Data array shape must equal ntime x nr x nt!")
elif self.abstraction == "1D":
if shape != (self.ntime, self.nr):
raise ValueError("Data array shape must equal ntime x nr!")
else:
raise ValueError("Internal error: unknown abstraction type %s" %
self.abstraction)
def set_bc(self, bc, loc):
""" Set the inner or outer heat flux BC
Args:
bc (ThermalBC): boundary condition object
loc (string): location -- either "inner" or "outer" wall
"""
if loc == "inner":
      if not np.isclose(bc.r, self.r - self.t) or not np.isclose(bc.h, self.h):
        raise ValueError("Boundary condition geometry does not match the tube inner surface")
"""
Notes
-----
Important attributes of continuous (order > 0) :class:`Field` and
:class:`SurfaceField` instances:
- `vertex_remap` : `econn[:, :n_vertex] = vertex_remap[conn]`
- `vertex_remap_i` : `conn = vertex_remap_i[econn[:, :n_vertex]]`
where `conn` is the mesh vertex connectivity, `econn` is the
region-local field connectivity.
"""
import time
import numpy as nm
from sfepy.base.base import output, assert_
import fea
from sfepy.discrete.fem.utils import prepare_remap
from sfepy.discrete.common.dof_info import expand_nodes_to_dofs
from sfepy.discrete.fem.global_interp import get_ref_coors
from sfepy.discrete.fem.facets import get_facet_dof_permutations
from sfepy.discrete.fem.fields_base import (FEField, VolumeField, SurfaceField,
H1Mixin)
from sfepy.discrete.fem.extmods.bases import evaluate_in_rc
class H1NodalMixin(H1Mixin):
def _setup_facet_orientations(self):
order = self.approx_order
self.node_desc = self.interp.describe_nodes()
edge_nodes = self.node_desc.edge_nodes
if edge_nodes is not None:
n_fp = self.gel.edges.shape[1]
self.edge_dof_perms = get_facet_dof_permutations(n_fp, self.igs,
order)
face_nodes = self.node_desc.face_nodes
if face_nodes is not None:
n_fp = self.gel.faces.shape[1]
self.face_dof_perms = get_facet_dof_permutations(n_fp, self.igs,
order)
def _setup_edge_dofs(self):
"""
Setup edge DOF connectivity.
"""
if self.node_desc.edge is None:
return 0, None, None
return self._setup_facet_dofs(1, self.node_desc.edge,
self.edge_dof_perms,
self.n_vertex_dof)
def _setup_face_dofs(self):
"""
Setup face DOF connectivity.
"""
if self.node_desc.face is None:
return 0, None, None
return self._setup_facet_dofs(self.domain.shape.tdim - 1,
self.node_desc.face,
self.face_dof_perms,
self.n_vertex_dof + self.n_edge_dof)
def _setup_facet_dofs(self, dim, facet_desc, facet_perms, offset):
"""
Helper function to setup facet DOF connectivity, works for both
edges and faces.
"""
facet_desc = nm.array(facet_desc)
n_dof_per_facet = facet_desc.shape[1]
cmesh = self.domain.cmesh
facets = self.region.entities[dim]
ii = nm.arange(facets.shape[0], dtype=nm.int32)
all_dofs = offset + expand_nodes_to_dofs(ii, n_dof_per_facet)
# Prepare global facet id remapping to field-local numbering.
remap = prepare_remap(facets, cmesh.num[dim])
cconn = self.region.domain.cmesh.get_conn(self.region.tdim, dim)
offs = cconn.offsets
n_f = self.gel.edges.shape[0] if dim == 1 else self.gel.faces.shape[0]
oris = cmesh.get_orientations(dim)
for ig, ap in self.aps.iteritems():
gcells = self.region.get_cells(ig, offset=False)
n_el = gcells.shape[0]
indices = cconn.indices[offs[gcells[0]]:offs[gcells[-1]+1]]
facets_of_cells = remap[indices]
ori = oris[offs[gcells[0]]:offs[gcells[-1]+1]]
perms = facet_perms[ig][ori]
# Define global facet dof numbers.
gdofs = offset + expand_nodes_to_dofs(facets_of_cells,
n_dof_per_facet)
# Elements of facets.
iel = nm.arange(n_el, dtype=nm.int32).repeat(n_f)
ies = nm.tile(nm.arange(n_f, dtype=nm.int32), n_el)
# DOF columns in econn for each facet.
iep = facet_desc[ies]
iaux = nm.arange(gdofs.shape[0], dtype=nm.int32)
ap.econn[iel[:, None], iep] = gdofs[iaux[:, None], perms]
n_dof = n_dof_per_facet * facets.shape[0]
assert_(n_dof ==
|
nm.prod(all_dofs.shape)
|
numpy.prod
|
import skimage.io as io
import matplotlib.pyplot as plt
import numpy as np
from skimage.exposure import histogram
from skimage.transform import probabilistic_hough_line
from matplotlib.pyplot import bar
from skimage.color import *
# Convolution:
from scipy.signal import convolve2d
from scipy import fftpack
import math
from skimage.util import random_noise
from skimage.filters import median
from skimage.feature import canny
from skimage.measure import label
from skimage.color import label2rgb
import cv2
# Edges
from skimage.filters import sobel_h, sobel, sobel_v,roberts, prewitt
# Show the figures / plots inside the notebook
def my_show_images(images,titles=None, row_max=1, dpi=200):
# This function shows image(s) with titles: pass an array of images and an array of associated titles.
# images[0] will be drawn with the title titles[0] if it exists (see the usage sketch after this function).
# You aren't required to understand this function; use it as-is.
n_ims = len(images)
if titles is None: titles = ['(%d)' % i for i in range(1,n_ims + 1)]
fig = plt.figure(dpi=dpi)
n = 1
for image,title in zip(images,titles):
a = fig.add_subplot(math.ceil(n_ims/row_max), row_max, n)
if image.ndim == 2:
plt.gray()
plt.imshow(image)
a.set_title(title)
n += 1
fig.set_size_inches(np.array(fig.get_size_inches()) * n_ims/row_max)
plt.show()
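# Usage sketch (the synthetic arrays below are placeholders, not project data):
#   demo = np.zeros((32, 32), dtype=np.uint8)
#   my_show_images([demo, 255 - demo], titles=['all black', 'all white'], row_max=2)
# draws the two images side by side, each labelled with its title.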
def my_show_hist(img):
img = (img * 255).astype(np.uint8) if img.dtype != np.uint8 else img
hist = np.histogram(img, range(0, 257))
# print(hist)
plt.bar(hist[1][:-1], hist[0])
plt.show()
def my_imread_gray(fname):
img = io.imread(fname)
if len(img.shape) == 2:
img = img
elif img.shape[2] == 4:
img = rgb2gray(rgba2rgb(img))
else:
img = rgb2gray(img)
if np.max(img) <= 1:
return (img*255).astype(np.uint8)
return img.astype(np.uint8)
def my_close(src, kernel):
dilated = cv2.dilate(src, kernel)
return cv2.erode(dilated, kernel)
def my_open(src, kernel):
eroded = cv2.erode(src, kernel)
return cv2.dilate(eroded, kernel)
def get_distance_between_staves_and_staff_thickness(img_binary_bg_white):
img_height = img_binary_bg_white.shape[0]
flattened = img_binary_bg_white.T.flatten()
flattened_indices = np.arange(0, flattened.shape[0], 1, np.uint32)
flattened[flattened_indices % img_height == 0] = False # Separate each column with a black pixel
image, contours_distance, hierarchy = cv2.findContours((flattened*255).astype(np.uint8),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
flattened = ~flattened # For thickness
flattened[flattened_indices % img_height == 0] = False # Separate each column with a black pixel
image, contours_thickness, hierarchy = cv2.findContours((flattened*255).astype(np.uint8),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
distance_between_staves = most_frequent_white_length(img_height, contours_distance)
staff_thickness = most_frequent_white_length(img_height, contours_thickness)
return distance_between_staves, staff_thickness
def most_frequent_white_length(img_height, contours):
# We refer to length as the vertical distance between 2 black pixels (see the numpy sketch after this function)
length_freq = np.zeros((img_height), dtype=np.uint32) # No contour can be taller than img_height because we separated each column with a black pixel
all_possible_lengths = np.arange(0, img_height, 1, dtype=np.uint32)
for i in contours:
contour_y = i.T[1]
length = contour_y[0][1] - contour_y[0][0] if len(contour_y[0]) == 2 else 1
length_freq[length] += 1
# plt.bar(all_possible_lengths, length_freq, width=3)
return all_possible_lengths[length_freq == length_freq.max()][0]
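# Illustrative sketch (an assumed example, not part of the original pipeline): the most
# frequent vertical white run length can also be checked with plain numpy on a single
# binary column, which mirrors what the contour-based measurement above computes.
_demo_col = np.array([1, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=np.int8)  # one image column, 1 = white
_demo_edges = np.flatnonzero(np.diff(np.concatenate(([0], _demo_col, [0]))))
_demo_run_lengths = _demo_edges[1::2] - _demo_edges[0::2]  # white run lengths: [2, 3, 3]
_demo_most_frequent = np.bincount(_demo_run_lengths).argmax()  # -> 3, cf. most_frequent_white_length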
def get_line_separation_kernel_size_from_distance_between_staves(distance_between_staves):
if distance_between_staves % 2 == 0:
return distance_between_staves + 9
else:
return distance_between_staves + 8
def get_rotation_angle(img):
image = img
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges, threshold=50, line_length=50,
line_gap=30)
_range = 50 if len(lines) >= 50 else len(lines)
angles = []
for i in range(_range):
p1,p2 = lines[i]
angles.append(math.degrees(math.atan2(p2[1]-p1[1],p2[0]-p1[0])))
if angles[i] < 0 :
angles[i] = -1 * angles[i]
if (angles[i] > 90):
angles[i] = -1 * (180-angles[i])
angle = np.median(
|
np.array(angles)
|
numpy.array
|
"""
Created on Thu Aug 13 08:20:11 2020
@author: zlabe
"""
"""
Script plots composites for large ensemble data (monthly) using
several variables
Author : <NAME>
Date : 13 August 2020
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
### Set preliminaries
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
directoryfigure = '/Users/zlabe/Desktop/ExtremeEvents_v1/Composites/LENS/'
reg_name = 'Globe'
dataset = 'lens'
rm_ensemble_mean = False
variq = ['T2M']
monthlychoice = 'annual'
def read_primary_dataset(variq,dataset,lat_bounds,lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
for i in range(len(variq)):
### Read in data for selected region
lat_bounds,lon_bounds = UT.regions(reg_name)
dataall,lats,lons = read_primary_dataset(variq[i],dataset,
lat_bounds,lon_bounds)
### Remove ensemble mean
if rm_ensemble_mean == True:
data= dSS.remove_ensemble_mean(dataall)
print('*Removed ensemble mean*')
elif rm_ensemble_mean == False:
data = dataall
### Calculate ensemble mean
meandata = np.nanmean(data,axis=0)
del data #save storage
### Composite over selected period (x2)
if monthlychoice == 'DJF':
years = np.arange(meandata.shape[0]) + 1921
else:
years = np.arange(meandata.shape[0]) + 1920
length = years.shape[0]//2
historical = meandata[:length,:,:]
future = meandata[length:,:,:]
### Average over composites for plotting
historicalm = np.nanmean(historical,axis=0)
futurem = np.nanmean(future,axis=0)
### Calculate significance
pruns = UT.calc_FDR_ttest(future[:,:,:],historical[:,:,:],0.05) #FDR
###########################################################################
###########################################################################
###########################################################################
### Begin plots!!!
fig = plt.figure()
### Select graphing preliminaries
if rm_ensemble_mean == True:
if variq[i] == 'T2M':
label = r'\textbf{T2M [$\bf{^{\circ}}$C]}'
cmap = cm.cubehelix3_16_r.mpl_colormap
elif variq[i] == 'SLP':
label = r'\textbf{SLP [hPa]}'
cmap = cm.cubehelix3_16_r.mpl_colormap
elif variq[i] == 'U700':
label = r'\textbf{U700 [m/s]}'
cmap = cm.cubehelix3_16_r.mpl_colormap
limit = np.linspace(futurem.min(),futurem.max(),300)
barlim = np.linspace(futurem.min(),futurem.max(),2)
elif rm_ensemble_mean == False:
if variq[i] == 'T2M':
label = r'\textbf{T2M [$\bf{^{\circ}}$C]}'
cmap = plt.cm.twilight
limit = np.arange(-35,35.1,0.5)
barlim = np.arange(-35,36,35)
elif variq[i] == 'SLP':
label = r'\textbf{SLP [hPa]}'
cmap = plt.cm.cividis
limit = np.arange(985,1035.1,2)
barlim = np.arange(985,1036,10)
elif variq[i] == 'U700':
label = r'\textbf{U700 [m/s]}'
cmap = cm.classic_16.mpl_colormap
limit =
|
np.arange(-10,20.1,0.5)
|
numpy.arange
|
from __future__ import print_function
'''
This module should be organized as follows:
Main function:
chi_estimate() = returns chi_n, chi_b
- calls:
wealth.get_wealth_data() - returns data moments on wealth distribution
labor.labor_data_moments() - returns data moments on labor supply
minstat() - returns min of statistical objective function
model_moments() - returns model moments
SS.run_SS() - return SS distributions
'''
'''
------------------------------------------------------------------------
Last updated: 7/27/2016
Uses a simulated method of moments to calibrate the chi_n and chi_b
parameters of OG-USA.
This py-file calls the following other file(s):
wealth.get_wealth_data()
labor.labor_data_moments()
SS.run_SS
This py-file creates the following other file(s): None
------------------------------------------------------------------------
'''
import numpy as np
import scipy.optimize as opt
import pandas as pd
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from . import wealth
from . import labor
from . import SS
from . import utils
def chi_n_func(s, a0, a1, a2, a3, a4):
chi_n = a0 + a1 * s + a2 * s ** 2 + a3 * s ** 3 + a4 * s ** 4
return chi_n
def chebyshev_func(x, a0, a1, a2, a3, a4):
func = np.polynomial.chebyshev.chebval(x, [a0, a1, a2, a3, a4])
return func
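# A small evaluation sketch (the coefficients below are placeholders, not calibrated values):
# both helpers map an age index s to chi_n; chebyshev_func evaluates the same five
# coefficients in the Chebyshev basis instead of the power basis, e.g.
#   s = np.linspace(20, 80, 61)
#   chi_power = chi_n_func(s, 1.0, 0.1, 0.01, 0.0, 0.0)
#   chi_cheb = chebyshev_func(s, 1.0, 0.1, 0.01, 0.0, 0.0)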
def chi_estimate(p, client=None):
'''
--------------------------------------------------------------------
This function calls others to obtain the data momements and then
runs the simulated method of moments estimation by calling the
minimization routine.
INPUTS:
income_tax_parameters = length 4 tuple, (analytical_mtrs, etr_params, mtrx_params, mtry_params)
ss_parameters = length 21 tuple, (J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
g_n_ss, tau_payroll, retire, mean_income_data,\
h_wealth, p_wealth, m_wealth, b_ellipse, upsilon)
iterative_params = [2,] vector, vector with max iterations and tolerance
for SS solution
chi_guesses = [J+S,] vector, initial guesses of chi_b and chi_n stacked together
baseline_dir = string, path where baseline results located
OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
wealth.compute_wealth_moments()
labor.labor_data_moments()
minstat()
OBJECTS CREATED WITHIN FUNCTION:
wealth_moments = [J+2,] array, wealth moments from data
labor_moments = [S,] array, labor moments from data
data_moments = [J+2+S,] array, wealth and labor moments stacked
bnds = [S+J,] array, bounds for parameter estimates
chi_guesses_flat = [J+S,] vector, initial guesses of chi_b and chi_n stacked
min_arg = length 6 tuple, variables needed for minimizer
est_output = dictionary, output from minimizer
chi_params = [J+S,] vector, parameters estimates for chi_b and chi_n stacked
objective_func_min = scalar, minimum of statistical objective function
OUTPUT:
./baseline_dir/Calibration/chi_estimation.pkl
RETURNS: chi_params
--------------------------------------------------------------------
'''
baseline_dir="./OUTPUT"
#chi_b_guess = np.ones(80)
# a0 = 5.38312524e+01
# a1 = -1.55746248e+00
# a2 = 1.77689237e-02
# a3 = -8.04751667e-06
# a4 = 5.65432019e-08
""" Kei's Vals
a0 = 170
a1 = -2.19154735e+00
a2 = -2.22817460e-02
a3 = 4.49993507e-04
a4 = -1.34197054e-06
"""
""" Adam's Vals 1
a0 = 2.59572155e+02
a1 = -2.35122641e+01
a2 = 4.27581467e-01
a3 = -3.40808933e-03
a4 = 1.00404321e-05
"""
a0 = 1.16807470e+03#5.19144310e+02
a1 = -1.05805189e+02#-4.70245283e+01
a2 = 1.92411660e+00#8.55162933e-01
a3 = -1.53364020e-02#-6.81617866e-03
a4 = 4.51819445e-05#2.00808642e-05
sixty_plus_chi = 10000
params_init = np.array([a0, a1, a2, a3, a4])
# Generate labor data moments
labor_hours = np.array([167, 165, 165, 165, 165, 166, 165, 165, 164, 166, 164])
labor_part_rate = np.array([0.69, 0.849, 0.849, 0.847, 0.847, 0.859, 0.859, 0.709, 0.709, 0.212, 0.212])
employ_rate =
|
np.array([0.937, 0.954, 0.954, 0.966, 0.966, 0.97, 0.97, 0.968, 0.968, 0.978, 0.978])
|
numpy.array
|
import os
import time
from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from typing import List, Dict, Tuple, Union
import loguru
import numpy as np
import pandas as pd
from nilmtk import MeterGroup
from pandas import DataFrame
from sklearn.base import ClassifierMixin
from sklearn.metrics import f1_score, classification_report
from sklearn.model_selection import cross_val_score
from datasources import labels_factory
from datasources.datasource import Datasource, SITE_METER
from nilmlab.lab_exceptions import NoSiteMeterException
from utils.logger import debug, info, timing
class TransformerType(Enum):
# TODO: A clearer TransformerType is needed.
raw = 1
transform = 2
approximate = 3
transform_and_approximate = 4
class TimeSeriesTransformer(ABC):
def __init__(self):
super().__init__()
@abstractmethod
def transform(self, series: np.ndarray, sample_period: int = 6) -> list:
"""
An interface to transform a given time series into another representation.
It unifies different transformations: it either transforms a time series without dimensionality reduction,
or transforms a whole time series and reconstructs it using the underlying time series representation.
Args:
series (ndarray): A time series to be transformed according to the algorithm.
sample_period (int): The sampling frequency.
Returns:
Returns the transformed time series as a list.
"""
pass
@abstractmethod
def approximate(self, series: np.ndarray, window: int = 1, should_fit: bool = True) -> np.ndarray:
"""
An interface to transform a given time series into another representation.
In most transformers it transforms each segment of a time series, because the given time series is in segments.
TODO: should_fit is used only by a few transformers. Move it to their constructors.
Args:
series (ndarray): A time series to be transformed according to the algorithm.
window (int): The size of the sub-segments of the given time series.
This is not supported by all algorithms.
should_fit (bool): Whether the algorithm should first fit to the data, executing some preprocessing steps.
Returns:
Returns the transformed time series as ndarray.
"""
pass
@abstractmethod
def reconstruct(self, series: np.ndarray) -> list:
"""
It reconstructs the transformed time series.
Args:
series (ndarray): A transformed time series.
Returns:
The reconstructed time series as a list of values.
"""
pass
@abstractmethod
def get_type(self) -> TransformerType:
"""
Returns the type of the transformer, which indicates which functions the underlying algorithm supports.
Returns: A TransformerType.
"""
pass
@abstractmethod
def set_type(self, method_type: TransformerType):
"""
Sets the type of the transformer, which indicates which functions the underlying algorithm supports.
"""
pass
@abstractmethod
def get_name(self):
pass
def uses_labels(self):
return False
def bucketize_data(data: np.ndarray, window: int) -> np.ndarray:
"""
It segments the time series, grouping it into batches. Each segment has length equal to the window (see the shape sketch after this function).
Args:
data (ndarray): The given time series.
window (int): The size of the segments.
Returns:
The segmented series as an ndarray with the window along the second axis.
"""
debug('bucketize_data: Initial shape {}'.format(data.shape))
n_dims = len(data.shape)
if n_dims == 1:
seq_in_batches = np.reshape(data, (int(len(data) / window), window))
elif n_dims == 2:
seq_in_batches = np.reshape(data, (int(len(data) / window), window, data.shape[1]))
else:
raise Exception('Invalid number of dimensions {}.'.format(n_dims))
debug('bucketize_data: Shape in batches: {}'.format(seq_in_batches.shape))
return seq_in_batches
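# Shape sketch (illustrative, assuming the series length is divisible by the window): a
# 1-D series of length 12 with window=4 comes back with shape (3, 4); a 2-D series of
# shape (12, 2) comes back with shape (3, 4, 2), e.g.
#   bucketize_data(np.arange(12), window=4).shape == (3, 4)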
def bucketize_target(target: np.ndarray, window: int) -> np.ndarray:
"""
Creates target data according to the length of the window of the segmented data.
Args:
target (ndarray): Target data with the original size.
window (int): The length of window that will be used to create the corresponding labels.
Returns:
The target data for the new bucketized time series.
"""
target_in_batches = bucketize_data(target, window)
any_multilabel =
|
np.any(target_in_batches, axis=1)
|
numpy.any
|
import numpy as np
class Thruster_model(object):
"""
Thruster model for spacecraft computes force and torque in the body frame and converts
to inertial frame
Commanded thrust is clipped to lie between zero and one, and then scaled based off of
thrust capability
ellipsoid c = 1m, a = b = 2m
"""
def __init__(self):
# dvec body position
config = [
[ 0.0, 0.0, 1.0, 0.0, -2.0, -1.0 ], # rotate around X (roll)
[ 0.0, 0.0, 1.0, 0.0, 2.0, -1.0 ], # rotate around X (roll)
[ 0.0, 0.0, 1.0, -2.0, 0.0, -1.0 ], # rotate around Y (pitch)
[ 0.0, 0.0, 1.0, 2.0, 0.0, -1.0 ] # rotate around Y (pitch)
]
# No yaw: note that yaw rotates around the z-axis, which we don't want (or need) to do
config =
|
np.asarray(config)
|
numpy.asarray
|
import math
import numpy as np
def calcTf(nAllTermsInDoc, nTermInDoc):
"""
# too unstable (most values are near to each other for all docs)
return math.log10(
(float(nTermInDoc) / float(nAllTermsInDoc))
+ 1
)
"""
return float(nTermInDoc) / float(nAllTermsInDoc)
def calcIdf(nAllDocuments, nDocumentsContainingTerm):
"""
return math.log10(
(float(nAllDocuments) / float(nDocumentsContainingTerm))
+ 1
)
"""
return float(nAllDocuments) / float(nDocumentsContainingTerm)
def calcWeight(tf, idf):
return tf * idf
def missingTermWeight():
return 0
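# Worked example (numbers chosen purely for illustration): a term occurring 3 times in a
# 100-term document has tf = 3/100 = 0.03; if 2 of 10 documents contain the term,
# idf = 10/2 = 5.0, so its tf-idf weight is 0.03 * 5.0 = 0.15.
_example_weight = calcWeight(calcTf(100, 3), calcIdf(10, 2))  # -> 0.15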
def cosineSimilarity(docWeights, queryWeights):
cache = NormCache()
dj = np.array(docWeights)
q =
|
np.array(queryWeights)
|
numpy.array
|
# Project: VUT FIT SNT Project - Traveling Umpire Problem
# Author: <NAME> <<EMAIL>>
# Year: 2020
# Description: A definition of a class that represents the
# Traveling Umpire Problem.
from inp import parse_inp_file
from out import print_solution
from datetime import datetime
from numpy import ndarray, arange, tile, where, zeros, int32, unique, roll
from numpy.random import choice
class TimeLimitException(Exception):
""" An exception that represents the time limit overrun. """
pass
class Tup:
""" A class that represents the Traveling Umpire Problem. """
PENALTY = 1_000 # implicit value of a penalty
def __init__(self, inp_file: str, d1: int, d2: int, name: str,
time_limit: int) -> None:
"""
Constructs the Traveling Umpire Problem.
:param inp_file: A name of an input file.
:param d1: The parameter d1 for the 4th constraint.
:param d2: The parameter d2 for the 5th constraint.
:param name: A name of an instance of the problem.
:param time_limit: A time limit of the computation in minutes.
"""
super().__init__()
self.__name = name
self.__teams, self.__dist, opp = parse_inp_file(inp_file)
self.__umps = int(self.__teams / 2)
self.__schedule = self.__build_schedule(opp)
self.__rounds = self.__schedule.shape[0]
self.__q1 = self.umps - d1
self.__q2 = int(self.umps / 2) - d2
self.__penalty = self.umps * self.PENALTY
self.solution = self.init_solution(self.rounds, self.umps)
self.__backtracked = [True] + [False] * (self.rounds - 1)
self.__time_limit = time_limit
self.__time = datetime.now()
@property
def umps(self) -> int:
"""
Returns the number of umpires.
:return: The number of umpires.
"""
return self.__umps
@property
def rounds(self) -> int:
"""
Returns the number of rounds.
:return: The number of rounds.
"""
return self.__rounds
@property
def q1(self) -> int:
"""
Returns the parameter q1 for the 4th constraint.
:return: The parameter q1 for the 4th constraint.
"""
return self.__q1
@property
def q2(self) -> int:
"""
Returns the parameter q2 for the 5th constraint.
:return: The parameter q2 for the 5th constraint.
"""
return self.__q2
@property
def penalty(self) -> int:
"""
Returns a defined value of a penalty.
:return: A defined value of a penalty.
"""
return self.__penalty
@property
def solution(self) -> ndarray:
"""
Returns a (partial) solution of the problem.
:return: A (partial) solution of the problem.
"""
return self.__solution
@solution.setter
def solution(self, solution: ndarray) -> None:
"""
Updates a solution of the problem.
:param solution: A new solution of the problem.
"""
self.__solution = solution
@property
def backtracked(self) -> list:
"""
Returns a list of flags with realised backtracks in single rounds.
:return: A list of flags with realised backtracks in single rounds.
"""
return self.__backtracked
def time_limit_check(self) -> None:
"""
Raises an exception if a time limit is exceeded.
:raises: TimeLimitException if a time limit is exceeded.
"""
duration = (datetime.now() - self.__time).total_seconds() // 60
if duration >= self.__time_limit:
raise TimeLimitException
@staticmethod
def __build_schedule(opp: ndarray) -> ndarray:
"""
Builds a schedule of the tournament.
:param opp: An opponents matrix.
:return: A built schedule matrix of the tournament.
"""
rounds, teams = opp.shape
games = int(teams / 2)
schedule_shape = rounds, games
team_indexes = tile(arange(teams), (rounds, 1))
home_games = (team_indexes[where(opp > 0)] + 1).reshape(schedule_shape)
out_games = opp[opp > 0].reshape(schedule_shape)
schedule = zeros((rounds, games, 2), dtype=int32)
schedule[:, :, 0] = home_games
schedule[:, :, 1] = out_games
return schedule
@staticmethod
def init_solution(rounds: int, umps: int) -> ndarray:
"""
Returns an initial solution (a solution of the first round).
:param rounds: The number of rounds.
:param umps: The number of umpires.
:return: An initial solution (a solution of the first round).
"""
solution = zeros((rounds, umps), dtype=int32)
ump_indexes = arange(umps)
for r in range(rounds):
solution[r] = choice(ump_indexes, size=umps, replace=False) + 1
return solution
def print_solution(self) -> None:
"""
Prints the solution.
"""
r = self.rounds - 1
constraints = \
self.constraint3(self.solution, r) \
+ self.constraint4(self.solution, r) \
+ self.constraint5(self.solution, r)
feasibility = 'Infeasible' if constraints.sum() else 'Feasible'
print(f'\n{feasibility} solution:')
solution = zeros(self.solution.shape, dtype=int32)
game_numbers =
|
arange(self.umps)
|
numpy.arange
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 20:39:53 2018
@author: bmoseley
"""
# This module defines various generic helper functions in pytorch.
import torch
import numpy as np
get_weights = lambda model: [p.detach().cpu().numpy()[:] for p in model.parameters()]
def get_weights_update_percent(weights1, weights2):
assert len(weights1) == len(weights2)
N = sum([w.size for w in weights1])
mean, std, sum_all = [],[], 0
for i in range(len(weights1)):
w1, w2 = weights1[i], weights2[i]
d = np.abs((w2 - w1)/np.mean(np.abs(w1)))
mean.append(np.mean(d))
std.append(np.std(d))
sum_all += np.sum(d)
return mean, std, sum_all/N
if __name__ == "__main__":
torch.manual_seed(123)
model = torch.nn.Conv2d(2,2,3)
# get weights
weights = get_weights(model)
for x in weights: print(x.size)
for x in weights[:5]: print(x.flatten())
w1 = [np.arange(-10,10)]
w2 = [
|
np.arange(-9,11)
|
numpy.arange
|
import os
import time
import pandas as pd
import numpy as np
import functools
from functools import reduce
def time_pass(func):
@functools.wraps(func)
def wrapper(*args, **kw):
time_begin = time.time()
result = func(*args, **kw)
time_stop = time.time()
time_passed = time_stop - time_begin
minutes, seconds = divmod(time_passed, 60)
hours, minutes = divmod(minutes, 60)
print('%s: %s:%s:%s' % (func.__name__, int(hours), int(minutes), int(seconds)))
return result
return wrapper
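# Usage sketch (slow_sum is a hypothetical function, shown only for illustration): any
# decorated function prints its wall-clock run time as hours:minutes:seconds on return.
#   @time_pass
#   def slow_sum(n):
#       return sum(range(n))
#   slow_sum(10 ** 7)   # prints something like "slow_sum: 0:0:1"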
@time_pass
def complete_data(the_dat_edge, the_dat_app, the_input_path):
"""
Read the remaining data chunks and append them to the data read earlier.
"""
def read_big_table(path):
reader = pd.read_table(path, header=None, chunksize=10000)
data = pd.concat(reader, axis=0, ignore_index=True)
return data
def read_edge(filename):  # define a reader function to load the split data files in batches
tmp = read_big_table(os.path.join(the_input_path, "open_data/dat_edge/%s" % filename))
tmp.columns = ['from_id', 'to_id', 'info']
return tmp
dat_edge_names = ['dat_edge_%s' % str(x) for x in list(range(2, 12))]
dat_edge_left = reduce(lambda x, y: x.append(y),
(read_edge(filename) for filename in dat_edge_names))
def read_app(filename):  # define a reader function to load the split data files in batches
tmp = read_big_table(os.path.join(the_input_path, "open_data/dat_app/%s" % filename))
tmp.columns = ['id', 'apps']
return tmp
dat_app_names = ['dat_app_%s' % str(x) for x in list(range(2, 8))]
dat_app_left = reduce(lambda x, y: x.append(y),
(read_app(filename) for filename in dat_app_names))
dat_edge_1 = the_dat_edge.append(dat_edge_left)  # merge the first chunk with the remaining chunks
dat_app_1 = the_dat_app.append(dat_app_left)  # merge the first chunk with the remaining chunks
return dat_edge_1, dat_app_1
@time_pass
def dummy_symbol(the_dat_symbol):
"""
1. Collect all possible first-level categories in dat_symbol into all_first.
2. For each id, check which entries of all_first appear in its 'symbol' column, giving a 0-1 vector.
3. Do the same for the first-level/second-level combinations; handling the second level alone seems unnecessary.
(See the input/output sketch after this function.)
"""
def get_first(string):
f_s = string.split(',')
first = set(list(map(lambda x: x.split('_')[0], f_s)))
return first
def get_second(string):
f_s = string.split(',')
second = set(list(map(lambda x: x.split('_')[1], f_s)))
return second
def get_both(string):
f_s = string.split(',')
return set(f_s)
def is_in_first(string):
f_s = string.split(',')
first = set(list(map(lambda x: x.split('_')[0], f_s)))
is_in = list(map(lambda x: x in first, all_first))
return is_in
def is_in_second(string):
f_s = string.split(',')
second = set(list(map(lambda x: x.split('_')[1], f_s)))
is_in = list(map(lambda x: x in second, all_second))
return is_in
def is_in_both(string):
f_s = set(string.split(','))
is_in = list(map(lambda x: x in f_s, all_both))
return is_in
tmp = the_dat_symbol['symbol'].unique()
# collect all first-level categories and all first+second-level combinations
all_first = reduce(lambda x, y: x.union(y),
map(get_first, tmp))
all_second = reduce(lambda x, y: x.union(y),
map(get_second, tmp))
all_both = reduce(lambda x, y: x.union(y),
map(get_both, tmp))
# build the 0-1 vector for each id and store it as a DataFrame
in_first_0 = pd.DataFrame(list(map(is_in_first, the_dat_symbol['symbol'])),
columns=all_first)
in_second_0 = pd.DataFrame(list(map(is_in_second, the_dat_symbol['symbol'])),
columns=all_second)
in_both_0 = pd.DataFrame(list(map(is_in_both, the_dat_symbol['symbol'])),
columns=all_both)
in_first_1 = pd.concat([the_dat_symbol[['id']], in_first_0], axis=1) + 0
in_second_1 = pd.concat([the_dat_symbol[['id']], in_second_0], axis=1) + 0
in_both_1 = pd.concat([the_dat_symbol[['id']], in_both_0], axis=1) + 0
return in_first_1, in_second_1, in_both_1
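# Input/output sketch (values invented for illustration): for a row whose 'symbol' value
# is 'a_1,b_2', the first-level frame gets 1 under columns 'a' and 'b', the second-level
# frame gets 1 under '1' and '2', and the combined frame gets 1 under 'a_1' and 'b_2';
# every other indicator column stays 0.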
@time_pass
def deal_dat_edge(data_all):
"""
1. Clean up dat_edge; running dat_edge.head(15) shows that rows like row 10 must be separated from the rest,
2. splitting it into dat_edge_single and dat_edge_multi.
3. Then convert dat_edge_multi into the same format as dat_edge_single, called dat_edge_multi_new.
4. Then concatenate the two into dat_edge_new.
5. Finally, dat_edge_split_2 splits info into three parts: ['date', 'times', 'weight'].
"""
length = list(map(len, map(lambda x: x.split(','), data_all['info'])))
dat_edge_single = data_all[np.array(length) == 1]
dat_edge_multi = data_all[np.array(length) > 1]
def dat_edge_split(i):
i_info = dat_edge_multi.iloc[i]
string = i_info['info']
s = string.split(',')
result = pd.DataFrame({'info': s,
'from_id': [i_info['from_id']] * len(s),
'to_id': [i_info['to_id']] * len(s),
'id': [i_info['id']] * len(s)})
return result[['id', 'from_id', 'to_id', 'info']]
all_df = map(dat_edge_split, range(len(dat_edge_multi)))
dat_edge_multi_new = pd.concat(all_df, axis=0, ignore_index=True)  # relatively slow
dat_edge_new = pd.concat([dat_edge_single, dat_edge_multi_new], axis=0, ignore_index=True)
# dat_edge_new = dat_edge_single.append(dat_edge_multi_new, ignore_index=True)
@time_pass
def dat_edge_split_2(data):
def split(string):
date, left = string.split(':')
times, weight = left.split('_')
return date, times, weight
info_df = pd.DataFrame(list(map(split, data['info'])),
columns=['date', 'times', 'weight'])
data_new_2 = pd.concat([data[['id', 'from_id', 'to_id']], info_df], axis=1)
return data_new_2
dat_edge_new_2 = dat_edge_split_2(dat_edge_new)
return dat_edge_new_2
@time_pass
def deal_edge(the_sample_train, the_dat_edge):
"""
Extract each user's "outgoing" features: vector length, sum of times, median/min/max of times,
sum of weight, and median/min/max of weight -- nine features describing the outgoing side.
"""
col_names = (['length', 'unique_count', 'times_sum', 'weight_sum']
+ ['dup_ratio_left', 'dup_ratio_1', 'dup_ratio_2', 'dup_ratio_3', 'dup_ratio_4', 'dup_ratio_5']
+ ['times_left', 'times_1', 'times_2', 'times_3', 'times_4', 'times_5',
'times_6', 'times_7', 'times_8', 'times_9', 'times_10']
+ ['times_min', 'times_25', 'times_median', 'times_75', 'times_max']
+ ['weight_min', 'weight_25', 'weight_median', 'weight_75', 'weight_max']
+ ['times_up_out_ratio', 'times_low_out_ratio']
+ ['weight_up_out_ratio', 'weight_low_out_ratio']
+ ['time_sign_trend', 'time_abs', 'weight_sign_trend', 'weight_abs']
+ ['times_2017_11', 'times_2017_12', 'times_2017_13']
+ ['weight_2017_11', 'weight_2017_12', 'weight_2017_13']
+ ['date_unique_count', 'date_min', 'date_max', 'days_gap']
+ ['latest_times', 'latest_peoples', 'latest_weights', 'multi_ratio'])
sample_dat_edge_from = pd.merge(the_sample_train, the_dat_edge,
left_on='id', right_on='from_id',
how='inner')
dat_edge_from = deal_dat_edge(sample_dat_edge_from)
dat_edge_from['times'] = list(map(int, dat_edge_from['times']))
dat_edge_from['weight'] = list(map(float, dat_edge_from['weight']))
unique_id_from = np.unique(dat_edge_from['id'])
feature_9_1 = list(map(lambda x: cal_9_feature(x, dat_edge_from, 'to_id'), unique_id_from))
df_feature_9_1 = pd.DataFrame(feature_9_1, columns=['out_%s' % x for x in col_names])
df_feature_9_1['id'] = unique_id_from
# extract each user's "incoming" features; analogous to the above, nine "incoming" features
sample_dat_edge_to = pd.merge(the_sample_train, the_dat_edge,
left_on='id', right_on='to_id',
how='inner')
dat_edge_to = deal_dat_edge(sample_dat_edge_to)
dat_edge_to['times'] = list(map(int, dat_edge_to['times']))
dat_edge_to['weight'] = list(map(float, dat_edge_to['weight']))
unique_id_to = np.unique(dat_edge_to['id'])
feature_9_2 = list(map(lambda x: cal_9_feature(x, dat_edge_to, 'from_id'), unique_id_to))
df_feature_9_2 = pd.DataFrame(feature_9_2, columns=['in_%s' % x for x in col_names])
df_feature_9_2['id'] = unique_id_to
unique_id_both = list(set(unique_id_from).union(set(unique_id_to)))
feature_9_3 = list(map(lambda x: cal_both(x, dat_edge_from, dat_edge_to), unique_id_both))
df_feature_9_3 = pd.DataFrame(feature_9_3, columns=['both_%s' % x for x in col_names])
df_feature_9_3['id'] = unique_id_both
# next, merge df_feature_9_1, df_feature_9_2 and df_feature_9_3 side by side,
# then left-join the result onto sample_train
the_df_feature_18 = reduce(lambda x, y: pd.merge(x, y, on='id', how='outer'),
[df_feature_9_1, df_feature_9_2, df_feature_9_3])
the_df_feature_18['net_in'] = the_df_feature_18['in_weight_sum'] - the_df_feature_18['out_weight_sum']
the_df_feature_18['out_unique_ratio'] = the_df_feature_18['out_unique_count']/the_df_feature_18['out_length']
the_df_feature_18['in_unique_ratio'] = the_df_feature_18['in_unique_count'] / the_df_feature_18['in_length']
the_df_feature_18['out_longer_5'] = (the_df_feature_18['out_length'] > 5) + 0
the_df_feature_18['out_longer_10'] = (the_df_feature_18['out_length'] > 10) + 0
the_df_feature_18['in_longer_5'] = (the_df_feature_18['in_length'] > 5) + 0
the_df_feature_18['in_longer_10'] = (the_df_feature_18['in_length'] > 10) + 0
the_df_feature_18['both_longer_10'] = (the_df_feature_18['both_length'] > 10) + 0
the_df_feature_18['both_longer_20'] = (the_df_feature_18['both_length'] > 20) + 0
# the features below were added after the video walkthrough confirmed the data are call records
# first compute the total number of contacts and the total call duration
the_df_feature_18['sum_degree'] = the_df_feature_18['out_unique_count'] + the_df_feature_18['in_unique_count']
the_df_feature_18['sum_weight'] = the_df_feature_18['out_weight_sum'] + the_df_feature_18['in_weight_sum']
# incoming, outgoing, and total closeness
the_df_feature_18['out_closeness'] = the_df_feature_18['out_weight_sum']/the_df_feature_18['out_unique_count']
the_df_feature_18['in_closeness'] = the_df_feature_18['in_weight_sum'] / the_df_feature_18['in_unique_count']
the_df_feature_18['sum_closeness'] = the_df_feature_18['sum_weight'] / the_df_feature_18['sum_degree']
return the_df_feature_18
@time_pass
def get_apps_dummy(data):
"""
One-hot encode the apps each user has installed in dat_app:
1. first collect all apps into all_apps,
2. then build a 0-1 vector of length len(all_apps) for each user.
"""
all_apps = set()
for string in data['apps']:
apps = string.split(',')
all_apps = all_apps.union(set(apps))
all_apps = list(all_apps)
def is_in_all_apps(x):
xs = x.split(',')
xs = set(xs)
app_vec = list(map(lambda app: app in xs, all_apps))
return app_vec
apps_dummy_0 = list(map(is_in_all_apps, data['apps']))
apps_dummy_1 = pd.DataFrame(apps_dummy_0, columns=all_apps)
apps_dummy_2 = pd.concat([data[['id']], apps_dummy_1], axis=1)
return apps_dummy_2
def outlier_ratio(the_series):
"""利用箱线图来检测异常率"""
the_median = np.median(the_series)
q1 = np.percentile(the_series, 20)
q3 = np.percentile(the_series, 70)
iqr = q3 - q1
up_bound = the_median + 1.5*iqr
low_bound = the_median - 1.5*iqr
up_out_count = sum(the_series > up_bound)
low_out_count = sum(the_series < low_bound)
the_up_out_ratio = up_out_count/len(the_series)
the_low_out_ratio = low_out_count/len(the_series)
return the_up_out_ratio, the_low_out_ratio
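# Worked example (synthetic numbers): for np.array([1, 2, 2, 3, 3, 3, 4, 4, 5, 100]) the
# median is 3, q1 = 2 and q3 = 4, so the upper bound is 3 + 1.5 * 2 = 6; only the value
# 100 exceeds it, giving outlier_ratio(...) == (0.1, 0.0). Note that the bounds here are
# taken around the median rather than around q1/q3 as in a classic boxplot.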
def cal_dup_ratio(series, n):
"""计算一个人给另一个人不同月份转的频次"""
the_dup_ratio = np.zeros(6)
tmp = pd.Series(series.value_counts().values).value_counts()
for j in tmp.index:
if j > 5:
continue
else:
the_dup_ratio[j] = tmp[j] / n
the_dup_ratio[0] = 1 -
|
np.sum(the_dup_ratio)
|
numpy.sum
|
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import argparse, uuid, time
from timeit import default_timer as timer
from skimage import io, transform, morphology
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import PIL
PIL.Image.MAX_IMAGE_PIXELS = None
import warnings
warnings.simplefilter("ignore", UserWarning)
import pydaily
from pyslide import patch
from segnet import pspnet, UNet
from utils import wsi_stride_splitting
from patch_loader import SegPatchDataset, ClsPatchDataset
from wsinet import WsiNet
def load_seg_model(args):
if args.seg_model_name == "UNet":
seg_model = UNet(n_channels=args.in_channels, n_classes=args.seg_class_num)
elif args.seg_model_name == "PSP":
seg_model = pspnet.PSPNet(n_classes=19, input_size=(args.patch_len, args.patch_len))
seg_model.classification = nn.Conv2d(512, args.seg_class_num, kernel_size=1)
else:
raise NotImplementedError("Unknown model {}".format(args.seg_model_name))
seg_model_path = os.path.join(args.model_dir, "SegBestModel", args.best_seg_model)
seg_model = nn.DataParallel(seg_model)
seg_model.load_state_dict(torch.load(seg_model_path))
seg_model.cuda()
seg_model.eval()
return seg_model
def load_patch_model(args):
patch_model_path = os.path.join(args.model_dir, "PatchBestModel", args.cnn_model, args.best_patch_model)
patch_model = torch.load(patch_model_path)
patch_model.cuda()
patch_model.eval()
return patch_model
def load_wsi_model(args):
wsi_model = WsiNet(class_num=args.wsi_class_num, in_channels=args.fea_len, mode=args.fusion_mode)
wsi_weights_path = os.path.join(args.model_dir, "wsiBestModel", args.cnn_model,
args.fusion_mode, args.wsi_model_name)
wsi_weights_dict = torch.load(wsi_weights_path, map_location=lambda storage, loc: storage)
wsi_model.load_state_dict(wsi_weights_dict)
wsi_model.cuda()
wsi_model.eval()
return wsi_model
def seg_slide_img(seg_model, slide_path, args):
slide_img = io.imread(slide_path)
coors_arr = wsi_stride_splitting(slide_img.shape[0], slide_img.shape[1], patch_len=args.patch_len, stride_len=args.stride_len)
print("h: {} w: {}".format(slide_img.shape[0], slide_img.shape[1]))
wmap = np.zeros((slide_img.shape[0], slide_img.shape[1]), dtype=np.int32)
pred_map = np.zeros_like(wmap).astype(np.float32)
patch_list, coor_list = [], []
for ic, coor in enumerate(coors_arr):
ph, pw = coor[0], coor[1]
patch_list.append(slide_img[ph:ph+args.patch_len, pw:pw+args.patch_len] / 255.0)
coor_list.append([ph, pw])
wmap[ph:ph+args.patch_len, pw:pw+args.patch_len] += 1
if len(patch_list) == args.seg_batch_size or ic+1 == len(coors_arr):
patch_arr = np.asarray(patch_list).astype(np.float32)
patch_dset = SegPatchDataset(patch_arr)
patch_loader = DataLoader(patch_dset, batch_size=args.seg_batch_size, shuffle=False, num_workers=0, drop_last=False)
with torch.no_grad():
pred_list = []
for patches in patch_loader:
inputs = Variable(patches.cuda())
outputs = seg_model(inputs)
preds = F.sigmoid(outputs)
preds = torch.squeeze(preds, dim=1).data.cpu().numpy()
pred_list.append(preds)
batch_preds = np.concatenate(pred_list, axis=0)
for ind, coor in enumerate(coor_list):
ph, pw = coor[0], coor[1]
pred_map[ph:ph+args.patch_len, pw:pw+args.patch_len] += batch_preds[ind]
patch_list, coor_list = [], []
prob_pred = np.divide(pred_map, wmap)
slide_pred = morphology.remove_small_objects(prob_pred>0.5, min_size=20480).astype(np.uint8)
slide_name = os.path.splitext(os.path.basename(slide_path))[0]
if args.gt_exist == True:
mask_path = os.path.join(os.path.dirname(slide_path), slide_name+".png")
if os.path.exists(mask_path):
mask_img = io.imread(mask_path) / 255.0
intersection = np.multiply(mask_img, slide_pred)
pred_dice = np.sum(intersection) / (np.sum(mask_img)+np.sum(slide_pred)-np.sum(intersection) + 1.0e-8)
print("Dice: {:.3f}".format(pred_dice))
pred_save_path = os.path.join(args.output_dir, "predictions", os.path.basename(slide_path))
io.imsave(pred_save_path, slide_pred*255)
def extract_model_feas(patch_model, input_tensor, args):
if args.cnn_model == "resnet50":
x = patch_model.conv1(input_tensor)
x = patch_model.bn1(x)
x = patch_model.relu(x)
x = patch_model.maxpool(x)
x = patch_model.layer1(x)
x = patch_model.layer2(x)
x = patch_model.layer3(x)
x = patch_model.layer4(x)
x = patch_model.avgpool(x)
feas = torch.flatten(x, 1)
logits = patch_model.fc(feas)
probs = F.softmax(logits, dim=1)
elif args.cnn_model == "vgg16bn":
x = patch_model.features(input_tensor)
x = patch_model.avgpool(x)
x = torch.flatten(x, 1)
feas = patch_model.classifier[:4](x)
logits = patch_model.classifier[4:](feas)
probs = F.softmax(logits, dim=-1)
else:
raise AssertionError("Unknown model name {}".format(args.cnn_model))
return feas, probs
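# Usage sketch (shapes are illustrative): for a resnet50 patch model and a batch of RGB
# patches, e.g. a tensor of shape (B, 3, 224, 224), the call
#   feas, probs = extract_model_feas(patch_model, batch_tensor, args)
# returns the pooled 2048-dim feature vectors and the softmax class probabilities per patch.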
def gen_wsi_feas(patch_model, img_path, args):
img_name = os.path.splitext(img_path)[0]
feas_list, probs_list, coor_list = [], [], []
cur_img = io.imread(img_path)
# split coors and save patches
coors_arr = wsi_stride_splitting(cur_img.shape[0], cur_img.shape[1], args.patch_len, args.stride_len)
patch_list = []
for ind, coor in enumerate(coors_arr):
start_h, start_w = coor[0], coor[1]
patch_img = cur_img[start_h:start_h+args.patch_len, start_w:start_w+args.patch_len]
# image background control
if patch.patch_bk_ratio(patch_img, bk_thresh=0.864) <= 0.88:
patch_list.append(patch_img)
coor_list.append([start_h, start_w, start_h+args.patch_len, start_w+args.patch_len])
# Process the feature extraction in a batch-wise manner to avoid huge memory consumption
if len(patch_list) == args.cls_batch_size or ind+1 == len(coors_arr):
patch_arr = np.asarray(patch_list)
patch_dset = ClsPatchDataset(patch_arr)
patch_loader = DataLoader(patch_dset, batch_size=args.cls_batch_size, shuffle=False, num_workers=0, drop_last=False)
with torch.no_grad():
for inputs in patch_loader:
batch_tensor = Variable(inputs.cuda())
feas, probs = extract_model_feas(patch_model, batch_tensor, args)
batch_feas = feas.cpu().data.numpy().tolist()
batch_probs = probs.cpu().data.numpy().tolist()
feas_list.extend(batch_feas)
probs_list.extend(batch_probs)
patch_list = []
all_feas = np.asarray(feas_list).astype(np.float32)
all_probs = np.asarray(probs_list).astype(np.float32)
sorted_ind = np.argsort(all_probs[:, 0])
feas_placeholder = np.zeros((args.wsi_patch_num, all_feas.shape[1]), dtype=np.float32)
test_patch_num = min(len(all_feas), args.wsi_patch_num)
chosen_total_ind = sorted_ind[:test_patch_num]
feas_placeholder[:test_patch_num] = all_feas[chosen_total_ind]
chosen_coors = np.asarray(coor_list)[chosen_total_ind].tolist()
return feas_placeholder, test_patch_num, chosen_coors
def cls_slide_img(patch_model, wsi_model, slide_path, args):
chosen_feas, chosen_num, chosen_coors = gen_wsi_feas(patch_model, slide_path, args)
slide_fea_data = torch.from_numpy(chosen_feas).unsqueeze(0)
im_data = Variable(slide_fea_data.cuda())
true_num = torch.from_numpy(
|
np.array([chosen_num])
|
numpy.array
|
import pytest
import numpy as np
from python_quaternions.quaternion import Quaternion, UnitQuaternion
def assert_array_equal(array_0, array_1, tol=0.0):
"""
Utility function asserting whether two numpy arrays are componentwise equal,
within a tolerance.
"""
assert np.array_equal(array_0.shape, array_1.shape)
for arr_0_elem, arr_1_elem in zip(array_0, array_1):
assert
|
np.abs(arr_0_elem - arr_1_elem)
|
numpy.abs
|
import data
import glob
import re
import itertools
from collections import defaultdict
import numpy as np
import utils
class SliceNormRescaleDataGenerator(object):
def __init__(self, data_path, batch_size, transform_params, patient_ids=None, labels_path=None,
slice2roi_path=None, full_batch=False, random=True, infinite=False, view='sax',
data_prep_fun=data.transform_norm_rescale, **kwargs):
if patient_ids:
self.patient_paths = []
for pid in patient_ids:
self.patient_paths.append(data_path + '/%s/study/' % pid)
else:
self.patient_paths = glob.glob(data_path + '/*/study/')
self.slice_paths = [sorted(glob.glob(p + '/%s_*.pkl' % view)) for p in self.patient_paths]
self.slice_paths = list(itertools.chain(*self.slice_paths))
self.slicepath2pid = {}
for s in self.slice_paths:
self.slicepath2pid[s] = int(utils.get_patient_id(s))
self.nsamples = len(self.slice_paths)
self.batch_size = batch_size
self.rng = np.random.RandomState(42)
self.full_batch = full_batch
self.random = random
self.infinite = infinite
self.id2labels = data.read_labels(labels_path) if labels_path else None
self.transformation_params = transform_params
self.data_prep_fun = data_prep_fun
self.slice2roi = utils.load_pkl(slice2roi_path) if slice2roi_path else None
def generate(self):
while True:
rand_idxs = np.arange(self.nsamples)
if self.random:
self.rng.shuffle(rand_idxs)
for pos in xrange(0, len(rand_idxs), self.batch_size):
idxs_batch = rand_idxs[pos:pos + self.batch_size]
nb = len(idxs_batch)
# allocate batch
x_batch = np.zeros((nb, 30) + self.transformation_params['patch_size'], dtype='float32')
y0_batch =
|
np.zeros((nb, 1), dtype='float32')
|
numpy.zeros
|
import numpy as np
import matplotlib.pyplot as plt
from common import STATE_SIZES, TIME_NAMES, FUNCTION_NAMES
def plot_relative_time(data, save_destination):
# data to plot
n_groups = 4
n_comparison = 3
data1 = (90, 55, 40, 65)
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.15
opacity = 1
colors = ['#D7191C', '#FDAE61', '#ABDDA4', '#2B83BA']
bars = []
for comparison_index in range(n_comparison):
distance = bar_width * comparison_index
for color_index in range(len(colors)):
inner_data = data[comparison_index][color_index]
color = colors[color_index]
kwargs = {
'alpha': opacity,
'color': color,
}
if color_index > 0:
bottom =
|
np.array(data[comparison_index][0])
|
numpy.array
|
import numpy as np
from mogapy.solver import Solver
def cndsa2(fitnesses, constraints):
"""Take in an MxN array of fitnesses and an MxO array of constraints, rank according to level of constrained
non-dominance.
Input:
fitnesses: A MxN numpy array, each row is a solution, each column is a fitness variable.
constraints: An MxO numpy array, each row is a solution, each column is a constraint violation variable (0 means
the solution is feasible for that constraint condition).
Outputs:
An ordered list, each element contains a list of row indices of fitnesses that correspond
to a non-dominated ranking: 0 - 1st rank, 1 - 2nd rank and so on.
1st rank is completely non-dominated (the Pareto front for the given solutions); see the usage sketch after this function."""
M, N = fitnesses.shape
# Dominated Counter
d = np.zeros((M, 1))
# Dominating Tracker, a M length list showing what solutions are dominated by a solution.
s = [[] for _ in range(M)]
# The current front ranking (initialised at 0)
f = 0
# Rankings list (theoretically there can be up to M rankings)
F = [[] for _ in range(M)]
# For every solution, check if it dominates the rest
for i in range(M):
# select solution p
#p = fitnesses[i, :]
p = constraints[i, :]
for j in range(i + 1, M):
# select solution q
#q = fitnesses[j, :]
q = constraints[j, :]
# There are three cases of constrained domination:
#   1. p is feasible and q is not (or vice versa): the feasible solution dominates.
#   2. p and q are both feasible: use fitness-based domination.
#   3. p and q are both infeasible: compare total constraint violation; if neither is
#      smaller, fall back to fitness-based domination.
# Case One, p dominates q
if np.all(p == 0) and np.any(q > 0):
# Increase the domination counter of q
d[j] = d[j] + 1
# Add index of q to s[i]
s[i].append(j)
# skip to next q
continue
elif np.all(q == 0) and np.any(p > 0):
# Increase the domination counter of p
d[i] = d[i] + 1
# Add index of p to s[j]
s[j].append(i)
# skip to next q
continue
# Case Two, p and q are feasible: sort by feasibility
if np.all(p == 0) and np.all(q == 0):
pf = fitnesses[i, :]
qf = fitnesses[j, :]
if np.all(pf <= qf) and np.any(pf < qf):
# Increase the domination counter of q
d[j] = d[j] + 1
# Add index of q to s[i]
s[i].append(j)
continue
# If q dominates p
elif np.all(qf <= pf) and np.any(qf < pf):
# Increase the domination counter of p
d[i] = d[i] + 1
# Add index of p to s[j]
s[j].append(i)
continue
# Case Three, p and q are infeasible: sort by least infeasible
if np.any(p > 0) and np.any(q > 0):
if np.less(p.sum(), q.sum()):
# Increase the domination counter of q
d[j] = d[j] + 1
# Add index of q to s[i]
s[i].append(j)
continue
elif np.less(q.sum(), p.sum()):
# Increase the domination counter of p
d[i] = d[i] + 1
# Add index of p to s[j]
s[j].append(i)
continue
else:
pf = fitnesses[i, :]
qf = fitnesses[j, :]
if np.all(pf <= qf) and np.any(pf < qf):
# Increase the domination counter of q
d[j] = d[j] + 1
# Add index of q to s[i]
s[i].append(j)
continue
# If q dominates p
elif np.all(qf <= pf) and np.any(qf < pf):
# Increase the domination counter of p
d[i] = d[i] + 1
# Add index of p to s[j]
s[j].append(i)
continue
# If solution p is non-dominated, then assign first non-dominated rank (0 indexed)
if d[i] == 0:
F[f].append(i)
# Loop through solutions to find the non-dominated points
while len(F) > f and len(F[f]) > 0:
# For each solution in rank f
for i in F[f]:
# For each solution dominated by i
for j in s[i]:
d[j] = d[j] - 1
if d[j] == 0:
F[f + 1].append(j)
# Increment the rank
f = f + 1
# Remove empty rankings from F and return
return [i for i in F if len(i) > 0]
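# Usage sketch (toy values): with two feasible solutions, the first dominating the second
# on both objectives, plus one infeasible solution, three fronts come back and the
# infeasible solution is ranked last despite its better fitness:
#   fit = np.array([[1.0, 1.0], [2.0, 2.0], [0.5, 0.5]])
#   con = np.array([[0.0], [0.0], [1.0]])
#   cndsa2(fit, con)   # -> [[0], [1], [2]]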
def ndsa2(fitnesses):
"""Take in a list of fitnesses, rank according to level of non-dominatedness.
Input:
fitnesses: A MxN numpy array, each row is a solution, each column is a fitness variable.
Outputs:
An ordered list, each element contains a list of row indices of fitnesses that correspond
to a non-dominated ranking: 0 - 1st rank, 1 - 2nd rank and so on.
1st rank is completely non-dominated (the Pareto front for the given solutions)."""
M, N = fitnesses.shape
# Dominated Counter
d =
|
np.zeros((M, 1))
|
numpy.zeros
|