"""Pascal VOC Detection evaluation."""
from __future__ import division
from collections import defaultdict
import numpy as np
import mxnet as mx
from ..bbox import bbox_iou
class VOCMApMetric(mx.metric.EvalMetric):
"""
Calculate mean AP for object detection task
Parameters:
---------
iou_thresh : float
IOU overlap threshold for TP
class_names : list of str
optional, if provided, will print out AP for each class
"""
def __init__(self, iou_thresh=0.5, class_names=None):
super(VOCMApMetric, self).__init__('VOCMeanAP')
if class_names is None:
self.num = None
else:
assert isinstance(class_names, (list, tuple))
for name in class_names:
assert isinstance(name, str), "must provide names as str"
num = len(class_names)
self.name = list(class_names) + ['mAP']
self.num = num + 1
self.reset()
self.iou_thresh = iou_thresh
self.class_names = class_names
def reset(self):
"""Clear the internal statistics to initial state."""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
self._n_pos = defaultdict(int)
self._score = defaultdict(list)
self._match = defaultdict(list)
def get(self):
"""Get the current evaluation result.
Returns
-------
name : str
Name of the metric.
value : float
Value of the evaluation.
"""
self._update() # update metric at this time
if self.num is None:
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
else:
names = ['%s'%(self.name[i]) for i in range(self.num)]
values = [x / y if y != 0 else float('nan') \
for x, y in zip(self.sum_metric, self.num_inst)]
return (names, values)
# pylint: disable=arguments-differ, too-many-nested-blocks
def update(self, pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults=None):
"""Update internal buffer with latest prediction and gt pairs.
Parameters
----------
pred_bboxes : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes with shape `B, N, 4`.
Where B is the size of mini-batch, N is the number of bboxes.
pred_labels : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes labels with shape `B, N`.
pred_scores : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes scores with shape `B, N`.
gt_bboxes : mxnet.NDArray or numpy.ndarray
Ground-truth bounding boxes with shape `B, M, 4`.
Where B is the size of mini-batch, M is the number of ground-truths.
gt_labels : mxnet.NDArray or numpy.ndarray
Ground-truth bounding boxes labels with shape `B, M`.
gt_difficults : mxnet.NDArray or numpy.ndarray, optional, default is None
Ground-truth bounding boxes difficulty labels with shape `B, M`.
"""
def as_numpy(a):
"""Convert a (list of) mx.NDArray into numpy.ndarray"""
if isinstance(a, (list, tuple)):
out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a]
try:
out = np.concatenate(out, axis=0)
except ValueError:
out = np.array(out)
return out
elif isinstance(a, mx.nd.NDArray):
a = a.asnumpy()
return a
if gt_difficults is None:
gt_difficults = [None for _ in as_numpy(gt_labels)]
if isinstance(gt_labels, list):
gt_diff_shape = gt_difficults[0].shape[0] if hasattr(gt_difficults[0], 'shape') else 0
if len(gt_difficults) * gt_diff_shape != \
len(gt_labels) * gt_labels[0].shape[0]:
gt_difficults = [None] * len(gt_labels) * gt_labels[0].shape[0]
for pred_bbox, pred_label, pred_score, gt_bbox, gt_label, gt_difficult in zip(
*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults]]):
# strip padding -1 for pred and gt
valid_pred = np.where(pred_label.flat >= 0)[0]
pred_bbox = pred_bbox[valid_pred, :]
pred_label = pred_label.flat[valid_pred].astype(int)
pred_score = pred_score.flat[valid_pred]
valid_gt = np.where(gt_label.flat >= 0)[0]
gt_bbox = gt_bbox[valid_gt, :]
gt_label = gt_label.flat[valid_gt].astype(int)
if gt_difficult is None:
gt_difficult = np.zeros(gt_bbox.shape[0])
else:
gt_difficult = gt_difficult.flat[valid_gt]
for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
pred_mask_l = pred_label == l
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
# sort by score
order = pred_score_l.argsort()[::-1]
pred_bbox_l = pred_bbox_l[order]
pred_score_l = pred_score_l[order]
gt_mask_l = gt_label == l
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = gt_difficult[gt_mask_l]
self._n_pos[l] += np.logical_not(gt_difficult_l).sum()
self._score[l].extend(pred_score_l)
if len(pred_bbox_l) == 0:
continue
if len(gt_bbox_l) == 0:
self._match[l].extend((0,) * pred_bbox_l.shape[0])
continue
# VOC evaluation follows integer typed bounding boxes.
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1
iou = bbox_iou(pred_bbox_l, gt_bbox_l)
gt_index = iou.argmax(axis=1)
# set -1 if there is no matching ground truth
gt_index[iou.max(axis=1) < self.iou_thresh] = -1
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
for gt_idx in gt_index:
if gt_idx >= 0:
if gt_difficult_l[gt_idx]:
self._match[l].append(-1)
else:
if not selec[gt_idx]:
self._match[l].append(1)
else:
self._match[l].append(0)
selec[gt_idx] = True
else:
self._match[l].append(0)
def _update(self):
""" update num_inst and sum_metric """
aps = []
recall, precs = self._recall_prec()
for l, rec, prec in zip(range(len(precs)), recall, precs):
ap = self._average_precision(rec, prec)
aps.append(ap)
if self.num is not None and l < (self.num - 1):
self.sum_metric[l] = ap
self.num_inst[l] = 1
if self.num is None:
self.num_inst = 1
self.sum_metric = np.nanmean(aps)
else:
self.num_inst[-1] = 1
self.sum_metric[-1] = np.nanmean(aps)
def _recall_prec(self):
""" get recall and precision from internal records """
n_fg_class = max(self._n_pos.keys()) + 1
prec = [None] * n_fg_class
rec = [None] * n_fg_class
for l in self._n_pos.keys():
score_l = np.array(self._score[l])
match_l = np.array(self._match[l], dtype=np.int32)
order = score_l.argsort()[::-1]
match_l = match_l[order]
tp = np.cumsum(match_l == 1)
fp = np.cumsum(match_l == 0)
# If an element of fp + tp is 0,
# the corresponding element of prec[l] is nan.
with np.errstate(divide='ignore', invalid='ignore'):
prec[l] = tp / (fp + tp)
# If n_pos[l] is 0, rec[l] is None.
if self._n_pos[l] > 0:
rec[l] = tp / self._n_pos[l]
return rec, prec
def _average_precision(self, rec, prec):
"""
calculate average precision
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
if rec is None or prec is None:
return np.nan
# append sentinel values at both ends
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], np.nan_to_num(prec), [0.]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
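# Worked example (added for illustration): with cumulated rec = [0.5, 1.0] and
# prec = [1.0, 0.5], the envelope above becomes mpre = [1.0, 1.0, 0.5, 0.0], so
# ap = (0.5 - 0) * 1.0 + (1.0 - 0.5) * 0.5 = 0.75.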
class VOC07MApMetric(VOCMApMetric):
""" Mean average precision metric for PASCAL V0C 07 dataset
Parameters:
---------
iou_thresh : float
IOU overlap threshold for TP
class_names : list of str
optional, if provided, will print out AP for each class
"""
def __init__(self, *args, **kwargs):
super(VOC07MApMetric, self).__init__(*args, **kwargs)
def _average_precision(self, rec, prec):
"""
calculate average precision, override the default one,
special 11-point metric
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
if rec is None or prec is None:
return np.nan
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(np.nan_to_num(prec)[rec >= t])
ap += p / 11.
return ap
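# Added sketch (not part of the original module): the 11-point interpolation
# rule used above, written as a standalone helper so it can be sanity-checked
# on toy recall/precision arrays without building the full metric.
def _eleven_point_ap(rec, prec):
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
        if np.sum(rec >= t) == 0:
            p = 0.
        else:
            p = np.max(np.nan_to_num(prec)[rec >= t])
        ap += p / 11.
    return ap
# e.g. _eleven_point_ap(np.array([0.25, 0.5]), np.array([1.0, 1.0])) ~= 6/11 ~= 0.545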
import numpy as np
from matplotlib import pyplot as pl
import heatsim2
import pyopencl as cl
from crack_heatflow import surface_heating
#from function_as_script import scriptify
#from crackheat_inversion.heatinvert import heatinvert as heatinvert_function
#heatinvert = scriptify(heatinvert_function)
ctx=cl.create_some_context()
k=6.7 # rough thermal conductivity of titanium, W/(m*deg K)
rho=4430.0 # rough density for titanium
c=526.0 # rough specific heat for titanium, J/kg*k
t1=0.2 # excitation start
t2=1.2 # excitation end
xcenter=3e-3
grid_refinement=1
# Create x,y,z voxel center coords
nz=30*grid_refinement
ny=20*grid_refinement+1
nx=30*grid_refinement+1
(dz,dy,dx,
z,y,x,
zgrid,ygrid,xgrid,
z_bnd,y_bnd,x_bnd,
z_bnd_z,z_bnd_y,z_bnd_x,
y_bnd_z,y_bnd_y,y_bnd_x,
x_bnd_z,x_bnd_y,x_bnd_x,
r3d,r2d) = heatsim2.build_grid(0,10e-3,nz,
-7e-3,7e-3,ny,
-10e-3,10e-3,nx)
materials=(
# material 0: titanium
(heatsim2.TEMPERATURE_COMPUTE,k,rho,c),
)
boundaries=(
# boundary 0: conducting
(heatsim2.boundary_conducting,),
(heatsim2.boundary_insulating,),
)
Power_watts_per_m2 = 1000.0
volumetric=( # on material grid
# 0: nothing
(heatsim2.NO_SOURCE,),
#1: stepped source
(heatsim2.STEPPED_SOURCE,t1,t2,Power_watts_per_m2/dy), # t0 (sec), t1 (sec), Power W/m^2/dy
)
# initialize all elements to zero
(material_elements,
boundary_z_elements,
boundary_y_elements,
boundary_x_elements,
volumetric_elements)=heatsim2.zero_elements(nz,ny,nx)
# set x and y and z=0 edges to insulating
boundary_x_elements[:,:,0]=1 # insulating
boundary_x_elements[:,:,-1]=1 # insulating
boundary_y_elements[:,0,:]=1 # insulating
boundary_y_elements[:,-1,:]=1 # insulating
boundary_z_elements[0,:,:]=1 # insulating
boundary_z_elements[-1,:,:]=1 # insulating
# Source turned on between 1mm and 3mm
volumetric_elements[(abs(ygrid) < 1e-6) & (np.sqrt((xgrid-xcenter)**2+zgrid**2) >= 1e-3) & (np.sqrt((xgrid-xcenter)**2+zgrid**2) < 3e-3)]=1 # stepped source
y0idx = np.where(abs(y) < 1e-6)[0][0]
t0=0
dt=.01/grid_refinement
nt_bnd=200*grid_refinement
nt_centers=nt_bnd-1
t_bnd=t0+np.arange(nt_bnd,dtype='d')*dt
t_centers=(t_bnd[:-1]+t_bnd[1:])/2.0
# NOTE: Important that t1 and t2 line up with elements of t_bnd
t1idx=np.argmin(abs(t1-t_bnd))
t2idx=np.where((t_bnd-t2) < 0)[0][-1]
assert(t_bnd[t1idx] <= t1 and abs(t1-t_bnd[t1idx]) < 1e-4)
assert(t_bnd[t2idx] <= t2 and abs(t2-t_bnd[t2idx]) < 1e-4)
(ADI_params,ADI_steps)=heatsim2.setup(z[0],y[0],x[0],
dz,dy,dx,
nz,ny,nx,
dt,
materials,
boundaries,
volumetric,
material_elements,
boundary_z_elements,
boundary_y_elements,
boundary_x_elements,
volumetric_elements)
T=np.zeros((nt_centers,nz,ny,nx),dtype='d')
"""
This module is the perturbation to matrix.
"""
import numpy as np
from scipy.linalg import eigh
class Pert():
def __init__(self, H0=None, evals=None, evecs=None):
if evals is not None and evecs is not None:
self.evals, self.evecs = evals, evecs
elif H0 is not None:
self.evals, self.evecs = eigh(H0)
else:
raise ValueError("at least one of H0 or (evals, evecs) should be given")
self.n = len(self.evals)
self.dHH = None
def evals1(self, dH):
return self.Epert1(self.evecs, dH)
def evecs1(self, dH):
return self.Vpert1(self.evals, self.evecs, dH, self.n)
def evals2(self, dH):
return self.Epert2(self.evals, self.evecs, dH, self.n)
@staticmethod
def Epert1(evecs, dH):
return np.diag(evecs.T.conj().dot(dH).dot(evecs))
@staticmethod
def Vpert1(evals, evecs, dH, n):
dV = np.zeros((n, n), dtype='complex')
dHH = evecs.T.conj() @ dH @ evecs
for i in range(n):
for k in range(n):
if abs(evals[k] - evals[i]) > 0.000001:
dV[:, i] += dHH[k, i] / (evals[i] - evals[k]) * evecs[:, k]
return dV
@staticmethod
def Epert2(evals, evecs, dH, n):
d2E = np.zeros(n, dtype='complex')
dHH = evecs.T.conj() @ dH @ evecs
for i in range(n):
for k in range(n):
if abs(evals[k] - evals[i]) > 1e-10:
d2E[i] += dHH[i, k] * dHH[k, i] / (evals[i] - evals[k])
return d2E
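# Added sketch (not in the original module): a quick consistency check of the
# first-order correction. For a purely off-diagonal perturbation of a diagonal
# H0 the first-order shift is zero, so evals + evals1 should match the exact
# spectrum of H0 + dH up to O(eps**2).
def _check_first_order(eps=1e-4):
    H0 = np.diag([1.0, 2.0, 3.0])
    dH = eps * np.array([[0.0, 1.0, 0.0],
                         [1.0, 0.0, 1.0],
                         [0.0, 1.0, 0.0]])
    p = Pert(H0=H0)
    approx = np.sort((p.evals + p.evals1(dH)).real)
    exact = np.linalg.eigvalsh(H0 + dH)
    return np.max(np.abs(approx - exact))  # expected to be of order eps**2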
def unit2(x=0.3):
return np.array([[np.cos(x), np.sin(x)],[-np.sin(x), np.cos(x)]])
def test_pert_degenerate_2d(x=0.01):
H0=np.array([[2,0],[0,2]])
evals0, evecs0=np.linalg.eigh(H0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the Trainer class, the main class responsible for
the process of training a neural network.
"""
import sys
import logging
from time import time
import h5py
import numpy
import theano
from theanolm.backend import IncompatibleStateError
from theanolm.parsing import ShufflingBatchIterator, LinearBatchIterator
from theanolm.training.stoppers import create_stopper
class Trainer(object):
"""Training Process
Saves a history of validation costs and decreases learning rate when the
cost does not decrease anymore.
"""
def __init__(self, training_options, vocabulary, training_files, sampling):
"""Creates the optimizer and initializes the training process.
Creates empty member variables for the perplexities list and the
training state at the validation point. Training state is saved at only
one validation point at a time, so the validation interval is at least the
number of samples used per validation.
:type training_options: dict
:param training_options: a dictionary of training options
:type vocabulary: Vocabulary
:param vocabulary: vocabulary that provides mapping between words and
word IDs
:type training_files: list of file objects
:param training_files: list of files to be used as training data
:type sampling: list of floats
:param sampling: specifies a fraction for each training file, how much
to sample on each epoch
"""
self._vocabulary = vocabulary
print("Computing the number of mini-batches in training data.")
linear_iter = LinearBatchIterator(
training_files,
vocabulary,
batch_size=training_options['batch_size'],
max_sequence_length=training_options['sequence_length'],
map_oos_to_unk=True)
sys.stdout.flush()
self._updates_per_epoch = 0
for _, _, _ in linear_iter:
self._updates_per_epoch += 1
if self._updates_per_epoch < 1:
raise ValueError("Training data does not contain any sentences.")
logging.debug("One epoch of training data contains %d mini-batch "
"updates.",
self._updates_per_epoch)
self.class_prior_probs = vocabulary.get_class_probs()
logging.debug("Class unigram log probabilities are in the range [%f, "
"%f].",
numpy.log(self.class_prior_probs.min()),
numpy.log(self.class_prior_probs.max()))
self._training_iter = ShufflingBatchIterator(
training_files,
sampling,
vocabulary,
batch_size=training_options['batch_size'],
max_sequence_length=training_options['sequence_length'],
map_oos_to_unk=True)
self._stopper = create_stopper(training_options, self)
self._options = training_options
# iterator to cross-validation data, or None for no cross-validation
self._validation_iter = None
# a text scorer for performing cross-validation
self._scorer = None
# number of perplexity samples per validation
self._samples_per_validation = 7
# function for combining validation samples
self._statistic_function = lambda x: numpy.median(numpy.asarray(x))
# the stored validation samples
self._local_perplexities = []
# the state at the center of validation samples
self._validation_state = None
# number of mini-batch updates between log messages
self._log_update_interval = 0
# the network to be trained
self._network = None
# the optimization function
self._optimizer = None
# current candidate for the minimum validation cost state
self._candidate_state = None
# index to the cost history that corresponds to the current candidate
# state
self._candidate_index = None
# current training epoch
self.epoch_number = 0
# number of mini-batch updates performed in this epoch
self.update_number = 0
# total number of mini-batch updates performed (after restart)
self._total_updates = 0
# validation set cost history
self._cost_history = None
# function for averaging cross-validation measurements
self._statistics_function = None
# duration of the last mini-batch update
self._update_duration = None
def set_validation(self, validation_iter, scorer,
samples_per_validation=None, statistics_function=None):
"""Sets cross-validation iterator and parameters.
:type validation_iter: BatchIterator
:param validation_iter: an iterator for computing validation set
perplexity
:type scorer: TextScorer
:param scorer: a text scorer for computing validation set perplexity
:type samples_per_validation: int
:param samples_per_validation: number of perplexity samples to compute
per cross-validation
:type statistics_function: Python function
:param statistics_function: a function to be performed on a list of
consecutive perplexity measurements to compute the validation cost
(median by default)
"""
self._validation_iter = validation_iter
self._scorer = scorer
if samples_per_validation is not None:
self._samples_per_validation = samples_per_validation
if statistics_function is not None:
self._statistics_function = statistics_function
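# Example (added, hypothetical call): to average instead of taking the median
# of the perplexity samples, one could pass e.g.
#   trainer.set_validation(validation_iter, scorer,
#                          statistics_function=lambda x: numpy.mean(numpy.asarray(x)))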
def set_logging(self, log_interval):
"""Sets logging parameters.
:type log_interval: int
:param log_interval: number of mini-batch updates between log messages
"""
self._log_update_interval = log_interval
def initialize(self, network, state, optimizer, load_and_train=False):
"""Sets the network and the HDF5 file that stores the network state,
optimizer, and validation scorer and iterator.
If the HDF5 file contains a network state, initializes the network with
that state.
:type network: Network
:param network: the network, which will be used to retrieve state when
saving
:type state: h5py.File
:param state: HDF5 file where initial training state will be possibly
read from, and candidate states will be saved to
:type optimizer: BasicOptimizer
:param optimizer: one of the optimizer implementations
"""
self._network = network
self._optimizer = optimizer
self._candidate_state = state
if 'trainer' in self._candidate_state and load_and_train:
print("Restoring initial network state from {} partially.".format(
self._candidate_state.filename))
sys.stdout.flush()
self._reset_partial_state()
self._candidate_index = None
self.epoch_number = 1
self.update_number = 0
self._cost_history = numpy.asarray([], dtype=theano.config.floatX)
elif 'trainer' in self._candidate_state:
print("Restoring initial network state from {}.".format(
self._candidate_state.filename))
sys.stdout.flush()
self._reset_state()
else:
self._candidate_index = None
self.epoch_number = 1
self.update_number = 0
self._cost_history = numpy.asarray([], dtype=theano.config.floatX)
self._total_updates = 0
def train(self):
"""Trains a neural network.
If cross-validation has been configured using ``set_validation()``,
computes the validation set perplexity as many times per epoch as
specified by the _validation_frequency_ option and saves the model when
the perplexity improves. Otherwise saves the model after each epoch.
"""
if (self._network is None) or (self._optimizer is None) or \
(self._candidate_state is None):
raise RuntimeError("Trainer has not been initialized before "
"calling train().")
start_time = time()
while self._stopper.start_new_epoch():
epoch_start_time = time()
for word_ids, file_ids, mask in self._training_iter:
self.update_number += 1
self._total_updates += 1
class_ids = self._vocabulary.word_id_to_class_id[word_ids]
update_start_time = time()
self._optimizer.update_minibatch(word_ids, class_ids, file_ids, mask)
self._update_duration = time() - update_start_time
if (self._log_update_interval >= 1) and \
(self._total_updates % self._log_update_interval == 0):
self._log_update()
self._validate()
if not self._stopper.start_new_minibatch():
break
if self._validation_iter is None:
self._set_candidate_state()
epoch_duration = time() - epoch_start_time
epoch_minutes = epoch_duration / 60
epoch_time_h, epoch_time_m = divmod(epoch_minutes, 60)
message = "Finished training epoch {} in {:.0f} hours {:.1f} minutes." \
.format(self.epoch_number, epoch_time_h, epoch_time_m)
best_cost = self.candidate_cost()
if best_cost is not None:
message += " Best validation perplexity {:.2f}.".format(
best_cost)
print(message)
self.epoch_number += 1
self.update_number = 0
duration = time() - start_time
minutes = duration / 60
time_h, time_m = divmod(minutes, 60)
print("Training finished in {:.0f} hours {:.1f} minutes." \
.format(time_h, time_m))
def get_state(self, state):
"""Pulls parameter values from Theano shared variables and updates a
HDF5 file with all the network and training state variables.
For consistency, all the parameter values are returned as numpy types,
since state read from a model file also contains numpy types. This also
ensures the cost history will be copied into the returned dictionary.
:type state: h5py.File
:param state: HDF5 file for storing the current state
"""
h5_trainer = state.require_group('trainer')
h5_trainer.attrs['epoch_number'] = self.epoch_number
h5_trainer.attrs['update_number'] = self.update_number
if 'cost_history' in h5_trainer:
h5_trainer['cost_history'].resize(self._cost_history.shape)
h5_trainer['cost_history'][:] = self._cost_history
else:
h5_trainer.create_dataset(
'cost_history', data=self._cost_history, maxshape=(None,),
chunks=(1000,))
if self._network is not None:
self._network.get_state(state)
self._training_iter.get_state(state)
if self._optimizer is not None:
self._optimizer.get_state(state)
def _reset_state(self):
"""Resets the values of Theano shared variables to the current candidate
state.
Sets the candidate state index to point to the last element in the loaded
cost history.
Requires that the candidate state (``self._candidate_state``) contains
values for all the training parameters; that state is assumed to be the
state of minimum cost found so far.
"""
self._network.set_state(self._candidate_state)
if 'trainer' not in self._candidate_state:
raise IncompatibleStateError("Training state is missing.")
h5_trainer = self._candidate_state['trainer']
if 'epoch_number' not in h5_trainer.attrs:
raise IncompatibleStateError("Current epoch number is missing from "
"training state.")
self.epoch_number = int(h5_trainer.attrs['epoch_number'])
if 'update_number' not in h5_trainer.attrs:
raise IncompatibleStateError("Current update number is missing "
"from training state.")
self.update_number = int(h5_trainer.attrs['update_number'])
logging.info("[%d] (%.2f %%) of epoch %d",
self.update_number,
self.update_number / self._updates_per_epoch * 100,
self.epoch_number)
if 'cost_history' in h5_trainer:
self._cost_history = h5_trainer['cost_history'].value
if self._cost_history.size == 0:
print("Validation set cost history is empty in the training state.")
self._candidate_index = None
else:
self._candidate_index = self._cost_history.size - 1
self._log_validation()
else:
print("Warning: Validation set cost history is missing from "
"training state. Initializing to empty cost history.")
self._cost_history = numpy.asarray([], dtype=theano.config.floatX)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script performs a supervised training and prediction for
Spatial Transcriptomics datasets
The multi-class classification is performed using a 2 layers
neural network with the option to use GPU (CUDA) acceleration
The training set will be a matrix with counts (genes as columns and spots as rows)
and the test set will be another matrix of counts with the same format
A file with class labels for the training set is needed
so the classifier knows what class each spot (row) in
the training set belongs to; the file should be tab delimited:
SPOT_NAME(as it appears in the matrix) CLASS_NUMBER
The script will then try to predict the classes of the spots (rows) in the
test set. If class labels for the test set
are given, the script will compute the accuracy of the prediction.
The script allows normalizing the train/test counts using different
methods, as well as performing pre-filtering operations.
@Author <NAME> <<EMAIL>>
"""
import os
import sys
import time
import argparse
import numpy as np
import pandas as pd
import gc
import platform
import random
import copy
from collections import defaultdict
from stanalysis.preprocessing import *
from stanalysis.utils import *
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as utils
import torchvision
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
from sklearn.model_selection import StratifiedShuffleSplit
import gc
# Windows work-around
__spec__ = None
import multiprocessing
SEARCH_BATCH = [(200, 200), (500, 500), (1000, 1000)]
L2 = [0.0, 0.001, 0.0001]
SEARCH_LR = [0.1, 0.01, 0.001, 0.0001]
SEARCH_HL = [(3000, 500), (2000, 500), (1000, 500), (3000, 1000), (2000, 1000), (2000, 300), (1000, 300)]
SEED = 999
def computeWeightsClasses(dataset):
# Distribution of labels
label_count = defaultdict(int)
for _, label in dataset:
label_count[label.item()] += 1
# Weight for each sample
weights = np.asarray([1.0 / x for x in label_count.values()])
return weights
def computeWeights(dataset, nclasses):
count = [0] * nclasses
for item in dataset:
count[item[1]] += 1
weight_per_class = [0.] * nclasses
N = float(sum(count))
for i in range(nclasses):
weight_per_class[i] = N / float(count[i])
weight = [0] * len(dataset)
for idx, val in enumerate(dataset):
weight[idx] = weight_per_class[val[1]]
return np.asarray(weight)
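# Hedged usage note (added): per-sample weights like the ones computed above are
# typically handed to a weighted sampler so that rare classes are drawn more
# often, e.g.
#   sampler = utils.WeightedRandomSampler(torch.DoubleTensor(weights), len(weights))
# which can then be passed to create_loaders() below as train_sampler.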
def str_to_act_func(name):
if name == "TANH":
return torch.nn.Tanh()
elif name == "SELU":
return torch.nn.SELU()
else:
return torch.nn.ReLU()
def create_model(n_feature,
n_class,
hidden_layer_one,
hidden_layer_two,
activation_function):
# Init model
H1 = hidden_layer_one
H2 = hidden_layer_two
model = torch.nn.Sequential(
torch.nn.Linear(n_feature, H1),
torch.nn.BatchNorm1d(num_features=H1),
str_to_act_func(activation_function),
torch.nn.Linear(H1, H2),
torch.nn.BatchNorm1d(num_features=H2),
str_to_act_func(activation_function),
torch.nn.Linear(H2, n_class))
return model
def create_loaders(trn_set,
vali_set,
train_batch_size,
validation_batch_size,
train_sampler,
test_sampler,
shuffle_train,
shuffle_test,
kwargs):
# Create loaders
trn_loader = utils.DataLoader(trn_set,
sampler=train_sampler,
shuffle=shuffle_train,
batch_size=train_batch_size,
**kwargs)
vali_loader = utils.DataLoader(vali_set,
sampler=test_sampler,
shuffle=shuffle_test,
batch_size=validation_batch_size,
**kwargs)
return trn_loader, vali_loader
def train(model, trn_loader, optimizer, loss_func, device):
model.train()
training_loss = 0
training_acc = 0
counter = 0
for data, target in trn_loader:
data = Variable(data.to(device))
target = Variable(target.to(device))
# Forward pass
output = model(data)
tloss = loss_func(output, target)
training_loss += tloss.item()
# Zero the gradients
optimizer.zero_grad()
# Backward pass
tloss.backward()
# Update parameters
optimizer.step()
# Compute prediction's score
pred = torch.argmax(output.data, 1)
training_acc += accuracy_score(target.data.cpu().numpy(),
pred.data.cpu().numpy())
counter += 1
avg_loss = training_loss / float(counter)
avg_acc = training_acc / float(counter)
return avg_loss, avg_acc
def test(model, vali_loader, loss_func, device):
model.eval()
test_loss = 0
counter = 0
preds = list()
for data, target in vali_loader:
with torch.no_grad():
data = data.to(device)
target = target.to(device)
output = model(data)
test_loss += loss_func(output, target).item()
pred = torch.argmax(output.data, 1)
preds += pred.cpu().numpy().tolist()
counter += 1
avg_loss = test_loss / float(counter)
return preds, avg_loss
def predict(model, data, device):
model.eval()
with torch.no_grad():
data = data.to(device)
output = model(data)
pred = torch.argmax(output.data, 1)
return output, pred
def main(train_data,
test_data,
train_classes_file,
test_classes_file,
log_scale,
normalization,
stratified_loss,
outdir,
standard_transformation,
train_batch_size,
validation_batch_size,
epochs,
learning_rate,
stratified_sampler,
min_class_size,
use_cuda,
num_exp_genes,
num_exp_spots,
min_gene_expression,
verbose,
hidden_layer_one,
hidden_layer_two,
train_validation_ratio,
train_test_ratio,
grid_search,
activation_function,
l2,
num_genes_keep_train,
num_genes_keep_test,
top_genes_criteria_train,
top_genes_criteria_test):
if not os.path.isfile(train_data):
sys.stderr.write("Error, the training data input is not valid\n")
sys.exit(1)
if not os.path.isfile(train_classes_file):
sys.stderr.write("Error, the train labels input is not valid\n")
sys.exit(1)
if not os.path.isfile(test_data):
sys.stderr.write("Error, the test data input is not valid\n")
sys.exit(1)
if test_classes_file is not None and not os.path.isfile(test_classes_file):
sys.stderr.write("Error, the test labels input is not valid\n")
sys.exit(1)
if min_class_size < 0:
sys.stderr.write("Error, invalid minimum class size\n")
sys.exit(1)
if learning_rate < 0:
sys.stderr.write("Error, invalid learning rate\n")
sys.exit(1)
if hidden_layer_one <= 0 or hidden_layer_two <= 0:
sys.stderr.write("Error, invalid hidden layers\n")
sys.exit(1)
if train_batch_size < 1 or validation_batch_size < 1:
sys.stderr.write("Error, batch size is too small\n")
sys.exit(1)
if epochs < 1:
sys.stderr.write("Error, number of epoch is too small\n")
sys.exit(1)
if num_exp_genes < 0.0 or num_exp_genes > 1.0:
sys.stderr.write("Error, invalid number of expressed genes\n")
sys.exit(1)
if num_exp_spots < 0.0 or num_exp_spots > 1.0:
sys.stderr.write("Error, invalid number of expressed spots\n")
sys.exit(1)
if train_validation_ratio < 0.1 or train_validation_ratio > 0.9:
sys.stderr.write("Error, invalid train test ratio genes\n")
sys.exit(1)
if not torch.cuda.is_available() and use_cuda:
sys.stderr.write("Error, CUDA is not available in this computer\n")
sys.exit(1)
if not outdir or not os.path.isdir(outdir):
outdir = os.getcwd()
print("Output folder {}".format(outdir))
# To ensure reproducibility
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
if use_cuda:
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
print("Loading training dataset...")
train_data_frame = pd.read_csv(train_data, sep="\t", header=0,
index_col=0, engine='c', low_memory=True)
train_data_frame = remove_noise(train_data_frame, num_exp_genes,
num_exp_spots, min_gene_expression)
# Load all the classes for the training set
train_labels = parse_labels(train_classes_file, min_class_size)
print("Loading testing dataset...")
test_data_frame = pd.read_csv(test_data, sep="\t", header=0,
index_col=0, engine='c', low_memory=True)
test_data_frame = remove_noise(test_data_frame, num_exp_genes,
num_exp_spots, min_gene_expression)
# Load all the classes for the prediction set
if test_classes_file is not None:
test_labels = parse_labels(test_classes_file, 0)
# Normalize counts
print("Normalizing...")
train_data_frame = normalize_data(train_data_frame, normalization)
test_data_frame = normalize_data(test_data_frame, normalization)
# Keep top genes (variance or expressed)
train_data_frame = keep_top_genes(train_data_frame, num_genes_keep_train / 100.0,
criteria=top_genes_criteria_train)
test_data_frame = keep_top_genes(test_data_frame, num_genes_keep_test / 100.0,
criteria=top_genes_criteria_test)
# Keep only the genes in the training set that intersect with the prediction set
print("Genes in training set {}".format(train_data_frame.shape[1]))
print("Spots in training set {}".format(train_data_frame.shape[0]))
print("Genes in testing set {}".format(test_data_frame.shape[1]))
print("Spots in testing set {}".format(test_data_frame.shape[0]))
intersect_genes = np.intersect1d(train_data_frame.columns.values,
test_data_frame.columns.values)
if len(intersect_genes) == 0:
sys.stderr.write("Error, there are no genes intersecting the train and test datasets\n")
sys.exit(1)
print("Intersected genes {}".format(len(intersect_genes)))
train_data_frame = train_data_frame.loc[:, intersect_genes]
test_data_frame = test_data_frame.loc[:, intersect_genes]
# Log the counts
if log_scale:
print("Transforming datasets to log space...")
train_data_frame = np.log1p(train_data_frame)
test_data_frame = np.log1p(test_data_frame)
# Apply the z-transformation
if standard_transformation:
print("Applying standard transformation...")
train_data_frame = ztransformation(train_data_frame)
test_data_frame = ztransformation(test_data_frame)
# Sort labels data together
shared_spots = np.intersect1d(train_data_frame.index, train_labels.index)
train_data_frame = train_data_frame.loc[shared_spots, :]
train_labels = np.asarray(train_labels.loc[shared_spots, ["cluster"]])
import os
import shutil
import unittest
from types import SimpleNamespace
import numpy as np
import pandas as pd
import bilby
import bilby_pipe
import bilby_pipe.pp_test
class TestPP(unittest.TestCase):
def setUp(self):
self.outdir = "test_outdir"
self.args = SimpleNamespace(
directory=self.outdir,
outdir=None,
label=None,
n=None,
print=False,
filter=None,
)
os.mkdir(self.outdir)
def tearDown(self):
shutil.rmtree(self.outdir)
del self.outdir
def create_fake_results(self):
self.N_results = 3
self.results_filenames = []
self.priors = bilby.core.prior.PriorDict(
dict(
A=bilby.core.prior.Normal(0, 1, "A"),
B=bilby.core.prior.Normal(0, 1, "B"),
)
)
for i in range(self.N_results):
result = bilby.core.result.Result()
result.outdir = self.outdir
result.label = f"label_{i}"
result.search_parameter_keys = ["A", "B"]
result.priors = self.priors
result.posterior = pd.DataFrame(
dict(A=np.random.normal(0, 1, 100), B=np.random.normal(0, 1, 100)))
import cv2
import json
import numpy as np
import os
import time
import glob
from model import EfficientDetModel
from utils import preprocess_image, postprocess_boxes
from utils.draw_boxes import draw_boxes
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 0
weighted_bifpn = False
model_path = 'checkpoints/deepfashion.h5'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
# deepfashion classes
classes = {value['id'] - 1: value['name'] for value in json.load(open('deepfashion_13.json', 'r')).values()}
num_classes = 13
score_threshold = 0.3
colors = [np.random.randint(0, 256, 3).tolist() for _ in range(num_classes)]
#_, model = efficientdet(phi=phi,
# weighted_bifpn=weighted_bifpn,
# num_classes=num_classes,
# score_threshold=score_threshold)
models = EfficientDetModel(0)
model = models.p_model
model.load_weights(model_path, by_name=True)
# 'datasets/VOC2007/JPEGImages/*.jpg'
for image_path in glob.glob('data/sample_val/image/000002.jpg'):
image = cv2.imread(image_path)
src_image = image.copy()
# BGR -> RGB
image = image[:, :, ::-1]
h, w = image.shape[:2]
image, scale = preprocess_image(image, image_size=image_size)
# run network
start = time.time()
boxes, scores, labels, masks = model.predict_on_batch([np.expand_dims(image, axis=0)])
boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
import numpy as np
from sklearn.datasets import load_iris, load_digits
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from os import path, mkdir
from itertools import product
SEED = 42
output_dir = r'graphs'
EPOCHS = 400
LEARNING_RATE = 0.00001
if not path.exists(output_dir):
mkdir(output_dir)
class Adaline:
def __init__(self, input_dim, lr, classes):
"""
Initializes the classifier's weights
:param input_dim: The dimension of the input
:param lr: learning rate for the algorithm
:param classes: classes\labels of the dataset
"""
self.w = [np.random.uniform(-1, 1, (input_dim + 1, 1)) / np.square(input_dim) for i in range(len(classes))]
self.lr = lr
self.classes = classes
@staticmethod
def concat_ones(x):
n = np.ones((x.shape[0], 1))
return np.hstack((x, n))
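# Added note: e.g. concat_ones(np.zeros((2, 3))) returns an array of shape (2, 4)
# whose last column is all ones, i.e. a bias term appended to each sample.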
def print_training_log(self, verbose, train_x, train_y, val_x, val_y, epoch):
if verbose == 0:
score_train = self.score(train_x[:, :-1], train_y)
score_val = None if val_x is None else self.score(val_x[:, :-1], val_y)
print(f'Epoch {epoch}: acc - {score_train} val_acc - {score_val}')
return score_train, score_val
def fit(self, train_x, train_y, val_x=None, val_y=None, max_epochs=-1, target_acc=None, verbose=0):
"""
:param train_x: train set features
:param train_y: train set labels
:param val_x: validation set features
:param val_y: validation set labels
:param max_epochs: maximum number of epoches
:param target_acc: if max_epoch is not given, use this stopping criterion
:param verbose: 0 - print logs (e.g. losses and accuracy)
1 - don't print logs
"""
epoch = 1
mappers = [np.vectorize(lambda x, c=c: 1 if x == c else -1) for c in self.classes]
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from augmixations.core import (
generate_rect_coordinates,
insert_image_in_background,
check_middle_part_overlap_critical,
correct_box_if_full_side_overlap,
correct_box_if_some_alnge_overlap,
correct_background_boxes,
)
@pytest.mark.parametrize('params',
[(500, 500, 0, 0, 500, 500, 100, 100, 300, 300),
(1000, 2000, 0, 0, 2000, 1000, 300, 300, 600, 600),
(500, 1000, 0, 0, 500, 500, None, None, None, None),
(500, 700, None, None, None, None, None, None, None, None),
(300, 300, None, None, None, None, 100, 100, 300, 300)])
def test_generate_rect_coordinates(params):
img_h, img_w, min_x, min_y, max_x, max_y, min_h, min_w, max_h, max_w = params
rect = generate_rect_coordinates(img_h, img_w, min_x, min_y, max_x, max_y, min_h, min_w, max_h, max_w)
x1, y1, x2, y2 = rect
assert x1 < x2 and y1 < y2
assert x1 >= 0 and y1 >= 0
assert x2 <= img_w and y2 <= img_h
@pytest.mark.parametrize('params',
[(np.ones((500, 500, 3), dtype=np.uint8)*255,
np.ones((300, 300, 3), dtype=np.uint8)*255,
100, 100, 0, 0, 500, 500),
(np.ones((500, 700, 3), dtype=np.uint8)*255,
np.ones((300, 200, 3), dtype=np.uint8)*255,
100, 100, None, None, None, None), ])
def test_insert_image_in_background(params):
bg_img, fg_img, start_x, start_y, min_x, min_y, max_x, max_y = params
out_img, (shift_x, shift_y) = insert_image_in_background(
bg_img, fg_img, start_x, start_y, min_x, min_y, max_x, max_y)
if max_x is not None:
assert start_x + shift_x <= max_x
if min_x is not None:
assert start_x + shift_x >= min_x
if max_y is not None:
assert start_y + shift_y <= max_y
if min_y is not None:
assert start_y + shift_y >= min_y
assert out_img.shape == bg_img.shape
@pytest.mark.parametrize('params', [
({'x1': 75, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 75, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 75, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 75, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 75, 'y1': 75, 'x2': 175, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 75, 'y1': 75, 'x2': 175, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
# Case where the box is adjusted
# because most of one of its sides is overlapped
({'x1': 75, 'y1': 25, 'x2': 225, 'y2': 100, 'area': 75*150, 'height': 75, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.9, 0.6, np.array([50, 100, 250, 250]), False),
({'x1': 75, 'y1': 200, 'x2': 225, 'y2': 300, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.9, 0.6, np.array([50, 50, 250, 200]), False),
({'x1': 25, 'y1': 75, 'x2': 100, 'y2': 225, 'area': 75*150, 'height': 150, 'width': 75},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.6, 0.9, np.array([100, 50, 250, 250]), False),
({'x1': 200, 'y1': 75, 'x2': 300, 'y2': 225, 'area': 100*150, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.6, 0.9, np.array([50, 50, 200, 250]), False),
# Inserted image centred inside the box, but very large
({'x1': 55, 'y1': 55, 'x2': 245, 'y2': 245, 'area': 190*190, 'height': 190, 'width': 190},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.5, 0.9, 0.9, np.array([50, 50, 250, 250]), True),
# Inserted image fully overlapping the box vertically through the centre
({'x1': 55, 'y1': 0, 'x2': 245, 'y2': 300, 'area': 190*300, 'height': 300, 'width': 190},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.5, 0.9, 0.9, np.array([50, 50, 250, 250]), True),
# Inserted image fully overlapping the box horizontally through the centre
({'x1': 0, 'y1': 55, 'x2': 300, 'y2': 245, 'area': 190*300, 'height': 190, 'width': 300},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.5, 0.9, 0.9, np.array([50, 50, 250, 250]), True),
])
def test_check_middle_part_overlap_critical(params):
rect_info, box_info, max_overlap_area_ratio, max_h_overlap, max_w_overlap, true_box, true_overlap = params
new_box, critical_overlap = check_middle_part_overlap_critical(
rect_info,
box_info,
max_overlap_area_ratio,
max_h_overlap,
max_w_overlap,
debug=True,
label='Test box',)
assert critical_overlap == true_overlap
assert np.array_equal(true_box, new_box)
@pytest.mark.parametrize('params', [
# One of the sides is fully overlapped;
# the overlapped-area ratio is 0.75 for all cases
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 175, 'area': 100*150, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([125, 50, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 175, 'area': 100*150, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([125, 50, 150, 150]), True),
({'x1': 25, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([50, 125, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([50, 125, 150, 150]), True),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([50, 50, 75, 150]), False),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([50, 50, 75, 150]), True),
({'x1': 25, 'y1': 75, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([50, 50, 150, 75]), False),
({'x1': 25, 'y1': 75, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([50, 50, 150, 75]), True),
])
def test_correct_box_if_full_side_overlap(params):
rect_info, box_info, max_overlap_area_ratio, true_box, real_overlap = params
new_box, critical_overlap = correct_box_if_full_side_overlap(
rect_info, box_info,
max_overlap_area_ratio,
debug=True,
label='Test box',)
assert real_overlap == critical_overlap
assert np.array_equal(true_box, new_box)
@pytest.mark.parametrize('params', [
# Test: one of the corners is overlapped; checked in turn:
# 1. An ordinary overlap that passes all thresholds
# 2. Lowering the overlap-area threshold, min ratio = 0.5625
# 3. Raising the minimum-width threshold, min ratio = 0.25
# 4. Raising the minimum-height threshold, min ratio = 0.25
# 5. Raising the min width and height thresholds and lowering the min area threshold
# TOP-LEFT CORNER
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.7, np.array([50, 50, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.5, np.array([50, 50, 150, 150]), True),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, 0.9, 0.7, np.array([125, 50, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.7, 0.7, np.array([50, 125, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, 0.7, 0.5, np.array([125, 125, 150, 150]), True),
# TOP-RIGHT CORNER
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.7, np.array([50, 50, 150, 150]), False),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.5, np.array([50, 50, 150, 150]), True),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, 0.9, 0.7, np.array([50, 50, 75, 150]), False),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.7, 0.7, np.array([50, 125, 150, 150]), False),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, 0.7, 0.5, np.array([50, 125, 75, 150]), True),
# BOTTOM-LEFT CORNER
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.7, np.array([50, 50, 150, 150]), False),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.5, np.array([50, 50, 150, 150]), True),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, 0.9, 0.7, np.array([50, 125, 150, 150]), False),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.7, 0.7, np.array([50, 50, 150, 75]), False),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, 0.7, 0.5, np.array([50, 125, 150, 75]), True),
# BOTTOM-RIGHT CORNER
({'x1': 100, 'y1': 100, 'x2': 200, 'y2': 200, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.5, np.array([50, 50, 150, 150]), False),
({'x1': 100, 'y1': 100, 'x2': 200, 'y2': 200, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.2, np.array([50, 50, 150, 150]), True),
({'x1': 100, 'y1': 100, 'x2': 200, 'y2': 200, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.7, np.array([50, 50, 100, 150]), False),
({'x1': 100, 'y1': 100, 'x2': 200, 'y2': 200, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.4, 0.7, np.array([50, 50, 150, 100]), False),
({'x1': 100, 'y1': 100, 'x2': 200, 'y2': 200, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.4, 0.2, np.array([50, 50, 100, 100]), True),
])
def test_correct_box_if_some_alnge_overlap(params):
rect_info, box_info, max_h_overlap, max_w_overlap, max_overlap_area, true_box, real_overlap = params
new_box, critical_overlap = correct_box_if_some_alnge_overlap(
rect_info,
box_info,
max_h_overlap,
max_w_overlap,
max_overlap_area,
debug=True,
label='Test box',)
assert real_overlap == critical_overlap
assert np.array_equal(true_box, new_box)
@pytest.mark.parametrize('params', [
# Normal rectangles with an overlap of 0.25, passing all thresholds
(np.array([np.array([50, 50, 150, 150])]), np.array([1], dtype=object),
np.array([100, 100, 200, 200]), 0.5, 0.25, 0.25, 0.9, 0.9,
np.array([np.array([50, 50, 150, 150])]), np.array([1.0], dtype=object)),
(np.array([np.array([50, 150, 150, 250])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.5, 0.25, 0.25, 0.9, 0.9,
np.array([np.array([50, 150, 150, 250])]), np.array(['1'], dtype=object)),
(np.array([np.array([150, 150, 250, 250])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.5, 0.25, 0.25, 0.9, 0.9,
np.array([np.array([150, 150, 250, 250])]), np.array(['1'], dtype=object)),
(np.array([np.array([150, 50, 250, 150])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.5, 0.25, 0.25, 0.9, 0.9,
np.array([np.array([150, 50, 250, 150])]), np.array(['1'], dtype=object)),
# Lower the max-area ratio to 0.2
(np.array([np.array([50, 50, 150, 150])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.2, 0.25, 0.25, 0.9, 0.9,
np.array([]), np.array([])),
(np.array([np.array([50, 150, 150, 250])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.2, 0.25, 0.25, 0.9, 0.9,
np.array([]), np.array([])),
(np.array([np.array([150, 150, 250, 250])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.2, 0.25, 0.25, 0.9, 0.9,
np.array([]), np.array([])),
(np.array([np.array([150, 50, 250, 150])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.2, 0.25, 0.25, 0.9, 0.9,
np.array([]), np.array([])),
# Raise the min-height ratio to 0.7 (overlap of 0.5), enforced
# by lowering the max-width ratio to 0.2
(np.array([np.array([50, 50, 150, 150])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.8, 0.7, 0.25, 0.9, 0.2,
np.array([]), np.array([])),
(np.array([np.array([50, 150, 150, 250])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.8, 0.7, 0.25, 0.9, 0.2,
np.array([]), np.array([])),
(np.array([np.array([150, 150, 250, 250])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.8, 0.7, 0.25, 0.9, 0.2,
np.array([]), np.array([])),
(np.array([np.array([150, 50, 250, 150])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.8, 0.7, 0.25, 0.9, 0.2,
np.array([]), np.array([])),
# Raise the min-width ratio to 0.7 (overlap of 0.5), enforced
# by lowering the max-width ratio to 0.2
(np.array([np.array([50, 50, 150, 150])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.8, 0.25, 0.7, 0.2, 0.9,
np.array([]), np.array([])),
(np.array([np.array([50, 150, 150, 250])]), np.array(['1'], dtype=object),
np.array([100, 100, 200, 200]), 0.8, 0.25, 0.7, 0.2, 0.9,
np.array([]), np.array([])),
"""Infer population parameters along the red giant branch, in bins of LOGG"""
# Standard library
import os
from os import path
import sys
# Third-party
import numpy as np
from schwimmbad import choose_pool
# Project
from hq.log import logger
from hq.script_helpers import get_parser
from helpers import get_metadata, get_ez_samples, get_rg_mask, get_ms_mask
from model_ez import run_pixel
scripts_path = path.split(path.abspath(__file__))[0]
cache_path = path.abspath(path.join(scripts_path, '../cache/'))
plot_path = path.abspath(path.join(scripts_path, '../plots/'))
logg_step = 0.25
logg_binsize = 1.5 * logg_step
logg_bincenters = np.arange(0, 4+1e-3, logg_step)
teff_step = 300
teff_binsize = 1.5 * teff_step
teff_bincenters = np.arange(3400, 7000+1e-3, teff_step)
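# Illustrative note (added): with the definitions above, stars belonging to the
# bin centred on logg_bincenters[k] would be selected with a mask of the form
#   np.abs(logg - logg_bincenters[k]) < logg_binsize / 2
# (and similarly for the TEFF bins), assuming `logg` holds the per-star values.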
from __future__ import absolute_import
import os
import sys
import numpy as np
import nibabel as nib
from spinalcordtoolbox.utils import __sct_dir__
sys.path.append(os.path.join(__sct_dir__, 'scripts'))
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.deepseg_lesion import core as deepseg_lesion
import sct_utils as sct
def test_model_file_exists():
for model_name in deepseg_lesion.MODEL_LST:
model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_lesion_models', '{}_lesion.h5'.format(model_name))
assert os.path.isfile(model_path)
def test_segment():
contrast_test = 't2'
model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_lesion_models', '{}_lesion.h5'.format(contrast_test))
# create fake data
data = np.zeros((48,48,96))
import argparse
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint
class TrainFER():
def __init__(self, data_path='./data/fer2013.csv', model_path='./models/model.h5'):
self.data_path = data_path
self.model_path = model_path
self.num_features = 128
self.num_labels = 7
self.batch_size = 64
self.epochs = 100
self.width, self.height = 48, 48
self.data = pd.read_csv(self.data_path)
self.model = self.build_model()
self.X_train, self.X_val,self.X_test,\
self.y_train, self.y_val, self.y_test = self.load_data()
def load_data(self):
# get label
emotions = pd.get_dummies(self.data['emotion']).values
pixels = self.data['pixels'].tolist()
faces = []
for pixel_row in pixels:
face = [int(pixel) for pixel in pixel_row.split(' ')]
face = np.asarray(face).reshape(self.width, self.height)
faces.append(face.astype('float32'))
faces = np.asarray(faces)
faces = np.expand_dims(faces, -1)
"""
Classes for dealing with data products.
"""
import os
import warnings
import cwinpy
import lal
import lalpulsar
import numpy as np
from astropy.io import registry as io_registry
from gwpy.detector import Channel
from gwpy.io.mp import read_multi
from gwpy.plot.colors import GW_OBSERVATORY_COLORS
from gwpy.segments import SegmentList
from gwpy.timeseries import TimeSeries, TimeSeriesBase
from gwpy.types import Series
from numba import jit
# import utility functions
from .utils import gcd_array, is_par_file, logfactorial
class MultiHeterodynedData(object):
"""
A class to contain time series' of heterodyned data, using the
:class:`~cwinpy.data.HeterodynedData` class, for multiple detectors/data
streams.
Parameters
----------
data: (str, array_like, dict, HeterodynedData)
The heterodyned data either as a string giving a file path, an array of
data, or a dictionary of file paths/data arrays, that are keyed on
valid detector names.
times: (array_like, dict)
If `data` is an array, or dictionary of arrays, then `times` must be
set giving the time stamps for the data values. If `times` is a
dictionary then it should be keyed on the same detector names as in
`data`.
detector: (str, lal.Detector)
If `data` is a file name or data array then `detector` must be given as
a string or :class:`lal.Detector`.
Notes
-----
See the :class:`~cwinpy.data.HeterodynedData` documentation for information
on additional keyword arguments.
"""
def __init__(
self,
data=None,
times=None,
detector=None,
window=30,
inject=False,
par=None,
injpar=None,
freqfactor=2.0,
bbthreshold="default",
remove_outliers=False,
thresh=3.5,
**kwargs,
):
# set keyword argument
self._heterodyned_data_kwargs = {}
self._heterodyned_data_kwargs["window"] = window
self._heterodyned_data_kwargs["par"] = par
self._heterodyned_data_kwargs["injpar"] = injpar
self._heterodyned_data_kwargs["inject"] = inject
self._heterodyned_data_kwargs["freqfactor"] = freqfactor
self._heterodyned_data_kwargs["bbthreshold"] = bbthreshold
self._heterodyned_data_kwargs["remove_outliers"] = remove_outliers
self._heterodyned_data_kwargs["thresh"] = thresh
self._data = dict() # initialise empty dict
self._currentidx = 0 # index for iterator
# add data
if data is not None:
self.add_data(data, times, detector=detector)
def add_data(self, data, times=None, detector=None):
"""
Add heterodyned data to the class.
Parameters
----------
data: (str, array_like, dict, HeterodynedData)
The heterodyned data either as a string giving a file path, an
array of data, a dictionary of file paths/data arrays that are
keyed on valid detector names, or a
:class:`~cwinpy.data.HeterodynedData` object.
times: (array_like, dict)
If `data` is an array, or dictionary of arrays, then `times` must
be set giving the time stamps for the data values. If `times` is
a dictionary then it should be keyed on the same detector names as
in `data`.
detector: (str, lal.Detector)
If `data` is a file name or data array then `detector` must be
given as a string or :class:`lal.Detector`.
"""
if isinstance(data, HeterodynedData):
if data.detector is None and detector is None:
raise ValueError("No detector is given!")
if data.detector is None and detector is not None:
data.detector = detector
self._add_HeterodynedData(data)
elif isinstance(data, dict):
for detkey in data:
if isinstance(data[detkey], HeterodynedData):
if data[detkey].detector is None:
data[detkey].detector = detkey
self._add_HeterodynedData(data[detkey])
else:
if isinstance(times, dict):
if detkey not in times:
raise KeyError(
"'times' does not contain the "
"detector: {}".format(detkey)
)
else:
dettimes = times[detkey]
else:
dettimes = times
self._add_data(data[detkey], detkey, dettimes)
else:
if isinstance(times, dict):
raise TypeError("'times' should not be a dictionary")
self._add_data(data, detector, times)
def _add_HeterodynedData(self, data):
detname = data.detector
if detname not in self._data:
self._data[detname] = [data] # add as a list
else:
# if data from that detector already exists then append to the list
self._data[detname].append(data)
def _add_data(self, data, detector, times=None):
if detector is None or data is None:
raise ValueError("data and detector must be set")
het = HeterodynedData(
data, times, detector=detector, **self._heterodyned_data_kwargs
)
self._add_HeterodynedData(het)
def __getitem__(self, det):
"""
Get the list of :class:`~cwinpy.data.HeterodynedData` objects keyed to
a given detector.
"""
if det in self.detectors:
return self._data[det]
else:
return None
def pop(self, det):
return self._data.pop(det)
@property
def to_list(self):
datalist = []
for key in self._data:
if isinstance(self._data[key], list):
datalist += self._data[key]
else:
datalist.append(self._data[key])
return datalist
@property
def detectors(self):
"""
Return the list of detectors contained in the object.
"""
return list(self._data.keys())
@property
def pars(self):
"""
Return the list of heterodyne source parameter files for each data set
contained in the object.
"""
return [het.par for het in self]
@property
def freq_factors(self):
"""
Return the list of heterodyne frequency scaling factors for each data
set contained in the object.
"""
return [het.freq_factor for het in self]
@property
def injection_snr(self):
"""
Get the coherent optimal signal-to-noise ratio of an injected signal in
all heterodyned data sets. See
:meth:`cwinpy.data.HeterodynedData.injection_snr`.
"""
snr2 = 0.0
for het in self:
if het.injpar is not None:
snr2 += het.injection_snr ** 2
return np.sqrt(snr2)
def signal_snr(self, signalpar):
"""
Get the coherent signal-to-noise ratio of a given signal. See
:meth:`cwinpy.data.HeterodynedData.signal_snr`.
"""
snr2 = 0.0
for het in self:
snr2 += het.signal_snr(signalpar) ** 2
return np.sqrt(snr2)
def __iter__(self):
self._currentidx = 0 # reset iterator index
return self
def __next__(self):
if self._currentidx >= len(self):
raise StopIteration
else:
self._currentidx += 1
return self.to_list[self._currentidx - 1]
def plot(
self,
det=None,
together=False,
which="abs",
figsize=(12, 4),
remove_outliers=False,
thresh=3.5,
zero_time=True,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
**plotkwargs,
):
"""
Plot all, or some of, the time series' contained in the class. The
general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.plot` and additional arguments are
given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
from matplotlib import pyplot as pl
if len(self) == 0:
# nothing in the class!
return None
# set which plots to output
ndet = 1
if det is not None:
if det not in self.detectors:
raise ValueError("Detector {} is not in the class".format(det))
# get the number of time series' for the requested detector
ndet = len(self[det])
nplots = 1
if together:
if ndet > 1:
nplots = ndet
hets = self[det]
else:
nplots = len(self)
hets = self # datasets to plot
# create the figure
if figsize[0] == 12 and figsize[1] == 4:
# check default size and increase
figsize = (figsize[0], figsize[1] * nplots)
figs, axs = pl.subplots(nplots, 1, figsize=figsize)
for ax, het in zip(axs, hets):
_ = het.plot(
which=which,
ax=ax,
remove_outliers=remove_outliers,
thresh=thresh,
zero_time=zero_time,
labelsize=labelsize,
fontsize=fontsize,
legendsize=legendsize,
fontname=fontname,
labelname=labelname,
**plotkwargs,
)
else:
# a list of figures
figs = []
if det is not None:
hets = self[det]
else:
hets = self
# loop over data and produce plots
for het in hets:
figs.append(
het.plot(
which=which,
figsize=figsize,
remove_outliers=remove_outliers,
thresh=thresh,
zero_time=zero_time,
labelsize=labelsize,
fontsize=fontsize,
legendsize=legendsize,
fontname=fontname,
labelname=labelname,
**plotkwargs,
)
)
return figs
def power_spectrum(
self,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
dt=None,
fraction_labels=None,
fraction_label_num=None,
average=None,
window=None,
overlap=None,
**plotkwargs,
):
"""
Plot all, or some of, the power spectra of the time series' contained
in the class. The general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.power_spectrum` and additional
arguments are given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
return self._plot_power(
"power",
det=det,
together=together,
figsize=figsize,
remove_outliers=remove_outliers,
thresh=thresh,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
dt=dt,
fraction_labels=fraction_labels,
fraction_label_num=fraction_label_num,
average=average,
window=window,
overlap=overlap,
**plotkwargs,
)
def periodogram(
self,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
fraction_labels=None,
fraction_label_num=None,
**plotkwargs,
):
"""
Plot all, or some of, the periodograms of the time series' contained
in the class. The general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.periodogram` and additional
arguments are given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
return self._plot_power(
"periodogram",
det=det,
together=together,
figsize=figsize,
remove_outliers=remove_outliers,
thresh=thresh,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
fraction_labels=fraction_labels,
fraction_label_num=fraction_label_num,
**plotkwargs,
)
def spectrogram(
self,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
fraction_labels=None,
fraction_label_num=None,
dt=None,
overlap=None,
window=None,
**plotkwargs,
):
"""
Plot all, or some of, the spectograms of the time series' contained
in the class. The general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.spectrogram` and additional
arguments are given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
return self._plot_power(
"spectrogram",
det=det,
together=together,
figsize=figsize,
window=window,
remove_outliers=remove_outliers,
thresh=thresh,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
dt=dt,
fraction_labels=fraction_labels,
fraction_label_num=fraction_label_num,
overlap=overlap,
**plotkwargs,
)
def _plot_power(
self,
plottype,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
dt=None,
average=None,
overlap=None,
window=None,
fraction_labels=None,
fraction_label_num=None,
**plotkwargs,
):
"""
General purpose function for plotting the various spectrum figures.
Parameters
----------
plottype: str
The "spectrum" plots that are required: 'power_spectrum',
'periodogram', or 'spectrogram'
"""
from matplotlib import pyplot as pl
if plottype.lower() not in ["spectrogram", "periodogram", "power"]:
raise ValueError("Spectrum plot type is not known")
if len(self) == 0:
# nothing in the class!
return None
# set which plots to output
ndet = 1
if det is not None:
if det not in self.detectors:
raise ValueError("Detector {} is not in the class".format(det))
# get the number of time series' for the requested detector
ndet = len(self[det])
# set keyword arguments
speckwargs = {}
for key, value in zip(
[
"thresh",
"remove_outliers",
"labelsize",
"labelname",
"fontsize",
"fontname",
"legendsize",
"fraction_labels",
"fraction_label_num",
"figsize",
],
[
thresh,
remove_outliers,
labelsize,
labelname,
fontsize,
fontname,
legendsize,
fraction_labels,
fraction_label_num,
figsize,
],
):
if value is not None:
speckwargs[key] = value
if plottype.lower() == "power" and average is not None:
speckwargs["average"] = average
if plottype.lower() in ["spectrogram", "power"]:
if overlap is not None:
speckwargs["overlap"] = overlap
if window is not None:
speckwargs["window"] = window
if dt is not None:
speckwargs["dt"] = dt
nplots = 1
if together:
if ndet > 1:
nplots = ndet
hets = self[det]
else:
nplots = len(self)
hets = self # datasets to plot
# create the figure
if figsize is None:
# create default size
if plottype.lower() == "spectrogram":
figsize = (12, 4 * nplots)
else:
figsize = (6, 5 * nplots)
figs, axs = pl.subplots(nplots, 1, figsize=figsize)
for ax, het in zip(axs, hets):
if plottype.lower() == "periodogram":
plfunc = het.periodogram
elif plottype.lower() == "power":
plfunc = het.power_spectrum
else:
plfunc = het.spectrogram
_ = plfunc(**speckwargs, ax=ax, **plotkwargs)
figs.tight_layout()
else:
# a list of figures
figs = []
if det is not None:
hets = self[det]
else:
hets = self
# loop over data and produce plots
for het in hets:
if plottype.lower() == "periodogram":
plfunc = het.periodogram
figidx = 2
elif plottype.lower() == "power":
plfunc = het.power_spectrum
figidx = 2
else:
plfunc = het.spectrogram
figidx = 3
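# the per-dataset spectrum methods return a tuple of results when no axes
# are supplied; the figure object is the element at index 2 (index 3 for
# spectrograms), which is what gets collected here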
figs.append(plfunc(**speckwargs, **plotkwargs)[figidx])
return figs
def __len__(self):
length = 0
for key in self._data:
if isinstance(self._data[key], list):
length += len(self._data[key])
else:
length += 1
return length
class HeterodynedData(TimeSeriesBase):
"""
A class to contain a time series of heterodyned data.
Some examples of input `data` are:
1. The path to a file containing (gzipped) ascii text with the
following three columns::
# GPS time stamps real strain imaginary strain
1000000000.0 2.3852e-25 3.4652e-26
1000000060.0 -1.2963e-26 9.7423e-25
1000000120.0 5.4852e-25 -1.8964e-25
...
or four columns::
# GPS time stamps real strain imaginary strain std. dev.
1000000000.0 2.3852e-25 3.4652e-26 1.0e-25
1000000060.0 -1.2963e-26 9.7423e-25 1.0e-25
1000000120.0 5.4852e-25 -1.8964e-25 1.0e-25
...
where any row that starts with a ``#`` or a ``%`` is considered a comment.
2. A 1-dimensional array of complex data, and accompanying array of `time`
values, e.g.,
>>> import numpy as np
>>> N = 100 # the data length
>>> data = np.random.randn(N) + 1j*np.random.randn(N)
>>> times = np.linspace(1000000000., 1000005940., N)
or, a 2-dimensional array with the real and complex values held in separate
columns, e.g.,
>>> import numpy as np
>>> N = 100 # the data length
>>> data = np.random.randn(N, 2)
>>> times = np.linspace(1000000000., 1000005940., N)
or, a 2-dimensional array with the real and complex values held in separate
columns, *and* a third column holding the standard deviation for each
entry, e.g.,
>>> import numpy as np
>>> N = 100 # the data length
>>> stds = np.ones(N) # standard deviations
>>> data = np.array([stds*np.random.randn(N),
... stds*np.random.randn(N), stds]).T
>>> times = np.linspace(1000000000., 1000005940., N)
Parameters
----------
data: (str, array_like)
A file (plain ascii text, gzipped ascii text, or HDF5 file) containing
a time series of heterodyned data, or an array containing the complex
heterodyned data.
times: array_like
If the data was passed using the `data` argument, then the associated
time stamps should be passed using this argument.
par: (str, lalpulsar.PulsarParametersPy)
A parameter file, or :class:`lalpulsar.PulsarParametersPy` object
containing the parameters with which the data was heterodyned.
detector: (str, lal.Detector)
A string, or lal.Detector object, identifying the detector from which
the data was generated.
window: int, 30
The length of a window used for calculating a running median over the
data. If set to zero the running median will just be initialised with
zero values.
inject: bool, False
Set to ``True`` to add a simulated signal to the data based on the
parameters supplied in `injpar`, or `par` if `injpar` is not given.
injpar: (str, lalpulsar.PulsarParametersPy)
A parameter file name or :class:`lalpulsar.PulsarParametersPy`
object containing values for the injected signal. A `par` file must
also have been provided, and the injected signal will assume that
the data has already been heterodyned using the parameters from
`par`, which could be different.
injtimes: list, None
A list containing pairs of times between which to add the simulated
signal. By default the signal will be added into the whole data set.
freqfactor: float, 2.0
The frequency scale factor for the data signal, e.g., a value of two
for emission from the l=m=2 mode at twice the rotation frequency of the
source.
fakeasd: (float, str)
A amplitude spectral density value (in 1/sqrt(Hz)) at which to
generate simulated Gaussian noise to add to the data. Alternatively, if
a string is passed, and that string represents a known detector, then
the amplitude spectral density for that detector at design sensitivity
will be used (this requires a `par` value to be included, which
contains the source rotation frequency).
fakeseed: (int, class:`numpy.random.RandomState`), None
A seed for the random number generator used to create the fake data
(see :meth:`numpy.random.seed` and :class:`numpy.random.RandomState`
for more information).
issigma: bool
Set to ``True`` if the ``fakeasd`` value passed is actually a noise
standard deviation value rather than an amplitude spectral density.
bbthreshold: (str, float), "default"
The threshold method, or value for the
:meth:`~cwinpy.data.HeterodynedData.bayesian_blocks` function.
bbminlength: int, 5
The minimum length (in numbers of data points) of a chunk that the data
can be split into by the
:meth:`~cwinpy.data.HeterodynedData.bayesian_blocks` function. To
perform no splitting of the data set this value to be larger than the
total data length, e.g., ``inf``.
bbmaxlength: int, inf
The maximum length (in numbers of data points) of a chunk that the data
can be split into by the
:meth:`~cwinpy.data.HeterodynedData.bayesian_blocks` function. By
default this is ``inf``, i.e., chunks can be as long as possible.
remove_outliers: bool, False
If ``True`` outliers will be found (using
:meth:`~cwinpy.data.HeterodynedData.find_outliers`) and removed from the
data. They will not be stored anywhere in the class.
thresh: float, 3.5
The modified z-score threshold for outlier removal (see
:meth:`~cwinpy.data.HeterodynedData.find_outliers`)
comments: str
A string containing any comments about the data.
ephemearth: str, None
The path to the Earth ephemeris used for the signal phase model.
ephemsun: str, None
The path to the Sun ephemeris used for the signal phase model.
"""
# set some default detector color maps for plotting
colmapdic = {"H1": "Reds", "L1": "Blues", "V1": "PuRd", "G1": "Greys"}
# set some default plotting values
PLOTTING_DEFAULTS = {
"labelsize": 14, # font size for axes tick labels
"fontsize": 16, # font size for axes labels
"fontname": "Gentium", # font name for axes labels
"labelname": "Carlito", # font names for axes tick labels
}
_metadata_slots = Series._metadata_slots + (
"dt",
"comments",
"par",
"injpar",
"window",
"laldetector",
"vars",
"bbthreshold",
"bbminlength",
"bbmaxlength",
"outlier_thresh",
"injtimes",
"freq_factor",
"filter_history",
"running_median",
"inj_data",
"input_stds",
"outlier_mask",
"include_ssb",
"include_bsb",
"include_glitch",
"include_fitwaves",
"cwinpy_version",
)
def __new__(
cls,
data=None,
times=None,
par=None,
detector=None,
window=30,
inject=False,
injpar=None,
injtimes=None,
freqfactor=2.0,
fakeasd=None,
fakeseed=None,
issigma=False,
bbthreshold="default",
bbminlength=5,
bbmaxlength=np.inf,
remove_outliers=False,
thresh=3.5,
comments="",
ephemearth=None,
ephemsun=None,
**kwargs,
):
stds = None # initialise standard deviations
# read/parse data
if isinstance(data, str):
try:
new = cls.read(data)
except Exception as e:
raise IOError("Error reading file '{}':\n{}".format(data, e))
if new.detector is None:
new.detector = detector
else:
if isinstance(data, (TimeSeriesBase, HeterodynedData)):
dataarray = data.value
hettimes = data.times
if detector is None:
detector = data.detector
if type(data) is HeterodynedData:
if data.stds is not None:
stds = data.stds
else:
# use data
hettimes = times
if hettimes is None and data is None:
raise ValueError("Time stamps and/or data must be supplied")
elif data is not None:
dataarray = np.atleast_2d(np.asarray(data))
if dataarray.shape[0] == 1:
dataarray = dataarray.T
else:
# set data to zeros
dataarray = np.zeros((len(hettimes), 1), dtype=np.complex)
if (
dataarray.shape[1] == 1
and dataarray.dtype == np.complex
and hettimes is not None
):
dataarray = dataarray.flatten()
elif dataarray.shape[1] == 2 and hettimes is not None:
# real and imaginary components are separate
dataarray = dataarray[:, 0] + 1j * dataarray[:, 1]
elif dataarray.shape[1] == 3:
if hettimes is None:
# first column of array should be times
hettimes = dataarray[:, 0]
dataarray = dataarray[:, 1] + 1j * dataarray[:, 2]
else:
# third column can be standard deviations
stds = dataarray[:, 2]
dataarray = dataarray[:, 0] + 1j * dataarray[:, 1]
elif dataarray.shape[1] == 4:
if hettimes is None:
# first column of array should be times
hettimes = dataarray[:, 0]
stds = dataarray[:, 3]
dataarray = dataarray[:, 1] + 1j * dataarray[:, 2]
else:
raise ValueError("Supplied data array is the wrong shape")
else:
raise ValueError("Supplied data array is the wrong shape")
if len(hettimes) != dataarray.shape[0]:
raise ValueError("Supplied times is not that same length as the data")
if hettimes is not None and times is not None:
if not np.array_equal(hettimes, times):
raise ValueError(
"Supplied times and times in data file are not the same"
)
# generate TimeSeriesBase
new = super(HeterodynedData, cls).__new__(cls, dataarray, times=hettimes)
new.stds = None
if stds is not None:
# set pre-calculated data standard deviations
new.stds = stds
new._input_stds = True
else:
new._input_stds = False
new.detector = detector
new.window = window # set the window size
# remove outliers
new.outlier_mask = None
if remove_outliers:
new.remove_outliers(thresh=thresh)
# set the (minimum) time step and sampling frequency
try:
_ = new.dt
except AttributeError:
# times do not get set in a TimeSeries if steps are irregular, so
# manually set the time step to the minimum time difference
if len(new) > 1:
new.dt = np.min(np.diff(new.times))
else:
warnings.warn("Your data is only one data point long!")
new.dt = None
# don't recompute values on data that has been read in
if not isinstance(data, str) or remove_outliers:
# initialise the running median
_ = new.compute_running_median(N=new.window)
# calculate change points (and variances)
new.bayesian_blocks(
threshold=bbthreshold, minlength=bbminlength, maxlength=bbmaxlength
)
# set the parameter file
if par is not None:
# overwrite existing par file
new.par = par
else:
if not hasattr(new, "par"):
new.par = None
# set the frequency scale factor
new.freq_factor = freqfactor
# add noise, or create data containing noise
if fakeasd is not None:
new.add_noise(fakeasd, issigma=issigma, seed=fakeseed)
# set solar system ephemeris files if provided
new.set_ephemeris(ephemearth, ephemsun)
# set and add a simulated signal
new.injection = bool(inject)
if new.injection:
# inject the signal
if injpar is None:
new.inject_signal(injtimes=injtimes)
else:
new.inject_signal(injpar=injpar, injtimes=injtimes)
# add/update comments if given
if comments is not None:
if len(comments) > 0:
new.comments = comments
# add CWInPy version used for creation of data if not present
if not hasattr(new, "cwinpy_version"):
new.cwinpy_version = cwinpy.__version__
return new
@classmethod
def read(cls, source, *args, **kwargs):
"""
Read in a time series of data from a given file. Currently this only
supports ascii text files as described for the
:class:`~cwinpy.data.HeterodynedData` class.
See :meth:`gwpy.timeseries.TimeSeries.read` for more information.
"""
return read_multi(lambda x: x[0], cls, source, *args, **kwargs)
def write(self, target, *args, **kwargs):
"""
Write this :class:`~cwinpy.data.HeterodynedData` object to a file.
"""
if self._input_stds:
kwargs["includestds"] = True
return io_registry.write(self, target, *args, **kwargs)
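# A usage sketch (the file name is arbitrary; the format must be one
# registered with the gwpy I/O machinery, e.g. the ascii text or HDF5
# formats mentioned in the class docstring):
#   >>> het.write("heterodyned_data.txt")
#   >>> het2 = HeterodynedData.read("heterodyned_data.txt")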
@property
def dt(self):
try:
return self.dx
except AttributeError:
return self._dt
@dt.setter
def dt(self, dt):
"""
Overload the default setting of the time step in a TimeSeries, so that
it does not delete non-uniform time values.
"""
self._dt = dt
@property
def window(self):
"""The running median window length."""
return self._window
@window.setter
def window(self, window):
if isinstance(window, int):
if window < 2 and window != 0:
raise ValueError("Window length must be greater than 2")
else:
self._window = window
else:
raise TypeError("Window must be an integer")
@property
def comments(self):
"""Any comments on the data"""
return self._comments
@comments.setter
def comments(self, comment):
if comment is None:
self._comments = None
elif isinstance(comment, str):
self._comments = comment
else:
raise TypeError("Data comment should be a string")
@property
def data(self):
"""
A :class:`numpy.ndarray` containing the heterodyned data.
"""
if self.outlier_mask is not None:
return self.value[self.outlier_mask]
else:
return self.value
@property
def times(self):
if self.outlier_mask is not None:
return super(HeterodynedData, self).times[self.outlier_mask]
else:
return super(HeterodynedData, self).times
@property
def tottime(self):
"""
The total time (in seconds) of the data.
"""
return self.times[-1] - self.times[0]
@property
def par(self):
return self._par
@par.setter
def par(self, par):
self._par = self._parse_par(par)
@property
def injpar(self):
return self._injpar
@injpar.setter
def injpar(self, par):
self._injpar = self._parse_par(par)
def _parse_par(self, par):
"""
Parse a pulsar parameter file or :class:`lalpulsar.PulsarParametersPy`
object.
Parameters
----------
par: (str, lalpulsar.PulsarParametersPy)
A file or object containing a set of pulsar parameters.
Returns
-------
lalpulsar.PulsarParametersPy
"""
if par is not None:
from lalpulsar.PulsarParametersWrapper import PulsarParametersPy
if isinstance(par, PulsarParametersPy):
return par
elif isinstance(par, str):
if is_par_file(par):
newpar = PulsarParametersPy(par)
else:
raise IOError("Could not read in pulsar parameter file")
else:
raise TypeError("'par' is not a recognised type")
else:
newpar = None
return newpar
@property
def detector(self):
"""The name of the detector from which the data came."""
try:
return self.channel.ifo
except AttributeError:
return None
@property
def laldetector(self):
"""
The :class:`lal.Detector` containing the detector's response and
location.
"""
try:
return self._laldetector
except AttributeError:
return None
@detector.setter
def detector(self, detector):
if isinstance(detector, lal.Detector):
self.channel = Channel("{}:".format(detector.frDetector.prefix))
self._laldetector = detector
elif isinstance(detector, str):
self.channel = Channel("{}:".format(detector))
try:
self._laldetector = lalpulsar.GetSiteInfo(detector)
except RuntimeError:
raise ValueError("Could not set LAL detector!")
@property
def running_median(self):
"""A :class:`~numpy.ndarray` containing the running median of the data."""
return self._running_median
def compute_running_median(self, N=30):
"""
Calculate a running median from the data with the real and imaginary
parts separately. The running median will be calculated using a window
of samples of a given number. This does not account for any gaps in the
data, so could contain discontinuities.
Parameters
----------
N: int, 30
The window length of the running median. Defaults to 30 points. If
set to 0 the running median will be initialised as an array of
zeros.
Returns
-------
array_like
A :class:`numpy.ndarray` containing the running median of the data.
"""
if N < 2 and N != 0:
raise ValueError("The running median window must be greater than 1")
self._running_median = TimeSeriesBase(
np.zeros(len(self), dtype=np.complex), times=self.times
)
if N > 0:
for i in range(len(self)):
if i < N // 2:
startidx = 0
endidx = i + (N // 2) + 1
elif i > len(self) - N:
startidx = i - (N // 2) + 1
endidx = len(self)
else:
startidx = i - (N // 2) + 1
endidx = i + (N // 2) + 1
self._running_median[i] = np.median(
self.data.real[startidx:endidx]
) + 1j * np.median(self.data.imag[startidx:endidx])
return self.running_median
def subtract_running_median(self):
"""
Subtract the running median from the data.
Returns
-------
array_like
A :class:`~numpy.ndarray` containing the data with the
running median subtracted.
"""
return self.data - self.running_median.value
@property
def vars(self):
"""
The variances of the data points.
"""
try:
if self.outlier_mask is None:
return self._vars
else:
return self._vars[self.outlier_mask]
except (AttributeError, TypeError):
return None
@vars.setter
def vars(self, vars):
if vars is not None:
if isinstance(vars, float):
if vars <= 0.0:
raise ValueError("Variance cannot be negative")
tmpmsk = None
if self.outlier_mask is not None:
tmpmsk = np.copy(self.outlier_mask)
self._vars = vars * np.ones(len(self))
if tmpmsk is not None:
self.outlier_mask = tmpmsk # reset mask
else:
if len(vars) != len(self):
raise ValueError("Supplied variances are wrong length")
if self.outlier_mask is None:
self._vars = np.asarray(vars)
else:
tmpmsk = np.copy(self.outlier_mask)
self.outlier_mask = None
self._vars = np.zeros(len(self))
self._vars[tmpmsk] = vars
self.outlier_mask = tmpmsk # reset mask
else:
self._vars = None
@property
def stds(self):
"""
The standard deviations of the data points.
"""
try:
if self._vars is None:
return None
else:
return np.sqrt(self._vars)
except AttributeError:
return None
@stds.setter
def stds(self, stds):
if stds is not None:
self.vars = stds ** 2
else:
self.vars = None
def compute_variance(self, change_points=None, N=30):
"""
Compute the (sample) variance of the data within a set of change
points. The variance will be calculated after subtraction of a running
median. As the data is complex, we calculate the variance of a vector
in which the real and imaginary components are concatenated. This is
equivalent to a two-sided power spectral density.
Parameters
----------
change_points: array_like, None
An array of indices of statistical change points within the data
N: int, 30
The window size (in terms of data point number) of the running
median.
Returns
-------
array_like
A :class:`numpy.ndarray` of variances for each data point.
"""
if self.vars is not None:
if len(self.vars) == len(self):
return self.vars
# subtract running median from the data
datasub = self.subtract_running_median()
if change_points is None and len(self._change_point_indices_and_ratios) == 0:
# return the (sample) variance (hence 'ddof=1')
self.vars = np.full(
len(self), np.hstack((datasub.real, datasub.imag)).var(ddof=1)
)
else:
tmpvars = np.zeros(len(self))
if change_points is not None:
cps = np.concatenate(
([0], np.asarray(change_points), [len(datasub)])
).astype("int")
else:
if len(self.change_point_indices) == 1:
cps = np.array([0, len(datasub)], dtype=np.int)
else:
cps = np.concatenate(
(self.change_point_indices, [len(datasub)])
).astype("int")
if self.stds is None:
self.stds = np.zeros(len(self))
for i in range(len(cps) - 1):
if cps[i + 1] < 1 or cps[i + 1] > len(datasub):
raise ValueError("Change point index is out of bounds")
if cps[i + 1] <= cps[i]:
raise ValueError("Change point order is wrong")
datachunk = datasub[cps[i] : cps[i + 1]]
# get (sample) variance of chunk
tmpvars[cps[i] : cps[i + 1]] = np.hstack(
(datachunk.real, datachunk.imag)
).var(ddof=1)
self.vars = tmpvars
return self.vars
def inject_signal(self, injpar=None, injtimes=None):
"""
Inject a simulated signal into the data.
Parameters
----------
injpar: (str, lalpulsar.PulsarParametersPy)
A parameter file or object containing the parameters for the
simulated signal.
injtimes: list
A list of pairs of time values between which to inject the signal.
"""
# create the signal to inject
if injpar is None:
self.injpar = self.par
signal = self.make_signal()
else:
self.injpar = injpar
signal = self.make_signal(signalpar=self.injpar)
# set the times between which the injection will be added
self.injtimes = injtimes
# initialise the injection to zero
inj_data = TimeSeriesBase(
np.zeros_like(self.data), times=self.times, channel=self.channel
)
for timerange in self.injtimes:
timeidxs = np.arange(len(self))[
(self.times.value >= timerange[0]) & (self.times.value <= timerange[1])
]
inj_data[timeidxs] = signal[timeidxs]
# add injection to data
self += inj_data
# save injection data
self._inj_data = inj_data
# (re)compute the running median
_ = self.compute_running_median(N=self.window)
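# A sketch of creating data with an injected signal ("source.par" is a
# hypothetical pulsar parameter file; see the class docstring for the
# available keyword arguments):
#   >>> het = HeterodynedData(times=times, fakeasd=1e-24, detector="H1",
#   ...                       par="source.par", inject=True)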
@property
def injtimes(self):
"""
A list of times at which an injection was added to the data.
"""
return self._injtimes
@injtimes.setter
def injtimes(self, injtimes):
if injtimes is None:
# include all time
timelist = np.array([[self.times[0].value, self.times[-1].value]])
else:
timelist = injtimes
try:
timelist = np.atleast_2d(timelist)
except Exception as e:
raise ValueError("Could not parse list of injection times: {}".format(e))
for timerange in timelist:
if timerange[0] >= timerange[1]:
raise ValueError("Injection time ranges are incorrect")
self._injtimes = timelist
def set_ephemeris(self, earth=None, sun=None):
"""
Set the solar system ephemeris and time correction files.
Parameters
----------
earth: str, None
The Earth ephemeris file used for the phase model. Defaults to
None, in which case the ephemeris files will be determined from the
pulsar parameter file information.
sun: str, None
The Sun ephemeris file used for the phase model. Defaults to
None, in which case the ephemeris files will be determined from the
pulsar parameter file information.
"""
efiles = [earth, sun]
for ef in efiles:
if ef is None:
continue
if isinstance(ef, str):
if not os.path.isfile(ef):
raise IOError("Ephemeris file '{}' does not exist".format(ef))
else:
raise TypeError("Ephemeris file is not a string")
self.ephemearth = efiles[0]
self.ephemsun = efiles[1]
@property
def injection_data(self):
"""
The pure simulated signal that was added to the data.
"""
return self._inj_data.value
@property
def injection_snr(self):
"""
Return the optimal signal-to-noise ratio using the pure injected signal
and true noise calculated using:
.. math::
\\rho = \\sqrt{\\sum_i \\left(\\left[\\frac{\\Re{(s_i)}}{\\sigma_i}\\right]^2 +
\\left[\\frac{\\Im{(s_i)}}{\\sigma_i}\\right]^2\\right)}
where :math:`s` is the pure signal and :math:`\\sigma` is the
estimated noise standard deviation.
"""
if not self.injection:
return None
return np.sqrt(
((self.injection_data.real / self.stds) ** 2).sum()
+ ((self.injection_data.imag / self.stds) ** 2).sum()
)
def make_signal(self, signalpar=None):
"""
Make a signal at the data time stamps given a parameter file.
Note that the antenna response applied to the signal will be that after
averaging over the data time step (e.g., if a time step of 30 minutes
is used then the antenna response will be the average of +/- 15 minutes
around the timestamp). However, it assumes other variations are slower,
so it does not average out any differences in the phase evolution
between the heterodyne parameters and any injected parameters (if
specified as different) and just produces a point estimate at the data
timestamp.
Parameters
----------
signalpar: (str, lalpulsar.PulsarParametersPy)
A parameter file or object containing the parameters for the
simulated signal.
Returns
-------
array_like
A complex :class:`numpy.ndarray` containing the signal.
"""
if self.par is None:
raise ValueError(
"To perform an injection a parameter file must be supplied"
)
if self.detector is None:
raise ValueError("To perform an injection a detector must be supplied")
from lalpulsar.simulateHeterodynedCW import HeterodynedCWSimulator
# initialise the injection
het = HeterodynedCWSimulator(
self.par,
self.detector,
times=self.times,
earth_ephem=self.ephemearth,
sun_ephem=self.ephemsun,
)
# get the injection
if signalpar is None:
# use self.par for the injection parameters
signal = het.model(usephase=True, freqfactor=self.freq_factor)
else:
signal = het.model(
signalpar,
updateSSB=True,
updateBSB=True,
updateglphase=True,
updatefitwaves=True,
usephase=True,
freqfactor=self.freq_factor,
)
return TimeSeriesBase(signal, times=self.times, channel=self.channel)
def signal_snr(self, signalpar):
"""
Get the signal-to-noise ratio of a signal based on the supplied
parameter file.
Parameters
----------
signalpar: (str, lalpulsar.PulsarParametersPy)
A parameter file or object containing the parameters for the
simulated signal.
Returns
-------
float:
The signal-to-noise ratio.
"""
# generate the signal
signal = self.make_signal(signalpar=signalpar)
# get signal-to-noise ratio based on estimated data standard deviation
return np.sqrt(
((signal.real / self.stds) ** 2).sum()
+ ((signal.imag / self.stds) ** 2).sum()
).value
@property
def freq_factor(self):
"""
The scale factor of the source rotation frequency with which the data
was heterodyned.
"""
return self._freq_factor
@freq_factor.setter
def freq_factor(self, freqfactor):
if not isinstance(freqfactor, (float, int)):
raise TypeError("Frequency scale factor must be a number")
if freqfactor <= 0.0:
raise ValueError("Frequency scale factor must be a positive number")
self._freq_factor = float(freqfactor)
def add_noise(self, asd, issigma=False, seed=None):
"""
Add white Gaussian noise to the data based on a supplied one-sided
noise amplitude spectral density (in 1/sqrt(Hz)).
If generating noise from a given detector's design curve, a frequency
is required, which itself requires a pulsar parameter file to have been
supplied.
Parameters
----------
asd: (float, str)
The noise amplitude spectral density (1/sqrt(Hz)) at which to
generate the Gaussian noise, or a string containing a valid
detector name for which the design sensitivity ASD can be used, or
a file containing an amplitude spectral density frequency series.
issigma: bool, False
If `issigma` is ``True`` then the value passed to `asd` is assumed
to be a dimensionless time domain standard deviation for the noise
level rather than an amplitude spectral density.
seed: (int, :class:`numpy.random.RandomState`), None
A seed for the random number generator used to create the fake data
(see :meth:`numpy.random.seed` and :class:`numpy.random.RandomState`
for more information).
"""
if isinstance(asd, str):
import lalsimulation as lalsim
if self.par is None:
raise AttributeError(
"A source parameter file containing a frequency is required"
)
# check a frequency is available
freqs = self.par["F"]
if freqs is None:
raise ValueError(
"Heterodyne parameter file contains no " "frequency value"
)
# check if the str is a file or not
if os.path.isfile(asd):
# frequency series to contain the PSD
psdfs = lal.CreateREAL8FrequencySeries(
"",
lal.LIGOTimeGPS(1000000000), # dummy epoch
self.freq_factor * freqs[0], # frequency to find
0.1, # dummy delta f
lal.HertzUnit,
2, # need two points as last element is set to zero
)
# read PSD from ASD file
try:
_ = lalsim.SimNoisePSDFromFile(psdfs, psdfs.f0, asd)
except Exception as e:
raise RuntimeError("Problem getting ASD from file: {}".format(e))
# convert to ASD
asdval = np.sqrt(psdfs.data.data[0])
else:
# check is str is a detector alias
aliases = {
"AV": ["VIRGO", "V1", "ADV", "ADVANCEDVIRGO", "AV"],
"AL": [
"H1",
"L1",
"LHO",
"LLO",
"ALIGO",
"ADVANCEDLIGO",
"AL",
"AH1",
"AL1",
],
"IL": ["IH1", "IL1", "INITIALLIGO", "IL"],
"IV": ["iV1", "INITIALVIRGO", "IV"],
"G1": ["G1", "GEO", "GEOHF"],
"IG": ["IG", "GEO600", "INITIALGEO"],
"T1": ["T1", "TAMA", "TAMA300"],
"K1": ["K1", "KAGRA", "LCGT"],
}
# set mapping of detector names to lalsimulation PSD functions
simmap = {
"AV": lalsim.SimNoisePSDAdvVirgo, # advanced Virgo
"AL": PSDwrapper(
lalsim.SimNoisePSDaLIGOaLIGODesignSensitivityT1800044
), # aLIGO
"IL": lalsim.SimNoisePSDiLIGOSRD, # iLIGO
"IV": lalsim.SimNoisePSDVirgo, # iVirgo
"IG": lalsim.SimNoisePSDGEO, # GEO600
"G1": lalsim.SimNoisePSDGEOHF, # GEOHF
"T1": lalsim.SimNoisePSDTAMA, # TAMA
"K1": lalsim.SimNoisePSDKAGRA, # KAGRA
}
# set detector if not already set
if self.channel is None:
namemap = {
"H1": ["H1", "LHO", "IH1", "AH1"],
"L1": ["L1", "LLO", "IL1", "AL1"],
"V1": [
"V1",
"VIRGO",
"ADV",
"ADVANCEDVIRGO",
"AV",
"IV1",
"INITIALVIRGO",
"IV",
],
"G1": ["G1", "GEO", "GEOHF", "IG", "GEO600", "INITIALGEO"],
"T1": ["T1", "TAMA", "TAMA300"],
"K1": ["K1", "KAGRA", "LCGT"],
}
nameval = None
for dkey in namemap:
if asd.upper() in namemap[dkey]:
nameval = dkey
self.channel = Channel("{}:".format(dkey))
break
if nameval is None:
raise ValueError(
"Detector '{}' is not a known detector alias".format(asd)
)
# check if string is valid
detalias = None
for dkey in aliases:
if asd.upper() in aliases[dkey]:
detalias = dkey
break
if detalias is None:
raise ValueError(
"Detector '{}' is not as known detector alias".format(asd)
)
freqs = self.par["F"]
if freqs is None:
raise ValueError(
"Heterodyne parameter file contains no frequency value"
)
# set amplitude spectral density value
asdval = np.sqrt(simmap[detalias](self.freq_factor * freqs[0]))
# convert to time domain standard deviation
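# (the 0.5 / sqrt(dt) factor converts a one-sided amplitude spectral
# density into the standard deviation of each of the real and imaginary
# components of the complex heterodyned samples)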
if self.dt is None:
raise ValueError(
"No time step present. Does your data only consist of one value?"
)
sigmaval = 0.5 * asdval / np.sqrt(self.dt.value)
elif isinstance(asd, float):
if issigma:
sigmaval = asd
else:
if self.dt is None:
raise ValueError(
"No time step present. Does your data "
"only consist of one value?"
)
sigmaval = 0.5 * asd / np.sqrt(self.dt.value)
else:
raise TypeError("ASD must be a float or a string with a detector name.")
# set noise seed
if isinstance(seed, np.random.RandomState):
rstate = seed
else:
rstate = np.random.RandomState(seed)
# get noise for real and imaginary components
noise = TimeSeriesBase(
(
rstate.normal(loc=0.0, scale=sigmaval, size=len(self))
+ 1j * rstate.normal(loc=0.0, scale=sigmaval, size=len(self))
),
times=self.times,
)
self += noise
# (re)compute the running median
_ = self.compute_running_median(N=self.window)
# (re)compute change points (and variances)
self.bayesian_blocks()
# set noise based on provided value
self.stds = sigmaval
# standard deviations have been provided rather than calculated
self._input_stds = True
def bayesian_blocks(self, threshold="default", minlength=5, maxlength=np.inf):
"""
Apply a Bayesian-Block-style algorithm to cut the data (after
subtraction of a running median) up into chunks with different
statistical properties using the formalism described in Section 2.4 of
[1]_. Within each chunk the data should be well described by a single
Gaussian distribution with zero mean.
Splitting of the data relies on a threshold on the natural logarithm of
the odds comparing the hypothesis that the data is best described by
two different contiguous zero mean Gaussian distributions with
different unknown variances to the hypothesis that the data is
described by a single zero mean Gaussian with unknown variance. The
former hypothesis is a compound hypothesis consisting of the sum of
evidences for the split in the data at any point.
The ``'default'`` threshold for splitting is empirically derived in
[1]_ for the cases that the prior odds between the two hypotheses is
equal, and has a 1% false alarm probability for splitting data that is
actually drawn from a single zero mean Gaussian. The ``'trials'``
threshold comes from assigning equal priors to the single Gaussian
hypothesis and the full compound hypotheses that there is a split
(in the ``'default'`` threshold it is implicitly assumed that the single
Gaussian hypothesis and *each* numerator sub-hypothesis have equal
prior probability). This is essentially like a trials factor.
Alternatively, the `threshold` value can be any real number.
Parameters
----------
threshold: (str, float)
A string giving the method for determining the threshold for
splitting the data (described above), or a value of the threshold.
minlength: int
The minimum length that a chunk can be split into. Defaults to 5.
maxlength: int
The maximum length that a chunk can be split into. Defaults to inf.
References
----------
.. [1] <NAME>, <NAME>, <NAME> & <NAME>, `arXiv:1705.08978v1
<https://arxiv.org/abs/1705.08978v1>`_, 2017.
"""
# chop up the data (except if minlength is greater than the data length)
self._change_point_indices_and_ratios = []
if self.bbthreshold is None:
self.bbthreshold = threshold
if self.bbminlength is None:
self.bbminlength = minlength
if self.bbmaxlength is None:
self.bbmaxlength = maxlength
if self.bbminlength < len(self):
self._chop_data(self.subtract_running_median())
# sort the indices
self._change_point_indices_and_ratios = sorted(
self._change_point_indices_and_ratios
)
# if any chunks are longer than maxlength, then split them
if self.bbmaxlength < len(self):
insertcps = []
cppos = 0
for clength in self.chunk_lengths:
if clength > self.bbmaxlength:
insertcps.append((cppos + maxlength, 0))
cppos += clength
self._change_point_indices_and_ratios += insertcps
self._change_point_indices_and_ratios = sorted(
self._change_point_indices_and_ratios
)
# (re)calculate the variances for each chunk
if not self._input_stds:
_ = self.compute_variance(N=self.window)
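# A usage sketch (assuming ``het`` is an existing HeterodynedData object):
#   >>> het.bayesian_blocks(threshold="trials", minlength=10)
#   >>> het.change_point_indices  # start index of each stationary chunk
#   >>> het.chunk_lengths         # number of points in each chunk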
@property
def bbthreshold(self):
"""
The threshold method/value for cutting the data in the Bayesian Blocks
algorithm.
"""
try:
return self._bbthreshold
except AttributeError:
return None
@bbthreshold.setter
def bbthreshold(self, thresh):
if isinstance(thresh, str):
if thresh.lower() not in ["default", "trials"]:
raise ValueError("Threshold '{}' is not a valid type".format(thresh))
elif not isinstance(thresh, float) and thresh is not None:
raise ValueError("Threshold '{}' is not a valid type".format(thresh))
self._bbthreshold = thresh
@property
def bbminlength(self):
"""
The minimum length of a chunk that the data can be split into by
the Bayesian Blocks algorithm.
"""
try:
return self._bbminlength
except AttributeError:
return None
@bbminlength.setter
def bbminlength(self, minlength):
if minlength is None:
self._bbminlength = None
return
if not isinstance(minlength, int) and not np.isinf(minlength):
raise TypeError("Minimum chunk length must be an integer")
if not np.isinf(minlength):
if minlength < 1:
raise ValueError("Minimum chunk length must be a positive integer")
self._bbminlength = minlength
@property
def bbmaxlength(self):
"""
The maximum length of a data chunk.
"""
try:
return self._bbmaxlength
except AttributeError:
return None
@bbmaxlength.setter
def bbmaxlength(self, maxlength):
if maxlength is None:
self._bbmaxlength = None
return
if maxlength < self.bbminlength:
raise ValueError(
"Maximum chunk length must be greater than the minimum chunk length."
)
self._bbmaxlength = maxlength
@property
def change_point_indices(self):
"""
Return a list of indices of statistical change points in the data.
"""
if len(self._change_point_indices_and_ratios) == 0:
return [0]
else:
return [0] + [cps[0] for cps in self._change_point_indices_and_ratios]
@property
def change_point_ratios(self):
"""
Return a list of the log marginal likelihood ratios for the statistical
change points in the data.
"""
if len(self._change_point_indices_and_ratios) == 0:
return [-np.inf]
else:
return [-np.inf] + [cps[1] for cps in self._change_point_indices_and_ratios]
@property
def chunk_lengths(self):
"""
A list with the lengths of the chunks into which the data has been
split.
"""
if len(self._change_point_indices_and_ratios) == 0:
return [len(self)]
else:
return np.diff(np.concatenate((self.change_point_indices, [len(self)])))
@property
def num_chunks(self):
"""
The number of chunks into which the data has been split.
"""
if len(self.change_point_indices) == 0:
return 1
else:
return len(self.change_point_indices)
def _chop_data(self, data, startidx=0):
# find change point (don't split if data is zero)
if np.all(self.subtract_running_median() == (0.0 + 0 * 1j)):
lratio, cpidx, ntrials = (-np.inf, 0, 1)
else:
lratio, cpidx, ntrials = self._find_change_point(data, self.bbminlength)
# set the threshold
if isinstance(self.bbthreshold, float):
thresh = self.bbthreshold
elif self.bbthreshold.lower() == "default":
# default threshold for data splitting
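# (empirically tuned to give roughly a 1% false alarm probability for
# splitting data that is actually drawn from a single zero mean Gaussian;
# see the bayesian_blocks docstring)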
thresh = 4.07 + 1.33 * np.log10(len(data))
elif self.bbthreshold.lower() == "trials":
# assign equal prior probability for each hypothesis
thresh = np.log(ntrials)
else:
raise ValueError("threshold is not recognised")
if lratio > thresh:
# split the data at the change point
self._change_point_indices_and_ratios.append((cpidx + startidx, lratio))
# split the data and check for another change point
chunk1 = data[0:cpidx]
chunk2 = data[cpidx:]
self._chop_data(chunk1, startidx=startidx)
self._chop_data(chunk2, startidx=(cpidx + startidx))
@staticmethod
@jit(nopython=True)
def _find_change_point(subdata, minlength):
"""
Find the change point in the data, i.e., the "most likely" point at
which the data could be split to be described by two independent
zero mean Gaussian distributions. This also finds the evidence ratio
for the data being described by any two independent zero mean Gaussian
distributions compared to being described by only a single zero mean
Gaussian.
Parameters
----------
subdata: array_like
A complex array containing a chunk of data.
minlength: int
The minimum length of a chunk.
Returns
-------
tuple:
A tuple containing the maximum log Bayes factor, the index of the
change point (i.e. the "best" point at which to split the data into
two independent Gaussian distributions), and the number of
denominator sub-hypotheses.
"""
if len(subdata) < 2 * minlength:
return (-np.inf, 0, 1)
dlen = len(subdata)
datasum = (np.abs(subdata) ** 2).sum()
# calculate the evidence that the data is drawn from a zero mean
# Gaussian with a single unknown standard deviation
logsingle = (
-lal.LN2 - dlen * lal.LNPI + logfactorial(dlen - 1) - dlen * np.log(datasum)
)
lsum = dlen - 2 * minlength + 1
logtot = -np.inf
logdouble = np.zeros(lsum)
sumforwards = (np.abs(subdata[:minlength]) ** 2).sum()
sumbackwards = (np.abs(subdata[minlength:]) ** 2).sum()
# go through each possible splitting of the data in two
for i in range(lsum):
if np.all(subdata[: minlength + i] == (0.0 + 0 * 1j)) or np.all(
subdata[minlength + i :] == (0.0 + 0 * 1j)
):
# do this to avoid warnings about np.log(0.0)
logdouble[i] = -np.inf
else:
dlenf = minlength + i
dlenb = dlen - (minlength + i)
logf = (
-lal.LN2
- dlenf * lal.LNPI
+ logfactorial(dlenf - 1)
- dlenf * np.log(sumforwards)
)
logb = (
-lal.LN2
- dlenb * lal.LNPI
+ logfactorial(dlenb - 1)
- dlenb * np.log(sumbackwards)
)
# evidence for that split
logdouble[i] = logf + logb
adval = np.abs(subdata[minlength + i]) ** 2
sumforwards += adval
sumbackwards -= adval
# evidence for *any* split
logtot = np.logaddexp(logtot, logdouble[i])
# change point (maximum of the split evidences)
cp = logdouble.argmax() + minlength
# ratio of any change point compared to no splits
logratio = logtot - logsingle
return (logratio, cp, lsum)
def find_outliers(self, thresh=3.5):
"""
Find any "outliers" in the data and return a boolean array flagging them. This is a
modified version of the median-absolute-deviation (MAD) function from
[1]_, using the algorithm of [2]_.
Parameters
----------
thresh: float, 3.5
The modified z-score to use as a threshold. Real or imaginary data
with a modified z-score (based on the median absolute deviation)
greater than this value will be classified as outliers.
Returns
-------
array_like:
A boolean :class:`numpy.ndarray` that is ``True`` for values that
are outliers.
References
----------
.. [1] https://github.com/joferkington/oost_paper_code/blob/master/utilities.py and
https://stackoverflow.com/a/22357811/1862861
.. [2] <NAME> and <NAME> (1993), `"Volume 16: How to Detect and
Handle Outliers"
<https://hwbdocuments.env.nm.gov/Los%20Alamos%20National%20Labs/TA%2054/11587.pdf>`_,
The ASQC Basic References in Quality Control:
Statistical Techniques, <NAME>, Ph.D., Editor.
"""
if not isinstance(thresh, float):
raise TypeError("Threshold must be a float")
else:
if thresh <= 0.0:
raise ValueError("Threshold must be a positive number")
modzscore = []
# reset mask to show all points
self._outlier_mask = None
for points in [self.data.real, self.data.imag]:
median = np.median(points)
diff = np.abs(
points - median
) # only 1d data, so different from https://stackoverflow.com/a/22357811/1862861
mad = np.median(diff)
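# 0.6745 is the 0.75 quantile of the standard normal distribution; since
# MAD is approximately 0.6745 * sigma for Gaussian data, this converts the
# MAD-scaled deviation into an approximate z-score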
modzscore.append(0.6745 * diff / mad)
# return boolean array of real or imaginary indices above the threshold
return (modzscore[0] > thresh) | (modzscore[1] > thresh)
def _not_outliers(self, thresh):
"""
Get an array of the indices of points that are not outliers, as identified
by :meth:`cwinpy.data.HeterodynedData.find_outliers`.
"""
oidx = ~self.find_outliers(thresh=thresh)
return np.arange(len(self))[oidx]
def remove_outliers(self, thresh=3.5):
"""
Remove any outliers from the object using the method described in
:meth:`cwinpy.data.HeterodynedData.find_outliers`.
Parameters
----------
thresh: float
"""
if self.outlier_thresh is None:
self.outlier_thresh = thresh
idx = ~self.find_outliers(thresh=self.outlier_thresh)
if not np.all(idx):
self.outliers_removed = True
self.remove(idx)
def remove(self, idx):
"""
Create a mask to effectively remove values at given indices from
the object. This will recalculate the Bayesian Blocks data splitting
and variances if required.
Parameters
----------
idx: int, array_like
A list of indices to remove, or a boolean array that is ``True`` for
the values to be kept.
"""
try:
self.outlier_mask = idx
if self.outlier_mask is None:
return
# recalculate running median, Bayesian Blocks and variances
_ = self.compute_running_median(N=self.window)
self.bayesian_blocks()
except Exception as e:
raise RuntimeError("Problem removing elements from data:\n{}".format(e))
@property
def outlier_mask(self):
"""
Masking array to remove outliers.
"""
try:
return self._outlier_mask
except AttributeError:
return None
@outlier_mask.setter
def outlier_mask(self, mask):
self._outlier_mask = None # reset mask
if mask is None:
return
idx = np.asarray(mask)
if idx.dtype == np.int:
zidx = np.ones(len(self), dtype=np.bool)
zidx[idx] = False
elif idx.dtype == np.bool:
if len(idx) != len(self):
raise ValueError("Outlier mask is the wrong size")
else:
zidx = idx
else:
raise TypeError("Outlier mask is the wrong type")
if np.all(zidx):
self._outlier_mask = None
else:
self._outlier_mask = zidx
@property
def outlier_thresh(self):
"""
The modified z-score threshold for removing outliers (see
:meth:`~cwinpy.data.HeterodynedData.find_outliers`).
"""
try:
thresh = self._outlier_thresh
except AttributeError:
thresh = None
return thresh
@outlier_thresh.setter
def outlier_thresh(self, thresh):
if not isinstance(thresh, (float, int)) and thresh is not None:
raise TypeError("Outlier threshold must be a number")
self._outlier_thresh = thresh
@property
def outliers_removed(self):
"""
Return a boolean stating whether outliers have been removed from the
data set or not.
"""
try:
rem = self._outliers_removed
except AttributeError:
rem = False
return rem
@outliers_removed.setter
def outliers_removed(self, rem):
try:
self._outliers_removed = bool(rem)
except Exception as e:
raise TypeError("Value must be boolean: {}".format(e))
def plot(
self,
which="abs",
figsize=(12, 4),
ax=None,
remove_outliers=False,
thresh=3.5,
zero_time=True,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
**plotkwargs,
):
"""
Plot the data time series.
Parameters
----------
which: str, 'abs'
Say whether to plot the absolute value of the data, ``'abs'``, the
``'real'`` component of the data, the ``'imag'`` component of
the data, or ``'both'`` the real and imaginary components.
figsize: tuple, (12, 4)
A tuple with the size of the figure. Values set in `rcparams` will
override this value.
ax: Axes
A :class:`matplotlib.axes.Axes` onto which to add the figure.
remove_outliers: bool, False
Set whether to remove outlier for the plot.
thresh: float, 3.5
The threshold for outlier removal (see
:meth:`~cwinpy.data.HeterodynedData.find_outliers`).
zero_time: bool, True
Start the time axis at zero.
labelsize: int
Set the fontsize for the axes tick labels.
fontsize: int
Set the fontsize for the axes labels.
legendsize: int
Set the fontsize for the legend (defaults to be the same as the
            value of `fontsize`).
fontname: str
Set the font name for the axes labels and legend.
labelname: str
Set the font name for the axes tick labels. If not set, this will
default to the value given in `fontname`.
plotkwargs:
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
figure:
The :class:`matplotlib.figure.Figure` containing the plot.
Examples
--------
        To plot both the real and imaginary data one would do:
>>> import numpy as np
>>> from cwinpy import HeterodynedData
>>> # create some fake data (as an example)
>>> times = np.linspace(1000000000., 1000086340., 1440)
        >>> het = HeterodynedData(times=times, fakeasd=1e-48)
>>> # plot real data
>>> fig = het.plot(which='both')
"""
if remove_outliers and self.outlier_mask is None:
idx = self._not_outliers(thresh=thresh)
else:
idx = np.arange(len(self))
# set some default plotting styles
if "ls" not in plotkwargs:
# set the line style to "None"
plotkwargs["ls"] = "None"
if "marker" not in plotkwargs:
# set marker to a circle
plotkwargs["marker"] = "o"
# set the data to use
if which.lower() in ["abs", "absolute"]:
if "ylabel" not in plotkwargs:
plotkwargs["ylabel"] = "$|B_k|$"
plot = super(HeterodynedData, self.take(idx).abs()).plot(**plotkwargs)
elif which.lower() in ["real", "re"]:
if "ylabel" not in plotkwargs:
plotkwargs["ylabel"] = "$\\Re{(B_k)}$"
plot = super(HeterodynedData, self.take(idx).real).plot(**plotkwargs)
elif which.lower() in ["im", "imag", "imaginary"]:
if "ylabel" not in plotkwargs:
plotkwargs["ylabel"] = "$\\Im{(B_k)}$"
plot = super(HeterodynedData, self.take(idx).imag).plot(**plotkwargs)
elif which.lower() == "both":
from gwpy.timeseries import TimeSeriesDict
pldata = TimeSeriesDict()
pldata["Real"] = self.take(idx).real
pldata["Imag"] = self.take(idx).imag
if "ylabel" not in plotkwargs:
plotkwargs["ylabel"] = "$B_k$"
plot = pldata.plot(**plotkwargs)
plot.gca().legend(loc="upper right", numpoints=1)
else:
raise ValueError("'which' must be 'abs', 'real', 'imag' or 'both")
return plot
def spectrogram(
self,
dt=86400,
window=None,
overlap=0.5,
plot=True,
ax=None,
remove_outliers=False,
thresh=3.5,
fraction_labels=True,
fraction_label_num=4,
figsize=(12, 4),
labelsize=None,
fontsize=None,
fontname=None,
labelname=None,
legendsize=None,
**plotkwargs,
):
"""
Compute and plot a spectrogram from the data using the
:func:`matplotlib.mlab.specgram` function.
Parameters
----------
dt: (float, int)
The length of time (in seconds) for each spectrogram time bin.
The default is 86400 seconds (i.e., one day).
window: (callable, np.ndarray)
The window to apply to each FFT block. Default is to use
:func:`scipy.signal.tukey` with the `alpha` parameter set to 0.1.
overlap: (float, int)
If a floating point number between [0, 1) this gives the fractional
overlap between adjacent FFT blocks (which defaults to 0.5, i.e., a
50% overlap). If an integer of 1 or more this is the number of
points to overlap between adjacent FFT blocks (this is how the
argument is used in :func:`~matplotlib.mlab.specgram`).
plot: bool, True
By default a plot of the spectrogram will be produced (this can be
plotted on a supplied :class:`~matplotlib.axes.Axes` or
:class:`~matplotlib.figure.Figure`), but the plotting can be turned
off if this is set to ``False``.
ax: (axes, figure)
If `ax` is a :class:`matplotlib.axes.Axes` then the spectrogram
will be plotted on the supplied axis.
remove_outliers: bool, False
Set to ``True`` to remove outliers points before generating the
spectrogram. This is not required if the class was created with
the `remove_outliers` keyword already set to ``True``.
thresh: float, 3.5
The modified z-score threshold for outlier removal (see
:meth:`~cwinpy.data.HeterodynedData.find_outliers`).
fraction_labels: bool, True
Set to ``True`` to output the frequency labels on the plot as
fractions.
fraction_label_num: int, 4
The fraction labels will be spaced at `Fs`/`fraction_label_num`
intervals, between the upper and lower Nyquist values. The default
            is 4, i.e., spacing will be at a quarter of the Nyquist frequency.
figsize: tuple, (12, 4)
A tuple containing the size (in inches) to set for the figure.
labelsize: int
Set the fontsize for the axes tick labels.
fontsize: int
Set the fontsize for the axes labels.
legendsize: int
Set the fontsize for the legend (defaults to be the same as the
            value of `fontsize`).
fontname: str
Set the font name for the axes labels and legend.
labelname: str
Set the font name for the axes tick labels. If not set, this will
default to the value given in `fontname`.
plotkwargs:
Keyword arguments for :func:`matplotlib.pyplot.imshow`.
Returns
-------
array_like:
A :class:`numpy.ndarray` of frequencies for the spectrogram
array_like:
A 2d :class:`numpy.ndarray` of the spectrogram power at each
frequency and time
array_like:
A :class:`numpy.ndarray` of the central times of each FFT in the
spectrogram.
figure:
The :class:`~matplotlib.figure.Figure` containing the spectrogram
plot. This is not returned if `plot` is set to ``False``.
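        Examples
        --------
        A minimal usage sketch (creating fake data as in the example for
        :meth:`~cwinpy.data.HeterodynedData.plot`):
        >>> import numpy as np
        >>> from cwinpy import HeterodynedData
        >>> times = np.linspace(1000000000., 1000086340., 1440)
        >>> het = HeterodynedData(times=times, fakeasd=1e-48)
        >>> freqs, power, stimes, fig = het.spectrogram(dt=3600)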
"""
speckwargs = {}
speckwargs["dt"] = dt
speckwargs["window"] = window
speckwargs["overlap"] = overlap
speckwargs["remove_outliers"] = remove_outliers
speckwargs["thresh"] = thresh
speckwargs["fraction_labels"] = fraction_labels
speckwargs["fraction_label_num"] = fraction_label_num
return self._plot_power(
"spectrogram",
speckwargs,
figsize=figsize,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
legendsize=legendsize,
**plotkwargs,
)
def periodogram(
self,
plot=True,
ax=None,
remove_outliers=False,
thresh=3.5,
fraction_labels=True,
fraction_label_num=4,
figsize=(6, 5),
labelsize=None,
labelname=None,
fontsize=None,
fontname=None,
legendsize=None,
**plotkwargs,
):
"""
Compute and plot a two-sided periodogram of the data using
        :func:`scipy.signal.periodogram`. Note that this uses zero-padded,
        uniformly sampled data rather than the Lomb-Scargle method (e.g.,
        :class:`astropy.stats.LombScargle`), which can deal with gappy data
        but does not work for complex data.
See :meth:`~cwinpy.data.HeterodynedData.spectrogram` for input
parameters, excluding `dt`, `window` and `overlap`. The default figure
size is (6, 5).
Parameters
----------
plotkwargs:
Keyword parameters for :func:`matplotlib.pyplot.plot`.
Returns
-------
array_like:
The frequency series
array_like:
The periodogram power
figure:
            The :class:`~matplotlib.figure.Figure`, if a plot is requested.
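        Examples
        --------
        A sketch, assuming ``het`` is an existing
        :class:`~cwinpy.data.HeterodynedData` object (see the example in
        :meth:`~cwinpy.data.HeterodynedData.plot`); to get the periodogram
        values without producing a plot:
        >>> freqs, power = het.periodogram(plot=False)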
"""
speckwargs = {}
speckwargs["plot"] = plot
speckwargs["ax"] = ax
speckwargs["remove_outliers"] = remove_outliers
speckwargs["thresh"] = thresh
speckwargs["fraction_labels"] = fraction_labels
speckwargs["fraction_label_num"] = fraction_label_num
return self._plot_power(
"periodogram",
speckwargs,
figsize=figsize,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
legendsize=legendsize,
**plotkwargs,
)
def power_spectrum(
self,
plot=True,
ax=None,
remove_outliers=False,
thresh=3.5,
fraction_labels=True,
fraction_label_num=4,
average="median",
dt=86400,
figsize=(6, 5),
labelsize=None,
labelname=None,
fontsize=None,
fontname=None,
legendsize=None,
window=None,
overlap=0.5,
**plotkwargs,
):
"""
        Compute and plot the power spectrum of the data. This computes the
        spectrogram and averages the power over time.
See :meth:`~cwinpy.data.HeterodynedData.spectrogram` for input
parameters. The default figure size is (6, 5).
Parameters
----------
average: str, 'median'
The method by which to "average" the spectrum in time. This can be
'median' (the default) or 'mean'.
plotkwargs:
Keyword parameters for :func:`matplotlib.pyplot.plot`.
Returns
-------
array_like:
The frequency series
array_like:
The power spectrum
figure:
            The :class:`~matplotlib.figure.Figure`, if a plot is requested.
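        Examples
        --------
        A sketch, assuming ``het`` is an existing
        :class:`~cwinpy.data.HeterodynedData` object; to use a mean rather
        than a median average over time:
        >>> freqs, power, fig = het.power_spectrum(average="mean")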
"""
speckwargs = {}
speckwargs["plot"] = plot
speckwargs["ax"] = ax
speckwargs["remove_outliers"] = remove_outliers
speckwargs["thresh"] = thresh
speckwargs["fraction_labels"] = fraction_labels
speckwargs["fraction_label_num"] = fraction_label_num
speckwargs["dt"] = dt
speckwargs["average"] = average
speckwargs["window"] = window
speckwargs["overlap"] = overlap
return self._plot_power(
"power",
speckwargs,
figsize=figsize,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
legendsize=legendsize,
**plotkwargs,
)
def _plot_power(
self,
ptype,
speckwargs={},
figsize=None,
labelsize=None,
labelname=None,
fontsize=None,
fontname=None,
legendsize=None,
**plotkwargs,
):
"""
General function for plotting the
:meth:`~cwinpy.data.HeterodynedData.spectrogram`,
:meth:`~cwinpy.data.HeterodynedData.power_spectrum` or
:meth:`~cwinpy.data.HeterodynedData.periodogram`.
Parameters
----------
ptype: str
A string with 'spectrogram' for
:meth:`~cwinpy.data.HeterodynedData.spectrogram`, 'periodogram' for
:meth:`~cwinpy.data.HeterodynedData.periodogram`, or 'power' for
:meth:`~cwinpy.data.HeterodynedData.power_spectrum`.
speckwargs: dict
A dictionary of spectrum generation keyword arguments.
figsize: tuple
The size (in inches) of the created figure.
plotkwargs:
Additional plotting keyword arguments.
"""
if not isinstance(ptype, str):
raise TypeError("Power spectrum type must be a string")
if ptype not in ["spectrogram", "periodogram", "power"]:
raise ValueError("Type must be 'spectrogram', 'periodogram', or " "'power'")
# set plotting defaults
if labelsize is None:
labelsize = self.PLOTTING_DEFAULTS["labelsize"]
if labelname is None:
labelname = self.PLOTTING_DEFAULTS["labelname"]
if fontsize is None:
fontsize = self.PLOTTING_DEFAULTS["fontsize"]
if fontname is None:
fontname = self.PLOTTING_DEFAULTS["fontname"]
if legendsize is None:
legendsize = fontsize
# get some options
remove_outliers = speckwargs.get("remove_outliers", False)
thresh = speckwargs.get("thresh", 3.5)
plot = speckwargs.get("plot", True)
ax = speckwargs.get("ax", None)
# get the zero padded data
padded = self._zero_pad(remove_outliers=remove_outliers, thresh=thresh)
if self.outlier_mask is None and remove_outliers:
idx = self._not_outliers(thresh=thresh)
times = self.times[idx].value
tottime = times[-1] - times[0]
else:
times = self.times.value
tottime = self.tottime.value
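        # the effective sample rate is defined by the greatest common divisor
        # of the time steps, so that unevenly sampled data can be treated on a
        # uniform grid (see _zero_pad)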
Fs = 1.0 / gcd_array(np.diff(times)) # sampling frequency
if ptype in ["spectrogram", "power"]:
dt = speckwargs.get("dt", 86400)
overlap = speckwargs.get("overlap", 0.5)
window = speckwargs.get("window", None)
if not isinstance(dt, (float, int)):
raise ValueError("Time bin must be an integer or float")
if dt < 1.0 / Fs or dt > (tottime + (1.0 / Fs)):
raise ValueError("The time bin selected is invalid")
# set the number of samples for each FFT block
nfft = int(dt * Fs)
if isinstance(overlap, float):
if overlap >= 0.0 and overlap < 1.0:
noverlap = int(overlap * nfft)
else:
raise ValueError("Overlap must be a float between 0 and 1")
elif isinstance(overlap, int):
if overlap >= 0 and overlap <= len(self) - 1:
noverlap = overlap
else:
raise ValueError("Overlap is out of allowed range")
else:
raise TypeError("Overlap must be an integer or float")
if window is None:
from scipy.signal import tukey
window = tukey(nfft, alpha=0.1)
# generate spectrogram
try:
from matplotlib.mlab import specgram
power, frequencies, stimes = specgram(
padded, Fs=Fs, window=window, NFFT=nfft, noverlap=noverlap
)
except Exception as e:
raise RuntimeError("Problem creating spectrogram: {}".format(e))
if ptype == "power":
# average the spectrogram for a power spectrum
average = speckwargs.get("average", "median")
if average not in ["median", "mean"]:
raise ValueError("Average method must be 'median' or 'mean'")
if average == "median":
power = np.median(power, axis=-1)
else:
power = np.mean(power, axis=-1)
else:
# perform periodogram
try:
from scipy.signal import periodogram
frequencies, power = periodogram(
padded, fs=Fs, return_onesided=False, detrend=lambda x: x
)
# sort results in frequency
frequencies, power = np.array(sorted(zip(frequencies, power))).T
except Exception as e:
raise RuntimeError("Problem creating periodogram: {}".format(e))
if ax is None and not plot:
if ptype == "spectrogram":
return frequencies, power, stimes
else:
return frequencies, power
# perform plotting
try:
from matplotlib import pyplot as pl
from matplotlib.axes import Axes
fraction_labels = speckwargs.get("fraction_labels", True)
fraction_label_num = speckwargs.get("fraction_label_num", 4)
# set whether to output frequency labels as fractions
if fraction_labels:
# set at quarters of the sample frequency
if not isinstance(fraction_label_num, int):
raise TypeError("'fraction_label_num' must be an integer")
if fraction_label_num < 1:
raise ValueError("'fraction_label_num' must be positive")
df = Fs / fraction_label_num
ticks = np.linspace(-2 / Fs, 2 / Fs, int(Fs / df) + 1)
labels = []
for tick in ticks:
if tick == 0.0:
labels.append("$0$")
else:
# set the fraction label
sign = "-" if tick < 0.0 else ""
label = "${0}^{{{1}}}\u2044_{{{2}}}$".format(
sign, 1, int(np.abs(tick))
)
labels.append(label)
if ptype != "spectrogram":
ticks = np.linspace(-Fs / 2, Fs / 2, int(Fs / df) + 1)
if ptype == "spectrogram":
from matplotlib import colors
# set plotting keyword arguments
if "cmap" not in plotkwargs:
if self.detector is not None:
if self.detector in self.colmapdic:
plotkwargs["cmap"] = self.colmapdic[self.detector]
# extents of the plot
if "extent" not in plotkwargs:
plotkwargs["extent"] = [0, tottime, -2 / Fs, 2 / Fs]
if "aspect" not in plotkwargs:
plotkwargs["aspect"] = "auto"
if "norm" not in plotkwargs:
plotkwargs["norm"] = colors.Normalize()
if isinstance(ax, Axes):
fig = ax.get_figure()
thisax = ax
else:
fig, thisax = pl.subplots(figsize=figsize)
thisax.imshow(np.sqrt(np.flipud(power)), **plotkwargs)
if self.detector is not None:
from matplotlib.offsetbox import AnchoredText
legend = AnchoredText(self.detector, loc=1)
thisax.add_artist(legend)
thisax.set_xlabel(
"GPS - {}".format(int(times[0])),
fontname=fontname,
fontsize=fontsize,
)
thisax.set_ylabel(
"Frequency (Hz)", fontname=fontname, fontsize=fontsize
)
if fraction_labels:
thisax.set_yticks(ticks)
thisax.set_yticklabels(labels)
# set axes to use scientific notation
thisax.ticklabel_format(
axis="x", style="sci", scilimits=(0, 5), useMathText=True
)
else:
# set plot color
if self.detector is not None:
if "color" not in plotkwargs:
if self.detector in GW_OBSERVATORY_COLORS:
plotkwargs["color"] = GW_OBSERVATORY_COLORS[self.detector]
if "label" not in plotkwargs:
plotkwargs["label"] = self.detector
if isinstance(ax, Axes):
fig = ax.get_figure()
thisax = ax
else:
fig, thisax = pl.subplots(figsize=figsize)
thisax.plot(frequencies, power, **plotkwargs)
if self.detector is not None:
from matplotlib.font_manager import FontProperties
legfont = FontProperties(family=fontname, size=legendsize)
thisax.legend(prop=legfont)
thisax.set_ylabel("Power", fontname=fontname, fontsize=fontsize)
thisax.set_xlabel(
"Frequency (Hz)", fontname=fontname, fontsize=fontsize
)
thisax.set_xlim([-Fs / 2, Fs / 2])
if fraction_labels:
thisax.set_xticks(ticks)
thisax.set_xticklabels(labels)
# set axes to use scientific notation
thisax.ticklabel_format(axis="y", style="sci", useMathText=True)
# set tick font name
for tick in thisax.get_xticklabels() + thisax.get_yticklabels():
tick.set_fontname(labelname)
# set the axes tick label size
thisax.tick_params(which="both", labelsize=labelsize)
# add a grid
thisax.grid(True, linewidth=0.5, linestyle="--")
        except Exception as e:
            raise RuntimeError("Problem creating {} plot: {}".format(ptype, e))
fig.tight_layout()
if ptype == "spectrogram":
return frequencies, power, stimes, fig
else:
return frequencies, power, fig
def _zero_pad(self, remove_outliers=False, thresh=3.5):
"""
If required zero pad the data to return an evenly sampled dataset for
use in generating a power spectrum.
Parameters
----------
remove_outliers: bool, False
If ``True`` remove outliers before zero padding (nothing is done
if outliers have already been removed).
thresh: float, 3.5
The modified z-score threshold for outlier removal.
Returns
-------
:class:`numpy.ndarray`:
An array of the data padded with zeros.
"""
if self.outlier_mask is None and remove_outliers:
idx = self._not_outliers(thresh=thresh)
times = self.times.value[idx]
data = self.data[idx]
else:
times = self.times.value
data = self.data
# check diff of times
if len(times) < 2:
raise ValueError("There must be at least two samples!")
dts = np.diff(times).astype(
np.float32
) # convert to float32 due to precision errors
if np.all(dts == self.dt.value):
# no zero padding required as data is evenly sampled
return data
# get the greatest common divisor of the deltaTs
gcd = gcd_array(dts)
# get the "new" padded time stamps
tottime = times[-1] - times[0]
newtimes = np.linspace(times[0], times[-1], 1 + int(tottime) // gcd)
        # get indices of original times in the new times
tidxs = np.where(np.in1d(newtimes, times))[0]
# get zero array and add data
        padded = np.zeros(len(newtimes), dtype=complex)
padded[tidxs] = data
return padded
@property
def include_ssb(self):
"""
A boolean stating whether the heterodyne included Solar System
barycentring.
"""
try:
return self._include_ssb
except AttributeError:
return False
@include_ssb.setter
def include_ssb(self, incl):
self._include_ssb = bool(incl)
@property
def include_bsb(self):
"""
A boolean stating whether the heterodyne included Binary System
barycentring.
"""
try:
return self._include_bsb
except AttributeError:
return False
@include_bsb.setter
def include_bsb(self, incl):
self._include_bsb = bool(incl)
@property
def include_glitch(self):
"""
A boolean stating whether the heterodyne included corrections for any
glitch phase evolution.
"""
try:
return self._include_glitch
except AttributeError:
return False
@include_glitch.setter
def include_glitch(self, incl):
self._include_glitch = bool(incl)
@property
def include_fitwaves(self):
"""
A boolean stating whether the heterodyne included corrections for any
red noise FITWAVES parameters.
"""
try:
return self._include_fitwaves
except AttributeError:
return False
@include_fitwaves.setter
def include_fitwaves(self, incl):
self._include_fitwaves = bool(incl)
def as_timeseries(self):
"""
Return the data as a :class:`gwpy.timeseries.TimeSeries`.
"""
return TimeSeries(self.data, times=self.times, channel=self.channel)
@property
def filter_history(self):
"""
An array with the "history" of any filters used during the
heterodyning.
"""
try:
return self._filter_history
except AttributeError:
return None
@filter_history.setter
def filter_history(self, history):
self._filter_history = np.array(history)
def heterodyne(self, phase, stride=1, singlesided=False, datasegments=None):
"""
        Heterodyne the data (see :meth:`gwpy.timeseries.TimeSeries.heterodyne`
        for details). Unlike :meth:`gwpy.timeseries.TimeSeries.heterodyne` this
will heterodyne unevenly sampled data, although contiguous segments of
data will be truncated if they do not contain an integer number of
strides. Additional parameters are given below:
Parameters
----------
datasegments: list
A list of pairs of times within which to include data. Data outside
these times will be removed.
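        Examples
        --------
        A minimal sketch, assuming ``het`` is an existing
        :class:`~cwinpy.data.HeterodynedData` object and ``phase`` is an array
        of the phase evolution evaluated at ``het.times``:
        >>> newhet = het.heterodyne(phase, stride=60)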
"""
try:
phaselen = len(phase)
except Exception as exc:
raise TypeError("Phase is not array_like: {}".format(exc))
if phaselen != len(self):
raise ValueError(
"Phase array must be the same length as the HeterodynedData"
)
dt = self.dt.value
samplerate = 1.0 / dt
stridesamp = int(stride * samplerate)
# find contiguous stretches of data
if not np.allclose(
np.diff(self.times.value), self.dt.value * np.ones(len(self) - 1)
):
            breaks = np.argwhere(np.diff(self.times.value) != self.dt.value).flatten().tolist()
segments = SegmentList()
breaks = [-1] + breaks + [len(self) - 1]
for i in range(len(breaks) - 1):
segments.append(
(
self.times.value[breaks[i] + 1] - dt / 2,
self.times.value[breaks[i + 1]] + dt / 2,
)
)
else:
segments = SegmentList(
[(self.times.value[0] - dt / 2, self.times.value[-1] + dt / 2)]
)
if datasegments is not None:
# get times within segments and data time span
            segments = (segments & SegmentList(datasegments)) & SegmentList(
                [(self.times.value[0] - dt / 2, self.times.value[-1] + dt / 2)]
            )
# check that some data is within the segments
if len(segments) == 0:
return None
# heterodyne the data
        hetdata = np.exp(-1j * np.asarray(phase))
"""``TARexp`` supports classification models implemented in Scikit-learn through
:py:class:`tarexp.component.ranker.SklearnRanker` wrapper. However, any supervised
learning model that can produce a score for each document in the collection can be
integrated into ``TARexp``.
.. caution::
    For rankers that require allocating a lot of memory or carry state
    (e.g. neural models and sklearn's SGDClassifier, which supports partial_fit),
    it is best to put the actual model initialization into the ``.begin`` method
    and to properly dispose of the model instance in the ``.reset`` method.
"""
import warnings
from copy import deepcopy
import numpy as np
from tarexp.component.base import Component
class Ranker(Component):
def __init__(self, **kwargs):
super().__init__()
def trainRanker(self, X, y, *args, **kwargs):
raise NotImplementedError
def scoreDocuments(self, X, *args, **kwargs):
raise NotImplementedError
class SklearnRanker(Ranker):
def __init__(self, module, **kwargs):
super().__init__()
assert hasattr(module, 'fit')
if not hasattr(module, 'predict_proba'):
warnings.warn("Model that supports predicting probabilities is preferable. "
"Will invoke `decision_function` instead.")
assert hasattr(module, 'decision_function')
self.sk_module = module
self._model_kwargs = kwargs
self.reset()
def reset(self):
# make sure model starts fresh so other component can safely take
# advantage of the state of the model as input
self.model = self.sk_module(**self._model_kwargs)
def trainRanker(self, X, y, **kwargs):
assert X.shape[0] == len(y)
if np.unique(y).size == 1:
            # fix for sklearn models that do not support one-class classification
X, y = addDummyNeg(X, y)
        assert np.unique(y).size == 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
import random
import math
import time
import copy
import _init_paths
from opts import opts
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.utils import xyxy2xywh, generate_anchors, xywh2xyxy, encode_delta
def letterbox(img, height=608, width=1088,
color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangular
shape = img.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular
return img, ratio, dw, dh
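# For example (approximate, with a hypothetical 1080x1920 input frame):
# letterbox scales it by ratio ~0.563 to 608x1081 and pads a few pixels on the
# left and right to reach the 608x1088 network input size:
#   img, ratio, dw, dh = letterbox(img, height=608, width=1088)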
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
borderValue=(127.5, 127.5, 127.5)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
border = 0 # width of added border (optional)
height = img.shape[0]
width = img.shape[1]
# Rotation and Scale
R = np.eye(3)
a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
# a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
s = random.random() * (scale[1] - scale[0]) + scale[0]
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,
borderValue=borderValue) # BGR order borderValue
# Return warped points also
if targets is not None:
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 2:6].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
#np.clip(xy[:, 0], 0, width, out=xy[:, 0])
#np.clip(xy[:, 2], 0, width, out=xy[:, 2])
#np.clip(xy[:, 1], 0, height, out=xy[:, 1])
#np.clip(xy[:, 3], 0, height, out=xy[:, 3])
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
targets = targets[i]
targets[:, 2:6] = xy[i]
return imw, targets, M
else:
return imw
def get_data(img_path, label_path):
height = 608
width = 1088
img = cv2.imread(img_path) # BGR
if img is None:
raise ValueError('File corrupt {}'.format(img_path))
h, w, _ = img.shape
img, ratio, padw, padh = letterbox(img, height=height, width=width)
# Load labels
if os.path.isfile(label_path):
labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
# Normalized xywh to pixel xyxy format
labels = labels0.copy()
labels[:, 2] = ratio * w * (labels0[:, 2] - labels0[:, 4] / 2) + padw
labels[:, 3] = ratio * h * (labels0[:, 3] - labels0[:, 5] / 2) + padh
labels[:, 4] = ratio * w * (labels0[:, 2] + labels0[:, 4] / 2) + padw
labels[:, 5] = ratio * h * (labels0[:, 3] + labels0[:, 5] / 2) + padh
else:
labels = np.array([])
plotFlag = False
if plotFlag:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.figure(figsize=(50, 50))
plt.imshow(img[:, :, ::-1])
plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-')
plt.axis('off')
plt.savefig('test.jpg')
time.sleep(10)
nL = len(labels)
if nL > 0:
# convert xyxy to xywh
labels[:, 2:6] = xyxy2xywh(labels[:, 2:6].copy()) # / height
labels[:, 2] /= width
labels[:, 3] /= height
labels[:, 4] /= width
labels[:, 5] /= height
# img = np.ascontiguousarray(img[:, :, ::-1]) # BGR to RGB
return img, labels, img_path, (h, w)
def gaussian2D_bbox(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
y *= 0.1
x *= 0.1
# print(y)
# print(np.shape(y), np.shape(x))
# print('x * x', np.shape(x * x))
# print('y * y', np.shape(y * y))
# print('x*x+y*y', np.shape(x*x+y*y))
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
# print('h shape:', np.shape(h))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian_bbox(heatmap, center, shape, radius, k=1):
diameter = 2 * radius + 1
h, w = shape
h = int(h / 2)
w = int(w / 2)
gaussian = gaussian2D_bbox((2 * h + 1, 2 * w + 1), sigma=diameter / 6)
# gaussian = gaussian2D((6 * h + 1, 6 * w + 1), sigma=diameter / 6)
# gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
# print(type(gaussian))
# print(np.shape(gaussian))
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, w), min(width - x, w + 1)
top, bottom = min(y, h), min(height - y, h + 1)
# print(width, height)
# print(left, right, top, bottom)
# print(w, h)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[h - top:h + bottom, w - left:w + right]
# masked_gaussian = gaussian[3 * h - top:3 * h + bottom, 3 * w - left:3 * w + right]
# print(np.shape(masked_heatmap))
# print(h - top, h + bottom, w - left, w + right, np.shape(masked_gaussian))
# print(masked_gaussian)
# import sys
# sys.exit(0)
# masked_gaussian = gaussian
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def draw_msra_gaussian_bbox(heatmap, center, shape):
# tmp_size = sigma * 3
mu_x = int(center[0] + 0.5)
mu_y = int(center[1] + 0.5)
w, h = heatmap.shape[0], heatmap.shape[1]
# ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
# br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
ul = [int(mu_x - shape[0]), int(mu_y - shape[1])]
br = [int(mu_x + shape[0] + 1), int(mu_y + shape[1] + 1)]
if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
return heatmap
# size = 2 * tmp_size + 1
size = 2 * 5 + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
print(x-x0)
import sys
sys.exit(0)
x = np.arange(0, 2 * shape[0] + 1, 1, np.float32)
y = np.arange(0, 2 * shape[1] + 1, 1, np.float32)
x0 = shape[0]
y0 = shape[1]
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * shape[0] ** 2))
g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
img_x = max(0, ul[0]), min(br[0], h)
img_y = max(0, ul[1]), min(br[1], w)
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
return heatmap
# def draw_msra_gaussian(heatmap, center, sigma):
# tmp_size = sigma * 3
# mu_x = int(center[0] + 0.5)
# mu_y = int(center[1] + 0.5)
# w, h = heatmap.shape[0], heatmap.shape[1]
# ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
# br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
# if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
# return heatmap
# size = 2 * tmp_size + 1
# x = np.arange(0, size, 1, np.float32)
# y = x[:, np.newaxis]
# x0 = y0 = size // 2
# g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
# g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
# g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
# img_x = max(0, ul[0]), min(br[0], h)
# img_y = max(0, ul[1]), min(br[1], w)
# heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
# heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
# g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
# return heatmap
#
# def draw_umich_gaussian(heatmap, center, radius, k=1):
# diameter = 2 * radius + 1
# gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
#
# x, y = int(center[0]), int(center[1])
#
# height, width = heatmap.shape[0:2]
#
# left, right = min(x, radius), min(width - x, radius + 1)
# top, bottom = min(y, radius), min(height - y, radius + 1)
#
# masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
# masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
# if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
# np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
# return heatmap
def hm_gen(img_path, label_path, num_classes, down_ratio, K):
imgs, labels, img_path, (input_h, input_w) = get_data(img_path, label_path)
output_h = imgs.shape[0] // down_ratio
output_w = imgs.shape[1] // down_ratio
num_objs = labels.shape[0]
# print('\noutput_h', output_h)
# print('\noutput_w', output_w)
# print('\nnum_objs', num_objs)
# import sys
# sys.exit(0)
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
# if opt.ltrb:
# wh = np.zeros((opt.K, 4), dtype=np.float32)
# else:
# wh = np.zeros((opt.K, 2), dtype=np.float32)
wh = np.zeros((K, 2), dtype=np.float32)
reg = np.zeros((K, 2), dtype=np.float32)
ind = np.zeros((K,), dtype=np.int64)
reg_mask = np.zeros((K,), dtype=np.uint8)
ids = np.zeros((K,), dtype=np.int64)
bbox_xys = np.zeros((K, 4), dtype=np.float32)
# draw_gaussian = draw_msra_gaussian if opt.mse_loss else draw_umich_gaussian
draw_gaussian = draw_msra_gaussian
# draw_gaussian = draw_umich_gaussian
for k in range(num_objs):
label = labels[k]
bbox = label[2:]
# cls_id = int(label[0])
cls_id = 0
bbox[[0, 2]] = bbox[[0, 2]] * output_w
bbox[[1, 3]] = bbox[[1, 3]] * output_h
bbox_amodal = copy.deepcopy(bbox)
bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
bbox[0] = np.clip(bbox[0], 0, output_w - 1)
bbox[1] = np.clip(bbox[1], 0, output_h - 1)
h = bbox[3]
w = bbox[2]
bbox_xy = copy.deepcopy(bbox)
bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
# radius = 6 if opt.mse_loss else radius
# radius = max(1, int(radius)) if self.opt.mse_loss else radius
ct = np.array(
[bbox[0], bbox[1]], dtype=np.float32)
ct_int = ct.astype(np.int32)
radius *= 3
draw_umich_gaussian_bbox(hm[cls_id], ct_int, (math.ceil(h), math.ceil(w)), radius)
# draw_msra_gaussian_bbox(hm[cls_id], ct_int, (math.ceil(h), math.ceil(w)))
# draw_gaussian(hm[cls_id], ct_int, radius)
# if opt.ltrb:
# wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
# bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
# else:
# wh[k] = 1. * w, 1. * h
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
ids[k] = label[1]
bbox_xys[k] = bbox_xy
ret = {'input': imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'ids': ids,
'bbox': bbox_xys}
return ret
def norm_image(image):
"""
标准化图像
:param image: [H,W,C]
:return:
"""
image = image.copy()
image -= np.max(np.min(image), 0)
image /= np.max(image)
image *= 255.
return np.uint8(image)
def gen_cam(image, mask):
"""
生成CAM图
:param image: [H,W,C],原始图像
:param mask: [H,W],范围0~1
:return: tuple(cam,heatmap)
"""
# mask转为heatmap
# mask[np.where(mask == 0)] = 0.25
# print(mask)
mask *= 0.9
print(mask)
print(type(mask), np.shape(mask))
print('min:', np.min(mask), ', max:', np.max(mask))
mask = 1-mask
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
heatmap = heatmap[..., ::-1] # gbr to rgb
# print('\nheatmap', np.shape(heatmap))
# print(heatmap)
# print(image)
# cv2.imwrite(os.path.join(save_dir, '{}.jpg'.format('hm_2')), heatmap*255)#ret['hm'][0] * 255)
# import sys
# sys.exit(0)
    # overlay the heatmap on the original image
    cam = heatmap + np.float32(image)
from __future__ import print_function
try:
from builtins import range, zip
except:
pass
import os, sys, glob, h5py, socket, shutil, time, re, \
subprocess
import numpy as np
from collections import deque
from subprocess import Popen, PIPE
from pyglib.io.fio import file_exists
import pyglib.run.environ as env
def get_file_info(fname, unit, idmf, case, scratch, so, para, cmplx, _band,
updn, dnup):
'''help function to setup informations in def file.
'''
if 'in2' == fname:
return [unit, "'{}.in2{}'".format(case, cmplx), "'old'",
"'formatted'", 0]
elif 'inso' == fname:
return [unit, "'{}.inso'".format(case), "'unknown'", "'formatted'", 0]
elif 'indmfl' == fname:
return [unit, "'{}.indmfl'".format(case), "'old'", "'formatted'", 0]
elif 'outputdmfupdn' == fname:
return [unit, "'{}.outputdmf{}{}'".format(case, idmf, updn), \
"'unknown'", "'formatted'", 0]
elif 'in1c' == fname:
return [unit, "'{}.in1c'".format(case), "'unknown'", "'formatted'", 0]
elif 'vectorupdn' == fname:
return [unit, "'{}/{}.vector{}{}{}'".format(scratch, case, so, \
updn, para), "'unknown'","'unformatted'",9000]
elif 'vectordnup' == fname:
return [unit, "'{}/{}.vector{}{}{}'".format(scratch, case, so, \
dnup, para), "'unknown'","'unformatted'",9000]
elif 'klist' == fname:
return [unit, "'{}.klist{}'".format(case, _band), "'old'", \
"'formatted'", 0]
elif 'kgen' == fname:
return [unit, "'{}.kgen'".format(case), "'unknown'", "'formatted'", 0]
elif 'vspupdn' == fname:
return [unit, "'{}.vsp{}'".format(case, updn), "'old'", \
"'formatted'", 0]
elif 'vspdnup' == fname:
return [unit, "'{}.vsp{}'".format(case, dnup), "'unknown'", \
"'formatted'", 0]
elif 'struct' == fname:
return [unit, "'{}.struct'".format(case), "'old'", "'formatted'", 0]
elif 'rotlm' == fname:
return [unit, "'{}.rotlm'".format(case), "'unknown'", "'formatted'", 0]
elif 'energysodum' == fname:
if so == 'so':
sodum = 'dum'
else:
sodum = dnup
return [unit, "'{}.energy{}'".format(case, sodum), \
"'unknown'", "'formatted'", 0]
elif 'energyupdn' == fname:
return [unit, "'{}.energy{}{}{}'".format(case, so, updn, para), \
"'unknown'", "'formatted'", 0]
elif 'energydnup' == fname:
return [unit, "'{}.energy{}{}{}'".format(case, so, dnup, para), \
"'unknown'", "'formatted'", 0]
elif 'clmval' == fname:
return [unit, "'{}.clmval{}'".format(case, updn), "'unknown'", \
"'formatted'", 0]
elif 'recprlist' == fname:
return [unit, "'{}.recprlist'".format(case), "'unknown'", \
"'formatted'", 9000]
elif 'scf2updn' == fname:
return [unit, "'{}.scf2{}'".format(case, updn), \
"'unknown'", "'formatted'", 0]
elif 'normupdn' == fname:
if so == "so" and updn == "":
_updn = "up"
else:
_updn = updn
return [unit, "'{}.norm{}{}{}'".format(case, so, _updn, para), \
"'unknown'", "'formatted'", 0]
elif 'normdnup' == fname:
return [unit, "'{}.norm{}{}{}'".format(case, so, dnup, para), \
"'unknown'", "'formatted'", 0]
else:
raise ValueError('No matching file name {}!'.format(fname))
def fcreate_def_gwien(case, scratch='.', so='', para='', idmf='1', cmplx='',
_band='', updn='', dnup='dn'):
'''create gwien1/2.def file.
'''
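    # each line written to the .def file has the form
    #   <unit>, '<filename>', '<status>', '<form>', <record length>
    # e.g. (with a hypothetical case name):
    #   3, '<case>.in2'     , 'old'      , 'formatted'  , 0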
fdef = open('gwien{}{}.def'.format(idmf,updn), 'w')
if idmf == '1':
fname_list = ['in2', 'inso', 'indmfl', 'outputdmfupdn', \
'in1c', 'vectorupdn', 'vectordnup', 'klist', \
'kgen', 'vspupdn', 'vspdnup', 'struct', \
'rotlm', 'energydnup', 'energyupdn', 'normupdn', \
'normdnup']
unit_list = [3, 4, 5, 6, \
7, 9, 10, 13, \
14, 18, 19, 20, \
22, 59, 60, 12, \
11]
elif idmf == '2':
fname_list = ['in1c', 'inso', 'in2', 'outputdmfupdn', 'indmfl', \
'clmval', 'vectorupdn', 'vectordnup', 'recprlist', 'kgen', \
'vspupdn', 'struct', 'scf2updn', 'rotlm', 'energyupdn', \
'normupdn', 'normdnup']
unit_list = [3, 4, 5, 6, 7, \
8, 9, 10, 13, 14, \
18, 20, 21, 22, 30, \
12, 11]
for fname, unit in zip(fname_list, unit_list):
fdef.write("{:3d}, {:<15s}, {:<10s}, {:<13s}, {:<4d}\n".format(\
*get_file_info(fname, unit, idmf, case, scratch, so, \
para, cmplx, _band, updn, dnup)))
fdef.close()
def onestep(fday, case, exec_name, w_root, para="", so="", \
band=None, updn=None):
'''wien2k steps.
'''
time_start = time.strftime("%H:%M:%S")
cmd = ['{}/x'.format(w_root), exec_name, '-f', case]
if para != "":
cmd.append(para)
if band == '-band':
cmd.append(band)
if not os.path.isfile('EFLDA.INP'):
shutil.copy2('EFLDA.OUT', 'EFLDA.INP')
if updn in ["-up", "-dn"]:
cmd.append(updn)
if so == "so":
cmd.extend(["-c", "-so"])
print(' '.join(x for x in cmd))
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
fday.write('>{:<10s} ({}) {}\n'.format(exec_name, time_start, out[:-1]))
fday.flush()
for f in glob.glob('{}.error*'.format(exec_name)):
if os.path.getsize(f) > 0:
print('error in {} from file: {}'.format(
f, open(f, 'r').readlines()))
sys.exit(1)
def gonestep(fday, exec_name, mpi, updn=""):
'''gwien1, CyGutz and gwien2 steps.
'''
time_start = time.strftime("%H:%M:%S")
with open(':log', 'a') as f:
f.write('{}> {}\n'.format(time.strftime("%a %b %d %H:%M:%S %Z %Y"), \
exec_name))
cmd = ['/usr/bin/time']
if mpi != '':
cmd.extend(mpi)
cmd.append('{}'.format(exec_name))
if 'gwien' in exec_name:
cmd.append('{}{}.def'.format(exec_name, updn))
print(' '.join(x for x in cmd))
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
with open('{}_info.out'.format(exec_name), 'w') as f:
f.write(out)
fday.write('>{:<10s} ({}) {}\n'.format(exec_name, time_start, \
err.splitlines()[-2]))
fday.flush()
for f in glob.glob('{}.error*'.format(exec_name)):
if os.path.getsize(f) > 0:
print('error in {} from file: {}'.format(
f, open(f, 'r').readlines()))
sys.exit(1)
def get_file_content(fname):
if os.path.exists(fname):
data = '\n------- {} --------\n'.format(fname)
with open(fname, 'r') as f:
data += f.read()
return data
else:
return ''
def scf(case, spinpol):
# scf file content
if spinpol:
f_list = ['{}.scf{}'.format(case, i) for i in ['0', \
'1up', '1dn', 'so', '2up', '2dn', \
'1s', '2s', 'cup', 'cdn']]
else:
f_list = ['{}.scf{}'.format(case, i) for i in ['0', \
'1', 'so', '2', '1s', '2s', 'c']]
data = ''.join(get_file_content(f) for f in f_list)
with open('{}.scf'.format(case), 'a') as f:
f.write(data)
# files saved for mixing.
if spinpol:
f_list = ['clmsum', 'vspup', 'vspdn', 'vnsup', 'vnsdn', 'vrespsum',
'clmdn', 'clmup']
else:
f_list = ['clmsum', 'vsp', 'vns', 'vrespsum']
for i in f_list:
name = '{}.{}'.format(case, i)
if file_exists(name):
shutil.copy2(name, '{}_old'.format(name))
def scfm(case):
f_scf = '{}.scfm'.format(case)
data = get_file_content(f_scf)
with open('{}.scf'.format(case), 'a') as f:
f.write(data)
def diff(fday, case, mix_dc, avg_dc, gskip):
e_que = deque([], 2)
with open('{}.scf'.format(case), 'r') as f:
for line in f:
if ':DIS' in line:
d_rho = float(line.split()[-1])
if ':ENE' in line:
e_que.append(float(line.split()[-1]))
if len(e_que) == 2:
d_etot = np.abs(e_que[1] - e_que[0])
else:
d_etot = 0.0
dcv_err = 0.
if not gskip:
with h5py.File("GPARAM.h5", 'a') as f:
ldc = f["/dc_mode"][0]
if os.path.isfile("GDC_NELF_OUT.h5"):
with h5py.File("GDC_NELF_OUT.h5", 'r') as fp:
nelf_list_inp = fp["/dc_nelf_list_inp"][()]
nelf_list_out = fp["/dc_nelf_list_out"][()]
nelf_diff_list = nelf_list_out - nelf_list_inp
nelf_list_mix = nelf_list_inp + mix_dc*nelf_diff_list
if avg_dc:
valup = np.sum(nelf_list_mix[:,0])/nelf_list_mix.shape[0]
valdn = np.sum(nelf_list_mix[:,1])/nelf_list_mix.shape[0]
nelf_list_mix = [[valup,valdn] for x in nelf_list_inp]
if ldc == 12:
if avg_dc:
dcv_err = np.sum(nelf_diff_list)/len(nelf_list_mix)
else:
dcv_err = np.max(np.abs(nelf_diff_list))
if '/dc_nelf_list' in f:
f["/dc_nelf_list"][()] = nelf_list_mix
else:
f["/dc_nelf_list"] = nelf_list_mix
fday.write(':ENERGY convergence: {}\n'.format(d_etot))
fday.write(':CHARGE convergence: {}\n'.format(d_rho))
fday.write(':VDC convergence: {}\n'.format(dcv_err))
return d_rho, d_etot, dcv_err
def processes_convert(so,updn):
if not file_exists('.processes'):
print('.processes file not present. It must be a serial run.')
return
lines = open('.processes').readlines()
work = {}
nkstart = 0
for line in lines:
data = line.split(':')
if data[0].strip().isdigit():
vecn = ["emmanuel" for i in range(6)]
i, nkp, nprc = map(int,data[::2])
if not so:
fdef = open('{}lapw1_{}.def'.format(updn,i), 'r')
for line in fdef:
data = line.split(',')
data0 = int(data[0])
if data0 == 10 or data0 == 11:
data0 = data0 % 10
m = re.search('.*[\'|\"](.*)_(\d+)', data[1])
assert m is not None, 'vector file to macth ' + \
' lapw1.def not found!'
vecn[data0*2] = '{}_{}'.format(m.group(1), m.group(2))
vecn[data0*2+1] = '{}dn_{}'.format(m.group(1), \
m.group(2))
fdef.close()
else:
fdef = open('{}lapwso_{}.def'.format(updn,i), 'r')
for line in fdef:
data = line.split(',')
if int(data[0])==42:
vecn[0]=data[1].split("'")[1]
elif int(data[0])==41:
vecn[1]=data[1].split("'")[1]
elif int(data[0])==52:
vecn[2]=data[1].split("'")[1]
elif int(data[0])==51:
vecn[3]=data[1].split("'")[1]
elif int(data[0])==46:
vecn[4]=data[1].split("'")[1]
elif int(data[0])==45:
vecn[5]=data[1].split("'")[1]
fdef.close()
            if nprc in work:
work[nprc].append((i, nkp, nkstart, vecn))
else:
work[nprc]=[(i, nkp, nkstart, vecn)]
nkstart += nkp
for prc in sorted(work.keys()):
fo = open('_processes_{}'.format(prc-1), 'w')
for (i, nkp, nkstart, vecn) in work[prc]:
fo.write('{} {} {} "{}" "{}" "{}" "{}" "{}" "{}"\n'.format(\
i, nkp, nkstart, *vecn))
def create_gomp_file():
'''
Create GOMP.h5 file based on GMPI_X.h5 for openMP execution.
'''
with h5py.File('GMPI_0.h5', 'r') as f:
num_procs = f["/nprocs"][0]
nvec = 0
kvec1 = []
kvec2 = []
for iproc in range(num_procs):
with h5py.File('GMPI_' + str(iproc) + '.h5', 'r') as f:
nvec += f["/nvec"][0]
kvec = f["/KVEC"][()].T
kvec1.append(kvec[0])
if kvec.shape[1] == 2:
kvec2.append(kvec[1])
kvec = np.asarray(kvec1 + kvec2)
with h5py.File('GOMP.h5', 'w') as f:
        f['/nvec'] = np.asarray([nvec])
import sys
import numpy as np
import time
import grid2op
from lightsim2grid.LightSimBackend import LightSimBackend
# env = grid2op.make(track, backend=BACKEND(), reward_class=RedispReward)
start_time = time.time()
BACKEND = LightSimBackend
# track = "l2rpn_icaps_2021_small"
track = "l2rpn_neurips_2020_track1_small"
env = grid2op.make(track, backend=BACKEND())
actions = []
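# Each unitary action below adjusts generator set-points through a redispatch
# vector with one entry per generator (22 generators in this environment);
# the values are presumably in MW.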
#################################################### from v6_top500_unitary_actions.npz -------->
action14 = env.action_space()
action14.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.8, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action14)
# ---- END OF ACTION ---
action17 = env.action_space()
action17.redispatch = np.array([0, 0, 0, 10.4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action17)
# ---- END OF ACTION ---
action24 = env.action_space()
action24.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action24)
# ---- END OF ACTION ---
action29 = env.action_space()
action29.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2.8, 0, 0])
actions.append(action29)
# ---- END OF ACTION ---
action32 = env.action_space()
action32.redispatch = np.array([0, 0, 0, -10.4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action32)
# ---- END OF ACTION ---
action34 = env.action_space()
action34.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.5, 0])
actions.append(action34)
# ---- END OF ACTION ---
action66 = env.action_space()
action66.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -8.5, 0])
actions.append(action66)
# ---- END OF ACTION ---
action77 = env.action_space()
action77.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2.8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action77)
# ---- END OF ACTION ---
action80 = env.action_space()
action80.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -9.9])
actions.append(action80)
# ---- END OF ACTION ---
action91 = env.action_space()
action91.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -4.3, 0, 0, 0, 0, 0])
actions.append(action91)
# ---- END OF ACTION ---
action112 = env.action_space()
action112.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.375, 0])
actions.append(action112)
# ---- END OF ACTION ---
action157 = env.action_space()
action157.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2.8, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action157)
# ---- END OF ACTION ---
action167 = env.action_space()
action167.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.25, 0])
actions.append(action167)
# ---- END OF ACTION ---
action169 = env.action_space()
action169.redispatch = np.array([0, 0, 0, 7.7999997, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action169)
# ---- END OF ACTION ---
action177 = env.action_space()
action177.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.3, 0, 0, 0, 0, 0])
actions.append(action177)
# ---- END OF ACTION ---
action219 = env.action_space()
action219.redispatch = np.array([0, 0, 0, 5.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action219)
# ---- END OF ACTION ---
action224 = env.action_space()
action224.redispatch = np.array([0, 0, 0, -7.7999997, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action224)
# ---- END OF ACTION ---
action233 = env.action_space()
action233.redispatch = np.array([0, 0, 0, -5.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action233)
# ---- END OF ACTION ---
action276 = env.action_space()
action276.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -7.4249997])
actions.append(action276)
# ---- END OF ACTION ---
action278 = env.action_space()
action278.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action278)
# ---- END OF ACTION ---
action303 = env.action_space()
action303.redispatch = np.array([0.35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action303)
# ---- END OF ACTION ---
action307 = env.action_space()
action307.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -6.375, 0])
actions.append(action307)
# ---- END OF ACTION ---
action383 = env.action_space()
action383.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.95])
actions.append(action383)
# ---- END OF ACTION ---
action389 = env.action_space()
action389.redispatch = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.4, 0, 0, 0, 0, 0, 0, 0, 0])
actions.append(action389)
# ---- END OF ACTION ---
action397 = env.action_space()
action397.redispatch = np.array([0, 0, 0, -2.6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
from enum import Enum
import numpy as np
from PuzzleLib.Backend import gpuarray, Blas
from PuzzleLib.Backend.Kernels.MatVec import addVecToMat, addVecToMatBatch
from PuzzleLib.Variable import Variable
from PuzzleLib.Modules.Module import ModuleError, Module
class GroupMode(str, Enum):
full = "full"
one = "one"
class GroupLinear(Module):
def __init__(self, groups, insize, outsize, wscale=1.0, useW=True, useBias=True, initscheme=None,
inmode="full", wmode="full", batchDim=0, name=None, empty=False, transpW=False):
super().__init__(name)
self.registerBlueprint(locals())
if not(useW or useBias):
raise ModuleError("Not using W and bias is not supported")
self.transpW = transpW
self.useW = useW
self.useBias = useBias
self.inmode = GroupMode(inmode)
self.wmode = GroupMode(wmode)
if batchDim == 0:
self.format = "bgp"
elif batchDim == 1:
self.format = "gbp"
else:
raise ModuleError("Unsupported batch dimension")
self.groupDim = 1 if batchDim == 0 else 0
self.groups = 1 if groups is None else groups
self.W = None
self.b = None
if empty:
return
self.setupW(insize, outsize, initscheme, wscale)
self.setupBias(insize, outsize)
def setupW(self, insize, outsize, initscheme, wscale):
if not self.useW:
return
asize, bsize = (outsize, insize) if self.transpW else (insize, outsize)
groups = self.groups if self.wmode == GroupMode.full else 1
Wshape = (groups, asize, bsize)
W = self.createTensorWithScheme(initscheme, Wshape, wscale, factorShape=(asize, bsize))
W = gpuarray.empty(Wshape, dtype=np.float32) if W is None else gpuarray.to_gpu(W)
self.setVar("W", Variable(W))
def setupBias(self, insize, outsize):
if not self.useBias:
return
size = outsize if self.useW else insize
bshape = (self.groups, size) if self.wmode == GroupMode.full else (1, size)
self.setVar("b", Variable(gpuarray.zeros(bshape, dtype=np.float32)))
def updateData(self, data):
if self.useW:
self.data = Blas.mulTensorBatch(
data, self.W, formatA=self.format, formatB="gbp", transpB=self.transpW, formatOut=self.format
)
else:
self.data = gpuarray.copy(None, data)
if self.useBias:
if self.groupDim == 1:
outdata = self.data.reshape(self.data.shape[0], -1)
addVecToMat(self.b.ravel(), outdata, axis=1, out=outdata)
else:
addVecToMatBatch(self.b, self.data, axis=1, out=self.data)
def updateGrad(self, grad):
if self.useW:
formatOut = self.format if self.inmode == GroupMode.full else "gbp"
self.grad = Blas.mulTensorBatch(
grad, self.W, formatA=self.format, formatB="gbp", transpB=not self.transpW, formatOut=formatOut
)
if self.inmode != GroupMode.full:
self.grad = Blas.sumOnMatrix(self.grad.reshape(self.groups, grad.shape[0] * self.W.shape[1]))
self.grad = self.grad.reshape(grad.shape[0], 1, self.W.shape[1])
else:
self.grad = grad
def accGradParams(self, grad, scale=1.0, momentum=0.0):
if self.wmode == GroupMode.full:
if self.useW:
				A, B = (grad, self.inData) if self.transpW else (self.inData, grad)
Blas.mulTensorBatch(
A, B, out=self.vars["W"].grad, formatA=self.format, formatB=self.format,
formatOut="gbp", transpA=True, alpha=scale, beta=momentum
)
if self.useBias:
Blas.sumOnTensorGroup(grad, out=self.vars["b"].grad, formatT=self.format)
else:
if self.useW:
A, B = (grad, self.inData) if self.transpW else (self.inData, grad)
wgrad = Blas.mulTensorBatch(
A, B, transpA=True, formatA=self.format, formatB=self.format, formatOut="gbp",
alpha=scale, beta=momentum
)
Blas.sumOnMatrix(wgrad.reshape(wgrad.shape[0], -1), out=self.vars["W"].grad.ravel())
if self.useBias:
Blas.sumOnMatrix(grad.reshape(grad.shape[0] * grad.shape[1], grad.shape[2]), out=self.vars["b"].grad[0])
def dataShapeFrom(self, shape):
groups = shape[self.groupDim] if self.inmode == GroupMode.full else self.groups
beg = (shape[0], groups) if self.groupDim == 1 else (groups, shape[1])
if self.useW:
return beg + (self.W.shape[1], ) if self.transpW else beg + (self.W.shape[2], )
else:
return beg + (shape[2], )
def checkDataShape(self, shape):
if len(shape) != 3:
raise ModuleError("Data must be 3d tensor")
if self.inmode == GroupMode.one and shape[1] != 1:
raise ModuleError("Expected 1 group in data, %d were given" % (shape[1]))
if self.inmode != GroupMode.one and self.wmode != GroupMode.one and shape[self.groupDim] != self.groups:
raise ModuleError("Expected %d groups in data, %d were given" % (self.groups, shape[self.groupDim]))
if self.useW:
if self.transpW and shape[2] != self.W.shape[2]:
raise ModuleError("Expected %d data dimensions, %d were given" % (self.W.shape[2], shape[2]))
elif shape[2] != self.W.shape[1]:
raise ModuleError("Expected %d data dimensions, %d were given" % (self.W.shape[1], shape[2]))
def gradShapeFrom(self, shape):
beg = (shape[0], self.groups) if self.groupDim == 1 else (self.groups, shape[1])
onebeg = (shape[0], 1) if self.groupDim == 1 else (1, shape[1])
if self.useW:
size = self.W.shape[2 if self.transpW else 1]
return beg + (size, ) if self.inmode == GroupMode.full else onebeg + (size, )
else:
return beg + (shape[2], ) if self.inmode == GroupMode.full else onebeg + (shape[2], )
def checkGradShape(self, shape):
if len(shape) != 3:
raise ModuleError("Grad must be 3d tensor")
if self.wmode == GroupMode.full and shape[self.groupDim] != self.groups:
raise ModuleError("Expected %d groups in grad, %d were given" % (self.groups, shape[self.groupDim]))
if self.useW:
if self.transpW and shape[2] != self.W.shape[1]:
raise ModuleError("Expected %d grad dimensions, %d were given" % (self.W.shape[1], shape[2]))
elif shape[2] != self.W.shape[2]:
raise ModuleError("Expected %d grad dimensions, %d were given" % (self.W.shape[2], shape[2]))
def unittest():
stdCalcTest()
oneInCalcTest()
oneWCalcTest()
batchDimTest()
trainTest()
def stdCalcTest():
groups, insize, outsize = 2, 5, 4
batchsize = 3
data = gpuarray.to_gpu(np.random.randn(batchsize, groups, insize).astype(np.float32))
grpLinear = GroupLinear(groups, insize, outsize)
grpLinear.b.fill(0.5)
grpLinear(data)
hostOutData = np.empty(grpLinear.data.shape, dtype=np.float32)
for i in range(groups):
hostOutData[:, i, :] = np.dot(data.get()[:, i, :], grpLinear.W.get()[i])
hostOutData += grpLinear.b.get()
assert np.allclose(hostOutData, grpLinear.data.get())
grad = gpuarray.to_gpu(np.random.randn(batchsize, groups, outsize).astype(np.float32))
grpLinear.backward(grad)
hostInGrad = np.empty(grpLinear.grad.shape, dtype=np.float32)
for i in range(groups):
hostInGrad[:, i, :] = np.dot(grad.get()[:, i, :], grpLinear.W.get()[i].T)
assert np.allclose(hostInGrad, grpLinear.grad.get())
hostWGrad = np.empty(grpLinear.W.shape, dtype=np.float32)
for i in range(groups):
hostWGrad[i] = np.dot(data.get()[:, i, :].T, grad.get()[:, i, :])
hostBGrad = np.empty(grpLinear.b.shape, dtype=np.float32)
for i in range(groups):
hostBGrad[i] = np.sum(grad.get()[:, i, :], axis=0)
assert np.allclose(hostWGrad, grpLinear.vars["W"].grad.get())
assert np.allclose(hostBGrad, grpLinear.vars["b"].grad.get())
def oneInCalcTest():
batchsize, insize, outsize = 4, 5, 3
groups = 4
data = gpuarray.to_gpu(np.random.randn(batchsize, 1, insize).astype(np.float32))
grpLinear = GroupLinear(groups, insize, outsize, inmode="one")
grpLinear(data)
hostOutData = np.empty(grpLinear.data.shape, dtype=np.float32)
for i in range(groups):
hostOutData[:, i, :] = np.dot(data.get()[:, 0, :], grpLinear.W.get()[i])
hostOutData += grpLinear.b.get()[np.newaxis, :, :]
assert np.allclose(hostOutData, grpLinear.data.get())
grad = gpuarray.to_gpu(np.random.randn(batchsize, groups, outsize).astype(np.float32))
grpLinear.backward(grad)
hostInGrad = np.zeros(data.shape, dtype=np.float32)
for i in range(groups):
hostInGrad[:, 0, :] += np.dot(grad.get()[:, i, :], grpLinear.W.get()[i].T)
assert np.allclose(hostInGrad, grpLinear.grad.get())
hostWGrad = np.empty(grpLinear.W.shape, dtype=np.float32)
for i in range(groups):
hostWGrad[i] = np.dot(data.get()[:, 0, :].T, grad.get()[:, i, :])
hostBGrad = np.empty(grpLinear.b.shape, dtype=np.float32)
for i in range(groups):
hostBGrad[i] = np.sum(grad.get()[:, i, :], axis=0)
assert np.allclose(hostWGrad, grpLinear.vars["W"].grad.get())
assert np.allclose(hostBGrad, grpLinear.vars["b"].grad.get())
def oneWCalcTest():
batchsize, insize, outsize = 4, 3, 4
groups = 3
data = gpuarray.to_gpu(np.random.randn(batchsize, groups, insize).astype(np.float32))
grpLinear = GroupLinear(None, insize, outsize, wmode="one")
grpLinear(data)
hostOutData = np.empty(grpLinear.data.shape, dtype=np.float32)
for i in range(groups):
hostOutData[:, i, :] = np.dot(data.get()[:, i, :], grpLinear.W.get()[0])
hostOutData += grpLinear.b.get()[np.newaxis, :, :]
assert np.allclose(hostOutData, grpLinear.data.get())
    grad = gpuarray.to_gpu(np.random.randn(batchsize, groups, outsize).astype(np.float32))
'''
This is an implementation of Quantum State Tomography for qutrits,
using techniques from the following papers:
'Iterative algorithm for reconstruction of entangled states(10.1103/PhysRevA.63.040303)'
'Diluted maximum-likelihood algorithm for quantum tomography(10.1103/PhysRevA.75.042108)'
'Qudit Quantum State Tomography(10.1103/PhysRevA.66.012303)'
'On-chip generation of high-dimensional entangled quantum states and their coherent control(Nature volume 546, pages622-626(2017))'
'''
import numpy as np
from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random
from scipy.linalg import sqrtm
from datetime import datetime
from concurrent import futures
import os
from pathlib import Path
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
"""
Definition of Three Frequency Bases:
fb1 = array([1, 0, 0])
fb2 = array([0, 1, 0])
fb3 = array([0, 0, 1])
"""
zero_base_array1 = zeros((1,3))
zero_base_array1[0][0] = 1
fb1 = zero_base_array1
zero_base_array2 = zeros((1,3))
zero_base_array2[0][1] = 1
fb2 = zero_base_array2
zero_base_array3 = zeros((1,3))
zero_base_array3[0][2] = 1
fb3 = zero_base_array3
""" Make Measurement Bases """
mb1 = (conjugate((fb1 + fb2).T) @ (fb1 + fb2)) / 2
mb2 = (conjugate((fb1 + fb3).T) @ (fb1 + fb3)) / 2
mb3 = (conjugate((fb2 + fb3).T) @ (fb2 + fb3)) / 2
mb4 = (conjugate((exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3)) * fb2).T) @ (exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3) * fb2))) / 2
mb5 = (conjugate((exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3)) * fb2).T) @ (exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3) * fb2))) / 2
mb6 = (conjugate((exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3)) * fb3).T) @ (exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3) * fb3))) / 2
mb7 = (conjugate((exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3)) * fb3).T) @ (exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3) * fb3))) / 2
mb8 = (conjugate((exp( 2*pi*1j/3) * fb2 + (exp(-2*pi*1j/3)) * fb3).T) @ (exp( 2*pi*1j/3) * fb2 + (exp(-2*pi*1j/3) * fb3))) / 2
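# A minimal, hypothetical sketch (not part of the original file) of one step of the
# iterative R*rho*R maximum-likelihood update described in the papers cited in the
# module docstring. `povm` (a list of measurement operators such as mb1..mb8) and
# `counts` (measured frequencies) are illustrative names only; numpy is imported
# above as np.
def _rrhor_step(rho, povm, counts):
    R = np.zeros_like(rho, dtype=complex)
    for E_i, f_i in zip(povm, counts):
        p_i = np.real(np.trace(rho @ E_i))
        if p_i > 0:
            R += (f_i / p_i) * E_i
    new_rho = R @ rho @ R
    return new_rho / np.trace(new_rho)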
import unittest
import os
import random
from itertools import groupby
import numpy as np
import pandas as pd
import datetime
from macrosynergy.panel.make_blacklist import make_blacklist
class TestAll(unittest.TestCase):
@staticmethod
def dataframe_concatenation(arr, cids, n_vals, n_cids):
tracker = 0
n_vals -= n_cids
for cid in cids[:-1]:
index = random.randint(1, n_vals)
arr[tracker:(tracker + index)] = np.repeat(cid, index)
tracker += index
n_vals -= index
residual = arr.size - tracker
arr[tracker:] = np.repeat(cids[-1], residual)
return arr
@staticmethod
def dates_generator(date, n_days):
start = datetime.datetime.strptime(date, "%Y-%m-%d")
dates = [start + datetime.timedelta(days=x) for x in range(0, n_days)]
return dates
def dataframe_generator(self):
cids = ['AUD', 'USD', 'GBP', 'CAD']
xcat = ['FXXR_NSA']
timestamps = 1000
sequence = 20
n_cids = len(cids)
value = [int(round(random.random())) for i in range(timestamps - sequence)]
value = [1 for i in range(sequence)] + value
n_vals = len(value)
cids_ = np.empty(n_vals, dtype=object)
cids_ = self.dataframe_concatenation(cids_, cids, n_vals, n_cids)
        # The itertools.groupby class is a heavily optimised implementation
        # written in high-performance C code.
cids_list = [len(list(v)) for k, v in groupby(cids_)]
dates_l = []
for no in cids_list:
dates_l += self.dates_generator("2000-01-01", no)
category = np.repeat(xcat[0], timestamps)
        data = np.column_stack((cids_, category, np.array(dates_l), np.array(value)))
import dgl
from numpy.random.mtrand import seed
import torch
import numpy as np
import numba
from collections import defaultdict
if torch.__version__ < "1.7.0":
def rfft(input, n=None, dim=-1, norm=None):
# no image part
inp_dim = input.dim()
if dim < 0:
dim = inp_dim + dim
if n is not None:
diff = input.size(dim) - n
if diff > 0:
input = torch.split(input, dim=dim, split_size_or_sections=(n, diff))[0]
# else:
# sizes = tuple(input.size())
# padded = torch.zeros((sizes[:dim] + (-diff, ) + sizes[(dim+1):]), dtype=input.dtype, device=input.device)
# input = torch.cat([input, padded], dim=dim)
else:
n = input.size(dim) // 2 + 1
if norm is None or norm == "backward":
normalized = False
elif norm == "forward":
normalized = True
else:
raise ValueError
if dim != inp_dim - 1:
input = input.transpose(dim, inp_dim - 1)
output = torch.rfft(input, signal_ndim=1, normalized=normalized)
if dim != inp_dim - 1:
output = output.transpose(dim, inp_dim - 1)
return output
def irfft(input, n=None, dim=-1, norm=None):
# calculate the dimension of the input and regard the last as the (real, image)
inp_dim = input.dim()
if input.size(-1) != 2:
input = torch.stack([input, torch.zeros_like(input)], dim=-1)
else:
inp_dim -= 1
if dim < 0:
dim = inp_dim + dim
if n is not None:
diff = input.size(dim) - n
if diff > 0:
input = torch.split(input, dim=dim, split_size_or_sections=(n, diff))[0]
# else:
# sizes = tuple(input.size())
# padded = torch.zeros((sizes[:dim] + (-diff, ) + sizes[(dim+1):]), dtype=input.dtype, device=input.device)
# input = torch.cat([input, padded], dim=dim)
else:
n = 2 * (input.size(dim) - 1)
if norm is None or norm == "backward":
normalized = False
elif norm == "forward":
normalized = True
else:
raise ValueError
if dim != inp_dim - 1:
input = input.transpose(dim, inp_dim - 1)
output = torch.irfft(input, signal_ndim=1, normalized=normalized, signal_sizes=[n])
if dim != inp_dim - 1:
output = output.transpose(dim, inp_dim - 1)
return output
else:
def rfft(input, n=None, dim=None, norm=None):
return torch.view_as_real(torch.fft.rfft(input, n=n, dim=dim, norm=norm))
def irfft(input, n=None, dim=None, norm=None):
if not torch.is_complex(input) and input.size(-1) == 2:
input = torch.view_as_complex(input)
return torch.fft.irfft(input, n=n, dim=dim, norm=norm)
def complex_mul(re_x, im_x, re_y, im_y):
return (re_x * re_y - im_x * im_y), (im_x * re_y + re_x * im_y)
def complex_conj(re_x, im_x):
return re_x, -im_x
def uniform_choice_int(N, n):
return np.random.randint(0, N, size=(n,))
def uniform_choice(array, n):
index = np.random.randint(0, len(array), size=(n,))
return array[index]
@numba.jit(numba.int64[:](numba.int64[:], numba.int64), nopython=True)
def _get_enc_len(x, base=10):
lens = np.zeros((len(x), ), dtype=np.int64)
for i, n in enumerate(x):
cnt = 0
while n > 0:
n = n // base
cnt += 1
lens[i] = cnt
return lens
def get_enc_len(x, base=10):
if isinstance(x, int):
return _get_enc_len(np.array([x], dtype=np.int64), base)[0]
elif isinstance(x, float):
return _get_enc_len(np.array([int(x)], dtype=np.int64), base)[0]
if isinstance(x, torch.Tensor):
x = x.numpy()
elif not isinstance(x, np.ndarray):
x = np.array(x)
x = x.astype(np.int64)
x_shape = x.shape
return _get_enc_len(x.reshape(-1), base).reshape(*x_shape)
@numba.jit(
numba.int64[:, :](numba.int64[:], numba.int64, numba.int64),
nopython=True,
nogil=True
)
def _int2multihot(x, len_x, base):
rep = np.zeros((len(x), len_x * base), dtype=np.int64)
for i, n in enumerate(x):
n = n % base**len_x
idx = (len_x - 1) * base
while n:
rep[i, idx + n % base] = 1
n = n // base
idx -= base
while idx >= 0:
rep[i, idx] = 1
idx -= base
return rep
def int2multihot(x, len_x, base=10):
if isinstance(x, int):
return _int2multihot(np.array([x], dtype=np.int64), len_x, base)[0]
elif isinstance(x, float):
return _int2multihot(np.array([int(x)], dtype=np.int64), len_x, base)[0]
if isinstance(x, torch.Tensor):
x = x.numpy()
elif not isinstance(x, np.ndarray):
x = np.array(x)
x = x.astype(np.int64)
return _int2multihot(x, len_x, base)
def load_supervised(args, link, node, train_pool):
num_nodes, num_rels, train_data = 0, 0, []
train_indices = defaultdict(list)
with open(link, "r") as file:
for index, line in enumerate(file):
if index == 0:
num_nodes, num_rels = line[:-1].split(" ")
num_nodes, num_rels = int(num_nodes), int(num_rels)
print(f"#nodes: {num_nodes}, #relations: {num_rels}")
else:
line = np.array(line[:-1].split(" ")).astype(np.int64)
train_data.append(line)
if line[0] in train_pool:
train_indices[line[0]].append(index - 1)
if line[-1] in train_pool:
train_indices[line[-1]].append(index - 1)
if args.attributed == "True":
node_attri = {}
with open(node, "r") as file:
for line in file:
line = line[:-1].split("\t")
node_attri[int(line[0])] = np.array(line[1].split(",")).astype(np.float32)
return np.array(train_data), num_nodes, num_rels, train_indices, len(train_indices), np.array(
[node_attri[k] for k in range(len(node_attri))]
).astype(np.float32)
elif args.attributed == "False":
return np.array(train_data), num_nodes, num_rels, train_indices, len(train_indices), None
def load_label(train_label):
train_pool, train_labels, all_labels, multi = set(), {}, set(), False
with open(train_label, "r") as file:
for line in file:
node, label = line[:-1].split("\t")
node = int(node)
train_pool.add(node)
if multi or "," in label:
multi = True
label = np.array(label.split(",")).astype(np.int64)
                for each in label:
                    all_labels.add(each)
train_labels[node] = label
else:
label = int(label)
train_labels[node] = label
all_labels.add(label)
return train_pool, train_labels, len(all_labels), multi
def load_unsupervised(args, link, node):
num_nodes, num_rels, train_data = 0, 0, []
with open(link, "r") as file:
for index, line in enumerate(file):
if index == 0:
num_nodes, num_rels = line[:-1].split(" ")
num_nodes, num_rels = int(num_nodes), int(num_rels)
print(f"#nodes: {num_nodes}, #relations: {num_rels}")
else:
line = np.array(line[:-1].split(" ")).astype(np.int64)
train_data.append(line)
if args.attributed == "True":
node_attri = {}
with open(node, "r") as file:
for line in file:
line = line[:-1].split("\t")
node_attri[int(line[0])] = np.array(line[1].split(",")).astype(np.float32)
return np.array(train_data), num_nodes, num_rels, np.array([node_attri[k] for k in range(len(node_attri))]
).astype(np.float32)
elif args.attributed == "False":
return np.array(train_data), num_nodes, num_rels, None
def save(args, embs, index=None):
with open(f"{args.output}", "w") as file:
file.write(str(args))
file.write("\n")
if index is None:
for n, emb in enumerate(embs):
file.write(f"{n}\t")
file.write(" ".join(emb.astype(str)))
file.write("\n")
else:
for n, emb in zip(index, embs):
file.write(f"{n}\t")
file.write(" ".join(emb.astype(str)))
file.write("\n")
return
#######################################################################
#
# Utility function for building training and testing graphs
#
#######################################################################
def get_adj_and_degrees(num_nodes, triplets):
""" Get adjacency list and degrees of the graph
"""
degrees = np.zeros(num_nodes).astype(np.int64)
for i, triplet in enumerate(triplets):
degrees[triplet[0]] += 1
degrees[triplet[2]] += 1
return degrees
def sample_subgraph_by_randomwalks(graph, seed_nodes, depth=2, width=10):
if isinstance(seed_nodes, torch.Tensor):
nodes = [seed_nodes]
seed_nodes = set(seed_nodes.numpy())
elif isinstance(seed_nodes, np.ndarray):
nodes = [torch.from_numpy(seed_nodes)]
seed_nodes = set(seed_nodes)
else:
nodes = [torch.tensor(seed_nodes)]
seed_nodes = set(seed_nodes)
for i in range(width - 1):
traces, types = dgl.sampling.random_walk(graph, nodes[0], length=depth)
nodes.append(dgl.sampling.pack_traces(traces, types)[0])
nodes = torch.unique(torch.cat(nodes))
subg = dgl.sampling.sample_neighbors(
graph, nodes, width, edge_dir="in", copy_ndata=True, copy_edata=True
)
# remove nodes
in_deg = subg.in_degrees().float()
out_deg = subg.out_degrees().float()
deg = in_deg + out_deg
del_nids = torch.LongTensor(sorted(set(subg.ndata[dgl.NID][deg == 0].numpy()) - seed_nodes))
subg.remove_nodes(del_nids)
# del deg and norm
for k in ["in_deg", "out_deg", "norm"]:
if k in subg.ndata:
subg.ndata.pop(k)
for k in ["in_deg", "out_deg", "norm"]:
if k in subg.edata:
subg.edata.pop(k)
return subg
def sample_subgraph_by_neighbors(graph, seed_nodes, depth=2, width=10):
if isinstance(seed_nodes, torch.Tensor):
nodes = seed_nodes
seed_nodes = set(seed_nodes.numpy())
elif isinstance(seed_nodes, np.ndarray):
nodes = torch.from_numpy(seed_nodes)
seed_nodes = set(seed_nodes)
else:
nodes = torch.tensor(seed_nodes)
seed_nodes = set(seed_nodes)
for i in range(depth - 1):
subg = dgl.sampling.sample_neighbors(
graph, nodes, width, edge_dir="in", copy_ndata=True, copy_edata=False
)
mask = (subg.ndata["out_deg"] > 0)
nodes = torch.unique(torch.cat([nodes, subg.ndata[dgl.NID][mask]]))
subg = dgl.sampling.sample_neighbors(
graph, nodes, width, edge_dir="in", copy_ndata=True, copy_edata=True
)
# remove nodes
in_deg = subg.in_degrees().float()
out_deg = subg.out_degrees().float()
deg = in_deg + out_deg
del_nids = torch.LongTensor(sorted(set(subg.ndata[dgl.NID][deg == 0].numpy()) - seed_nodes))
subg.remove_nodes(del_nids)
# del deg and norm
for k in ["in_deg", "out_deg", "norm"]:
if k in subg.ndata:
subg.ndata.pop(k)
for k in ["in_deg", "out_deg", "norm"]:
if k in subg.edata:
subg.edata.pop(k)
return subg
def generate_sampled_graph_and_labels_supervised(
graph,
edges,
sampler,
sample_depth,
sample_width,
split_size,
train_indices,
train_labels,
multi,
nlabel,
ntrain,
if_train=True,
label_batch_size=512,
batch_index=0
):
"""Get training graph and signals
First perform edge neighborhood sampling on graph, then perform negative
sampling to generate negative samples
"""
labeled_samples, sampled_nodes = labeled_edges_sampling(edges, train_indices, ntrain, if_train, label_batch_size, batch_index)
seed_nodes = np.unique(np.concatenate([edges[:, 0], edges[:, 2], labeled_samples[:, 0], labeled_samples[:, 2]]))
if multi:
matched_labels, matched_index = correct_order_multi(seed_nodes, sampled_nodes, train_labels, nlabel)
else:
matched_labels, matched_index = correct_order_single(seed_nodes, sampled_nodes, train_labels)
seed_nodes = np.unique(np.concatenate([edges[:, 0], edges[:, 2], labeled_samples[:, 0], labeled_samples[:, 2]]))
if sampler == "neighbor":
subg = sample_subgraph_by_neighbors(graph, torch.from_numpy(seed_nodes), sample_depth, sample_width)
elif sampler == "randomwalk":
subg = sample_subgraph_by_randomwalks(graph, torch.from_numpy(seed_nodes), sample_depth, sample_width)
samples = np.concatenate([edges, labeled_samples])
samples[:, 0] = convert_subgraph_nids(samples[:, 0], subg.ndata[dgl.NID].numpy())
samples[:, 2] = convert_subgraph_nids(samples[:, 2], subg.ndata[dgl.NID].numpy())
# randomly delete edges from subgraphs
if split_size < 1.0:
del_eids = np.unique(uniform_choice_int(subg.number_of_edges(), int(subg.number_of_edges() * (1 - split_size))))
subg.remove_edges(del_eids)
return subg, samples, matched_labels, matched_index
def generate_sampled_graph_and_labels_unsupervised(
graph,
edges,
sampler,
sample_depth,
sample_width,
split_size,
negative_rate
):
"""Get training graph and signals
First perform edge neighborhood sampling on graph, then perform negative
sampling to generate negative samples
"""
# negative sampling
neg_samples = negative_sampling(edges, graph.number_of_nodes(), negative_rate)
seed_nodes = np.unique(np.concatenate([edges[:, 0], edges[:, 2], neg_samples[:, 0], neg_samples[:, 2]]))
if sampler == "neighbor":
subg = sample_subgraph_by_neighbors(graph, torch.from_numpy(seed_nodes), sample_depth, sample_width)
elif sampler == "randomwalk":
subg = sample_subgraph_by_randomwalks(graph, torch.from_numpy(seed_nodes), sample_depth, sample_width)
samples = np.concatenate([edges, neg_samples])
samples[:, 0] = convert_subgraph_nids(samples[:, 0], subg.ndata[dgl.NID].numpy())
samples[:, 2] = convert_subgraph_nids(samples[:, 2], subg.ndata[dgl.NID].numpy())
# randomly delete edges from subgraphs
if split_size < 1.0:
del_eids = np.unique(uniform_choice_int(subg.number_of_edges(), int(subg.number_of_edges() * (1 - split_size))))
subg.remove_edges(del_eids)
labels = np.zeros((len(samples)), dtype=np.float32)
labels[:len(edges)].fill(1.0)
return subg, samples, labels
def compute_edgenorm(g, norm="in"):
if "in_deg" not in g.ndata:
g.ndata["in_deg"] = g.in_degrees()
if "out_deg" not in g.ndata:
g.ndata["out_deg"] = g.out_degrees()
in_deg = g.ndata["in_deg"].float()
out_deg = g.ndata["out_deg"].float()
u, v = g.all_edges(form="uv", order="eid")
if norm == "in":
norm = in_deg[v].reciprocal().unsqueeze(-1)
elif norm == "out":
norm = out_deg[u].reciprocal().unsqueeze(-1)
elif norm == "both":
norm = torch.pow(out_deg[u] * in_deg[v], 0.5).reciprocal().unsqueeze(-1)
norm.masked_fill_(torch.isnan(norm), norm.min())
norm.masked_fill_(torch.isinf(norm), norm.min())
return norm
def compute_largest_eigenvalues(g):
if "in_deg" not in g.ndata:
g.ndata["in_deg"] = g.in_degrees()
if "out_deg" not in g.ndata:
g.ndata["out_deg"] = g.out_degrees()
u, v = g.all_edges(form="uv", order="eid")
in_deg = g.ndata["in_deg"].float()
out_deg = g.ndata["out_deg"].float()
max_nd = (out_deg[u] + in_deg[v]).max()
max_ed = out_deg.max() + in_deg.max()
node_eigenv = max_nd
edge_eigenv = max_ed
return node_eigenv, edge_eigenv
def build_graph_from_triplets(num_nodes, num_rels, triplets):
""" Create a DGL graph. The graph is bidirectional because RGCN authors
use reversed relations.
This function also generates edge type and normalization factor
(reciprocal of node incoming degree)
"""
g = dgl.DGLGraph()
g.add_nodes(num_nodes)
triplets = triplets.copy()
triplets.view(
[("src", triplets.dtype), ("rel", triplets.dtype), ("dst", triplets.dtype)]
).sort(axis=0, order=["src", "dst", "rel"])
g.add_edges(triplets[:, 0], triplets[:, 2])
g.add_edges(triplets[:, 2], triplets[:, 0])
rel = np.concatenate([triplets[:, 1], triplets[:, 1] + num_rels])
g.edata["type"] = torch.from_numpy(rel).long()
g.edata["norm"] = compute_edgenorm(g)
return g
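# Illustrative call with assumed toy triplets (src, rel, dst); the resulting graph has
# 2 * len(triplets) edges because every triplet is also added in the reverse direction:
# toy_triplets = np.array([[0, 0, 1], [1, 1, 2], [2, 0, 3]])
# g_toy = build_graph_from_triplets(num_nodes=4, num_rels=2, triplets=toy_triplets)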
def labeled_edges_sampling(edges, train_indices, ntrain, if_train, label_batch_size, batch_index=0):
if if_train:
sampled_index = set(uniform_choice_int(ntrain, label_batch_size))
else:
sampled_index = set(
np.arange(batch_index * label_batch_size, min(ntrain, (batch_index + 1) * label_batch_size))
)
new_edges, sampled_nodes = [], set()
for index, (labeled_node, node_edges) in enumerate(train_indices.items()):
if index in sampled_index:
sampled_nodes.add(labeled_node)
new_edges.append(np.array(node_edges))
new_edges = np.unique(np.concatenate(new_edges))
return new_edges, sampled_nodes
@numba.jit(
nopython=True
)
def correct_order_single(node_id, sampled_nodes, train_labels):
matched_labels, matched_index = [], []
for index, each in enumerate(node_id):
if each in sampled_nodes:
matched_labels.append(train_labels[each])
matched_index.append(index)
return np.array(matched_labels), np.array(matched_index)
@numba.jit(
nopython=True
)
def correct_order_multi(node_id, sampled_nodes, train_labels, nlabel):
matched_labels, matched_index = [], []
for index, each in enumerate(node_id):
if each in sampled_nodes:
curr_label = np.zeros(nlabel, dtype=np.int64)
curr_label[train_labels[each]] = 1
matched_labels.append(curr_label)
matched_index.append(index)
    return np.array(matched_labels), np.array(matched_index)
'''
Camera
1. camera model. 3d vertices project to 2d
2. estimate camera matrix. 2d points and corresponding 3d --> pose parameter
'''
import numpy as np
import math
from math import cos, sin
'''
Camera Models (chapter 6. in MVGCV)
Mapping between the 3D world(object space) and a 2D image.
Given: 3d points, Camera Matrix
Return: projected 2d points.
X: 3x1 for 3d coordinates. 4x1 for homogeneous coordinates.(just add 1)
x: 2x1 for 2d coordinates. 3x1 for homogeneous coordinates.
General projective camera:
x = PX, P = [M | p4]
P: 3x4 homogeneous camera projection matrix.
dof = 11
det(M)
    != 0, non-singular: finite projective camera.
    = 0, singular: camera at infinity.
http://campar.in.tum.de/twiki/pub/Chair/TeachingWs09Cv2/3D_CV2_WS_2009_Reminder_Cameras.pdf
--------------
Models
1. Finite project camera
P = [M | -MC]
rank(M) = 3
    M is non-singular
P = K[R | -RC] = K[R | t]
K = [fx s px
0 fy py
0 0 1]
s: skew parameter
K: intrinsic camera parameters. 5 dof
    R, C: extrinsic camera parameters, each 3 dof
dof = 11
* CCD camera: (when s = 0. no skew.)
P = K[R | -RC] = K[R | t]
K = [fx 0 px
0 fy py
0 0 1]
aspect ratio = fy/fx
dof = 10
* Basic pinhole model: (when s=0, aspect ratio=1--> fy = fx)
P = K[R | -RC] = K[R | t]
K = [f 0 px
0 f py
0 0 1]
dof = 9
2. Camera at infinity
P = [M|t]
    M is singular
- Affine cameras: (!! the most important in practice)
P: the last row P^(3T) is of the form [0, 0, 0, 1]
(then points at infinity are mapped to points at infinity.
My understanding: infinite points:[X,Y,Z,0] --> PX = [x, y ,0] also infinite points)
    Conditions (when the affine camera can approximate the true camera):
(i) The depth relief is small compared to the average depth(d0)
(ii) The distance of the point from the principal ray is small
P = [m11 m12 m13 t1
m21 m22 m23 t2
0 0 0 1]
= [M23 t
0 1]
= [M|t]
= K[R|t]
rank(M) = 2
M: last row zero.
K = [fx s 0
0 fy 0
0 0 1]
R = [r11 r12 r13
r21 r22 r23
0 0 0]
t = [t1
t2
1]
dof = 8
* Weak perspective projection:
P = K[R|t] = [M|t]
M: last row zero. the first two rows orthogonal.
K = [fx 0 0
0 fy 0
0 0 1]
dof = 7
!!!!!!
* Scaled orthographic projection:
P = K[R|t] = [M|t]
M: last row zero. the first two rows orthogonal and of equal norm.
K = [f 0 0
0 f 0
0 0 1]
dof = 6
Or for inhomogeneous coordinates:
x = MX + t2d
(M: the rows are the scalings of rows of a rotation matrix)
x = sP*R*X + t2d
P = [1 0 0
0 1 0]
---------------
Relationship between finite projective camera and orthographic camera
P_orthog = P_proj * H
H = [1 0 0 0
0 1 0 0
0 0 0 1]
http://web.eecs.umich.edu/~silvio/teaching/EECS442_2010/lectures/lecture10.pdf
-------------------
Decomposition of camera matrix
1. Finding the camera center
For finite cameras and cameras at infinity
PC = 0
2. Finding the camera orientation and internal parameters
For a finite camera:
P = [M | -MC] = K[R | -RC] = K[R | t]
    K: internal parameters, an upper-triangular matrix
    R: camera orientation, an orthogonal matrix.
So,
decomposing M as M = KR using RQ-decomposition
'''
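# Hypothetical helper (not part of the original module): decompose a finite camera
# matrix P = [M | p4] into intrinsics K, rotation R and camera centre C via the
# RQ decomposition mentioned above. The sign fix on K's diagonal is one common
# convention; treat this as an illustrative sketch rather than the file's own API.
def decompose_camera_matrix(P):
    from scipy.linalg import rq
    M, p4 = P[:, :3], P[:, 3]
    K, R = rq(M)                        # M = K @ R, K upper-triangular, R orthogonal
    S = np.diag(np.sign(np.diag(K)))    # force a positive diagonal on K
    K, R = K @ S, S @ R
    C = -np.linalg.solve(M, p4)         # camera centre satisfies P @ [C; 1] = 0
    return K / K[2, 2], R, C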
## For 3D Morphable Models
## often using scaled orthographic projection(SOP)
## which has 6 parameters: s(scale, 1) R(rotation matrix, 3) t2d(2D translation, 2)
## Actually, this can be seen as
## a similarity transform in 3d space (see table 3.2, which has 7 dof)
## then maps parallel world lines to parallel image lines.
## Because the depth is not used in the 2d image, the translation along the
## optical axis (tz) can be ignored, so it also has the same 6 parameters.
def initial_sRt(rx = 0, ry = 0, rz = 0):
'''
'''
s = 6e-04
R = angle2matrix(rx, ry, rz)
t2d = [66, 66]
return s, R, t2d
def initial_pp(rx = 0, ry = 0, rz = 0):
'''
'''
s = 6e-04
t2d = [66, 66]
pp = np.array([s, rx, ry, rz, t2d[0], t2d[1]])[:, np.newaxis]
return pp
def scaled_orthog_project(vertices, s, R, t2d, isKeepZ = False):
''' scaled orthographic projection
    assumes: variations in depth over the object are small relative to the mean distance from camera to object
Args:
vertices: (3,nver).
s: (1,).
R: (3,3).
t2d: (2,).
isKeepZ
Returns:
projected_vertices: (2,nver)
'''
nver = vertices.shape[1]
t2d = np.squeeze(np.array(t2d))
if isKeepZ:
t3d = np.ones(3)
t3d[:2] = t2d
projected_vertices = s * R.dot(vertices) + np.tile(t3d[:, np.newaxis], [1, nver])
else:
        P = np.array([[1, 0, 0], [0, 1, 0]])
        projected_vertices = s * P.dot(R.dot(vertices)) + np.tile(t2d[:, np.newaxis], [1, nver])
    return projected_vertices
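# Illustrative usage with assumed toy inputs (identity rotation, unit scale,
# zero 2D translation):
# verts = np.random.rand(3, 100)
# pts2d = scaled_orthog_project(verts, 1.0, np.eye(3), [0, 0])   # shape (2, 100)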
"""
This module contains all functions that are used the load the data.
Todo:
* Clean the code.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
Format for data loaders:
p, x, h, n_full, cate_name
"""
import numpy as np
import scipy as sp
import pickle
from scipy import stats
from scipy.stats import ttest_ind
import matplotlib.pyplot as plt
import adafdr
from adafdr.util import *
from matplotlib import mlab
from adafdr.method import *
import logging
# External datasets
def data_airway():
file_path = adafdr.__path__[0]
file_name = file_path + '/data/airway'
X = np.loadtxt(file_name,skiprows=0,delimiter=',')
x=X[:,2].reshape([-1,1])
p=X[:,0]
return p, x
def data_bottomly():
file_path = adafdr.__path__[0]
file_name = file_path + '/data/bottomly'
X = np.loadtxt(file_name,skiprows=0,delimiter=',')
x=X[:,2].reshape([-1,1])
p=X[:,0]
return p, x
def data_pasilla():
file_path = adafdr.__path__[0]
file_name = file_path + '/data/pasilla'
X = np.loadtxt(file_name,skiprows=0,delimiter=',')
x=X[:,2].reshape([-1,1])
p=X[:,0]
return p, x
def data_small_gtex():
# Hard-coded information of the GTEx dataset.
cate_name = {3: {1: 'TssA', 2: 'TssAFlnk', 3: 'TxFlnk', 4: 'Tx',
5: 'TxWk', 6: 'EnhG', 7: 'Enh', 8: 'ZNF/Rpts',
9: 'Het', 10: 'TssBiv', 11: 'BivFlnk', 12: 'EnhBiv',
13: 'ReprPC', 14: 'ReprPCWk', 15: 'Quies'}}
n_full = 172353475
fname = 'GTEx_small.pickle'
file_path = adafdr.__path__[0]
fname = file_path + '/data/' + fname
with open(fname, 'rb') as handle:
p = pickle.load(handle)
x = pickle.load(handle)
cis_name = pickle.load(handle)
return p, x, n_full, cate_name, cis_name
def data_small_gtex_chr21(opt='Adipose_Subcutaneous'):
np.random.seed(0)
# Hard-coded information of the GTEx dataset.
cate_name = {3: {1: 'TssA', 2: 'TssAFlnk', 3: 'TxFlnk', 4: 'Tx',
5: 'TxWk', 6: 'EnhG', 7: 'Enh', 8: 'ZNF/Rpts',
9: 'Het', 10: 'TssBiv', 11: 'BivFlnk', 12: 'EnhBiv',
13: 'ReprPC', 14: 'ReprPCWk', 15: 'Quies'}}
file_path = adafdr.__path__[0]
file_name = file_path + '/data/%s_chr21_300k'%opt
temp_data = np.loadtxt(file_name, dtype=str, delimiter=',')
p = temp_data[:, 0].astype(float)
cis_name = temp_data[:, 1]
x = temp_data[:, 2:].astype(float)
x[:, 0] = np.log10(x[:, 0]+0.5) + np.random.rand(x.shape[0])*1e-8
return p, x, cate_name, cis_name
## generating the 1d toy example
def toy_data_1d(job_id=0,n_sample=10000,vis=0):
def pi1_gen(x): # need to be fixed
pi1=0.03*sp.stats.norm.pdf(x,loc=0.2,scale=0.05)+0.04*sp.stats.norm.pdf(x,loc=0.8,scale=0.05)
pi1+=0.15*x
return pi1
def plot_pi1_1d(pi1_gen):
x_grid = np.linspace(0,1,100)
pi1_grid = pi1_gen(x_grid)
plt.plot(x_grid,pi1_grid)
plt.xlabel('covariate')
plt.ylabel('alt distribution')
plt.title('the alternative distribution')
np.random.seed(42)
if job_id == 0: # Gaussian mixtures
x = np.random.uniform(0,1,size=n_sample)
pi1 = pi1_gen(x)
p = np.zeros(n_sample)
# generating the hypothesis
h = np.array((np.random.uniform(size=n_sample)<pi1),dtype=int)
n0 = np.sum(h==0)
n1 = np.sum(h==1)
# generating the p-values
p[h==0] = np.random.uniform(size=n0)
p[h==1] = np.random.beta(a=0.4,b=4,size=n1)
#plt.figure()
#plt.hist(p[h==1],bins=100)
#plt.show()
#print(np.mean(p[h==1]))
if vis == 1:
print('### Summary ###')
        print('# null: %s, # alt: %s, null proportion: %s'%(str(np.sum(h==0)),str(np.sum(h==1)),str(np.sum(h==0)/h.shape[0])))
plt.figure(figsize=[16,5])
plt.subplot(121)
plot_pi1_1d(pi1_gen)
plt.subplot(122)
plot_data_1d(p,x,h)
plt.legend()
plt.show()
return p,x,h
def write_simulation_data(p, x, h, filename):
"""Write the simulation data with format:
p, h, x0, x1, x2, ... for the columns
Args:
p ((n,) ndarray): The p-value.
        x ((n,d) ndarray): The covariates.
h ((n,) boolean ndarray): The ground truth. True indicates the
hypothesis is alternative.
filename (str): path of the file.
Returns:
"""
temp_data = np.zeros([x.shape[0], x.shape[1]+2], dtype=float)
temp_data[:, 0] = p
temp_data[:, 1] = h
temp_data[:, 2:] = x
np.savetxt(filename, temp_data, delimiter=",")
return
def load_simulation_data(filename):
"""Load the simulation data with format:
p, h, x0, x1, x2, ... for the columns
Args:
filename (str): path of the file.
Returns:
p ((n,) ndarray): The p-value.
        x ((n,d) ndarray): The covariates.
h ((n,) boolean ndarray): The ground truth. True indicates the
hypothesis is alternative.
"""
temp_data = np.loadtxt(filename, delimiter=',')
p = temp_data[:, 0].astype(float)
h = temp_data[:, 1].astype(bool)
x = temp_data[:, 2:].astype(float)
return p, x, h
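# Illustrative round trip using the CSV layout documented above (the toy arrays and
# file name are assumptions):
# p_toy = np.array([0.01, 0.5]); x_toy = np.array([[0.1], [0.9]])
# h_toy = np.array([True, False])
# write_simulation_data(p_toy, x_toy, h_toy, "sim_toy.csv")
# p2, x2, h2 = load_simulation_data("sim_toy.csv")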
def load_x_mixture(opt=0):
"""Generate a mixture data (of x) to test mixture_fit.
Args:
opt (int): 0: 2d slope.
1: 2d bump.
2: 2d slope+bump.
3: 10d data with slope+bump in the first 2d.
Returns:
x ((n,d) ndarray): The mixture data
param (list): Parameters that are used to generate the data.
"""
n_sample = 10000
if opt==0:
a = np.array([2,0],dtype=float)
x_grid = get_grid_2d(101)
n_grid = x_grid.shape[0]
p = f_slope(x_grid,a)
p /= p.sum()
x = np.random.choice(np.arange(n_grid),size=n_sample,p=p)
x = x_grid[x,:]
param = a
elif opt==1:
mu = np.array([0.5,0.05],dtype=float)
sigma = np.array([0.1,0.1],dtype=float)
x_grid = get_grid_2d(101)
n_grid = x_grid.shape[0]
p = f_bump(x_grid,mu,sigma)
p /= p.sum()
x = np.random.choice(np.arange(n_grid),size=n_sample,p=p)
x = x_grid[x,:]
param = (mu,sigma)
elif opt==2:
w = np.array([0.4,0.3,0.3],dtype=float)
a = np.array([2,0],dtype=float)
mu = np.array([[0.2,0.2],[0.7,0.7]],dtype=float)
sigma = np.array([[0.1,0.2],[0.1,0.1]],dtype=float)
x_grid = get_grid_2d(101)
n_grid = x_grid.shape[0]
p = f_all(x_grid,a,mu,sigma,w)
p /= p.sum()
x = np.random.choice(np.arange(n_grid),size=n_sample,p=p)
x = x_grid[x,:]
param = (a,mu,sigma,w)
elif opt==3:
w = np.array([0.4,0.3,0.3],dtype=float)
a = np.array([2,0],dtype=float)
mu = np.array([[0.2,0.2],[0.7,0.7]],dtype=float)
sigma = np.array([[0.1,0.2],[0.1,0.1]],dtype=float)
x_grid = get_grid_2d(101)
n_grid = x_grid.shape[0]
p = f_all(x_grid,a,mu,sigma,w)
p /= p.sum()
x = np.random.choice(np.arange(n_grid),size=n_sample,p=p)
x = x_grid[x,:]
a_ = np.zeros(10)
a_[0:2] = a
mu_ = np.zeros([2,10],dtype=float)+0.5
mu_[:,0:2] = mu
sigma_ = np.ones([2,10],dtype=float)
sigma_[:,0:2] = sigma
param = (a_,mu_,sigma_,w)
x_noise = np.random.uniform(high=1,low=0,size = (n_sample,8))
x = np.concatenate([x,x_noise],1)
else:
pass
return x,param
def load_1d_bump_slope(n_sample=20000, n_dim=2, random_state=0):
"""Generate a 1d simulated data.
Args:
n_sample (int): The number of hypotheses.
n_dim (int): The number of dimensions. If n_dim>2, the rest of dimensions
contains uninformative features.
random_state (int): The random seed
Returns:
p ((n,) ndarray): The p-value.
x ((n,d) ndarray): The covaraites.
h ((n,) boolean ndarray): The ground truth. True indicates the
hypothesis is alternative.
n_full (int): The number of hypotheses before filtering. Same as
n if no filtering is applied.
cate_name (dic of dics): (key,val) gives the (feature index, cate_name_dic) for
discrete features. For each discrete feature, the (key,val) of the sub dic
gives the (val,name) for all categories.
"""
np.random.seed(random_state)
# Generate pi1
x_grid = get_grid_1d(101)
x = np.random.choice(np.arange(x_grid.shape[0]), size=n_sample)
x = x_grid[x,:]
w = np.array([0.5,0.25,0.25],dtype=float)
a = np.array([0.5],dtype=float)
mu = np.array([[0.25], [0.75]],dtype=float)
sigma = np.array([[0.05], [0.05]],dtype=float)
pi1 = (0.1*f_all(x,a,mu,sigma,w)).clip(max=1)
# Generate data
p = np.zeros(n_sample)
h = np.zeros(n_sample, dtype=bool)
rnd = np.random.uniform(size=n_sample)
p[rnd>=pi1] = np.random.uniform(size=np.sum(rnd>=pi1))
p[rnd<pi1] = np.random.beta(a=0.3, b=4, size=np.sum(rnd<pi1))
h[rnd<pi1] = True
# Add non-informative dimensions.
if n_dim>1:
        x_noise = np.random.uniform(size=(n_sample, n_dim-1))
x = np.concatenate([x,x_noise],1)
return p,x,h,p.shape[0],{}
def load_2d_bump_slope(n_sample=20000, n_dim=2, random_state=0):
"""Generate a simulated data.
Args:
n_sample (int): The number of hypotheses.
n_dim (int): The number of dimensions. If n_dim>2, the rest of dimensions
contains uninformative features.
random_state (int): The random seed
Returns:
p ((n,) ndarray): The p-value.
        x ((n,d) ndarray): The covariates.
h ((n,) boolean ndarray): The ground truth. True indicates the
hypothesis is alternative.
n_full (int): The number of hypotheses before filtering. Same as
n if no filtering is applied.
cate_name (dic of dics): (key,val) gives the (feature index, cate_name_dic) for
discrete features. For each discrete feature, the (key,val) of the sub dic
gives the (val,name) for all categories.
"""
np.random.seed(random_state)
# Generate pi1
x_grid = get_grid_2d(101)
x = np.random.choice(np.arange(x_grid.shape[0]),size=n_sample)
x = x_grid[x,:]
w = np.array([0.5,0.25,0.25],dtype=float)
a = np.array([0.5,0.5],dtype=float)
mu = np.array([[0.25,0.25],[0.75,0.75]],dtype=float)
sigma = np.array([[0.1,0.1],[0.1,0.1]],dtype=float)
pi1 = (0.1*f_all(x,a,mu,sigma,w)).clip(max=1)
# Generate data
p = np.zeros(n_sample)
h = np.zeros(n_sample, dtype=bool)
rnd = np.random.uniform(size=n_sample)
p[rnd>=pi1] = np.random.uniform(size=np.sum(rnd>=pi1))
p[rnd<pi1] = np.random.beta(a=0.3, b=4, size=np.sum(rnd<pi1))
h[rnd<pi1] = True
# Add non-informative dimensions.
if n_dim>2:
x_noise = np.random.uniform(size=(n_sample, n_dim-2))
x = np.concatenate([x,x_noise],1)
return p,x,h,p.shape[0],{}
def load_data_ihw(random_state=0):
"""data from ihw supp 4.2.2
"""
np.random.seed(random_state)
n_sample = 20000
n_alt = int(20000*0.1)
h = np.zeros([n_sample], dtype=int)
h[0:n_alt] = 1
data_case = np.random.randn(5, n_sample) + h*2
data_control = np.random.randn(5, n_sample)
p = ttest_ind(data_case, data_control)[1]
    data_pool = np.concatenate([data_case, data_control], axis=0)
import os
import csv
import glob
import numpy as np
import collections
import pandas as pd
# Hold the prices for the small data frame
Prices = collections.namedtuple(
"Prices", field_names=["open", "high", "low", "close", "volume"]
)
# Hold the prices for the large data frame
PricesL = collections.namedtuple(
"Prices",
field_names=[
"open",
"high",
"low",
"close",
"volume",
"vwap",
"histogram",
"macd",
"signal",
"rsi",
"bbands",
"ma10",
"ma20",
"ma50",
],
)
# Helper function for creating the Prices-tuple
def get_tuple_from_df(df, large=False):
if large:
        for i in (10, 20, 50):
            df[f"ma{i}"] = df.rolling(window=i)["close"].mean()
df = df.dropna()
return PricesL(
open=np.array(df["open"]),
            high=np.array(df["high"]),
import numpy as np
import matplotlib.pyplot as plt
import random
from scipy.signal import convolve2d
import math
from scipy import misc
import os
from PIL import Image
from numba import jit
import cv2
import scipy.misc
np.set_printoptions(threshold=np.inf)
def S_UNIWARD(coverPath, payload):
sgm = 1
## Get 2D wavelet filters - Daubechies 8
# 1D high pass decomposition filter
hpdf_list = [-0.0544158422, 0.3128715909, -0.6756307363, 0.5853546837, 0.0158291053,
-0.2840155430, -0.0004724846, 0.1287474266, 0.0173693010, -0.0440882539,
- 0.0139810279, 0.0087460940, 0.0048703530, -0.0003917404, -0.0006754494, -0.0001174768]
# 1D low pass decomposition filter
hpdf_len = range(0, len(hpdf_list))
hpdf_list_reverse = hpdf_list[::-1]
    lpdf_list = list(hpdf_list)
    for i in range(len(hpdf_list)):
        lpdf_list[i] = ((-1) ** hpdf_len[i]) * hpdf_list_reverse[i]
hpdf_array = np.array([hpdf_list])
    lpdf_array = np.array([lpdf_list])
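    # A hedged sketch (not from the original code) of the usual next step in
    # S-UNIWARD: the three 2D directional filters are outer products of the 1D
    # decomposition filters, e.g.:
    # F1 = lpdf_array.T @ hpdf_array   # horizontal high-pass
    # F2 = hpdf_array.T @ lpdf_array   # vertical high-pass
    # F3 = hpdf_array.T @ hpdf_array   # diagonal high-pass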
__author__ = 'Chronis'
from pySLM.definitions import SLM
import numpy as np
from tkinter import _setit
import PIL
from astropy.io import fits
import pygame, os, time, pickle
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import threading
import matplotlib.image as mplimg
from matplotlib.colors import Normalize
from matplotlib import cm
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
def cart2pol(x,y):
"""
Takes cartesian (2D) coordinates and transforms them into polar.
"""
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return (rho, phi)
class dummyClass:
def __init__(self):
print('Dummy class')
self.maps = {'zero': np.zeros((1024, 768, 3))}
self.SLM_type = 'None'
self.pixelSize = 8
self.dimensions = (1024, 768, 3)
self.width = 1024
self.height = 768
self.size = (1024, 768)
class StdoutRedirector(object):
"""
Redirects all stdout to this object which then can be embeded into a text widget
"""
def __init__(self, text_widget):
self.text_space = text_widget
def write(self, string):
self.text_space.insert('end', string)
self.text_space.see('end')
def flush(self):
pass
def array2PIL(arr, size):
mode = 'RGBA'
arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])
if len(arr[0]) == 3:
arr = np.c_[arr, 255*np.ones((len(arr), 1), np.uint8)]
return PIL.Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)
class DropMenu:
"""
DropMenu is a widget that will contain various functionalities of a menu
"""
def __init__(self, master, window):
# Create dropdown menu
self.path = os.getcwd()
self.window = window
self.master = master
self.menu = Menu(self.master)
self.master.config(menu=self.menu)
# File Option************************************************
self.FileMenu = Menu(self.menu)
self.menu.add_cascade(label='File', menu=self.FileMenu)
self.FileMenu.add_command(label='Open phase map')
self.FileMenu.add_command(label='Save as FITS', command=lambda: self.save_fits())
self.FileMenu.add_command(label='Save weighting function')
self.FileMenu.add_separator()
self.FileMenu.add_command(label='Exit', command=self._quit)
# Settings option***********************************************
self.SettingsMenu = Menu(self.menu)
self.menu.add_cascade(label='Settings', menu=self.SettingsMenu)
self.SettingsMenu.add_command(label='Calibration curve', command=self.calibration_callback)
self.SettingsMenu.add_command(label='Star info', command=self.star_info_callback)
# Tools option**************************************************
self.ToolMenu = Menu(self.menu)
self.menu.add_cascade(label='Tools', menu=self.ToolMenu)
self.ToolMenu.add_command(label='Count')
self.ToolMenu.add_command(label='Histogram')
# Help option ********************************************
self.HelpMenu = Menu(self.menu)
self.menu.add_cascade(label='Help', menu=self.HelpMenu)
self.HelpMenu.add_command(label='Documentation')
self.HelpMenu.add_command(label='App Help')
# Variables **********************************************
try:
self.menu_data = pickle.load(open("SLM_data.p", 'rb'))
self.phase_curve = self.menu_data['phase curve']
except:
file = filedialog.askopenfilename(title="Select phase curve(.npy)")
phase = np.load(file)
self.menu_data = {'phase curve': phase}
self.phase_curve = phase
pickle.dump(self.menu_data, open("SLM_data.p", 'wb'))
        # Take data points from the phase curve and fit a polynomial so that each phase
        # shift value in radians maps to a gray value; the inverse case (gray -> rad)
        # just uses these data points directly.
p = np.polyfit(self.phase_curve, np.arange(0, 256), deg=3)
self.rad_2_gray = np.poly1d(p)
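        # rad_2_gray maps a desired phase shift (in the same units as phase_curve) back
        # to the nearest gray level through the fitted cubic, e.g. self.rad_2_gray(phi)
        # for a target shift phi.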
# size of SLM pixel in microns (um)
self.slm_pxl = StringVar()
# variables for SLM characteristics and system setup used in Multiple stars
self.slm_pxl.set('36')
self.intensity = StringVar()
self.wavelength = StringVar()
self.Fnum = StringVar()
self.lD = StringVar()
self.lD.set('4')
def star_info_callback(self):
"""
Contains info about the optical bench and SLM
:return:
"""
toplevel_r = Toplevel()
toplevel_r.title('Star info')
toplevel_r.geometry("400x150+300+300")
toplevel = ttk.Frame(toplevel_r)
toplevel.grid(column=0, row=0, sticky=(N, W, E, S))
self.wavelength.set('633')
wavelength_entry = Entry(toplevel, textvariable=self.wavelength,justify='center')
wavelength_lab = Label(toplevel, text='Wavelength (nm):')
self.Fnum.set('230')
Fnum_entry = Entry(toplevel, textvariable=self.Fnum, justify='center')
Fnum_lab = Label(toplevel, text='F # :')
self.intensity.set('1')
intensity_entry = Entry(toplevel, textvariable=self.intensity, justify='center')
intensity_lab = Label(toplevel, text='Intensity :')
"""As discussed, here are the correct parameters for the coordinates conversion in the SLM plane :
F# = 230
Pixel_size = 36 um
The spot size in the SLM plane right now is lambda*F# ~ 145 um ~ 4 pixels.
"""
slm_pxl_lab = Label(toplevel, text='SLM pixel size (um):', justify='center')
slm_pxl_entry = Entry(toplevel, textvariable=self.slm_pxl)
lD_lab = Label(toplevel, text='#pixels per l/D:')
lD_entry = Entry(toplevel, textvariable=self.lD)
separator = ttk.Separator(toplevel, orient=VERTICAL)
set_button = ttk.Button(toplevel, text='Set', command=self.apply_star_info)
wavelength_lab.grid(column=0, row=0)
wavelength_entry.grid(column=1, row=0)
Fnum_lab.grid(column=0, row=1)
Fnum_entry.grid(column=1, row=1)
intensity_lab.grid(column=0, row=2)
intensity_entry.grid(column=1, row=2)
separator.grid(column=2, row=0, rowspan=3, sticky=(N, S))
slm_pxl_lab.grid(column=3, row=0)
slm_pxl_entry.grid(column=3, row=1)
lD_lab.grid(column=3, row=2)
lD_entry.grid(column=3, row=3)
set_button.grid(column=0, row=4)
def apply_star_info(self):
pass
def calibration_callback(self):
"""
        Plots the current phase response curve and allows selecting a new one.
:return:
"""
toplevel_r = Toplevel()
toplevel_r.title('Grayvalues calibration')
toplevel_r.geometry("300x300+300+300")
toplevel = ttk.Frame(toplevel_r)
toplevel.grid(column=0, row=0, sticky=(N, W, E, S))
self.curve_plot, self.ax = plt.subplots(figsize=(3,3))
self.line = self.ax.plot(np.arange(256), self.phase_curve, 'o')
self.ax.set_xlim([-1, 260])
self.ax.set_xlabel("gray values")
self.ax.set_ylabel("Phase shift[$\pi$]")
data_plot = FigureCanvasTkAgg(self.curve_plot, master=toplevel)
data_plot.show()
import_curve_button = ttk.Button(toplevel, text='Import curve', command=self.import_curve_callback)
import_curve_button.grid(column=0, row=2)
data_plot.get_tk_widget().grid(column=1, row=2, columnspan=4, rowspan=4)
return
def import_curve_callback(self):
"""
        Used for insertion of a new phase calibration curve. Expects a numpy array of
        length 256, one entry per gray value.
:return:
"""
file = filedialog.askopenfilename(title="Select phase curve(.npy)")
phase = np.load(file)
self.menu_data = {'phase curve': phase}
self.phase_curve = phase
self.line[0].set_data(np.arange(256), phase)
plt.draw()
pickle.dump(self.menu_data, open("SLM_data.p", 'wb'))
return
def save_fits(self, name=None):
"""
Save current open phase mask as a FITS file with the center information
contained in the header
"""
file = filedialog.asksaveasfilename(master=self.master, title='Save as..', initialdir=self.path)
if file is None:
return
self.path = os.path.dirname(file)
file += '.fits'
# current = 0
if name is None:
current = self.window.maps_var.get()
else:
current = name
if current == '':
return
mask = self.window.SLM.maps[current]['data']
if self.window.active:
mask = self.window.image
hdu = fits.PrimaryHDU()
hdu.data = mask[:, :, 0]
hdu.header['center'] = str(self.window.center_position)
if len(self.window.center_position) > 1:
hdu.header['stars'] = str(self.window.multiple_star_position) + "\ ([l/D, azimuth]"
"""
if mask['star info']:
for k, val in mask['star info']:
hdu.header[k] = val
"""
hdu.header['DATE'] = time.strftime("%d/%m/%Y")
hdu.writeto(file)
return
def _quit(self):
self.window.SLM.quit()
self.master.quit() # stops mainloop
self.master.destroy()
return
class SLMViewer:
"""
Basic GUI that enables communication with SLM , on/off switch and
import/manipulation of phase maps
"""
def __init__(self, root):
self.master = Frame(root)
self.master.grid(column=0, row=0, sticky=(N, W, E, S))
root.title('SLM Controller')
try:
self.SLM = SLM()
print("SLM type is %s"%self.SLM.SLM_type)
except UserWarning:
self.SLM = dummyClass()
#raise UserWarning('No SLM connected.')
self.menu = DropMenu(root, self) # add drop-down menu
#self.SLM.pixelSize = int(self.menu.slm_pxl.get())
# =====================================================================================
# make canvas
self.off_image = np.zeros(self.SLM.dimensions)
self.image = self.off_image
self.fig, self.ax = plt.subplots()
self.norm = Normalize(vmin=0, vmax=255)
self.cmap = cm.gray
self.im = plt.imshow(self.image[:, :, 0].T, cmap=self.cmap, norm=self.norm)
self.ax.get_xaxis().set_visible(False)
self.ax.get_yaxis().set_visible(False)
# get image plot onto canvas and app
self.data_plot = FigureCanvasTkAgg(self.fig, master=self.master)
self.data_plot.get_tk_widget().configure(borderwidth=0)
self.fig.suptitle('SLM type : %s'%self.SLM.SLM_type, fontsize=12, fontweight='bold')
self.data_plot.show()
self.fig.canvas.mpl_connect('button_press_event', self.click_callback)
# ====================================================================================
# import phase maps frame
self.import_maps_frame = ttk.LabelFrame(self.master, text='Phase maps')
self.import_map_button = ttk.Button(self.import_maps_frame,
text='Import map', command=self.import_map_callback)
self.clear_list_button = ttk.Button(self.import_maps_frame, text='Clear', command=self.clear_maps)
self.maps_var = StringVar()
self.maps_var.set('')
if len(self.SLM.maps) > 0:
self.maps = [m for m in self.SLM.maps]
else:
self.maps = ['Zeros']
self.maps_options = OptionMenu(self.import_maps_frame, self.maps_var, *self.maps)
self.maps_options.grid(column=0, row=0)
self.import_map_button.grid(column=1, row=0)
self.clear_list_button.grid(column=1, row=1)
# ============================================================================================
# Set up center(s) position
# =============================================================================================
# default mouse position for center is center of SLM
self.mouse_coordinates = (int(self.SLM.width/2), int(self.SLM.height/2))
self.center_position = [[int(self.SLM.width/2), int(self.SLM.height/2)]]
self.plot_update()
self.center_step = 1
# =============================================================================================
# Phase mask activation/de-activation
# =============================================================================================
self.active_frame = LabelFrame(self.master, text='Activate')
self.active_var = StringVar()
self.active_var.set('OFF')
self.activation_button = Button(self.active_frame, textvariable=self.active_var,
command=self.activate, bg='firebrick2')
self.activation_button.grid(column=0, row=0)
self.active = False
# ==========================================================================================
# OPTIONS FRAME
# ==========================================================================================
self.notebook = ttk.Notebook(self.master)
self.fqpm_frame = Frame(self.notebook)
self.vortex_frame = Frame(self.notebook)
self.multiple_frame = Frame(self.notebook)
self.zernike_frame = Frame(self.notebook)
self.rotate_frame = Frame(self.notebook)
self.notebook.add(self.fqpm_frame, text='FQ/EO')
self.notebook.add(self.vortex_frame, text='Vortex')
self.notebook.add(self.multiple_frame, text='Multiple')
self.notebook.add(self.zernike_frame, text='Zernike')
self.notebook.add(self.rotate_frame, text='Phase shift')
self.notebook.grid()
# ===========================================================================================
# Star info in multiple star frame
# ===========================================================================================
self.stars_frame = ttk.LabelFrame(self.multiple_frame, text='Stars')
self.star_1 = Label(self.stars_frame, text='Star 1')
self.star_2 = Label(self.stars_frame, text='Star 2', state=DISABLED)
self.star_3 = Label(self.stars_frame, text='Star 3', state=DISABLED)
self.star_1.grid(column=0, row=1)
self.star_2.grid(column=0, row=2)
self.star_3.grid(column=0, row=3)
I_lab = ttk.Label(self.stars_frame, text='Intensity', width=10)
magn_lab = ttk.Label(self.stars_frame, text='Magnitude', width=10)
l_lab = ttk.Label(self.stars_frame, text='Wavelength(nm)', width=10)
F_lab = ttk.Label(self.stars_frame, text='F #', width=10)
lD_lab = ttk.Label(self.stars_frame, text='l/D', width=10)
phi_lab= ttk.Label(self.stars_frame, text='phi(pi)', width=10)
C_lab = ttk.Label(self.stars_frame, text='Center(x,y)', width=10)
magn_lab.grid(column=1, row=0)
I_lab.grid(column=2, row=0)
l_lab.grid(column=3, row=0)
F_lab.grid(column=4, row=0)
lD_lab.grid(column=5, row=0)
phi_lab.grid(column=6, row=0)
C_lab.grid(column=7, row=0)
# 1st star -- always visible
self.M1 = StringVar()
self.M1.set('0')
M1_entry = ttk.Entry(self.stars_frame, textvariable=self.M1, width=10)
M1_entry.grid(column=1, row=1)
self.I1_num = StringVar()
self.I1_num.set('1')
self.I1_entry = ttk.Entry(self.stars_frame, textvariable=self.I1_num, width=10)
self.I1_entry.grid(column=2, row=1)
self.l1_num = StringVar()
self.l1_num.set('633')
self.l1_entry = ttk.Entry(self.stars_frame, textvariable=self.l1_num, width=10)
self.l1_entry.grid(column=3, row=1)
self.F1_num = StringVar()
self.F1_num.set('230')
self.F1_entry = ttk.Entry(self.stars_frame, textvariable=self.F1_num, width=10)
self.F1_entry.grid(column=4, row=1)
self.starc1 = StringVar()
self.starc1.set('%i,%i' % (int(self.SLM.width/2), int(self.SLM.height/2)))
self.center1_lab = Entry(self.stars_frame, textvariable=self.starc1, width=10)
self.center1_lab.grid(column=7, row=1)
# star 2
self.M2 = StringVar()
self.M2.set('0')
self.M2_entry = ttk.Entry(self.stars_frame, textvariable=self.M2,
width=10, state=DISABLED)
self.M2_entry.grid(column=1, row=2)
self.M2_entry.bind("<Return>", self.magnitude_to_intensity)
self.I2_num = StringVar()
self.I2_num.set('1')
self.I2_entry = ttk.Entry(self.stars_frame, textvariable=self.I2_num,
width=10, state=DISABLED)
self.I2_entry.bind("<Return>", self.magnitude_to_intensity)
self.I2_entry.grid(column=2, row=2)
self.l2_num = StringVar()
self.l2_num.set('633')
self.l2_entry = ttk.Entry(self.stars_frame, textvariable=self.l2_num,
width=10, state=DISABLED)
self.l2_entry.grid(column=3, row=2)
self.F2_num = StringVar()
self.F2_num.set('230')
self.F2_entry = ttk.Entry(self.stars_frame, textvariable=self.F2_num,
width=10, state=DISABLED)
self.F2_entry.grid(column=4, row=2)
self.starc2 = StringVar()
self.starc2.set('0,0')
self.lD_star2 = StringVar()
self.lD_star2.set('1')
self.lD_star2_entry = Entry(self.stars_frame, textvariable=self.lD_star2,
width=10, state=DISABLED)
self.lD_star2_entry.grid(column=5, row=2)
self.phi_star2 = StringVar()
self.phi_star2.set('0')
self.phi_star2_entry = Entry(self.stars_frame, textvariable=self.phi_star2,
width=10, state=DISABLED)
self.phi_star2_entry.grid(column=6, row=2)
self.center2_lab = Entry(self.stars_frame, textvariable=self.starc2,
width=10, state=DISABLED)
self.center2_lab.grid(column=7, row=2)
self.center2_lab.bind("<Return>", self.l_over_D_callback)
# star 3
self.M3 = StringVar()
self.M3.set('0')
self.M3_entry = ttk.Entry(self.stars_frame, textvariable=self.M3,
width=10, state=DISABLED)
self.M3_entry.grid(column=1, row=3)
self.M3_entry.bind("<Return>", self.magnitude_to_intensity)
self.I3_num = StringVar()
self.I3_num.set('1')
self.I3_entry = ttk.Entry(self.stars_frame, textvariable=self.I3_num,
width=10, state=DISABLED)
self.I3_entry.grid(column=2, row=3)
self.I3_entry.bind("<Return>", self.magnitude_to_intensity)
self.l3_num = StringVar()
self.l3_num.set('633')
self.l3_entry = ttk.Entry(self.stars_frame, textvariable=self.l3_num,
width=10, state=DISABLED)
self.l3_entry.grid(column=3, row=3)
self.F3_num = StringVar()
self.F3_num.set('230')
self.F3_entry = ttk.Entry(self.stars_frame, textvariable=self.F3_num,
width=10, state=DISABLED)
self.F3_entry.grid(column=4, row=3)
self.starc3 = StringVar()
self.starc3.set('0,0')
self.lD_star3 = StringVar()
self.lD_star3.set('1')
self.lD_star3_entry = Entry(self.stars_frame, textvariable=self.lD_star3,
width=10, state=DISABLED)
self.lD_star3_entry.grid(column=5, row=3)
self.phi_star3 = StringVar()
self.phi_star3.set('0')
self.phi_star3_entry = Entry(self.stars_frame, textvariable=self.phi_star3,
width=10, state=DISABLED)
self.phi_star3_entry.grid(column=6, row=3)
self.center3_lab = Entry(self.stars_frame, textvariable=self.starc3,
width=10, state=DISABLED)
self.center3_lab.grid(column=7, row=3)
self.center3_lab.bind("<Return>", self.l_over_D_callback)
# ============================================================================================
# FQPM and EOPM frame
# ============================================================================================
self.center1_lab_fqpm = Entry(self.fqpm_frame, textvariable=self.starc1)
self.center1_lab_fqpm.grid(column=4, row=0)
self.single_button = ttk.Button(self.fqpm_frame, text='Make map',
command=lambda: self.make_map('single'))
self.single_button.grid(column=0, row=0)
map_types = ['FQPM', 'EOPM', 'FLAT']
self.map_type_var = StringVar()
self.map_type_var.set('FQPM')
self.map_type_menu = OptionMenu(self.fqpm_frame, self.map_type_var, *map_types)
self.map_type_menu.grid(row=0, column=2)
# =========================================================================================================
# CONTROL FRAME
# =========================================================================================================
self.control_frame = ttk.LabelFrame(self.master, text='Center Controls')
self.cstep_var = StringVar()
self.cstep_var.set('1')
self.center_step_entry = Entry(self.control_frame, textvariable=self.cstep_var, justify='center')
self.center_step_entry.bind("<Return>", self.set_center_step)
self.center_control_up = ttk.Button(self.control_frame, text='^', command=lambda: self.center_move('up', 0))
self.center_control_down = ttk.Button(self.control_frame, text='v', command=lambda: self.center_move('down',0))
self.center_control_left = ttk.Button(self.control_frame, text='<', command=lambda: self.center_move('left',0))
self.center_control_right = ttk.Button(self.control_frame, text='>', command=lambda: self.center_move('right',0))
self.center_control_up.grid(column=1, row=0)
self.center_control_down.grid(column=1, row=2)
self.center_control_left.grid(column=0, row=1)
self.center_control_right.grid(column=2, row=1)
self.center_step_entry.grid(column=1, row=1)
self.center_num = ['1']
self.center_var = StringVar()
self.center_var.set('1')
# Set gray values
self.val_1 = 0
self.val_2 = 1
self.grayval_frame = ttk.LabelFrame(self.fqpm_frame, text='Gray values')
self.gray_1_val = StringVar()
self.gray_1_val.set('0')
self.gray_1_entry = Entry(self.grayval_frame, textvariable=self.gray_1_val, justify='center')
self.gray_1_entry.bind("<Return>", self.arrow_return)
self.gray_1_entry.bind("<Up>", self.arrow_return)
self.gray_1_entry.bind("<Down>", self.arrow_return)
self.gray_1_entry.bind("<Left>", self.arrow_return)
self.gray_1_entry.bind("<Right>", self.arrow_return)
self.gray_2_val = StringVar()
self.gray_2_val.set('0')
self.gray_2_entry = Entry(self.grayval_frame, textvariable=self.gray_2_val, justify='center')
self.gray_2_entry.bind("<Return>", self.arrow_return)
self.gray_2_entry.bind("<Up>", self.arrow_return)
self.gray_2_entry.bind("<Down>", self.arrow_return)
self.gray_2_entry.bind("<Left>", self.arrow_return)
self.gray_2_entry.bind("<Right>", self.arrow_return)
self.gray_1_lab = ttk.Label(self.grayval_frame, text='Gray-value 1')
self.gray_2_lab = ttk.Label(self.grayval_frame, text='Gray-value 2')
self.phase_1_val = StringVar()
self.phase_1_val.set('Phase: %.3f rad'%self.menu.phase_curve[int(self.gray_1_val.get())])
self.phase_2_val = StringVar()
self.phase_2_val.set('Phase: %.3f rad'%self.menu.phase_curve[int(self.gray_2_val.get())])
self.phase_1_lab = ttk.Label(self.grayval_frame, textvariable=self.phase_1_val)
self.phase_2_lab = ttk.Label(self.grayval_frame, textvariable=self.phase_2_val)
self.gray_1_lab.grid(column=0, row=0)
self.gray_2_lab.grid(column=0, row=1)
self.gray_1_entry.grid(column=1, row=0)
self.gray_2_entry.grid(column=1, row=1)
self.phase_1_lab.grid(column=2, row=0)
self.phase_2_lab.grid(column=2, row=1)
# ============================================================================================
# ZERNIKE TAB
# ============================================================================================
# implement various zernike terms which can be used to correct aberrations due to the SLM back-plate
#DEFOCUS
defocus_coeff_lab = ttk.Label(self.zernike_frame, text='Defocus:')
defocus_coeff_lab.grid(column=0, row=0)
self.defocus_coeff = DoubleVar()
self.defocus_coeff.set(0)
defocus_coeff_entry = Entry(self.zernike_frame, textvariable=self.defocus_coeff)
defocus_coeff_entry.grid(column=1, row=0)
#OBLIQUE ASTIGMATISM
astigm_coeff_lab = ttk.Label(self.zernike_frame, text='Obliq. Astigmatism:')
astigm_coeff_lab.grid(column=2, row=1)
self.astigm_coeff = DoubleVar()
self.astigm_coeff.set(0)
astigm_coeff_entry = Entry(self.zernike_frame, textvariable=self.astigm_coeff)
astigm_coeff_entry.grid(column=3, row=1)
# VERTICAL ASTIGMATISM
secastigm_coeff_lab = ttk.Label(self.zernike_frame, text='Vert. Astigmatism:')
secastigm_coeff_lab.grid(column=0, row=1)
self.secastigm_coeff = DoubleVar()
self.secastigm_coeff.set(0)
secastigm_coeff_entry = Entry(self.zernike_frame, textvariable=self.secastigm_coeff)
secastigm_coeff_entry.grid(column=1, row=1)
#TILT
tilt_coeff_lab = ttk.Label(self.zernike_frame, text='Tilt:')
tilt_coeff_lab.grid(column=2, row=2)
self.tilt_coeff = DoubleVar()
self.tilt_coeff.set(0)
tilt_coeff_entry = Entry(self.zernike_frame, textvariable=self.tilt_coeff)
tilt_coeff_entry.grid(column=3, row=2)
#TIP
tip_coeff_lab = ttk.Label(self.zernike_frame, text='Tip:')
tip_coeff_lab.grid(column=0, row=2)
self.tip_coeff = DoubleVar()
self.tip_coeff.set(0)
tip_coeff_entry = Entry(self.zernike_frame, textvariable=self.tip_coeff)
tip_coeff_entry.grid(column=1, row=2)
# X AND Y GRADIENTS
xgrad_coeff_lab = ttk.Label(self.zernike_frame, text='X gradient:')
xgrad_coeff_lab.grid(column=2, row=3)
self.xgrad_coeff = DoubleVar()
self.xgrad_coeff.set(0)
xgrad_coeff_entry = Entry(self.zernike_frame, textvariable=self.xgrad_coeff)
xgrad_coeff_entry.grid(column=3, row=3)
ygrad_coeff_lab = ttk.Label(self.zernike_frame, text='Y gradient:')
ygrad_coeff_lab.grid(column=0, row=3)
self.ygrad_coeff = DoubleVar()
self.ygrad_coeff.set(0)
ygrad_coeff_entry = Entry(self.zernike_frame, textvariable=self.ygrad_coeff)
ygrad_coeff_entry.grid(column=1, row=3)
# Phase shift of the zernike correction
zernike_range_lab = Label(self.zernike_frame, text='Phase shift of zernike')
zernike_range_lab.grid(column=0, row=4)
self.zernike_min = DoubleVar()
self.zernike_min.set(0)
zernike_min_entry = Entry(self.zernike_frame, textvariable=self.zernike_min)
zernike_min_entry.grid(column=1, row=4)
self.zernike_max = DoubleVar()
self.zernike_max.set(1)
zernike_max_entry = Entry(self.zernike_frame, textvariable=self.zernike_max)
zernike_max_entry.grid(column=2, row=4)
# Apply zernike corrections to the phase mask currently active or selected
apply_zernike = ttk.Button(self.zernike_frame, text='Apply', command=self.apply_zernike)
apply_zernike.grid(column=4, row=0)
# functions implementing the various zernike polynomials
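        # NOTE: constant (piston) terms, e.g. the -1 in defocus sqrt(3)*(2r^2 - 1), are omitted
        # here since they only add a uniform phase offset across the SLM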
self.Defocus = lambda r: np.sqrt(3)*(2*r**2)
self.Astigm = lambda r, theta: np.sqrt(6)*(r**2)*np.sin(2*theta)
self.VertAstigm = lambda r, theta: np.sqrt(6) * (r ** 2) * np.cos(2 * theta)
        self.SecAstigm = lambda r, theta: np.sqrt(10)*(4*r**4-3*r**2)*np.sin(2*theta)  # standard radial term is 4r^4 - 3r^2
self.XGrad = lambda x: abs(x)
self.YGrad = lambda y: abs(y)
self.Tip = lambda r, theta: 2*r*np.cos(theta)
self.Tilt = lambda r, theta: 2*r*np.sin(theta)
# mesh grid used to create the 2d zernike polynomials in cartesian and polar coordinates
self.xx, self.yy = np.meshgrid(np.arange(-self.SLM.width/2, self.SLM.width/2),
np.arange(-self.SLM.height/2, self.SLM.height/2))
self.R, self.Theta = cart2pol(self.xx, self.yy)
# zernike_gray1_lab = Label(self.zernike_frame, text='Gray1')
# zernike_gray1_lab.grid(column=0, row=3)
# self.zernike_gray1 = IntVar()
# self.zernike_gray1.set(85)
# zernike_gray1_entry = Entry(self.zernike_frame, textvariable=self.zernike_gray1)
# zernike_gray1_entry.grid(column=1, row=3)
#
# zernike_gray2_lab = Label(self.zernike_frame, text='Gray2')
# zernike_gray2_lab.grid(column=0, row=4)
# self.zernike_gray2 = IntVar()
# self.zernike_gray2.set(255)
# zernike_gray2_entry = Entry(self.zernike_frame, textvariable=self.zernike_gray2)
# zernike_gray2_entry.grid(column=1, row=4)
self.zernike_gray1_old = 85
self.zernike_gray2_old = 255
# ======================================================================================
self.grayval_frame.grid(column=0, row=1, columnspan=5)
self.control_frame.grid(column=0, row=2, columnspan=5)
# ======================================================================================
# Multiple sources
# ======================================================================================
# Pack star center vars together for easy access
self.center_labels = [self.starc1, self.starc2, self.starc3]
# make frame where a binary star map or triple star map can be created
# binary phase map using airy pattern distribution for each star
self.binary_frame = ttk.Frame(self.multiple_frame)
self.binary_button = ttk.Button(self.binary_frame, text='Binary',
command=lambda: self.make_map('binary'), state=DISABLED)
self.binary_button.grid(column=1, row=1)
self.checkbox_val = IntVar()
binary_checkbox = Checkbutton(self.binary_frame, text='Save map', variable=self.checkbox_val)
binary_checkbox.grid(column=3, row=1)
self.tertiary_button = ttk.Button(self.binary_frame, text='Tertiary star',
command=lambda: self.make_map('triple'), state=DISABLED)
self.tertiary_button.grid(column=2, row=1)
self.new_map_name = StringVar()
self.new_map_name.set('Map name')
self.new_map_name_entry = Entry(self.binary_frame, textvariable=self.new_map_name)
self.new_map_name_entry_single = Entry(self.fqpm_frame, textvariable=self.new_map_name)
self.new_map_name_entry_single.grid(column=3, row=0)
self.new_map_name_entry.grid(column=0, row=1)
self.save_filetypes = [('Windows Bitmap', '*.bmp'), ('Text File', '*.txt'), ('Fits File', '*.fits')]
add_center_button = ttk.Button(self.binary_frame, text='Add', command=self.add_center)
add_center_button.grid(column=0, row=0)
self.centers_options = OptionMenu(self.binary_frame, self.center_var, *self.center_num)
self.centers_options.grid(column=1, row=0)
self.stars_frame.grid(column=0, row=0)
self.binary_frame.grid(column=0, row=2)
# =====================================================================================================
# Vortex tab
# =====================================================================================================
self.make_vortex = ttk.Button(self.vortex_frame, text='Make vortex',
command=lambda: self.make_map('vortex'))
self.make_vortex.grid(column=0, row=0)
# charge of the vortex
charge_lab = ttk.Label(self.vortex_frame, text='charge')
charge_lab.grid(column=2, row=1)
self.charge = IntVar()
self.charge.set(2)
self.charge_entry = Entry(self.vortex_frame, textvariable=self.charge, width=10)
self.charge_entry.bind("<Return>", self.charge_callback)
self.charge_entry.grid(column=3, row=1)
# coordinates entry
coordinates_lab = ttk.Label(self.vortex_frame, text='Coordinates')
coordinates_lab.grid(column=0, row=1)
self.vortex_coordinates = StringVar()
self.vortex_coordinates.set('%i, %i' % (int(self.SLM.width/2), int(self.SLM.height/2)))
self.vortex_coordinates_entry = Entry(self.vortex_frame, textvariable=self.vortex_coordinates, width=10)
self.vortex_coordinates_entry.grid(column=1, row=1)
# label indicating gray values
gray_lab = ttk.Label(self.vortex_frame, text='Gray values')
gray_lab.grid(column=1, row=3, columnspan=2)
# gray value for the 0 pi phase
gray0_lab = ttk.Label(self.vortex_frame, text='0:', width=10)
gray0_lab.grid(column=0, row=4)
self.gray0 = IntVar()
self.gray0.set(0)
self.gray0_entry = Entry(self.vortex_frame, textvariable=self.gray0, width=10)
self.gray0_entry.grid(column=1, row=4)
# gray value for 2pi phase
gray2pi_lab = ttk.Label(self.vortex_frame, text='2pi:', width=10)
gray2pi_lab.grid(column=2, row=4)
self.gray2pi = IntVar()
self.gray2pi.set(0)
self.gray2pi_entry = Entry(self.vortex_frame, textvariable=self.gray2pi, width=10)
self.gray2pi_entry.grid(column=3, row=4)
# button to change gray values of vortex on the fly
self.gray_vortex_button = ttk.Button(self.vortex_frame, text='Change', command=self.vortex_change_grayvalues)
self.gray_vortex_button.grid(column=4, row=4)
# ============================================================================================================
# ZERNIKE WAVEFRONT SENSING
# ============================================================================================================
create_rotating_button = ttk.Button(self.rotate_frame, text='Create',
command=lambda: self.make_map('rotate'))
self.rotate_button = ttk.Button(self.rotate_frame, text='Rotate', command=self.rotate_callback, state=DISABLED)
self.rotating_var = StringVar()
self.rotating_var.set('0-pi')
self.rotating_label = ttk.Label(self.rotate_frame, textvariable=self.rotating_var, state=DISABLED)
self.rotating_list = ['0', 'pi/2', 'pi', '3pi/2']
self.whichZernike = 0
self.rotateZernike_dict = {}
lD_label = Label(self.rotate_frame, text="l/D", width=10)
self.lD_var = IntVar()
self.lD_var.set(10)
l_over_D_entry = ttk.Entry(self.rotate_frame, textvariable=self.lD_var, width=10)
create_rotating_button.grid(column=0, row=0)
self.rotate_button.grid(column=1, row=0)
self.rotating_label.grid(column=2, row=0)
lD_label.grid(column=1, row=1)
l_over_D_entry.grid(column=2, row=1)
# ======================================================================================================
self.multiple_star_position = []
# =============================================================================================================
# Text frame
# ========================================================================================================
self.text_frame = ttk.Frame(self.master)
scrollbar = Scrollbar(self.text_frame)
scrollbar.grid(column=4, row=0)
self.text = Text(self.text_frame, height=5, width=40, wrap='word', yscrollcommand=scrollbar.set)
self.text.insert(INSERT, "Initializing SLM..\n")
self.text.grid(column=0, row=0, columnspan=4)
sys.stdout = StdoutRedirector(self.text) # assign stdout to custom class
def rotating_mask(self):
"""
        Create the four rotating phase masks (0, pi/2, pi, 3pi/2) applied inside a centered circular zone of radius l/D pixels; outside the zone the phase is 0
:return:
"""
if self.active:
self.image = np.zeros(self.SLM.dimensions, dtype=np.uint8)
self.active = False
self.activation_button.config(bg='firebrick2')
self.active_var.set('OFF')
m = np.zeros(self.SLM.size)
if self.lD_var.get() < 0:
return
m[np.where(self.R.T <= self.lD_var.get())] = 1
v0 = int(self.menu.rad_2_gray(0))
v1 = int(self.menu.rad_2_gray(0.5))
v2 = int(self.menu.rad_2_gray(1))
v3 = int(self.menu.rad_2_gray(1.5))
print(v0, v1, v2, v3)
# 0 - pi
p0 = np.zeros(self.SLM.size)
phase_map0 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map0[:, :, 0] = p0
phase_map0[:, :, 1] = p0
phase_map0[:, :, 2] = p0
self.rotateZernike_dict[self.rotating_list[0]] = phase_map0
# pi/2 - 3pi/2 FQ phase mask
p1 = np.zeros(self.SLM.size, dtype=np.uint8)
p1[np.where(m==1)] = v1
phase_map1 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map1[:, :, 0] = p1
phase_map1[:, :, 1] = p1
phase_map1[:, :, 2] = p1
self.rotateZernike_dict[self.rotating_list[1]] = phase_map1
# pi-0 FQ phase mask
p2 = np.zeros(self.SLM.size, dtype=np.uint8)
p2[np.where(m == 1)] = v2
phase_map2 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map2[:, :, 0] = p2
phase_map2[:, :, 1] = p2
phase_map2[:, :, 2] = p2
self.rotateZernike_dict[self.rotating_list[2]] = phase_map2
# 3pi/2 - pi/2 FQ phase mask
p3 = np.zeros(self.SLM.size, dtype=np.uint8)
p3[np.where(m == 1)] = v3
phase_map3 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map3[:, :, 0] = p3
phase_map3[:, :, 1] = p3
phase_map3[:, :, 2] = p3
self.rotateZernike_dict[self.rotating_list[3]] = phase_map3
self.rotate_button.config(state=NORMAL)
self.rotating_label.config(state=NORMAL)
return
def rotate_callback(self):
"""
        Rotate through the masks in rotateZernike_dict, cycling the phase of the central zone
        through the different pi values
:return:
"""
# make activate button green to indicate that mask is alive
self.active = True
self.activation_button.config(bg='PaleGreen2')
self.active_var.set('ON')
# get mask and apply it
which = self.rotating_list[self.whichZernike]
m = self.rotateZernike_dict[which]
self.image = m
self.SLM.draw(m)
self.plot_update()
self.rotating_var.set(which)
# go to next mask in line
self.whichZernike = (self.whichZernike + 1) % 4
def calculate_charge_gray(self, p):
"""
        Calculate a vortex phase mask from the complex map that defines its geometry.
        Also called every time the charge is changed.
        :param p: complex matrix implementing the vortex geometry
:return:
"""
if not(0 <= self.gray0.get() <= 255 and 0 <= self.gray2pi.get() <= 255):
print('invalid values')
return
if self.charge.get() % 2 != 0:
print("Odd charge -> change to closest even number")
self.charge.set(self.charge.get() + 1)
#if 'vortex' not in self.SLM.maps.keys():
# return
# z = p^n, apply charge
z = p**self.charge.get()
        # map the complex angle from [-pi, pi] to [0, 2] (phase in units of pi radians)
z = (np.angle(z) + np.pi)/(np.pi)
# map radians to gray values
z = self.map_to_interval(abs(z))
self.gray0.set(str(np.min(z)))
self.gray2pi.set(str(np.max(z)))
#z = z*abs(self.gray2pi.get() - self.gray0.get()) + self.gray0.get()
# create phase mask for SLM
phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map[:, :, 0] = z
phase_map[:, :, 1] = z
phase_map[:, :, 2] = z
return phase_map
def map_to_interval(self, p):
"""
        Map the vortex phase values (0-2.0, in units of pi) to gray values via rad_to_gray.
        The explicit gray0/gray2pi interval scaling is currently disabled (commented out below).
        """
# val2pi = self.gray_to_rad(self.gray2pi.get())
# val0 = val2pi - 2.0
# if val0 < 0 :
# val0 = 0
# val2pi = 2.0
# p += val0
return self.rad_to_gray(p)
def charge_callback(self, event):
"""
Callback when charge of vortex phase mask is changed
:return:
"""
p = self.SLM.maps[self.maps_var.get()]['map']
phase_map = self.calculate_charge_gray(p)
self.image = phase_map
print('Changed charge to %i' % self.charge.get())
if self.active:
self.SLM.draw(self.image)
self.plot_update()
return
def make_vortex_callback(self):
"""
Create single vortex mask at center denoted by star center
:return:
"""
try:
c = self.vortex_coordinates.get().split(sep=',')
xc = int(c[0])
yc = int(c[1])
except ValueError:
print('Error with coordinates')
return
print('Calculating vortex with charge %i, gray %i-%i, coord %i,%i' %
(self.charge.get(), self.gray0.get(), self.gray2pi.get(), xc, yc))
p = self.SLM.Vortex_coronagraph(xc, yc)
phase_map = self.calculate_charge_gray(p)
name = "Vortex_coord:%i,%i" % (xc, yc)
print('Finished, map-name %s' % name)
# in 'data' the phase mask ready to apply to the SLM is stored
self.SLM.maps[name] = {'data': phase_map}
# in 'map' the complex map that creates the vortex is stored
self.SLM.maps[name]['map'] = p
# vortex map in gray values but with 1 depth dim
self.SLM.maps[name]['vortex_map'] = phase_map[:,:,0]
self.SLM.maps[name]['center'] = [[xc, yc]]
self.SLM.maps[name]['type'] = 'vortex'
self.maps.append(name)
self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
self.image = phase_map
self.plot_update()
# save map to bitmap if option is checked
if self.checkbox_val.get():
filename = filedialog.asksaveasfilename(parent=self.master, filetypes=self.save_filetypes,
title='Save map as..')
            if not filename:  # dialog was cancelled
                return
            if not filename.endswith('.bmp'):
                filename += '.bmp'
surf = pygame.surfarray.make_surface(phase_map)
pygame.image.save(surf, filename)
return
def vortex_change_grayvalues(self):
"""
        Recalculate the vortex gray values from the current gray0/gray2pi settings and redraw if active
:return:
"""
p = self.SLM.maps[self.maps_var.get()]['map']
phase_map = self.calculate_charge_gray(p)
self.image = phase_map
print('Changed gray value range to %i-%i' % (self.gray0.get(), self.gray2pi.get()))
if self.active:
self.SLM.draw(self.image)
self.plot_update()
return
def l_over_D_callback(self, event):
"""
        Transforms l/D and azimuthal information to pixel coordinates with respect to the first star
x = n*l/D*cos(phi*pi)
y = n*l/D*sin(phi*pi)
        :param event: <Return> event from the l/D or phi entry, identifying which star to update
:return:
"""
if event.widget == self.center2_lab:
x = int(float(self.lD_star2.get())*int(self.menu.lD.get())*np.cos(float(self.phi_star2.get())*np.pi-np.pi))
y = int(float(self.lD_star2.get())*int(self.menu.lD.get())*np.sin(float(self.phi_star2.get())*np.pi-np.pi))
self.multiple_star_position[0] = [float(self.lD_star2.get()), float(self.phi_star2.get())]
x += self.center_position[0][0]
y += self.center_position[0][1]
self.center_position[1] = [x, y]
self.multiple_star_position[0] = [x, y]
self.starc2.set('%i,%i' % (x, y))
elif event.widget == self.center3_lab:
x = int(float(self.lD_star3.get())*int(self.menu.lD.get())*np.cos(float(self.phi_star3.get())*np.pi-np.pi))
y = int(float(self.lD_star3.get())*int(self.menu.lD.get())*np.sin(float(self.phi_star3.get())*np.pi-np.pi))
self.multiple_star_position[1] = [float(self.lD_star3.get()), float(self.phi_star3.get())]
x += self.center_position[0][0]
y += self.center_position[0][1]
self.center_position[2] = [x, y]
self.multiple_star_position[1] = [x, y]
self.starc3.set('%i,%i' % (x, y))
else:
pass
return
def magnitude_to_intensity(self, event):
"""
Transform magnitude difference between star and primary star into intensity difference
        :param event: <Return> event from the magnitude or intensity entry widget
:return:
"""
if event.widget == self.M2_entry:
m = float(self.M2.get())
self.I2_num.set(str(10**(-m/2.5)))
elif event.widget == self.M3_entry:
m = float(self.M3.get())
            self.I3_num.set(str(10**(-m/2.5)))
elif event.widget == self.I2_entry:
I = float(self.I2_num.get())
self.M2.set(-2.5*np.log10(I))
elif event.widget == self.I3_entry:
I = float(self.I3_num.get())
self.M3.set(-2.5*np.log10(I))
else:
pass
return
def clear_maps(self):
"""
Clears list of maps
:return:
"""
self.maps = []
self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
return
def make_map(self, which):
"""
Make thread that starts to calculate phase map
:param which:
:return:
"""
if which == 'single':
self.map_thread = threading.Thread(target=self.single_mask, daemon=True)
self.map_thread.start()
elif which == 'binary':
self.map_thread = threading.Thread(target=self.binary_mask, daemon=True)
self.map_thread.start()
elif which == 'triple':
self.map_thread = threading.Thread(target=self.triple_mask, daemon=True)
self.map_thread.start()
elif which == 'vortex':
self.map_thread = threading.Thread(target=self.make_vortex_callback, daemon=True)
self.map_thread.start()
elif which == 'rotate':
self.map_thread = threading.Thread(target=self.rotating_mask, daemon=True)
self.map_thread.start()
        else:
            print('Unknown map type: %s' % which)
            return
        print('Thread started')
return
def refresh_optionmenu(self, menu, var, options):
"""
Refreshes option menu
:param menu: handle to optionmenu widget
:param var: handle to variable of menu
:param options: options to insert
:return:
"""
var.set('')
menu['menu'].delete(0, 'end')
if len(options) == 0:
menu['menu'].add_command(label='', command=_setit(var, ''))
return
for option in options:
menu['menu'].add_command(label=option, command=_setit(var, option))
var.set(options[-1])
return
def add_center(self):
"""
Add new center which represents a new star or other object on the phase map
Gets center coordinates from right click mouse position on figure.
:return:
"""
if len(self.center_num) > 2: # up to a total of 3 objects can be defined
return
num = int(self.center_num[-1]) + 1
self.center_num.append(str(num))
self.center_position.append([0, 0])
self.multiple_star_position.append([1, 0]) # [l/D, phi]
self.refresh_optionmenu(self.centers_options, self.center_var, self.center_num)
if num == 2:
self.M2_entry.config(state=NORMAL)
self.I2_entry.config(state=NORMAL)
self.l2_entry.config(state=NORMAL)
self.F2_entry.config(state=NORMAL)
self.lD_star2_entry.config(state=NORMAL)
self.phi_star2_entry.config(state=NORMAL)
self.star_2.config(state=NORMAL)
self.binary_button.config(state=NORMAL)
self.center2_lab.config(state=NORMAL)
self.multiple_star_position.append([0, 0])
else:
self.M3_entry.config(state=NORMAL)
self.I3_entry.config(state=NORMAL)
self.l3_entry.config(state=NORMAL)
self.F3_entry.config(state=NORMAL)
self.lD_star3_entry.config(state=NORMAL)
self.phi_star3_entry.config(state=NORMAL)
self.tertiary_button.config(state=NORMAL)
self.star_3.config(state=NORMAL)
self.center3_lab.config(state=NORMAL)
self.multiple_star_position.append([0, 0])
return
def arrow_return(self, event):
"""
Changes grayvalue with arrow key hits
:param event:
:return:
"""
which = event.widget
what = event.keycode
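        # keycodes are platform dependent; these are the Windows virtual-key codes:
        # 37=Left, 38=Up, 39=Right, 40=Down, 13=Return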
if what == 37 or what == 40:
what = -1
elif what == 38 or what == 39:
what = 1
elif what == 13:
what = 0
else:
return
if which == self.gray_1_entry:
try:
val_old = self.val_1
val = int(self.gray_1_val.get())
val += what
if val > 255:
val = 255
if val < 0:
val = 0
self.gray_1_val.set(str(val))
self.phase_1_val.set('Phase: %.3f rad'%self.menu.phase_curve[val])
self.val_1 = val
self.set_value(val_old, val)
except ValueError:
return
elif which == self.gray_2_entry:
try:
val_old = self.val_2
val = int(self.gray_2_val.get())
val += what
if val > 255:
val = 255
if val < 0:
val = 0
self.gray_2_val.set(str(val))
self.phase_2_val.set('Phase: %.3f rad'%self.menu.phase_curve[val])
self.val_2 = val
self.set_value(val_old, val)
except ValueError:
return
else:
return
def set_value(self, val_old, val):
"""
        Find all pixels with value val_old and replace them with the new value val
:param val_old: old value to replace
:param val: value to replace with
:return:
"""
self.image[self.image == val_old] = val
if self.active:
self.SLM.draw(self.image)
self.plot_update()
return
def activate(self):
"""
Activate and deactivate SLM
:return:
"""
if self.active:
self.active = False
self.activation_button.config(bg='firebrick2')
self.active_var.set('OFF')
self.send_map('OFF')
else:
self.active = True
self.activation_button.config(bg='PaleGreen2')
self.active_var.set('ON')
self.send_map('ON')
return
def get_grayvals(self, image):
"""
Get the values from the phase mask
:param image: applied mask
:return:
"""
vals = np.unique(image)
self.val_1 = vals.min()
self.val_2 = vals.max()
self.gray_1_val.set(str(self.val_1))
self.gray_2_val.set(str(self.val_2))
return
def send_map(self, status):
"""
Send map to SLM
:param status: Phase map for ON and zero map for OFF
:return:
"""
if status == 'ON':
map_array = self.SLM.maps[self.maps_var.get()]['data'] # should get the matrix of the chosen map
self.get_grayvals(map_array)
self.image = map_array
self.SLM.draw(map_array)
self.plot_update()
elif status == 'OFF':
try:
self.SLM.maps[self.maps_var.get()]['data'] = self.image # save current state of map to dictionary
except KeyError:
pass
self.image = self.off_image
self.SLM.draw(self.off_image)
self.plot_update()
def plot_update(self):
self.im.set_data(self.image[:, :, 0].T)
self.fig.canvas.draw()
return
def import_map_callback(self):
"""
Import new map from file. Accepted extensions are bmp, txt, fits
:return:
"""
mfile = filedialog.askopenfilename()
try:
mname, flag = self.SLM.import_phase_map(mfile)
mname = os.path.basename(mname)
if flag:
p = self.SLM.maps[mname]['map']
p = self.rad_to_gray(p)
m = np.zeros(self.SLM.dimensions, dtype=np.uint8)
m[:, :, 0] = p
m[:, :, 1] = p
m[:, :, 2] = p
self.SLM.maps[mname]['data'] = m
#self.SLM.maps[mname] = {'data': m}
self.SLM.maps[mname]['type'] = 'custom'
self.maps.append(mname)
self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
except Exception as e:
print(e)
return
return
def click_callback(self, event):
_x = event.x
_y = event.y
inv = self.ax.transData.inverted()
data_pos = inv.transform((_x, _y))
data_pos = tuple([int(e) for e in data_pos])
if event.button == 1:
self.center_move('mouse', data_pos)
elif event.button == 3:
self.mouse_coordinates = data_pos
else:
pass
return
def center_move(self, d, pos):
"""
Move center of phase map
        :param d: direction to move ('up', 'down', 'left', 'right') or 'mouse' for a click position
        :param pos: (x, y) data coordinates of the mouse click, used when d is not an arrow direction
:return:
"""
which = int(self.center_var.get())-1 # which center is currently active
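        # self.image has shape (width, height, 3), so axis=0 rolls are horizontal (x)
        # and axis=1 rolls are vertical (y)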
if d == 'up':
self.center_position[which][1] -= self.center_step
self.image = np.roll(self.image, shift=-self.center_step, axis=1)
elif d == 'down':
self.center_position[which][1] += self.center_step
self.image = np.roll(self.image, shift=self.center_step, axis=1)
elif d == 'left':
self.center_position[which][0] -= self.center_step
self.image = np.roll(self.image, shift=-self.center_step, axis=0)
elif d == 'right':
self.center_position[which][0] += self.center_step
self.image = np.roll(self.image, shift=self.center_step, axis=0)
else:
self.center_position[which] = list(pos)
# update plot and SLM
if self.active:
self.SLM.draw(self.image)
self.plot_update()
# update label showing center
s = '%i,%i'%(self.center_position[which][0], self.center_position[which][1])
self.center_labels[which].set(s)
return
def set_center_step(self, event):
"""
Callback for setting the center move step size
"""
try:
val = int(self.cstep_var.get())
if val > 384:
raise ValueError
self.center_step = val
except ValueError:
self.cstep_var.set(str(self.center_step))
return
def binary_mask(self):
"""
        Create binary-star phase mask
:return:
"""
c1 = self.starc1.get().split(sep=',')
c1 = (int(c1[0]), int(c1[1]))
c2 = self.starc2.get().split(sep=',')
c2 = (int(c2[0]), int(c2[1]))
try:
I1, l1, F1 = float(self.I1_num.get()), float(self.l1_num.get()), float(self.F1_num.get())
I2, l2, F2 = float(self.I2_num.get()), float(self.l2_num.get()), float(self.F2_num.get())
val1 = self.menu.phase_curve[int(self.gray_1_val.get())]
val2 = self.menu.phase_curve[int(self.gray_2_val.get())]
print('Binary map with values :%f, %f'%(val1, val2))
except ValueError:
print('ValueError')
return
self.f = lambda x, y: self.SLM.pixel_value(x, y, c1, c2, I1, I2, val1, val2, F1, F2, l1, l2,
mask=self.map_type_var.get())
print('Calculating binary %s' % self.map_type_var.get())
        p = np.zeros(self.SLM.size)
print('Running binary weight-values calculation..')
for (x, y), val in np.ndenumerate(p):
p[x, y] = self.f(x, y)
try:
print("Running rad to gray conversion..")
p = self.rad_to_gray(p) # convert rad values to the corresponding gray values
except Exception as e:
print(e)
return
phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8) # dimensions are (width,height, 3)
phase_map[:, :, 0] = p
phase_map[:, :, 1] = p
phase_map[:, :, 2] = p
name = self.new_map_name.get()
self.SLM.maps[name] = {'data': phase_map}
self.SLM.maps[name]['center'] = [[c1[0], c1[1]], [c2[0], c2[1]]]
self.SLM.maps[name]['star info'] = [[I1, l1, F1], [I2, l2, F2]]
self.maps.append(name)
self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
self.image = phase_map
self.plot_update()
# save map to bitmap if option is checked
if self.checkbox_val.get():
self.menu.save_fits(name=name)
print('File saved')
return
def triple_mask(self):
"""
        Create triple-star phase mask
:return:
"""
c1 = self.starc1.get().split(sep=',')
c1 = (int(c1[0]), int(c1[1]))
c2 = self.starc2.get().split(sep=',')
c2 = (int(c2[0]), int(c2[1]))
c3 = self.starc3.get().split(sep=',')
c3 = (int(c3[0]), int(c3[1]))
try:
I1, l1, F1 = int(self.I1_num.get()), int(self.l1_num.get()), int(self.F1_num.get())
I2, l2, F2 = int(self.I2_num.get()), int(self.l2_num.get()), int(self.F2_num.get())
I3, l3, F3 = int(self.I3_num.get()), int(self.l3_num.get()), int(self.F3_num.get())
val1 = int(self.gray_1_val.get())
val2 = int(self.gray_2_val.get())
except ValueError:
print('Error')
return
#self.f = lambda x, y: self.SLM.pixel_value_triple(x, y, c1, c2, I1, I2, val1, val2, F1, F2, l1, l2)
        p = np.zeros(self.SLM.size, dtype=np.uint8)
import unittest
import os
import pandas as pds
import numpy as np
from numpy.testing import assert_allclose
from sklearn.model_selection import KFold
from pyChemometrics import ChemometricsScaler, ChemometricsPCA
"""
Suite of tests to assess coherence and functionality of the PCA object.
"""
class TestPCA(unittest.TestCase):
"""
Verify outputs of the ChemometricsPCA object
"""
def setUp(self):
"""
:return:
"""
try:
# Generate a fake classification dataset
t_dset = pds.read_csv(os.path.join(os.path.dirname(__file__), './test_data/classification_twoclass.csv'))
self.xmat = t_dset.iloc[:, 1::].values
except (IOError, OSError, FileNotFoundError) as ioerr:
import tests.gen_synthetic_datasets
#os.system('python gen_synthetic_datasets.py')
t_dset = pds.read_csv(os.path.join(os.path.dirname(__file__), './test_data/classification_twoclass.csv'))
self.xmat = t_dset.iloc[:, 1::].values
self.expected_modelParameters = {'R2X': 0.12913056143673818,
'S0': 0.9803124001345157,
'VarExp': np.array([9.44045066, 8.79710591, 8.11561924]),
'VarExpRatio': np.array([0.04625821, 0.04310582, 0.03976653])}
self.expected_cvParameters = {'Q2X': -0.10571035538454221, 'Mean_VarExp_Test': -0.0090083829247783621,
'Stdev_VarExp_Test': 0.0037778709253728452,
'Mean_VarExpRatio_Training': np.array([0.05108043, 0.04669199, 0.04380617]),
'Stdev_VarExpRatio_Training': np.array([0.00130025, 0.00094489, 0.00044059])}
self.expected_scores = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_scores.csv'), delimiter=',')
self.expected_loadings = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_loadings.csv'), delimiter=',')
self.expected_scores_mc = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_scores_mc.csv'), delimiter=',')
self.expected_loadings_mc = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_loadings_mc.csv'), delimiter=',')
self.expected_scores_par = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_scores_par.csv'), delimiter=',')
self.expected_loadings_par = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_loadings_par.csv'), delimiter=',')
self.expected_dmodx = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_dmodx.csv'), delimiter=',')
cvloadings = np.loadtxt(os.path.join(os.path.dirname(__file__), './test_data/pca_cvloads.csv'), delimiter=',')
self.expected_cv_meanloadings = cvloadings[0:3, :]
self.expected_cv_stdevloadings = cvloadings[3::, :]
self.expected_t2 = np.array([ 9.00313686, 8.69095296, 8.34753638])
self.expected_outlier_dmodx = np.array([])
self.expected_outlier_t2 = np.array([14])
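        # ChemometricsScaler(1) -> scale_power = 1, i.e. unit-variance (UV) scaling of each column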
self.x_scaler = ChemometricsScaler(1)
self.pcamodel = ChemometricsPCA(ncomps=3, scaler=self.x_scaler)
def test_fit(self):
"""
:return:
"""
self.pcamodel.fit(self.xmat)
for key, item in self.expected_modelParameters.items():
assert_allclose(self.pcamodel.modelParameters[key], item, rtol=1e-05)
assert_allclose(self.pcamodel.scores, self.expected_scores)
assert_allclose(self.pcamodel.loadings, self.expected_loadings)
def test_transform(self):
"""
:return:
"""
self.pcamodel.fit(self.xmat)
assert_allclose(self.pcamodel.transform(self.xmat), self.expected_scores)
def test_fit_transform(self):
"""
:return:
"""
assert_allclose(self.pcamodel.fit_transform(self.xmat), self.expected_scores)
def test_cv(self):
"""
:return:
"""
# Restart the seed and perform cross validation
np.random.seed(0)
        self.pcamodel.cross_validation(self.xmat, cv_method=KFold(n_splits=7, shuffle=True))
assert_allclose(self.pcamodel.cvParameters['Q2X'], self.expected_cvParameters['Q2X'], rtol=1e-5)
assert_allclose(self.pcamodel.cvParameters['Mean_VarExpRatio_Training'], self.expected_cvParameters['Mean_VarExpRatio_Training'], rtol=1e-5)
assert_allclose(self.pcamodel.cvParameters['Stdev_VarExpRatio_Training'], self.expected_cvParameters['Stdev_VarExpRatio_Training'], rtol=1e-5)
        assert_allclose(self.pcamodel.cvParameters['Mean_VarExp_Test'], self.expected_cvParameters['Mean_VarExp_Test'], rtol=1e-5)
import batoid
import numpy as np
from test_helpers import timer, init_gpu, rays_allclose, checkAngle, do_pickle
@timer
def test_properties():
rng = np.random.default_rng(5)
size = 10
for i in range(100):
x = rng.normal(size=size)
y = rng.normal(size=size)
z = rng.normal(size=size)
vx = rng.normal(size=size)
vy = rng.normal(size=size)
vz = rng.normal(size=size)
t = rng.normal(size=size)
w = rng.normal(size=size)
fx = rng.normal(size=size)
vig = rng.choice([True, False], size=size)
fa = rng.choice([True, False], size=size)
cs = batoid.CoordSys(
origin=rng.normal(size=3),
rot=batoid.RotX(rng.normal())@batoid.RotY(rng.normal())
)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, w, fx, vig, fa, cs)
np.testing.assert_array_equal(rv.x, x)
np.testing.assert_array_equal(rv.y, y)
np.testing.assert_array_equal(rv.z, z)
np.testing.assert_array_equal(rv.r[:, 0], x)
np.testing.assert_array_equal(rv.r[:, 1], y)
np.testing.assert_array_equal(rv.r[:, 2], z)
np.testing.assert_array_equal(rv.vx, vx)
np.testing.assert_array_equal(rv.vy, vy)
np.testing.assert_array_equal(rv.vz, vz)
np.testing.assert_array_equal(rv.v[:, 0], vx)
np.testing.assert_array_equal(rv.v[:, 1], vy)
np.testing.assert_array_equal(rv.v[:, 2], vz)
np.testing.assert_array_equal(rv.k[:, 0], rv.kx)
np.testing.assert_array_equal(rv.k[:, 1], rv.ky)
np.testing.assert_array_equal(rv.k[:, 2], rv.kz)
np.testing.assert_array_equal(rv.t, t)
np.testing.assert_array_equal(rv.wavelength, w)
np.testing.assert_array_equal(rv.flux, fx)
np.testing.assert_array_equal(rv.vignetted, vig)
np.testing.assert_array_equal(rv.failed, fa)
assert rv.coordSys == cs
rv._syncToDevice()
do_pickle(rv)
@timer
def test_positionAtTime():
rng = np.random.default_rng(57)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, 0.0)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.0, -1.1, 2.5]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + t1 * rv.v
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, t)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.4, -1.3, 2.1]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + rv.v*(t1-rv.t)[:,None]
)
@timer
def test_propagate():
rng = np.random.default_rng(577)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
@timer
def test_phase():
rng = np.random.default_rng(5772)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
# First explicitly check that phase is 0 at position and time of individual
# rays
for i in rng.choice(size, size=10):
np.testing.assert_equal(
rv.phase(rv.r[i], rv.t[i])[i],
0.0
)
# Now use actual formula
# phi = k.(r-r0) - (t-t0)omega
# k = 2 pi v / lambda |v|^2
# omega = 2 pi / lambda
# |v| = 1 / n
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
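            # per-ray dot product v . (r1 - r); together with the n^2 and 2*pi/lambda
            # factors below this yields k . (r1 - r)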
phi = np.einsum("ij,ij->i", rv.v, r1-rv.r)
phi *= n*n
phi -= (t1-rv.t)
phi *= 2*np.pi/wavelength
np.testing.assert_allclose(
rv.phase(r1, t1),
phi,
rtol=0,
atol=1e-7
)
for i in rng.choice(size, size=10):
s = slice(i, i+1)
rvi = batoid.RayVector(
x[s], y[s], z[s],
vx[s], vy[s], vz[s],
t[s].copy(), wavelength[s].copy()
)
# Move integer number of wavelengths ahead
ti = rvi.t[0]
wi = rvi.wavelength[0]
r1 = rvi.positionAtTime(ti + 5123456789*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Half wavelength
r1 = rvi.positionAtTime(ti + 6987654321.5*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Quarter wavelength
r1 = rvi.positionAtTime(ti + 0.25*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=2e-5)
# Three-quarters wavelength
r1 = rvi.positionAtTime(ti + 7182738495.75*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=2e-5)
# We can also keep the position the same and change the time in
# half/quarter integer multiples of the period.
a = rvi.amplitude(rvi.r[0], rvi.t[0]+5e9*wi)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+5.5)*wi)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+2.25)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+1.75)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=1e-5)
# If we pick a point anywhere along a vector originating at the ray
# position, but orthogonal to its direction of propagation, then we
# should get phase = 0 (mod 2pi).
v1 = np.array([1.0, 0.0, 0.0])
v1 = np.cross(rvi.v[0], v1)
p1 = rvi.r[0] + v1
a = rvi.amplitude(p1, rvi.t[0])
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
@timer
def test_sumAmplitude():
import time
rng = np.random.default_rng(57721)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
satime = 0
atime = 0
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
at0 = time.time()
s1 = rv.sumAmplitude(r1, t1)
at1 = time.time()
s2 = np.sum(rv.amplitude(r1, t1))
at2 = time.time()
np.testing.assert_allclose(s1, s2, rtol=0, atol=1e-11)
satime += at1-at0
atime += at2-at1
# print(f"sumAplitude() time: {satime}")
# print(f"np.sum(amplitude()) time: {atime}")
@timer
def test_equals():
import time
rng = np.random.default_rng(577215)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
flux = rng.uniform(0.9, 1.1, size=size)
vignetted = rng.choice([True, False], size=size)
failed = rng.choice([True, False], size=size)
args = x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
rv = batoid.RayVector(*args)
rv2 = rv.copy()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
# Repeat, but force comparison on device
rv2 = rv.copy()
rv._rv.x.syncToDevice()
rv._rv.y.syncToDevice()
rv._rv.z.syncToDevice()
rv._rv.vx.syncToDevice()
rv._rv.vy.syncToDevice()
rv._rv.vz.syncToDevice()
rv._rv.t.syncToDevice()
rv._rv.wavelength.syncToDevice()
rv._rv.flux.syncToDevice()
rv._rv.vignetted.syncToDevice()
rv._rv.failed.syncToDevice()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
@timer
def test_asGrid():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
nx = 1
while (nx%2) == 1:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-2)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
# Some things that should be equivalent
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
dx=dx, lx=lx, dirCos=dirCos
)
grid4 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), dirCos=dirCos
)
theta_x, theta_y = batoid.utils.dirCosToField(*dirCos)
grid5 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), theta_x=theta_x, theta_y=theta_y
)
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
rays_allclose(grid1, grid4)
rays_allclose(grid1, grid5)
# Check distance to chief ray
cridx = (nx//2)*nx+nx//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
# Another set, but with odd nx
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
while (nx%2) == 0:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-1)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0), dirCos=dirCos
)
# ... but the following is not equivalent, since default is to always
# infer an even nx and ny
# grid4 = batoid.RayVector.asGrid(
# backDist=backDist, wavelength=wavelength,
# dx=1/9, lx=1.0, dirCos=dirCos
# )
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
cridx = (nx*nx-1)//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
for _ in range(10):
# Check nrandom
rays = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
lx=1.0, nx=1,
nrandom=1000, dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
# Check that projected points are inside region
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.z, 0.0)
np.testing.assert_array_less(rays.x, 0.5)
np.testing.assert_array_less(rays.y, 0.5)
np.testing.assert_array_less(-0.5, rays.x)
np.testing.assert_array_less(-0.5, rays.y)
assert len(rays) == 1000
@timer
def test_asPolar():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
nrad = rng.integers(1, 11)
naz = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays)%6 == 0
# If we set inner=0, then last ray should
# intersect the center of the pupil
inner = 0.0
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
assert len(rays)%6 == 1
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.x[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.y[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.z[-1], 0, atol=1e-14)
@timer
def test_asSpokes():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
rings = rng.integers(1, 11)
spokes = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
spokes=spokes, rings=rings,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings*i:rings*(i+1)],
np.linspace(inner, outer, rings, endpoint=True)
)
for i in range(rings):
checkAngle(ths[i::rings], np.linspace(0, 2*np.pi, spokes, endpoint=False))
# Check explicit rings and spokes
rings = rng.uniform(inner, outer, rings)
spokes = rng.uniform(0, 2*np.pi, spokes)
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
rings=rings, spokes=spokes,
dirCos=dirCos
)
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Check Gaussian Quadrature
rings = rng.integers(5, 11)
spokes = 2*rings+1
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer,
rings=rings,
spacing='GQ',
dirCos=dirCos
)
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
Li, w = np.polynomial.legendre.leggauss(rings)
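        # map the Legendre nodes from [-1, 1] to pupil radii r = outer*sqrt((1+x)/2)
        # (uniform in r^2); the quadrature weights w set the per-ray flux below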
rings = np.sqrt((1+Li)/2)*outer
flux = w*np.pi/(2*spokes)
spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
np.testing.assert_allclose(
rays.flux[len(rings)*i:len(rings)*(i+1)],
flux
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Sanity check GQ grids against literature
# Values from Forbes JOSA Vol. 5, No. 11 (1988) Table 1
rings = [1, 2, 3, 4, 5, 6]
rad = [
[0.70710678],
[0.45970084, 0.88807383],
[0.33571069, 0.70710678, 0.94196515],
[0.26349923, 0.57446451, 0.81852949, 0.96465961],
[0.21658734, 0.48038042, 0.70710678, 0.87706023, 0.97626324],
[0.18375321, 0.41157661, 0.61700114, 0.78696226, 0.91137517, 0.98297241]
]
w = [
[0.5],
[0.25, 0.25],
[0.13888889, 0.22222222, 0.13888889],
[0.08696371, 0.16303629, 0.16303629, 0.08696371],
[0.05923172, 0.11965717, 0.14222222, 0.11965717, 0.05923172],
[0.04283112, 0.09019039, 0.11697848, 0.11697848, 0.09019039, 0.04283112]
]
for rings_, rad_, w_ in zip(rings, rad, w):
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=1,
rings=rings_,
spacing='GQ',
dirCos=[0,0,-1]
)
spokes = rings_*2+1
radii = np.hypot(rays.x, rays.y)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings_*i:rings_*(i+1)],
rad_
)
np.testing.assert_allclose(
rays.flux[rings_*i:rings_*(i+1)]*spokes/(2*np.pi),
w_
)
@timer
def test_factory_optic():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
grid1 = batoid.RayVector.asGrid(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
nx=16
)
grid2 = batoid.RayVector.asGrid(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, lx=telescope.pupilSize,
nx=16
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asPolar(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
naz=100, nrad=20
)
grid2 = batoid.RayVector.asPolar(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
inner=telescope.pupilSize/2*telescope.pupilObscuration,
naz=100, nrad=20
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asSpokes(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
rings=10, spokes=21
)
grid2 = batoid.RayVector.asSpokes(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
rings=10, spokes=21
)
rays_allclose(grid1, grid2)
@timer
def test_getitem():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
rv = batoid.RayVector.asPolar(
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
nrad=10, naz=60
)
telescope.trace(rv)
# Single item indexing
for i in range(-len(rv), len(rv)):
rv1 = rv[i]
np.testing.assert_equal(rv1.r[0], rv.r[i])
np.testing.assert_equal(rv1.x[0], rv.x[i])
np.testing.assert_equal(rv1.y[0], rv.y[i])
np.testing.assert_equal(rv1.z[0], rv.z[i])
np.testing.assert_equal(rv1.v[0], rv.v[i])
np.testing.assert_equal(rv1.vx[0], rv.vx[i])
np.testing.assert_equal(rv1.vy[0], rv.vy[i])
np.testing.assert_equal(rv1.vz[0], rv.vz[i])
np.testing.assert_equal(rv1.t[0], rv.t[i])
np.testing.assert_equal(rv1.wavelength[0], rv.wavelength[i])
np.testing.assert_equal(rv1.flux[0], rv.flux[i])
np.testing.assert_equal(rv1.vignetted[0], rv.vignetted[i])
np.testing.assert_equal(rv1.failed[0], rv.failed[i])
assert rv1.r.flags.f_contiguous
assert rv1.v.flags.f_contiguous
# slice indexing
for i in range(-len(rv)//10, len(rv)//10):
slc = slice(i*10, (i+1)*10, 2)
rv2 = rv[slc]
np.testing.assert_equal(rv2.r, rv.r[slc])
np.testing.assert_equal(rv2.x, rv.x[slc])
np.testing.assert_equal(rv2.y, rv.y[slc])
np.testing.assert_equal(rv2.z, rv.z[slc])
np.testing.assert_equal(rv2.v, rv.v[slc])
np.testing.assert_equal(rv2.vx, rv.vx[slc])
np.testing.assert_equal(rv2.vy, rv.vy[slc])
np.testing.assert_equal(rv2.vz, rv.vz[slc])
np.testing.assert_equal(rv2.t, rv.t[slc])
np.testing.assert_equal(rv2.wavelength, rv.wavelength[slc])
np.testing.assert_equal(rv2.flux, rv.flux[slc])
np.testing.assert_equal(rv2.vignetted, rv.vignetted[slc])
np.testing.assert_equal(rv2.failed, rv.failed[slc])
assert rv2.r.flags.f_contiguous
assert rv2.v.flags.f_contiguous
# integer array indexing
idx = [0, -1, 1, -2, 2, -3, 50]
rv3 = rv[idx]
np.testing.assert_equal(rv3.r, rv.r[idx])
np.testing.assert_equal(rv3.x, rv.x[idx])
np.testing.assert_equal(rv3.y, rv.y[idx])
np.testing.assert_equal(rv3.z, rv.z[idx])
np.testing.assert_equal(rv3.v, rv.v[idx])
np.testing.assert_equal(rv3.vx, rv.vx[idx])
np.testing.assert_equal(rv3.vy, rv.vy[idx])
np.testing.assert_equal(rv3.vz, rv.vz[idx])
np.testing.assert_equal(rv3.t, rv.t[idx])
np.testing.assert_equal(rv3.wavelength, rv.wavelength[idx])
np.testing.assert_equal(rv3.flux, rv.flux[idx])
np.testing.assert_equal(rv3.vignetted, rv.vignetted[idx])
np.testing.assert_equal(rv3.failed, rv.failed[idx])
assert rv3.r.flags.f_contiguous
assert rv3.v.flags.f_contiguous
# boolean array indexing
idx = np.zeros(len(rv), dtype=bool)
idx[[0, -1, 5]] = True
rv4 = rv[idx]
np.testing.assert_equal(rv4.r, rv.r[idx])
np.testing.assert_equal(rv4.x, rv.x[idx])
np.testing.assert_equal(rv4.y, rv.y[idx])
np.testing.assert_equal(rv4.z, rv.z[idx])
np.testing.assert_equal(rv4.v, rv.v[idx])
np.testing.assert_equal(rv4.vx, rv.vx[idx])
np.testing.assert_equal(rv4.vy, rv.vy[idx])
np.testing.assert_equal(rv4.vz, rv.vz[idx])
np.testing.assert_equal(rv4.t, rv.t[idx])
np.testing.assert_equal(rv4.wavelength, rv.wavelength[idx])
np.testing.assert_equal(rv4.flux, rv.flux[idx])
np.testing.assert_equal(rv4.vignetted, rv.vignetted[idx])
np.testing.assert_equal(rv4.failed, rv.failed[idx])
assert rv4.r.flags.f_contiguous
assert rv4.v.flags.f_contiguous
# test iteration
for i, rv5 in enumerate(rv):
np.testing.assert_equal(rv5.r[0], rv.r[i])
np.testing.assert_equal(rv5.x[0], rv.x[i])
np.testing.assert_equal(rv5.y[0], rv.y[i])
np.testing.assert_equal(rv5.z[0], rv.z[i])
np.testing.assert_equal(rv5.v[0], rv.v[i])
np.testing.assert_equal(rv5.vx[0], rv.vx[i])
np.testing.assert_equal(rv5.vy[0], rv.vy[i])
np.testing.assert_equal(rv5.vz[0], rv.vz[i])
np.testing.assert_equal(rv5.t[0], rv.t[i])
np.testing.assert_equal(rv5.wavelength[0], rv.wavelength[i])
np.testing.assert_equal(rv5.flux[0], rv.flux[i])
np.testing.assert_equal(rv5.vignetted[0], rv.vignetted[i])
np.testing.assert_equal(rv5.failed[0], rv.failed[i])
assert rv5.r.flags.f_contiguous
assert rv5.v.flags.f_contiguous
for i, rv6 in enumerate(reversed(rv)):
np.testing.assert_equal(rv6.r[0], rv.r[-i-1])
np.testing.assert_equal(rv6.r[0], rv.r[-i-1])
np.testing.assert_equal(rv6.x[0], rv.x[-i-1])
np.testing.assert_equal(rv6.y[0], rv.y[-i-1])
np.testing.assert_equal(rv6.z[0], rv.z[-i-1])
np.testing.assert_equal(rv6.v[0], rv.v[-i-1])
np.testing.assert_equal(rv6.vx[0], rv.vx[-i-1])
np.testing.assert_equal(rv6.vy[0], rv.vy[-i-1])
np.testing.assert_equal(rv6.vz[0], rv.vz[-i-1])
np.testing.assert_equal(rv6.t[0], rv.t[-i-1])
np.testing.assert_equal(rv6.wavelength[0], rv.wavelength[-i-1])
np.testing.assert_equal(rv6.flux[0], rv.flux[-i-1])
np.testing.assert_equal(rv6.vignetted[0], rv.vignetted[-i-1])
np.testing.assert_equal(rv6.failed[0], rv.failed[-i-1])
assert rv6.r.flags.f_contiguous
assert rv6.v.flags.f_contiguous
with np.testing.assert_raises(IndexError):
rv[len(rv)]
    with np.testing.assert_raises(IndexError):
        rv[-len(rv)-1]
#!/usr/bin/env python
#
# 20190222
# copied from "calc_stellar_mass_function.py", this code will superceed "calc_stellar_mass_function.py".
#
from __future__ import print_function
import os, sys, re, json, time, astropy
import numpy as np
from astropy.table import Table, Column, hstack
from copy import copy
from numpy import log, log10, power, sum, sqrt, pi, exp
pow = power
lg = log10
ln = log
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
if not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import apply_cosmology
cosmo = apply_cosmology.cosmo
if sys.version_info.major >= 3:
long = int
else:
pass
#
# def
#
def Schechter_Function(lgM, phi, lg_M0, alpha):
#
# Schechter (1976)
#
    # Phi(M) dM = (Phi_*) * (M/M_*)**(alpha) * exp(-M/M_*) dM/M_*
    #           = (Phi_*) * x**(alpha) * exp(-x) dx                         with x = M/M_*, lgx = lgM - lgM_*
    #           = (Phi_*) * 10**(lgx * alpha) * exp(-10**lgx) dx
    #           = (Phi_*) * 10**(lgx * (alpha+1)) * exp(-10**lgx) dlnx      since dx = x dlnx
    #           = (Phi_*) * 10**(lgx * (alpha+1)) * exp(-10**lgx) * ln(10) dlgx
    #           = (Phi_*) * 10**((lgM-lgM_*)*(alpha+1)) * exp(-10**(lgM-lgM_*)) * ln(10) dlgM
    #           = (Phi_Schechter, per dex) dlgM
#
lgx = lgM-lg_M0
Phi_Schechter = phi * (10**(lgx*(alpha+1))) * (np.exp(-10**lgx)) * ln(10) # per dex and already multiplied ln(10), so that its integral directly equals \int Phi(M) / M dM
return Phi_Schechter
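# Illustrative usage sketch (not part of the original script; the parameter values below are arbitrary):
#   lgM = np.linspace(8.0, 12.0, 401)
#   Phi = Schechter_Function(lgM, phi=1e-3, lg_M0=10.8, alpha=-1.3)   # dN/dlgM per Mpc^3 per dex
#   n_gal = np.trapz(Phi, lgM)                                        # number density over this mass range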
#
# def
#
def calc_SMF_Davidzon2017(z, lgMstar=None, galaxy_type = 'SFG'):
#
# Davidzon 2017 arXiv:1701.02734
# IMF: Chabrier 2003
# Outputs: lgMstar_grid, lgPhiMstar_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgMstar
if lgMstar is None:
lgMstar_grid = np.linspace(6.0, 13.0, num=1000, endpoint=True)
else:
lgMstar_grid = lgMstar
#
# read SMF
tb_SMF = Table.read(os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables/datatables_SMF/datatable_Davidzon2017_SMF_'+galaxy_type+'.txt', format='ascii') # zLo zHi lgMchar Phi_1 alpha_1 Phi_2 alpha_2
SMF_zmin = np.min(tb_SMF['zLo'])
SMF_zmax = np.max(tb_SMF['zHi'])
#
# check z
if z < SMF_zmin or z > SMF_zmax:
raise ValueError('calc_SMF_Davidzon2017: The input redshift is out of the allowed range of %s -- %s!'%(SMF_zmin, SMF_zmax))
#
# spline SMF #<20190214># old method
#<20190214># SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
#<20190214># SMF_phi_1 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['Phi_1'].data, k=1)(z)
#<20190214># SMF_phi_2 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['Phi_2'].data, k=1)(z)
#<20190214># SMF_alpha_1 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['alpha_1'].data, k=1)(z)
#<20190214># SMF_alpha_2 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['alpha_2'].data, k=1)(z)
#<20190214># SMF_lgMchar = InterpolatedUnivariateSpline(SMF_z, tb_SMF['lgMchar'].data, k=1)(z)
#<20190214># #print('z, lgMchar, alpha_1, phi_1, alpha_2, phi_2 =', z, SMF_lgMchar, SMF_alpha_1, SMF_phi_1, SMF_alpha_2, SMF_phi_2)
#<20190214># SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) + \
#<20190214># Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component
#<20190214># lgPhiMstar_grid = np.log10(SMF_PhiMstar)
#
# spline SMF
lgPhiMstar_matrix = []
for k in range(len(tb_SMF)):
SMF_z = (tb_SMF['zLo'][k] + tb_SMF['zHi'][k]) / 2.0
SMF_phi_1 = tb_SMF['Phi_1'][k]
SMF_phi_2 = tb_SMF['Phi_2'][k]
SMF_alpha_1 = tb_SMF['alpha_1'][k]
SMF_alpha_2 = tb_SMF['alpha_2'][k]
SMF_lgMchar = tb_SMF['lgMchar'][k]
#print('z, lgMchar, alpha_1, phi_1, alpha_2, phi_2 =', z, SMF_lgMchar, SMF_alpha_1, SMF_phi_1, SMF_alpha_2, SMF_phi_2)
SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) + \
Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component
lgPhiMstar_grid = np.log10(SMF_PhiMstar)
lgPhiMstar_matrix.append(copy(lgPhiMstar_grid))
#
SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
lgPhiMstar_matrix = np.array(lgPhiMstar_matrix) # shape == (N_SMF_z, N_SMF_lgMstar, )
if z <= np.min(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[0]
elif z >= np.max(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[-1]
else:
lgPhiMstar_grid = interp1d(SMF_z, lgPhiMstar_matrix, axis=0, kind='linear')(z)
#print(lgPhiMstar_matrix.shape, SMF_z.shape, lgPhiMstar_grid.shape)
# fix nan
lgPhiMstar_grid[np.isnan(lgPhiMstar_grid)] = -100
lgPhiMstar_grid[(lgPhiMstar_grid<-100)] = -100
#
if lgMstar is None:
return lgMstar_grid, lgPhiMstar_grid
else:
return lgPhiMstar_grid
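# Illustrative usage sketch (not part of the original script):
#   lgM_grid, lgPhi_grid = calc_SMF_Davidzon2017(z=1.0, galaxy_type='SFG')                      # full grid
#   lgPhi = calc_SMF_Davidzon2017(z=1.0, lgMstar=np.array([9.0, 10.0, 11.0]), galaxy_type='SFG') # at chosen masses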
def calc_SMF_Moutard2016(z, lgMstar=None, galaxy_type = 'SFG'):
#
    # Moutard et al. 2016 - SMF - https://ui.adsabs.harvard.edu/abs/2016A%26A...590A.103M/abstract
# IMF: Chabrier 2003
# Outputs: lgMstar_grid, lgPhiMstar_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgMstar
if lgMstar is None:
lgMstar_grid = np.linspace(6.0, 13.0, num=1000, endpoint=True)
else:
lgMstar_grid = lgMstar
#
# read SMF
tb_SMF = Table.read(os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables/datatables_SMF/datatable_Moutard2016_SMF_'+galaxy_type+'.txt', format='ascii') # zLo zHi lgMchar Phi_1 alpha_1 Phi_2 alpha_2
SMF_zmin = np.min(tb_SMF['zLo'])
SMF_zmax = np.max(tb_SMF['zHi'])
#
# check z
if z < SMF_zmin or z > SMF_zmax:
raise ValueError('calc_SMF_Moutard2016: The input redshift is out of the allowed range of %s -- %s!'%(SMF_zmin, SMF_zmax))
#
# spline SMF
lgPhiMstar_matrix = []
for k in range(len(tb_SMF)):
SMF_z = (tb_SMF['zLo'][k] + tb_SMF['zHi'][k]) / 2.0
SMF_phi_1 = tb_SMF['Phi_1'][k]
SMF_phi_2 = tb_SMF['Phi_2'][k]
SMF_alpha_1 = tb_SMF['alpha_1'][k]
SMF_alpha_2 = tb_SMF['alpha_2'][k]
SMF_lgMchar = tb_SMF['lgMchar'][k]
#print('calc_SMF_Moutard2016: z %r, lgMchar %r, alpha_1 %r, phi_1 %r, alpha_2 %r, phi_2 %r'%(z, SMF_lgMchar, SMF_alpha_1, SMF_phi_1, SMF_alpha_2, SMF_phi_2))
SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) + \
Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component
lgPhiMstar_grid = np.log10(SMF_PhiMstar)
lgPhiMstar_matrix.append(copy(lgPhiMstar_grid))
#
SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
lgPhiMstar_matrix = np.array(lgPhiMstar_matrix) # shape == (N_SMF_z, N_SMF_lgMstar, )
if z <= np.min(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[0]
    elif z >= np.max(SMF_z):
        lgPhiMstar_grid = lgPhiMstar_matrix[-1]
    else:
        lgPhiMstar_grid = interp1d(SMF_z, lgPhiMstar_matrix, axis=0, kind='linear')(z)
    # fix nan (assumed continuation, mirroring calc_SMF_Davidzon2017 above)
    lgPhiMstar_grid[np.isnan(lgPhiMstar_grid)] = -100
    lgPhiMstar_grid[(lgPhiMstar_grid < -100)] = -100
    #
    if lgMstar is None:
        return lgMstar_grid, lgPhiMstar_grid
    else:
        return lgPhiMstar_grid
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pickle
import glob
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from utils.utils import f1_score, best_f1_linspace, normalize_scores
from utils.utils_data import get_random_occlusion_mask
MACHINES = ['machine-1-1','machine-1-2','machine-1-3','machine-1-4','machine-1-5','machine-1-6','machine-1-7','machine-1-8',
'machine-2-1', 'machine-2-2','machine-2-3','machine-2-4','machine-2-5','machine-2-6','machine-2-7','machine-2-8','machine-2-9',
'machine-3-1', 'machine-3-2', 'machine-3-3', 'machine-3-4','machine-3-5','machine-3-6','machine-3-7','machine-3-8', 'machine-3-9','machine-3-10', 'machine-3-11']
# ------------------------------------------------------- SMD -------------------------------------------------------
def smd_load_scores(scores_dir, data_dir, machines):
labels_list = []
scores_list = []
for machine in machines:
results_file = f'{scores_dir}/SMD/{machine}/results.p'
results = pickle.load(open(results_file,'rb'))
scores = results['score']
labels = np.loadtxt(f'{data_dir}/ServerMachineDataset/test_label/{machine}.txt',delimiter=',')
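        # The stored scores appear to be computed on a coarser (stride-10) grid; repeat(10) up-samples them and
        # the tail slice aligns their length with the per-timestep labels (interpretation, not stated in the file).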
scores = scores.repeat(10)[-len(labels):]
assert scores.shape == labels.shape, 'Wrong dimensions'
labels_list.append(labels)
scores_list.append(scores)
return scores_list, labels_list
def smd_compute_f1(scores_dir, n_splits, data_dir):
scores, labels = smd_load_scores(scores_dir=scores_dir, data_dir=data_dir, machines=MACHINES)
scores_normalized = normalize_scores(scores=scores, interval_size=64*4*10) # 64*1*10
scores_normalized = np.hstack(scores_normalized)
labels = np.hstack(labels)
f1, precision, recall, *_ = best_f1_linspace(scores=scores_normalized, labels=labels, n_splits=n_splits, segment_adjust=True) #1000
return f1, precision, recall
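# Illustrative call (sketch; the paths and the n_splits value are assumptions, not from this file):
#   f1, precision, recall = smd_compute_f1(scores_dir='./results', n_splits=100, data_dir='./data')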
def smd_one_liner(occlusion_intervals, occlusion_prob, data_dir):
labels_list = []
scores_list = []
for machine in MACHINES:
train_data = np.loadtxt(f'{data_dir}/ServerMachineDataset/train/{machine}.txt',delimiter=',')
test_data = np.loadtxt(f'{data_dir}/ServerMachineDataset/test/{machine}.txt',delimiter=',')
# Mask
np.random.seed(1)
train_mask = get_random_occlusion_mask(dataset=train_data[:,None,:], n_intervals=occlusion_intervals, occlusion_prob=occlusion_prob)
train_mask = train_mask[:,0,:]
train_means = np.average(train_data, axis=0, weights=train_mask)
train_means = train_means[None, :]
scores = np.abs(test_data - train_means)
scores = scores.mean(axis=1)
labels = np.loadtxt(f'{data_dir}/ServerMachineDataset/test_label/{machine}.txt',delimiter=',')
labels_list.append(labels)
scores_list.append(scores)
scores_normalized = normalize_scores(scores=scores_list, interval_size=64*4*10) # 64*1*10
    scores_normalized = np.hstack(scores_normalized)
# todo docstrings
import numpy as np
import sympy as sym
import stanpy as stp
import matplotlib.pyplot as plt
def signif(x, p):
x = np.asarray(x)
x_positive = np.where(np.isfinite(x) & (x != 0), np.abs(x), 10 ** (p - 1))
mags = 10 ** (p - 1 - np.floor(np.log10(x_positive)))
return np.round(x * mags) / mags
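# Example: signif(np.array([123.456, 0.0012345]), 3) -> array([1.23e+02, 1.23e-03])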
def test_ex01():
x_sym = sym.Symbol("x")
E = 3e7 # kN/m2
b = 0.2 # m
ha = hb = 0.3 # m
hc = 0.4 # m
l1 = 4 # m
l2 = 3 # m
hx = ha + (hc - hb) / l2 * x_sym
cs_props1 = stp.cs(b=b, h=ha)
s1 = {"E": E, "cs": cs_props1, "q": 10, "l": l1, "bc_i": {"w": 0, "M": 0, "H": 0}}
cs_props2 = stp.cs(b=b, h=hx)
s2 = {"E": E, "cs": cs_props2, "q": 10, "l": l2, "bc_k": {"w": 0, "phi": 0}}
gamma, K = stp.gamma_K_function(**s2)
bj2 = stp.bj_p119(K, l2, 2, 0, cs_props2["eta_y"])
bj2s = stp.bj_p119(K, l2, 2, 1, cs_props2["eta_y"])
bj3 = stp.bj_p119(K, l2, 3, 0, cs_props2["eta_y"])
bj3s = stp.bj_p119(K, l2, 3, 1, cs_props2["eta_y"])
bj4 = stp.bj_p119(K, l2, 4, 0, cs_props2["eta_y"])
bj4s = stp.bj_p119(K, l2, 4, 1, cs_props2["eta_y"])
bj = stp.bj(x=l2, **s2)
print(bj)
bj_sol = np.array([[3.375, 2.905, 1.991], [1.969, 2.531, 2.344]])
np.testing.assert_allclose(bj[0, 0, 2:5], np.array([bj2, bj3, bj4]))
np.testing.assert_allclose(bj[0, 1, 2:5], np.array([bj2s, bj3s, bj4s]))
np.testing.assert_allclose(signif(bj[0, :, 2:5], 4), bj_sol)
x = np.linspace(0, l1 + l2, 500)
Fxa = stp.tr(s1, s2, x=x)
Za, Zc = stp.solve_tr(Fxa[-1], bc_i=s1["bc_i"], bc_k=s2["bc_k"])
np.testing.assert_allclose(signif(Za, 5), np.array([0, 43.161e-4, 0, 24.292, 1]))
np.testing.assert_allclose(signif(Zc, 5), np.array([0, 0, -74.954, -45.708, 1]))
Z_x = Fxa.dot(Za).round(10)
np.testing.assert_allclose(Z_x[-1], Zc)
np.testing.assert_allclose(Z_x[0], Za)
w_x = Z_x[:, 0]
phi_x = Z_x[:, 1]
M_x = Z_x[:, 2]
R_x = Z_x[:, 3]
# scale = 0.5
# fig, ax = plt.subplots(figsize=(12, 5))
# stp.plot_system(ax, s1, s2)
# stp.plot_M(ax, x=x, Mx=M_x, fill_p="red", fill_n="blue", scale=scale, alpha=0.2)
# ax.grid(linestyle=":")
# ax.set_axisbelow(True)
# ax.set_ylim(-0.8, 0.8)
# ax.set_ylabel("R/Rmax*{}".format(scale))
# ax.set_title("[R] = kN")
# plt.show()
def test_ex02():
E = 21e7 # kN/m^2
l1, l3 = 0.99, 0.99 # m
l2 = 0.51 # m
ha, hb, hd = 0.25, 0.25, 0.25 # m
hc = 0.15 # m
b = 0.2 # m
t = 0.02 # m
s = 0.015 # m
q = 3.04 # kN/m
P = 9.96 # kN
Ag = b * t
x_sym = sym.Symbol("x")
hx2 = hb - (hb - hc) / l2 * x_sym
hx3 = hc + (hd - hc) / l3 * x_sym
    b_vec = np.array([b, s, b])
# ========================================
# library
# ========================================
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel, AutoConfig
import transformers
from transformers import RobertaModel, RobertaTokenizer
from transformers import AlbertModel, AlbertTokenizer
from transformers import DebertaModel, DebertaTokenizer
from transformers import ElectraModel, ElectraTokenizer, ElectraForSequenceClassification
from transformers import BartModel, BertTokenizer
from transformers import MPNetModel, MPNetTokenizer
from transformers import FunnelBaseModel, FunnelTokenizer, FunnelModel
from transformers import GPT2Model, GPT2Tokenizer
from transformers import T5EncoderModel, T5Tokenizer
import logging
import sys
from contextlib import contextmanager
import time
from tqdm import tqdm
import pickle
import gc
# ==================
# Constant
# ==================
ex = "_predict"
TEST_PATH = "../data/test.csv"
SUB_PATH = "../data/sample_submission.csv"
SAVE_PATH = "../output/submission.csv"
LOGGER_PATH = f"ex{ex}.txt"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ===============
# Settings
# ===============
BATCH_SIZE = 8
max_len = 256
roberta_large_MODEL_PATH = '../models/roberta/roberta-large'
roberta_large_tokenizer = RobertaTokenizer.from_pretrained(
roberta_large_MODEL_PATH)
roberta_base_MODEL_PATH = '../models/roberta/roberta-base'
roberta_base_tokenizer = RobertaTokenizer.from_pretrained(
roberta_base_MODEL_PATH)
roberta_base_MODEL_PATH2 = '../output/ex/ex_mlm_roberta_base/mlm_roberta_base'
roberta_base_tokenizer2 = AutoTokenizer.from_pretrained(
roberta_base_MODEL_PATH2)
deberta_large_MODEL_PATH = "../models/deberta/large"
deberta_large_tokenizer = DebertaTokenizer.from_pretrained(
deberta_large_MODEL_PATH)
electra_large_MODEL_PATH = "../models/electra/large-discriminator"
electra_large_tokenizer = ElectraTokenizer.from_pretrained(
electra_large_MODEL_PATH)
bart_large_MODEL_PATH = '../models/bart/bart-large'
bart_large_tokenizer = RobertaTokenizer.from_pretrained(
roberta_large_MODEL_PATH)
deberta_xlarge_MODEL_PATH = "../models/deberta/v2-xlarge"
deberta_xlarge_tokenizer = AutoTokenizer.from_pretrained(
deberta_xlarge_MODEL_PATH)
mpnet_base_MODEL_PATH = 'microsoft/mpnet-base'
mpnet_base_tokenizer = MPNetTokenizer.from_pretrained(mpnet_base_MODEL_PATH)
deberta_v2_xxlarge_MODEL_PATH = "../models/deberta/v2-xxlarge"
deberta_v2_xxlarge_tokenizer = AutoTokenizer.from_pretrained(
deberta_v2_xxlarge_MODEL_PATH)
funnel_large_base_MODEL_PATH = 'funnel-transformer/large-base'
funnel_large_base_tokenizer = FunnelTokenizer.from_pretrained(
funnel_large_base_MODEL_PATH)
muppet_roberta_large_MODEL_PATH = 'facebook/muppet-roberta-large'
muppet_roberta_large_tokenizer = RobertaTokenizer.from_pretrained(
muppet_roberta_large_MODEL_PATH)
funnel_large_MODEL_PATH = 'funnel-transformer/large'
funnel_large_tokenizer = FunnelTokenizer.from_pretrained(
funnel_large_MODEL_PATH)
gpt2_medium_MODEL_PATH = "gpt2-medium"
gpt2_medium_tokenizer = GPT2Tokenizer.from_pretrained(
"gpt2-medium", bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|pad|>')
gpt2_medium_tokenizer.pad_token = gpt2_medium_tokenizer.eos_token
albert_v2_xxlarge_MODEL_PATH = 'albert-xxlarge-v2'
albert_v2_xxlarge_tokenizer = AlbertTokenizer.from_pretrained(
albert_v2_xxlarge_MODEL_PATH)
electra_base_MODEL_PATH = "../models/electra/base-discriminator"
electra_base_tokenizer = ElectraTokenizer.from_pretrained(
electra_base_MODEL_PATH)
bert_base_uncased_MODEL_PATH = 'bert-base-uncased'
bert_base_uncased_tokenizer = BertTokenizer.from_pretrained(
bert_base_uncased_MODEL_PATH)
t5_large_MODEL_PATH = 't5-large'
t5_large_tokenizer = T5Tokenizer.from_pretrained(t5_large_MODEL_PATH)
distil_bart_MODEL_PATH = 'sshleifer/distilbart-cnn-12-6'
distil_bart_tokenizer = RobertaTokenizer.from_pretrained(
distil_bart_MODEL_PATH)
# ===============
# Functions
# ===============
class CommonLitDataset(Dataset):
def __init__(self, excerpt, tokenizer, max_len, target=None):
self.excerpt = excerpt
self.tokenizer = tokenizer
self.max_len = max_len
self.target = target
def __len__(self):
return len(self.excerpt)
def __getitem__(self, item):
text = str(self.excerpt[item])
inputs = self.tokenizer(
text,
max_length=self.max_len,
padding="max_length",
truncation=True,
return_attention_mask=True,
return_token_type_ids=True
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
token_type_ids = inputs["token_type_ids"]
if self.target is not None:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"target": torch.tensor(self.target[item], dtype=torch.float32)
}
else:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long)
}
class roberta_large_model(nn.Module):
def __init__(self):
super(roberta_large_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
roberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class roberta_base_model(nn.Module):
def __init__(self):
super(roberta_base_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
roberta_base_MODEL_PATH,
)
self.drop = nn.Dropout(0.2)
self.fc = nn.Linear(768, 256)
self.layernorm = nn.LayerNorm(256)
self.drop2 = nn.Dropout(0.2)
self.relu = nn.ReLU()
self.out = nn.Linear(256, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'pooler_output']
output = self.drop(emb)
output = self.fc(output)
output = self.layernorm(output)
output = self.drop2(output)
output = self.relu(output)
output = self.out(output)
return output, emb
class roberta_base_model2(nn.Module):
def __init__(self):
super().__init__()
config = AutoConfig.from_pretrained(roberta_base_MODEL_PATH2)
config.update({"output_hidden_states": True,
"hidden_dropout_prob": 0.0,
"layer_norm_eps": 1e-7})
self.roberta = AutoModel.from_pretrained(
roberta_base_MODEL_PATH, config=config)
self.attention = nn.Sequential(
nn.Linear(768, 512),
nn.Tanh(),
nn.Linear(512, 1),
nn.Softmax(dim=1)
)
self.regressor = nn.Sequential(
nn.Linear(768, 1)
)
def forward(self, input_ids, attention_mask):
roberta_output = self.roberta(input_ids=input_ids,
attention_mask=attention_mask)
last_layer_hidden_states = roberta_output.hidden_states[-1]
weights = self.attention(last_layer_hidden_states)
context_vector = torch.sum(weights * last_layer_hidden_states, dim=1)
return self.regressor(context_vector)
class deberta_large_model(nn.Module):
def __init__(self):
super(deberta_large_model, self).__init__()
self.deberta_model = DebertaModel.from_pretrained(deberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
hidden_act="gelu_new")
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class electra_large_model(nn.Module):
def __init__(self):
super(electra_large_model, self).__init__()
self.electra = ElectraForSequenceClassification.from_pretrained(
electra_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
summary_last_dropout=0,
num_labels=1
)
def forward(self, ids, mask, token_type_ids):
# pooler
output = self.electra(ids, attention_mask=mask,
token_type_ids=token_type_ids)["logits"]
return output
class bart_large_model(nn.Module):
def __init__(self):
super(bart_large_model, self).__init__()
self.bart = BartModel.from_pretrained(
bart_large_MODEL_PATH,
dropout=0.0, attention_dropout=0.0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.bart(ids, attention_mask=mask)['last_hidden_state']
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class deberta_xlarge_model(nn.Module):
def __init__(self):
super(deberta_xlarge_model, self).__init__()
self.deberta_model = AutoModel.from_pretrained(deberta_xlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1536)
self.out = nn.Linear(1536, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class mpnet_base_model(nn.Module):
def __init__(self):
super(mpnet_base_model, self).__init__()
self.mpnet = MPNetModel.from_pretrained(
mpnet_base_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.mpnet(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class deberta_v2_xxlarge_model(nn.Module):
def __init__(self):
super(deberta_v2_xxlarge_model, self).__init__()
self.deberta_model = AutoModel.from_pretrained(deberta_v2_xxlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1536)
self.out = nn.Linear(1536, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class funnel_large_base_model(nn.Module):
def __init__(self):
super(funnel_large_base_model, self).__init__()
self.funnel = FunnelBaseModel.from_pretrained(
funnel_large_base_MODEL_PATH,
hidden_dropout=0,
attention_dropout=0,
hidden_act="gelu"
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.funnel(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class muppet_roberta_large_model(nn.Module):
def __init__(self):
super(muppet_roberta_large_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
muppet_roberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class funnel_large_model(nn.Module):
def __init__(self):
super(funnel_large_model, self).__init__()
self.funnel = FunnelModel.from_pretrained(
funnel_large_MODEL_PATH,
hidden_dropout=0,
attention_dropout=0
)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.funnel(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class gpt2_medium_model(nn.Module):
def __init__(self):
super(gpt2_medium_model, self).__init__()
self.gpt2_model = GPT2Model.from_pretrained(gpt2_medium_MODEL_PATH,
attn_pdrop=0,
embd_pdrop=0,
resid_pdrop=0,
summary_first_dropout=0)
self.gpt2_model.resize_token_embeddings(len(gpt2_medium_tokenizer))
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.gpt2_model(ids, attention_mask=mask)["last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class albert_v2_xxlarge_model(nn.Module):
def __init__(self):
super(albert_v2_xxlarge_model, self).__init__()
self.albert = AlbertModel.from_pretrained(
albert_v2_xxlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(4096)
self.out = nn.Linear(4096, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.albert(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class electra_base_model(nn.Module):
def __init__(self):
super(electra_base_model, self).__init__()
self.electra = ElectraModel.from_pretrained(
electra_base_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.electra(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class bert_base_uncased_model(nn.Module):
def __init__(self):
super(bert_base_uncased_model, self).__init__()
self.bert = transformers.BertModel.from_pretrained(bert_base_uncased_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.bert = transformers.BertForSequenceClassification.from_pretrained(BERT_MODEL,num_labels=1)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb, _ = self.bert(ids, attention_mask=mask,
token_type_ids=token_type_ids, return_dict=False)
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
output = self.out(output)
return output
class t5_large_model(nn.Module):
def __init__(self):
super(t5_large_model, self).__init__()
self.t5 = T5EncoderModel.from_pretrained(t5_large_MODEL_PATH,
dropout_rate=0)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.t5(ids, attention_mask=mask)['last_hidden_state']
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class distil_bart_model(nn.Module):
def __init__(self):
super(distil_bart_model, self).__init__()
self.bart = BartModel.from_pretrained(
distil_bart_MODEL_PATH,
activation_dropout=0.0, attention_dropout=0.0,
classif_dropout=0, classifier_dropout=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.bart(ids, attention_mask=mask)['last_hidden_state']
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class CommonLitDataset_gpt(Dataset):
def __init__(self, excerpt, tokenizer, max_len, target=None):
self.excerpt = excerpt
self.tokenizer = tokenizer
self.max_len = max_len
self.target = target
def __len__(self):
return len(self.excerpt)
def __getitem__(self, item):
text = str(self.excerpt[item])
inputs = self.tokenizer('<|startoftext|>' + text + '<|endoftext|>',
truncation=True, max_length=self.max_len, padding="max_length")
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
# token_type_ids = inputs["token_type_ids"]
if self.target is not None:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids" : torch.tensor(token_type_ids, dtype=torch.long),
"target": torch.tensor(self.target[item], dtype=torch.float32)
}
else:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids" : torch.tensor(token_type_ids, dtype=torch.long)
}
def setup_logger(out_file=None, stderr=True, stderr_level=logging.INFO, file_level=logging.DEBUG):
LOGGER.handlers = []
LOGGER.setLevel(min(stderr_level, file_level))
if stderr:
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(FORMATTER)
handler.setLevel(stderr_level)
LOGGER.addHandler(handler)
if out_file is not None:
handler = logging.FileHandler(out_file)
handler.setFormatter(FORMATTER)
handler.setLevel(file_level)
LOGGER.addHandler(handler)
LOGGER.info("logger set up")
return LOGGER
@contextmanager
def timer(name):
t0 = time.time()
yield
LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s')
LOGGER = logging.getLogger()
FORMATTER = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
setup_logger(out_file=LOGGER_PATH)
# ================================
# Main
# ================================
test = pd.read_csv(TEST_PATH)
# ================================
# roberta base -> svr + ridge
# ================================
if len(test) > 0:
with timer("roberta base -> svr + ridge"):
y_test_roberta_base = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_base_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in range(5):
# model
model = roberta_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex014/ex014_model/ex014_{fold}.pth"))
model.to(device)
model.eval()
test_emb = np.ndarray((0, 768))
# svr
svr = pickle.load(
open(f"../output/ex/ex015/ex015_model/ex015_svr_roberta_emb_{fold}.pkl", "rb"))
# ridge
ridge = pickle.load(
open(f"../output/ex/ex015/ex015_model/ex015_ridge_roberta_emb_{fold}.pkl", "rb"))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
_, output = model(input_ids, mask, token_type_ids)
test_emb = np.concatenate(
[test_emb, output.detach().cpu().numpy()], axis=0)
x_test = pd.DataFrame(test_emb)
x_test.columns = [f"emb_{i}" for i in range(len(x_test.columns))]
test_preds_svr = svr.predict(x_test)
test_preds_ridge = ridge.predict(x_test)
test_preds = (test_preds_svr + test_preds_ridge)/2
y_test_roberta_base.append(test_preds)
del x_test, model, test_emb
gc.collect()
y_test_roberta_base = np.mean(y_test_roberta_base, axis=0)
del test_, test_loader
gc.collect()
# ================================
# roberta base
# ================================
if len(test) > 0:
with timer("roberta base"):
y_test_roberta_base2 = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_base_tokenizer2, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in range(5):
# model
model = roberta_base_model2()
model.load_state_dict(torch.load(
f"../output/ex/ex237/ex237_model/ex237_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_roberta_base2.append(test_preds)
del model
gc.collect()
y_test_roberta_base2 = np.mean(y_test_roberta_base2, axis=0)
del test_, test_loader
gc.collect()
# ================================
# roberta_large
# ================================
if len(test) > 0:
with timer("roberta_large"):
y_test_roberta_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_large_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = roberta_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex072/ex072_model/ex072_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_roberta_large.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_roberta_large = np.mean(y_test_roberta_large, axis=0)
# ================================
# deberta_large
# ================================
if len(test) > 0:
with timer("deberta_large"):
y_test_deberta_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, deberta_large_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = deberta_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex182/ex182_model/ex182_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
            y_test_deberta_large.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_deberta_large = np.mean(y_test_deberta_large, axis=0)
# ================================
# electra_large
# ================================
if len(test) > 0:
with timer("electra_largee"):
y_test_electra_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, electra_large_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = electra_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex190/ex190_model/ex190_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_electra_large.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_electra_large = np.mean(y_test_electra_large, axis=0)
# ================================
# bart_large
# ================================
if len(test) > 0:
with timer("bart_largee"):
y_test_bart_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, bart_large_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = bart_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex107/ex107_model/ex107_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_bart_large.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_bart_large = np.mean(y_test_bart_large, axis=0)
# ================================
# deberta_xlarge
# ================================
if len(test) > 0:
with timer("deberta_xlarge"):
y_test_deberta_xlarge = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, deberta_xlarge_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=4, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = deberta_xlarge_model()
model.load_state_dict(torch.load(
f"../output/ex/ex194/ex194_model/ex194_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
            y_test_deberta_xlarge.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_deberta_xlarge = np.mean(y_test_deberta_xlarge, axis=0)
# ================================
# mpnet_base
# ================================
if len(test) > 0:
with timer("mpnet_base"):
y_test_mpnet_base = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, mpnet_base_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = mpnet_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex292/ex292_model/ex292_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_mpnet_base.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_mpnet_base = np.mean(y_test_mpnet_base, axis=0)
# ================================
# deberta_v2_xxlarge
# ================================
if len(test) > 0:
with timer("deberta_v2_xlarge"):
y_test_deberta_v2_xxlarge = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, deberta_v2_xxlarge_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=4, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = deberta_v2_xxlarge_model()
model.load_state_dict(torch.load(
f"../output/ex/ex216/ex216_model/ex216_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_deberta_v2_xxlarge.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_deberta_v2_xxlarge = np.mean(y_test_deberta_v2_xxlarge, axis=0)
# ================================
# funnel_large_base
# ================================
if len(test) > 0:
with timer("funnel_large_base"):
y_test_funnel_large_base = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, funnel_large_base_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=4, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = funnel_large_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex272/ex272_model/ex272_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_funnel_large_base.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_funnel_large_base = np.mean(y_test_funnel_large_base, axis=0)
# ================================
# muppet_roberta_large
# ================================
if len(test) > 0:
with timer("muppet_roberta_large"):
y_test_muppet_roberta_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, muppet_roberta_large_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = muppet_roberta_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex384/ex384_model/ex384_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_muppet_roberta_large.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_muppet_roberta_large = np.mean(
y_test_muppet_roberta_large, axis=0)
# ================================
# funnel large
# ================================
if len(test) > 0:
with timer("funnel_model"):
y_test_funnel_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, funnel_large_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = funnel_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex407/ex407_model/ex407_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_funnel_large.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_funnel_large = np.mean(y_test_funnel_large, axis=0)
# ================================
# gpt_medium
# ================================
if len(test) > 0:
with timer("gpt_medium"):
y_test_gpt2_medium = []
# dataset
test_ = CommonLitDataset_gpt(
test["excerpt"].values, gpt2_medium_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = gpt2_medium_model()
model.load_state_dict(torch.load(
f"../output/ex/ex429/ex429_model/ex429_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
# token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
# token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_gpt2_medium.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_gpt2_medium = np.mean(y_test_gpt2_medium, axis=0)
# ================================
# albert_v2_xxlarge_model
# ================================
if len(test) > 0:
with timer("albert_v2_xxlarge_model"):
y_test_albert_v2_xxlarge = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, albert_v2_xxlarge_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = albert_v2_xxlarge_model()
if fold == 2:
model.load_state_dict(torch.load(
f"../output/ex/ex448/ex448_model/ex448_{fold}.pth"))
else:
model.load_state_dict(torch.load(
f"../output/ex/ex450/ex450_model/ex450_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_albert_v2_xxlarge.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_albert_v2_xxlarge = np.mean(y_test_albert_v2_xxlarge, axis=0)
# ================================
# ex465 electra_base_model
# ================================
if len(test) > 0:
with timer("electra_base_model"):
ex465_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, electra_base_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = electra_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex465/ex465_model/ex465_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
ex465_pred.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
ex465_pred = np.mean(ex465_pred, axis=0)
# ================================
# ex497 bert_base_uncased_model
# ================================
if len(test) > 0:
with timer("bert_base_uncased_model"):
ex497_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, bert_base_uncased_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = bert_base_uncased_model()
model.load_state_dict(torch.load(
f"../output/ex/ex497/ex497_model/ex497_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask, token_type_ids)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
ex497_pred.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
ex497_pred = np.mean(ex497_pred, axis=0)
# ================================
# ex434 t5_large_model
# ================================
if len(test) > 0:
with timer("t5_large"):
ex434_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, t5_large_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = t5_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex434/ex434_model/ex434_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
ex434_pred.append(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
ex434_pred = np.mean(ex434_pred, axis=0)
# ================================
# distil_bart
# ================================
if len(test) > 0:
with timer("distil_bart"):
ex507_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, distil_bart_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = distil_bart_model()
model.load_state_dict(torch.load(
f"../output/ex/ex507/ex507_model/ex507_{fold}.pth"))
model.to(device)
model.eval()
            test_preds = np.ndarray((0, 1))
import cv2
import numpy as np
import random
from datetime import datetime
random.seed(datetime.now().timestamp())  # seed from the current time (datetime objects are rejected by random.seed on Python 3.11+)
import argparse
from PIL import ImageFont, ImageDraw, Image
from pylatexenc.latex2text import LatexNodes2Text
color = (128, 128, 128)
mul_symbol = r"""\times"""
mul_symbol_text = LatexNodes2Text().latex_to_text(mul_symbol)
div_symbol = r"""\div"""
div_symbol_text = LatexNodes2Text().latex_to_text(div_symbol)
def generate_one_page(page_name, first_max, operators, second_max, remove_0_1):
numbers = 36 # number of equations to generate.
numbers = numbers // 2 * 2 # Make it even so each line can contain two equations.
start = 0
if remove_0_1:
start = 2
first_op_range = (start, first_max) # first operands range in [0, 10]
second_op_range = (start, second_max) # second operands range in [0, 100]
height = 1280
width = 960
line_per_page = 20
line_height = height // line_per_page
im = np.ones((height, width, 3), np.uint8) * 255
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 2
dx = 50
dy = 100
for i in range(numbers):
a = random.randint(*first_op_range)
b = random.randint(*second_op_range)
sign = random.choice(operators)
if sign == '-' or sign == '/' :
if a < b:
a, b = b, a
if sign == '/':
if b == 0:
b = 1
            a = (a // b) * b  # snap a down to a multiple of b so the division result is an exact integer
if i % 2 == 0:
x = dx
else:
x = dx + width // 2
y = dy + (i//2) * line_height
if sign == '*':
sign_text = mul_symbol_text
elif sign == '/':
sign_text = div_symbol_text
else:
sign_text = sign
ops = '{:<5d}'.format(a) + sign_text + '{:5d} = '.format(b)
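        # e.g. a=23, b=7, '×' -> "23   ×    7 = "  (a left-aligned, b right-aligned, field width 5)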
print(ops)
# Downloads from https://github.com/sonatype/maven-guide-zh/blob/master/content-zh/src/main/resources/fonts/simsun.ttc.
fontpath = './simsun.ttc'
font = ImageFont.truetype(fontpath, 32)
img_pil = Image.fromarray(im)
draw = ImageDraw.Draw(img_pil)
draw.text((x,y), ops, font = font, fill = color)
        im = np.array(img_pil)
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import os, argparse
import csv
from run1 import get_params_office_world, get_params_traffic_world, get_params_craft_world
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
    # append 5 running-mean points to y (side effect: the caller's list is modified);
    # the convolution below drops them again via y[0:-5]
    for _ in range(5):
        y.append(sum(y[-5:]) / len(y[-5:]))
    y_smooth = np.convolve(y[0:-5], box, mode='same')
    # hold the last 5 points at y_smooth[-6] to mask the edge artefact of mode='same'
    y_smooth[-5:] = y_smooth[-6]
return y_smooth
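# Typical call (sketch): plt.plot(steps, smooth(list(rewards), 5)); pass a copy, since smooth() appends to its input list.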
def export_results_traffic_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[0].num_steps
max_step = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[3].total_steps
    steps = np.linspace(0, max_step, int(max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("traffic" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row, 25))
p50_q.append(np.percentile(row, 50))
p75_q.append(np.percentile(row, 75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
                        p25_hrl.append(np.percentile(row, 25))
                        p50_hrl.append(np.percentile(row, 50))  # assumed continuation, mirroring the 'qlearning' branch above
                        p75_hrl.append(np.percentile(row, 75))
import numpy as np
import math
def snr_plot(model, snrs, lbl, test_idx, X_test, Y_test, classes):
# Plot confusion matrix
acc = {}
for snr in snrs:
# extract classes @ SNR
test_SNRs = list(map(lambda x: lbl[x][1], test_idx))
test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]
# estimate classes
test_Y_i_hat = model.predict(test_X_i)
conf = np.zeros([len(classes), len(classes)])
confnorm = np.zeros([len(classes), len(classes)])
for i in range(0, test_X_i.shape[0]):
j = list(test_Y_i[i, :]).index(1)
k = int(np.argmax(test_Y_i_hat[i, :]))
conf[j, k] = conf[j, k] + 1
for i in range(0, len(classes)):
confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])
cor = np.sum(np.diag(conf))
ncor = np.sum(conf) - cor
print(snr, "dB, Overall Accuracy: ", cor / (cor + ncor))
acc[snr] = 1.0 * cor / (cor + ncor)
return acc
def SNR_singlech(S, SN):
    S = S - np.mean(S)  # remove the DC component
    S = S / np.max(np.abs(S))  # normalize the amplitude
    mean_S = (np.sum(S)) / (len(S))  # mean value of the clean signal
    PS = np.sum((S - mean_S) * (S - mean_S))  # signal power
    PN = np.sum((S - SN) * (S - SN))  # noise power
    return 10 * np.log10(PS / PN)  # assumed completion: single-channel SNR in dB
# Author : <NAME>
# Last update : 16 October 2020
# EPFL Rocket Team, 1015 Lausanne, Switzerland
import numpy as np
import numpy.linalg as lin
import math
import time
from scipy.integrate import ode, solve_ivp
from dataclasses import dataclass
from aero.Rocket.Stage import Stage
from aero.Rocket.Rocket import Rocket
from aero.Rocket.Body import Body
from aero.Functions.Models.stdAtmosUS import stdAtmosUS
from aero.Functions.Models.drag import drag
from aero.Functions.Models.Nose_drag import Nose_drag
from aero.Functions.Models.drag_shuriken import drag_shuriken
from aero.Functions.Models.wind_model import wind_model
from aero.Functions.Models.normal_lift import normal_lift
from aero.Functions.Math.normalize_vector import normalize_vector
from aero.Functions.Math.rotmat import rotmat
from aero.Functions.Math.quat2rotmat import quat2rotmat
from aero.Functions.Math.rot2anglemat import rot2anglemat
from aero.Functions.Math.quat_evolve import quat_evolve
from aero.Functions.Math.rot2quat import rot2quat
from aero.Functions.Models.pitch_damping_moment import pitch_damping_moment
from aero.Functions.Models.Mass_Non_Lin import Mass_Non_Lin
from aero.Functions.Models.Thrust import Thrust
from aero.Functions.Models.Mass_Properties import Mass_Properties
class Simulator3D:
"""
"""
@dataclass
class SimAuxResults:
        Margin: np.ndarray
        Alpha: np.ndarray
        Cn_alpha: np.ndarray
        Xcp: np.ndarray
        Cd: np.ndarray
        Mass: np.ndarray
        CM: np.ndarray
        Il: np.ndarray
        Ir: np.ndarray
        Delta: np.ndarray
        Nose_Alpha: np.ndarray
        Nose_delta: np.ndarray
global tmp_Margin, tmp_Alpha, tmp_Cn_alpha, tmp_Xcp, tmp_Cd, tmp_Mass, tmp_CM, tmp_Il, tmp_Ir, tmp_Delta
global tmp_Nose_Alpha, tmp_Nose_Delta
global simAuxResults
simAuxResults = SimAuxResults(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
def __init__(self, rocket: Rocket, atmosphere: stdAtmosUS):
self.x_0 = np.array([0, 0])
self.t0 = 0
self.state = [self.x_0]
self.time = [self.t0]
self.rocket = rocket
self.Environment = atmosphere
def Dynamics_Rail_1DOF(self, t, s):
x = s[0]
v = s[1]
# Rocket inertia
Mass, dMdt = Mass_Non_Lin(t, self.rocket)
# Environment
g = 9.81
a = self.Environment.get_speed_of_sound(s[0] + self.Environment.ground_altitude)
rho = self.Environment.get_density(s[0] + self.Environment.ground_altitude)
nu = self.Environment.get_viscosity(s[0] + self.Environment.ground_altitude)
# Gravity
G = -g*np.cos(self.Environment.Rail_Angle)*Mass
T = Thrust(t, self.rocket)
# TODO: Add drag influences (done?)
CD = drag(self.rocket, 0, v, nu, a)
D = -0.5*rho*self.rocket.get_max_cross_section_surface*CD*v**2
F_tot = G + T*self.rocket.get_motor_fac() + D
x_dot = v
v_dot = 1/Mass * (F_tot - v*dMdt)
CD_AB = 0 # TODO: Insert reference to drag_shuriken or other
return x_dot, v_dot
def Compute_aero(self, s, thrust_force):
x = s[0:3]
v = s[3:6]
q = s[6:10]
w = s[10:13]
propellant_mass = s[13]
# Normalise quaternion
q = normalize_vector(q)
# Rotation matrix from rocket coordinates to Earth coordinates
c = quat2rotmat(q)
angle = rot2anglemat(c)
# Rocket principle frame vectors expressed in Earth coordinates
ya = c.dot(np.array([1, 0, 0]).transpose()) # Yaw axis
pa = c.dot(np.array([0, 1, 0]).transpose()) # Pitch axis
ra = c.dot(np.array([0, 0, 1]).transpose()) # Roll axis
# Earth coordinates vectors expressed in Earth's frame
xe = np.array([1, 0, 0]).transpose()
ye = np.array([0, 1, 0]).transpose()
ze = np.array([0, 0, 1]).transpose()
# Mass properties
m = self.rocket.get_empty_mass() + propellant_mass
dMdt = np.linalg.norm(thrust_force)/(self.rocket.get_motor_Isp()*9.81)
cg = (self.rocket.get_dry_cg()*self.rocket.get_empty_mass() + self.rocket.get_propellant_cg()*propellant_mass)/m #from tip of nosecone
Sm = self.rocket.get_max_cross_section_surface
#I = c.transpose().dot(self.rocket.get_rocket_inertia()).dot(c)
I = c.dot(self.rocket.get_rocket_inertia()).dot(c.transpose())
# Environment
g = 9.81 # Gravity [m/s^2]
rho = self.Environment.get_density(x[2] + self.Environment.ground_altitude)
nu = self.Environment.get_viscosity(x[2] + self.Environment.ground_altitude) # !!! take 200 us
a = self.Environment.get_speed_of_sound(x[2] + self.Environment.ground_altitude)
# Aerodynamic corrective forces --------------------
# Compute center of mass angle of attack
v_cm = v - self.Environment.get_V_inf()*self.Environment.V_dir
v_cm_mag = np.linalg.norm(v_cm)
alpha_cm = math.atan2(np.linalg.norm(np.cross(ra, v_cm)), np.dot(ra, v_cm)) # !!! take 200 us
# Mach number
Mach = v_cm_mag / a
# Normal lift coefficient and center of pressure
CNa, Xcp, CNa_bar, CP_bar = normal_lift(self.rocket, alpha_cm, 1.1, Mach, angle[2], 1)
# Stability margin
margin = Xcp - cg
# Compute rocket angle of attack
if np.linalg.norm(w) != 0:
w_norm = w / np.linalg.norm(w)
else:
w_norm = np.zeros((3, 1))
wind_dir = np.dot(ra, w_norm)
if wind_dir > 1: wind_dir = 1
if wind_dir < -1: wind_dir = -1
v_rel = v_cm + margin * math.sin(math.acos(wind_dir)) * np.cross(ra, w) # center of mass speed
v_mag = np.linalg.norm(v_rel)
v_norm = normalize_vector(v_rel)
# Angle of attack
v_cross = np.cross(ra, v_norm)
v_cross_norm = normalize_vector(v_cross)
alpha = math.atan2(np.linalg.norm(np.cross(ra, v_norm)), np.dot(ra, v_norm))
delta = math.atan2(np.linalg.norm(np.cross(ra, ze)), np.dot(ra, ze))
# Normal force
na = np.cross(ra, v_cross)
if np.linalg.norm(na) == 0:
n = np.array([0, 0, 0]).transpose()
else:
n = 0.5 * rho * Sm * CNa * alpha * v_mag ** 2 * na/ (np.linalg.norm(na)+0.05) # --> constant added to avoid division by small number
# Drag
# Drag coefficient
cd = drag(self.rocket, alpha, v_mag, nu, a)*self.rocket.CD_fac # !!! take 3000 us !!! -> actually half of the computation time
# Drag force
d = -0.5 * rho * Sm * cd * v_mag ** 2 * v_norm
#print(0.5 * rho * Sm * cd * v_norm)
# Moment estimation ------------------------
# Aerodynamic corrective moment
mn = np.linalg.norm(n) * margin * v_cross_norm
# Aerodynamic damping moment
w_pitch = w - np.dot(w, ra) * ra
cdm = pitch_damping_moment(self.rocket, rho, CNa_bar, CP_bar, dMdt, cg, np.linalg.norm(w_pitch), v_mag)
md = -0.5 * rho * cdm * Sm * v_mag ** 2 * normalize_vector(w_pitch)
self.rocket.set_aero(n+d, mn+md)
def Dynamics_6DOF(self, t, s, thrust_force, thrust_torque):
start_time = time.time() # -----------------------------------------------------------------
x = s[0:3]
v = s[3:6]
q = s[6:10]
w = s[10:13]
propellant_mass = s[13]
# Normalise quaternion
q = normalize_vector(q)
# Rotation matrix from rocket coordinates to Earth coordinates
c = quat2rotmat(q)
angle = rot2anglemat(c)
# Rocket principle frame vectors expressed in Earth coordinates
ya = c.dot(np.array([1, 0, 0]).transpose()) # Yaw axis
pa = c.dot(np.array([0, 1, 0]).transpose()) # Pitch axis
ra = c.dot(np.array([0, 0, 1]).transpose()) # Roll axis
# Earth coordinates vectors expressed in Earth's frame
xe = np.array([1, 0, 0]).transpose()
ye = np.array([0, 1, 0]).transpose()
ze = np.array([0, 0, 1]).transpose()
# Mass properties
m = self.rocket.get_empty_mass() + propellant_mass
dMdt = np.linalg.norm(thrust_force)/(self.rocket.get_motor_Isp()*9.81)
cg = (self.rocket.get_dry_cg()*self.rocket.get_empty_mass() + self.rocket.get_propellant_cg()*propellant_mass)/m
Sm = self.rocket.get_max_cross_section_surface
#I = c.transpose().dot(self.rocket.get_rocket_inertia()).dot(c)
I = c.dot(self.rocket.get_rocket_inertia()).dot(c.transpose())
# Environment
g = 9.81 # Gravity [m/s^2]
rho = self.Environment.get_density(x[2] + self.Environment.ground_altitude)
nu = self.Environment.get_viscosity(x[2] + self.Environment.ground_altitude) # !!! take 200 us
a = self.Environment.get_speed_of_sound(x[2] + self.Environment.ground_altitude)
# Force computation: Thrust, gravity, drag and lift --------------------------
# Thrust
# X, Y, Z force in rocket frame, reoriented to world frame
T = c.dot(thrust_force.transpose())
# Gravity
G = -g * m * ze
# Aerodynamic corrective forces
# Compute center of mass angle of attack
v_cm = v - wind_model(t, self.Environment.get_turb(x[2] + self.Environment.ground_altitude),
self.Environment.get_V_inf()*self.Environment.V_dir, 'None' , x[2]) # TODO : V_dir
v_cm_mag = np.linalg.norm(v_cm)
alpha_cm = math.atan2(np.linalg.norm(np.cross(ra, v_cm)), np.dot(ra, v_cm)) # !!! take 200 us
# Mach number
Mach = v_cm_mag / a
# Normal lift coefficient and center of pressure
CNa, Xcp, CNa_bar, CP_bar = normal_lift(self.rocket, alpha_cm, 1.1, Mach, angle[2], 1)
# Stability margin
margin = Xcp - cg
# Compute rocket angle of attack
if np.linalg.norm(w) != 0:
w_norm = w / np.linalg.norm(w)
else:
w_norm = np.zeros((3, 1))
wind_dir = np.dot(ra, w_norm)
if wind_dir > 1: wind_dir = 1
if wind_dir < -1: wind_dir = -1
v_rel = v_cm + margin * math.sin(math.acos(wind_dir)) * np.cross(ra, w) # center of mass speed
v_mag = np.linalg.norm(v_rel)
v_norm = normalize_vector(v_rel)
# Angle of attack
v_cross = np.cross(ra, v_norm)
v_cross_norm = normalize_vector(v_cross)
alpha = math.atan2(np.linalg.norm(np.cross(ra, v_norm)), np.dot(ra, v_norm))
delta = math.atan2(np.linalg.norm(np.cross(ra, ze)), np.dot(ra, ze))
# Normal force
na = np.cross(ra, v_cross)
if np.linalg.norm(na) == 0:
n = np.array([0, 0, 0]).transpose()
else:
n = 0.5 * rho * Sm * CNa * alpha * v_mag ** 2 * na/ (np.linalg.norm(na)+0.05) # --> constant added to avoid division by small number
# Drag
# Drag coefficient
cd = drag(self.rocket, alpha, v_mag, nu, a)*self.rocket.CD_fac # !!! take 3000 us !!! -> actually half of the computation time
# Drag force
d = -0.5 * rho * Sm * cd * v_mag ** 2 * v_norm
# Total forces
f_tot = T + G #+ n + d
# Moment estimation
# Aerodynamic corrective moment
mn = np.linalg.norm(n) * margin * v_cross_norm
# Aerodynamic damping moment
w_pitch = w - np.dot(w, ra) * ra
cdm = pitch_damping_moment(self.rocket, rho, CNa_bar, CP_bar, dMdt, cg, np.linalg.norm(w_pitch), v_mag)
md = -0.5 * rho * cdm * Sm * v_mag ** 2 * normalize_vector(w_pitch)
m_tot = c.dot(thrust_torque.transpose())# + mn + md
# Translational dynamics
X_dot = v
V_dot = 1/m*(f_tot - v*dMdt)
# State derivatives
q_dot = quat_evolve(q, w)
w_dot = np.linalg.lstsq(I, m_tot, rcond=None)[0]
S_dot = np.concatenate((X_dot, V_dot, q_dot, w_dot, np.array([-dMdt])))
self.rocket.set_sensor_data(V_dot, w, x[2], c)
#print(1000*(time.time()-start_time))
return S_dot
def Dynamics_Parachute_3DOF(self, t, s, rocket, main):
x = s[0:3]
v = s[3:6]
rho = self.Environment.get_density(x[2] + self.Environment.ground_altitude)
# Aerodynamic force
v_rel = -v + wind_model(t, self.Environment.get_turb(x[2] + self.Environment.ground_altitude),
self.Environment.get_V_inf()*self.Environment.V_dir, self.Environment.get_turb_model(), x[2])
M = self.rocket.get_empty_mass() - self.rocket.pl_mass
if main:
SCD = self.rocket.get_para_main_SCD()
else:
SCD = self.rocket.get_para_drogue_SCD()
D = 0.5 * rho * SCD * np.linalg.norm(v_rel) * v_rel
# Gravity force
g = np.array([0, 0, -9.81])
G = g * M
dXdt = v
dVdt = (D+G)/M
dsdt = np.concatenate((dXdt, dVdt))
return dsdt
def Dynamics_3DOF(self, t, s):
X = s[0:3]
V = s[3:6]
XE = np.array([1, 0, 0])
YE = np.array([0, 1, 0])
ZE = np.array([0, 0, 1])
a = self.Environment.get_speed_of_sound(X[2] + self.Environment.ground_altitude)
rho = self.Environment.get_density(X[2] + self.Environment.ground_altitude)
nu = self.Environment.get_viscosity(X[2] + self.Environment.ground_altitude)
M = self.rocket.get_empty_mass()
V_rel = V - wind_model(t, self.Environment.get_turb(X[2] + self.Environment.ground_altitude),
self.Environment.get_V_inf()*self.Environment.V_dir,
self.Environment.get_turb_model(), X[2])
G = -9.81 * M * ZE
CD = drag(self.rocket, 0, np.linalg.norm(V_rel), nu, a)
D = -0.5 * rho * self.rocket.get_max_cross_section_surface * CD * V_rel * np.linalg.norm(V_rel)
X_dot = V
V_dot = 1 / M * (D + G)
S_dot = np.concatenate((X_dot, V_dot))
return S_dot
def Nose_Dynamics_3DOF(self, t, s, Environment):
X = s[0:3]
V = s[3:6]
XE = np.array([1, 0, 0]).transpose()
YE = np.array([0, 1, 0]).transpose()
ZE = np.array([0, 0, 1]).transpose()
# atmosphere
a = self.Environment.get_speed_of_sound(X[2] + self.Environment.ground_altitude)
rho = self.Environment.get_density(X[2] + self.Environment.ground_altitude)
nu = self.Environment.get_viscosity(X[2] + self.Environment.ground_altitude)
M = self.rocket.get_mass(t)
V_rel = V - wind_model(t, self.Environment.get_turb(X[0] + self.Environment.ground_altitude),
self.Environment.get_V_inf(),
self.Environment.get_turb_model(), X[2])
G = -9.81 * M * ZE
CD = Nose_drag(self.rocket, 0, np.linalg.norm(V_rel), nu, a)
D = -0.5 * rho * self.rocket.get_max_cross_section_surface * CD * V_rel * np.linalg.norm(V_rel)
X_dot = V
V_dot = 1 / M * (D + G)
return X_dot, V_dot
def Nose_Dynamics_6DOF(self, t, s):
X = s[0:3]
V = s[3:6]
Q = s[6:10]
W = s[10:13]
# Check quaternion norm
Q = normalize_vector(Q)
# Rotation matrix from rocket coordinates to Earth coordinates
C = quat2rotmat(Q)
angle = rot2anglemat(C)
# Rocket principle frame vectors expressed in earth coordinates
        YA = C.dot(np.array([1, 0, 0]).transpose())  # Yaw axis
        PA = C.dot(np.array([0, 1, 0]).transpose())  # Pitch axis
        RA = C.dot(np.array([0, 0, 1]).transpose())  # Roll axis
# Earth coordinates vectors expressed in earth's frame
XE = np.array([1, 0, 0]).transpose()
YE = np.array([0, 1, 0]).transpose()
ZE = np.array([0, 0, 1]).transpose()
# Rocket inertia
M = self.rocket.get_mass(t)
dMdt = self.rocket.get_dmass_dt(t)
CM = self.rocket.get_cg(t)
Sm = self.rocket.get_max_cross_section_surface
I_L = self.rocket.get_long_inertia(t)
I_R = self.rocket.get_rot_inertia(t)
        I = C.transpose().dot(np.array([[I_L, 0, 0],
                                        [0, I_L, 0],
                                        [0, 0, I_R]])).dot(C)
g = 9.81
# atmosphere
a = self.Environment.get_speed_of_sound(X[2] + self.Environment.ground_altitude)
rho = self.Environment.get_density(X[2] + self.Environment.ground_altitude)
nu = self.Environment.get_viscosity(X[2] + self.Environment.ground_altitude)
# Thrust
# Oriented along roll axis of rocket frame, expressed, in earth coordinates
T = self.rocket.get_thrust(t) * RA
G = -g * M * ZE
# Compute center of mass angle of attack
Vcm = V - wind_model(t, self.Environment.get_turb(X[0] + self.Environment.ground_altitude),
self.Environment.get_v_inf(),
self.Environment.get_turb_model(), X[2])
Vcm_mag = np.linalg.norm(Vcm)
alpha_cm = math.atan2(np.linalg.norm(np.cross(RA, Vcm)), np.dot(RA, Vcm))
# Mach number
Mach = np.linalg.norm(Vcm_mag) / a
# Normal lift coefficient and center of pressure
CNa, Xcp, CNa_bar, CP_bar = normal_lift(self.rocket, alpha_cm, 1.1, Mach, angle[2], 1)
# Stability margin
margin = Xcp - CM
# Compute rocket angle of attack
if np.linalg.norm(W) != 0:
w_norm = W / np.linalg.norm(W)
else:
            w_norm = np.zeros((3, 1))
Vrel = Vcm + margin * math.sin(math.acos(np.dot(RA, w_norm))) * np.cross(RA, W)
Vmag = np.linalg.norm(Vrel)
Vnorm = normalize_vector(Vrel)
# Angle of attack
Vcross = np.cross(RA, Vnorm)
Vcross_norm = normalize_vector(Vcross)
alpha = math.atan2(np.linalg.norm(np.cross(RA, Vnorm)), np.dot(RA, Vnorm))
delta = math.atan2(np.linalg.norm(np.cross(RA, ZE)), np.dot(RA, ZE))
# Normal force
NA = np.cross(RA, Vcross)
if np.linalg.norm(NA) == 0:
            N = np.array([0, 0, 0]).transpose()
else:
N = 0.5 * rho * Sm * CNa * alpha * Vmag ** 2 * NA / np.linalg.norm(NA)
# Drag
# Drag coefficient
CD = drag(self.rocket, alpha, Vmag, nu, a) # TODO : * cd_fac (always 1 ?)
ab_phi = self.rocket.ab_phi # TODO : find a way to deal with airbrakes, /!\ magic number
if t > self.rocket.get_burn_time:
CD = CD + drag_shuriken(self.rocket, ab_phi, alpha, Vmag, nu)
# Drag force
D = -0.5 * rho * Sm * CD * Vmag ** 2 * Vnorm
# Total forces
motor_fac = self.rocket.motor_fac # TODO : always 1 ?
F_tot = T * motor_fac + G + N + D
# Moment estimation
# Aerodynamic corrective moment
MN = np.linalg.norm(N) * margin * Vcross_norm
# Aerodynamic damping moment
w_pitch = W - np.dot(W, RA) * RA
cdm = pitch_damping_moment(self.rocket, rho, CNa_bar, CP_bar, dMdt, CM, np.linalg.norm(w_pitch), Vmag)
MD = -0.5 * rho * cdm * Sm * Vmag ** 2 * normalize_vector(w_pitch)
m_tot = MN + MD
tmp_Nose_Alpha = alpha
tmp_Nose_Delta = delta
        return V, 1 / M * (F_tot + V * dMdt), quat_evolve(Q, W), lin.lstsq(I, m_tot, rcond=None)[0]
def Payload_Dynamics_3DOF(self, t, s, Environment):
X = s[0:3]
V = s[3:6]
XE = np.array([1, 0, 0]).transpose()
YE = np.array([0, 1, 0]).transpose()
ZE = np.array([0, 0, 1]).transpose()
# atmosphere
a = self.Environment.get_speed_of_sound(X[2] + self.Environment.ground_altitude)
rho = self.Environment.get_density(X[2] + self.Environment.ground_altitude)
nu = self.Environment.get_viscosity(X[2] + self.Environment.ground_altitude)
M = self.rocket.get_mass(t)
V_rel = V - wind_model(t, self.Environment.get_turb(X[0] + self.Environment.ground_altitude),
self.Environment.get_v_inf(),
self.Environment.get_turb_model(), X[2])
G = -9.81 * M * ZE
SCD = 2.56 * 10 ** (-2)
D = -0.5 * rho * SCD * V_rel * np.linalg.norm(V_rel)
X_dot = V
V_dot = 1 / M * (D + G)
return X_dot, V_dot
def RailSim(self):
def off_rail(t, y): return y[0] - self.Environment.Rail_Length
off_rail.terminal = True
off_rail.direction = 1
# Initial Conditions
X0 = np.array([0, 0])
# Time span
tspan = np.array([0, 5])
# Options
print(tspan, X0)
# intergration
self.integration_ivp = solve_ivp(self.Dynamics_Rail_1DOF, tspan, X0, events=off_rail)
T1 = self.integration_ivp.t
S1 = self.integration_ivp.y
return T1, S1
def FlightSim(self, tspan, arg2, arg3=None, arg4=None, arg5=None):
if arg3 is None and arg4 is None and arg5 is None:
# Compute initial conditions based on rail output values
V = arg2
# Rail vector
            C_rail = rotmat(self.Environment.Rail_Azimuth, 3).dot(
                rotmat(self.Environment.Rail_Angle, 2)).dot(
                rotmat(self.Environment.Rail_Azimuth, 3).transpose())
RV = C_rail.dot(np.array([0, 0, 1]).transpose())
# Initial Conditions
X0 = RV * self.Environment.Rail_Length
V0 = RV * V
Q0 = rot2quat(C_rail.transpose())
W0 = np.array([0, 0, 0]).transpose()
S0 = np.concatenate((X0,V0,Q0,W0), axis=0)
elif arg3 is not None and arg4 is not None and arg5 is not None:
# Set initial conditions based on the exact value of the state vector
X0 = arg2
V0 = arg3
Q0 = arg4
W0 = arg5
S0 = np.concatenate((X0,V0,Q0,W0), axis=0)
else:
print("ERROR: In flight simulator, function accepts either 3 or 6 arguments")
def apogee(t, y):
return y[5]
apogee.terminal = True
apogee.direction = -1
self.integration_ivp = solve_ivp(self.Dynamics_6DOF, tspan, S0, events=apogee)
T2 = self.integration_ivp.t
S2 = self.integration_ivp.y
T2E = self.integration_ivp.t_events
S2E = self.integration_ivp.y_events
I2E = np.where(T2 == T2E)
return T2, S2, T2E, S2E, I2E
def DrogueParaSim(self, T0, X0, V0):
# Initial conditions
S0 = np.concatenate((X0, V0), axis=0)
# time span
tspan = np.array([T0, 500])
def MainEvent(t, y, rocket, main):
return (y[2] > rocket.get_para_main_event()) - 0.5
MainEvent.terminal = True
MainEvent.direction = -1
print(self.rocket.get_para_main_event())
# integration
self.integration_ivp = solve_ivp(self.Dynamics_Parachute_3DOF, tspan, S0, args=[self.rocket, 0], events=MainEvent)
T3 = self.integration_ivp.t
S3 = self.integration_ivp.y
T3E = self.integration_ivp.t_events
S3E = self.integration_ivp.y_events
I3E = np.where(T3 == T3E)
return T3, S3, T3E, S3E, I3E
def MainParaSim(self, T0, X0, V0):
# Initial conditions
S0 = np.concatenate((X0, V0), axis=0)
# time span
tspan = np.array([T0, 500])
def CrashEvent(t, y, rocket, main):
return (y[2] > 0) - 0.5
CrashEvent.terminal = True
CrashEvent.direction = -1
# integration
self.integration_ivp = solve_ivp(self.Dynamics_Parachute_3DOF, tspan, S0, args=[self.rocket, 1], events=CrashEvent)
T4 = self.integration_ivp.t
S4 = self.integration_ivp.y
T4E = self.integration_ivp.t_events
S4E = self.integration_ivp.y_events
I4E = np.where(T4 == T4E)
return T4, S4, T4E, S4E, I4E
def CrashSim(self, T0, X0, V0):
# Initial conditions
S0 = np.concatenate((X0, V0), axis=0)
print(S0, T0)
# time span
        tspan = np.array([T0, 100])
"""Code for computing the gamma sensor footprint, and for applying and
unapplying spatial convolution filters to a given image.
BM: this is used in scripts/gammasensor_cli.py - I haven't used it in
my time with uncoverml or seen it used.
"""
import numpy as np
import numpy.fft as fft
from skimage.restoration import deconvolution
def pad2(img):
img = np.ma.vstack((img, img[-2::-1]))
img = np.ma.hstack((img, img[:, -2::-1]))
return img
def fwd_filter(img, S):
img_w, img_h, ch = img.shape
F = pad2(img)
F.data[F.mask] = 0. # make sure its zero-filled!
# Forward transform
specF = np.fft.fft2(F.data.astype(float), axes=(0, 1))
specN = np.fft.fft2(1. - F.mask.astype(float), axes=(0, 1))
specS = np.fft.fft2(S[::-1, ::-1])
out = np.real(np.fft.ifft2(specF * specS[:, :, np.newaxis], axes=(0, 1)))
norm = np.real(np.fft.ifft2(specN * specS[:, :, np.newaxis], axes=(0, 1)))
eps = 1e-15
norm = np.maximum(norm, eps)
out /= norm
out = out[-img_w:, -img_h:]
out[img.mask] = 0.
return np.ma.MaskedArray(data=out, mask=img.mask)
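# Note (assumption inferred from pad2 and the FFT products above): the kernel S
# must already be sampled on the padded grid, i.e. have shape (2*H - 1, 2*W - 1)
# for an (H, W, channels) masked image, otherwise specF * specS will not
# broadcast. The footprint-building code is expected to supply S at that size.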
def kernel_impute(img, S):
F = pad2(img)
F.data[F.mask] = 0. # make sure its zero-filled!
img_w, img_h, img_ch = img.shape
Q = S
specF = np.fft.fft2(F.data.astype(float), axes=(0, 1))
specN = np.fft.fft2(1. - F.mask.astype(float), axes=(0, 1))
specQ = np.fft.fft2(Q[::-1, ::-1])
numer = np.real(np.fft.ifft2(specF * specQ[:, :, np.newaxis], axes=(0, 1)))
denom = np.real(np.fft.ifft2(specN * specQ[:, :, np.newaxis], axes=(0, 1)))
eps = 1e-15
fill = numer/(denom+eps)
fill = fill[-img_w:, -img_h:]
image = img.data.copy()
# img = img.copy()
image[img.mask] = fill[img.mask]
    mask = np.zeros_like(img.mask, dtype=bool)
    return np.ma.MaskedArray(data=image, mask=mask)  # assumed return of the imputed image
# coding=utf-8
import time
from pathlib import Path
from typing import List, Dict, Union
import numpy as np
from matplotlib import pyplot as plt
import argparse
## lib functions
def nice_string_output(
names: List[str], values: List[str], extra_spacing: int = 0,
):
max_values = len(max(values, key=len))
max_names = len(max(names, key=len))
string = ""
for name, value in zip(names, values):
string += "{0:s} {1:>{spacing}} \n".format(
name,
value,
spacing=extra_spacing + max_values + max_names - len(name),
)
return string[:-2]
# utility functions
def variance_harmonic(t, D, eta, k) -> np.ndarray:
_k = eta / k
return D * _k * (1 - np.exp(-2 * _k * t))
def F(x):
return -1 * args["spring"] * x * args["inv_friction"]
# Main running functions
def run_a(args):
D = args["diffusion"]
dt1 = args["dt1"]
dt2 = args["dt2"]
var1 = 2 * D * dt1
var2 = 2 * D * dt2
std1 = np.sqrt(var1)
std2 = np.sqrt(var2)
first_str = (
"a. In order to scale the normal distribution properly, multiply events with the \n"
"square root of the variance of the delta W distribution.\n"
"In this case, we have the following parameters:\n"
)
second_str = nice_string_output(
names=["D", "dt1", "dt2", "var1", "std1", "var2", "std2"],
values=[f"{val:.4f}" for val in (D, dt1, dt2, var1, std1, var2, std2)],
extra_spacing=4,
)
print(first_str + second_str)
def run_b(args):
t_max = args["tmax"]
D = args["diffusion"]
dt1 = args["dt1"]
dt2 = args["dt2"]
n_samples = args["samples"]
var1 = 2 * D * dt1
var2 = 2 * D * dt2
std1 = np.sqrt(var1)
std2 = np.sqrt(var2)
print(
"b. Simulating Brownian motion for "
f"{n_samples} particles, with\n"
+ nice_string_output(
names=["D", "dt1", "dt2", "var1", "std1", "var2", "std2"],
values=[
f"{val:.4f}" for val in (D, dt1, dt2, var1, std1, var2, std2)
],
extra_spacing=4,
)
)
# mean of distribution is 0, var is 2D dt
n_1 = int(t_max / dt1)
n_2 = int(t_max / dt2)
    ts_1 = np.linspace(0, t_max, num=n_1)
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
# Then we translate that to a shear field via FFT.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought doing
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
    # The point is the variance, which is still calculated ok, but I would rather
    # have had something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two bins for each to get + and - v
# bins. So this function averages these two values to produce 1 value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
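    # f maps one GGGCorrelation to a length-4 vector (one averaged value per
    # gamma component), so estimate_cov(..., func=f) below yields a 4x4 matrix.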
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
# The separate patch/non-patch combinations aren't that interesting, so skip them
# for GGG unless running from main.
if __name__ == '__main__':
# Patch on 1 only:
print('with patches on 1 only:')
gggp.process(catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
gggp.process(cat, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
gggp.process(cat, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
gggp.process(catp, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
gggp.process(cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
        print(np.diagonal(cov))
import numpy as np
from . import feature_extract
from mne.time_frequency import psd_welch
from sklearn.preprocessing import StandardScaler
class BandExtract(feature_extract.FeatureExtract):
AVALIABLE_BANDS = {
"alpha": (8.0, 12.0),
"beta": (12.0, 40.0),
"gamma": (40.0, 100.0),
"theta": (4.0, 8.0),
"delta": (0.0, 4.0)
}
def __init__(self,
bands,
freqs_around=None,
standard_scaler=True,
average=False):
self.bands = bands
self.freqs_around = freqs_around
self.standard_scaler = standard_scaler
self.average = average
def get_freq_range(self, band):
if type(band) is str:
return self._get_range_from_str(band)
return self._get_freq_range_from_number(band)
def _get_freq_range_from_number(self, band):
freqs_around = self.freqs_around if self.freqs_around else 0.5
min_freq = float(band) - freqs_around
max_freq = float(band) + freqs_around
return min_freq, max_freq
def _get_range_from_str(self, band: str):
if band in self.AVALIABLE_BANDS:
return self.AVALIABLE_BANDS[band]
def extract(self, raw):
feature = []
for band in self.bands:
min_freq, max_freq = self.get_freq_range(band)
psd, _ = psd_welch(raw, fmin=min_freq,
fmax=max_freq, verbose=False)
            band_all_channels = np.average(psd, axis=1)
import numpy as np
import networkx as nx
def make_maze(width=81, height=51, complexity=.75, density=.75):
r"""Generate a random maze array.
It only contains two kind of objects, obstacle and free space. The numerical value for obstacle
is ``1`` and for free space is ``0``.
Code from https://en.wikipedia.org/wiki/Maze_generation_algorithm
>>> make_maze(10, 10)
array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=uint8)
"""
# Only odd shapes
shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
# Adjust complexity and density relative to maze size
complexity = int(complexity * (5 * (shape[0] + shape[1])))
density = int(density * ((shape[0] // 2) * (shape[1] // 2)))
# Build actual maze
Z = np.zeros(shape, dtype=bool)
# Fill borders
Z[0, :] = Z[-1, :] = 1
Z[:, 0] = Z[:, -1] = 1
# Make aisles
for i in range(density):
        # Pick a random even starting cell (reconstruction based on the referenced algorithm).
        x, y = np.random.randint(0, shape[1] // 2 + 1) * 2, np.random.randint(0, shape[0] // 2 + 1) * 2
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import zip
from builtins import range
from builtins import object
import time
from matplotlib.collections import LineCollection
from matplotlib.transforms import Transform,Affine2D
import matplotlib.transforms as transforms
from matplotlib import collections, path
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.tri import Triangulation
from matplotlib import ticker
import numpy as np
from .. import utils
from six import string_types
try:
import xarray as xr
except ImportError:
xr="XARRAY NOT AVAILABLE"
# convenience function for getting coordinates from the plot:
def pick_points(n):
count = [0]
    pick_points.results = np.zeros((n, 2), np.float64)
fig = plt.gcf()
cid = None
def click_handler(event):
print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
event.button, event.x, event.y, event.xdata, event.ydata))
if event.xdata:
pick_points.results[count[0]] = [event.xdata, event.ydata]
count[0] += 1
if count[0] >= n:
fig.canvas.mpl_disconnect(cid)
cid = fig.canvas.mpl_connect('button_press_event', click_handler)
# A rehash of pick_points:
def ax_picker(ax):
fig = ax.figure
if hasattr(ax,'pick_cids'):
for cid in ax.pick_cids:
fig.canvas.mpl_disconnect(cid)
def init_picked():
        ax.picked = np.zeros((0, 4), np.float64)
ax.pick_start = None
init_picked()
def on_press(event):
if fig.canvas.toolbar.mode != '':
return
if event.button==1 and event.xdata:
ax.pick_start = [event.xdata,event.ydata]
elif event.button==3:
print(ax.picked)
init_picked()
def on_release(event):
if fig.canvas.toolbar.mode != '':
return
if event.xdata and ax.pick_start is not None:
new_pnt = np.array([ax.pick_start[0],event.xdata,ax.pick_start[1],event.ydata])
ax.picked=utils.array_append( ax.picked,new_pnt )
cid_p = fig.canvas.mpl_connect('button_press_event', on_press)
cid_r = fig.canvas.mpl_connect('button_release_event', on_release)
ax.pick_cids = [cid_p,cid_r]
def draw_polyline(ax=None,remove=True):
"""
rough and ready interface to capture a polyline in a plot window.
left clicks add a point, right click ends. returns the points in
a numpy array.
"""
ax=ax or plt.gca()
fig=ax.get_figure()
collecting=[1]
pick_points=[]
line=ax.plot([],[],'r-o')[0]
def click_handler(event):
if fig.canvas.toolbar.mode != '':
return
print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
event.button, event.x, event.y, event.xdata, event.ydata))
if event.button==1 and event.xdata:
pick_points.append( [event.xdata,event.ydata] )
x=[p[0] for p in pick_points]
y=[p[1] for p in pick_points]
line.set_xdata(x)
line.set_ydata(y)
elif event.button==3:
print("Done collecting points")
collecting[0]=0
cid = fig.canvas.mpl_connect('button_press_event', click_handler)
while collecting[0]:
plt.pause(0.01)
fig.canvas.mpl_disconnect(cid)
if remove:
ax.lines.remove(line)
plt.draw()
return np.array(pick_points)
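# Illustrative sketch (not part of the original module): typical interactive use
# of draw_polyline. Assumes an interactive matplotlib backend.
def _demo_draw_polyline():
    fig, ax = plt.subplots()
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 10)
    pnts = draw_polyline(ax=ax)  # left-click to add vertices, right-click to finish
    print("collected %d points" % len(pnts))
    return pnts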
def plotyy( x1, y1, x2, y2, color1='b', color2='g', fun=None, **kwargs ):
"""
A work-alike of the Matlab (TM) function of the same name. This
places two curves on the same axes using the same x-axis, but
different y-axes.
Call signature::
ax, h1, h2 = plotyy( x1, y2, x2, y2, color1='b', color2='g',
fun=None, **kwargs )
color1 and color2 are the colors to make respective curves and y-axes.
fun is the function object to use for plotting. Must accept calls
of the form fun(x,y,color='color',**kwargs). Typically, something
like plot, semilogy, semilogx or loglog. If *None*, defaults to
pyplot.plot.
**kwargs is any list of keyword arguments accepted by fun.
ax is a 2 element list with the handles for the first and second
axes. h1 is the handle to the first curve, h2 to the second
curve.
NOTE that this function won't scale two curves so that y-ticks are
in the same location as the Matlab (TM) version does.
"""
    if fun is None: fun = plt.plot
ax1 = plt.gca()
ax1.clear()
# Get axes location
try:
rect = ax1.get_position().bounds
except AttributeError:
rect = np.array( ax1.get_position() )
rect[2:] += rect[:2]
# Add first curve
h1 = fun( x1, y1, color=color1, **kwargs )
# Add second axes on top of first with joined x-axis
ax2 = plt.twinx(ax1)
# Plot second curve initially
h2 = fun( x2, y2, color=color2, **kwargs )
# Set axis properties
plt.setp( ax2.get_xticklabels(), visible=False)
# Change colors appropriately
def recolor( obj, col ):
try: obj.set_color( col )
except: pass
try: obj.set_facecolor( col )
except: pass
try: obj.set_edgecolor( col )
except: pass
try:
ch = obj.get_children()
for c in ch:
recolor( c, col )
except: pass
recolor( ax1.yaxis, color1 )
recolor( ax2.yaxis, color2 )
plt.draw_if_interactive()
return ( [ax1,ax2], h1, h2 )
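# Illustrative sketch (not part of the original module): plot two curves with a
# shared x-axis but independent y-axes.
def _demo_plotyy():
    x = np.linspace(0, 2 * np.pi, 200)
    axs, h1, h2 = plotyy(x, np.sin(x), x, 100 * np.cos(x),
                         color1='b', color2='g', fun=plt.plot)
    axs[0].set_ylabel('sin(x)')
    axs[1].set_ylabel('100 cos(x)')
    return axs, h1, h2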
# remove parts of the plot that extend beyond the x limits of the
# axis - assumes that the x-data for each line is non-decreasing
def trim_xaxis(ax=None):
ax = ax or plt.gca()
xmin,xmax,ymin,ymax = ax.axis()
for line in ax.lines:
xdata = line.get_xdata()
ydata = line.get_ydata()
i_start = np.searchsorted(xdata,xmin) - 1
if i_start < 0:
i_start = 0
i_end = np.searchsorted(xdata,xmax) + 1
xdata = xdata[i_start:i_end]
ydata = ydata[i_start:i_end]
line.set_xdata(xdata)
line.set_ydata(ydata)
def plot_tri(tri,**kwargs):
# DEPRECATED: matplotlib now has triplot and friends
# compile list of edges, then create the collection, and plot
ex = tri.x[tri.edge_db]
ey = tri.y[tri.edge_db]
    edges = np.concatenate( (ex[:,:,np.newaxis], ey[:,:,np.newaxis]), axis=2)
    colors = np.ones( (len(edges),4), np.float32 )
colors[:,:3] = 0
colors[:,3] = 1.0
coll = LineCollection(edges,colors=colors)
ax = plt.gca()
ax.add_collection(coll)
def scalebar(xy,L=None,aspect=0.05,unit_factor=1,fmt="%.0f",label_txt=None,fractions=[0,0.5,1.0],
ax=None,xy_transform=None,dy=None,
style='altboxes'):
""" Draw a simple scale bar with labels - bottom left
is given by xy.
    xy_transform: units for interpreting xy. If not given, defaults to the
      data coordinates of the axes (ax.transData).
    """
ax = ax or plt.gca()
if xy_transform is None:
txt_trans=xy_transform=ax.transData
else:
# Still have to pull x scaling from the data axis
xy_transform=ScaleXOnly(xy_transform,
ax.transData,xoffset=xy[0])
txt_trans=xy_transform
xy=[0,xy[1]] # x offset now rolled into xy_transform
if L is None:
xmin,xmax,ymin,ymax = ax.axis()
L = 0.2 * (xmax - xmin)
xmin,ymin = xy
dx = L
dy = dy or (aspect*L)
# xmax = xmin + L
ymax = ymin + dy
objs = []
txts = []
if style in ('boxes','altboxes'):
# one filled black box:
objs.append( ax.fill([xmin,xmin+dx,xmin+dx,xmin],
[ymin,ymin,ymax,ymax],
'k', edgecolor='k',transform=xy_transform) )
for i in range(len(fractions)-1):
xleft=xmin+dx*fractions[i]
xright=xmin+dx*fractions[i+1]
xlist=[xleft,xright,xright,xleft]
if style=='altboxes':
ybot=ymin+0.5*(i%2)*dy
ytop=ybot+0.5*dy
# print ybot,ytop
objs.append( ax.fill(xlist,
[ybot,ybot,ytop,ytop],
'w', edgecolor='k',transform=xy_transform) )
else:
                if i%2==0:
objs.append( ax.fill(xlist,
[ymin,ymin,ymax,ymax],
'w', edgecolor='k',transform=xy_transform) )
baseline=ymax + 0.25*dy
for frac in fractions:
frac_txt=fmt%(unit_factor* frac*L)
txts.append( ax.text(xmin+frac*dx,baseline,
frac_txt,
ha='center',
transform=txt_trans)
)
# annotate(fmt%(unit_factor*L), [xmin+dx,ymax+0.25*dy], ha='center')
# Really would like for the label to be on the same baseline
# as the fraction texts, and typeset along with the last
# label, but allowing the number of the last label to be
# centered on its mark
if label_txt:
txts.append( ax.text(xmin+frac*dx,baseline," "*len(frac_txt) + label_txt,ha='left',
transform=txt_trans) )
return objs,txts
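# Illustrative sketch (not part of the original module): a 100-unit scale bar
# drawn in data coordinates; placement, length and label are arbitrary choices.
def _demo_scalebar():
    fig, ax = plt.subplots()
    ax.plot([0, 500], [0, 200], 'k.')
    objs, txts = scalebar(xy=(50, 20), L=100, fmt="%.0f", label_txt=" m", ax=ax)
    return objs, txts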
def north_arrow(xy,L,ax=None,decl_east=0.0,transform=None,angle=0.0,width=0.1):
ax=ax or plt.gca()
transform=transform or ax.transData
w=width*L
xy=np.asarray(xy)
pnts=np.array( [[0,0], # base of arrow
[0,L], # vertical stroke
[w,0.5*L], # outer hypotenuse
[0,0.55*L]] ) # barb
tot_rot=angle-decl_east
pnts=utils.rot(tot_rot*np.pi/180,pnts)
pnts=pnts+xy
tip=xy+utils.rot(tot_rot*np.pi/180,np.array( [0,1.02*L] ))
obj=ax.fill( pnts[:,0],pnts[:,1],'k',transform=transform)
    txt=ax.text(tip[0],tip[1],r" $\mathcal{N}$",transform=transform,ha='center',rotation=tot_rot)
return obj,txt
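# Illustrative sketch (not part of the original module): a north arrow placed in
# data coordinates near the top-left corner of the axes.
def _demo_north_arrow():
    fig, ax = plt.subplots()
    ax.set_xlim(0, 1000)
    ax.set_ylim(0, 1000)
    obj, txt = north_arrow(xy=(100, 800), L=150, ax=ax)
    return obj, txt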
def show_slopes(ax=None,slopes=[-5./3,-1],xfac=5,yfac=3):
ax = ax or plt.gca()
x = np.median( [l.get_xdata()[-1] for l in ax.lines] )
y = np.max( [l.get_ydata()[-1] for l in ax.lines] )
y *= yfac # set the legend above the plotted lines
xs = np.array([x/xfac,x])
for s in slopes:
ys = np.array([y/xfac**s,y])
ax.loglog(xs,ys,c='0.5')
plt.annotate("%g"%s,[xs[0],ys[0]])
class LogLogSlopeGrid(object):
""" draw evenly spaced lines, for now in log-log space, at a given slope.
y=mx+b
"""
def __init__(self,ax=None,slopes=[-5/3.],intervals=[10],xmin=None,xmax=None):
""" Note that intervals is linear!
"""
self.ax = ax or plt.gca()
self.slopes = slopes
self.intervals = intervals
self.colls = []
self.xlog = self.ylog = True
self.xmin=xmin
self.xmax=xmax
self.draw()
def draw(self):
for c in self.colls:
self.ax.collections.remove(c)
self.colls = []
xmin,xmax,ymin,ymax = self.ax.axis()
# allow override
if self.xmin is not None:
xmin=self.xmin
if self.xmax is not None:
xmax=self.xmax
if self.xlog:
            xmin = np.log(xmin) ; xmax = np.log(xmax)
import numpy
import csv
#import time
#from skimage.feature import corner_fast,corner_peaks,corner_harris,corner_shi_tomasi
global lastlinecount,misslabel
from scipy.stats import shapiro
from scipy import ndimage as ndi
from skimage.morphology import watershed
from skimage.feature import peak_local_max
import tkintercore
colortable={}
colormatch={}
caliavgarea=0
calimax=0
calimin=0
calisigma=0
greatareas=[]
class node:
def __init__(self,i,j):
self.i=i
self.j=j
self.label=0
self.check=False
def boundarywatershed(area,segbondtimes,boundarytype): #area = 1's
if caliavgarea is not None and numpy.count_nonzero(area)<caliavgarea/2:
return area
x=[0,-1,-1,-1,0,1,1,1]
y=[1,1,0,-1,-1,-1,0,1]
areaboundary=tkintercore.get_boundary(area)
temparea=area-areaboundary
arealabels=tkintercore.labelgapnp(temparea)
unique, counts = numpy.unique(arealabels, return_counts=True)
if segbondtimes>=20:
return area
if(len(unique)>2):
res=arealabels+areaboundary
leftboundaryspots=numpy.where(areaboundary==1)
leftboundary_y=leftboundaryspots[0].tolist()
leftboundary_x=leftboundaryspots[1].tolist()
for uni in unique[1:]:
labelboundaryloc=tkintercore.get_boundaryloc(arealabels,uni)
for m in range(len(labelboundaryloc[0])):
for k in range(len(y)):
i = labelboundaryloc[0][m] + y[k]
j = labelboundaryloc[1][m] + x[k]
if i >= 0 and i < res.shape[0] and j >= 0 and j < res.shape[1]:
if res[i, j] == 1:
res[i,j]=uni
for n in range(len(leftboundary_y)):
if leftboundary_y[n]==i and leftboundary_x[n]==j:
leftboundary_y.pop(n)
leftboundary_x.pop(n)
break
res=numpy.asarray(res)-1
res=numpy.where(res<0,0,res)
return res
else:
newarea=boundarywatershed(temparea,segbondtimes+1,boundarytype)*2
res=newarea+areaboundary
leftboundaryspots=numpy.where(res==1)
leftboundary_y = leftboundaryspots[0].tolist()
leftboundary_x = leftboundaryspots[1].tolist()
unique=numpy.unique(newarea)
for uni in unique[1:]:
labelboundaryloc = tkintercore.get_boundaryloc(newarea, uni)
for m in range(len(labelboundaryloc[0])):
for k in range(len(y)):
i = labelboundaryloc[0][m] + y[k]
j = labelboundaryloc[1][m] + x[k]
if i >= 0 and i < res.shape[0] and j >= 0 and j < res.shape[1]:
if res[i, j] == 1:
res[i, j] = uni
for n in range(len(leftboundary_y)):
if leftboundary_y[n] == i and leftboundary_x[n] == j:
leftboundary_y.pop(n)
leftboundary_x.pop(n)
break
res=numpy.asarray(res)/2
res=numpy.where(res<1,0,res)
return res
def manualboundarywatershed(area):
'''
if numpy.count_nonzero(area)<avgarea/2:
return area
x=[0,-1,-1,-1,0,1,1,1]
y=[1,1,0,-1,-1,-1,0,1]
leftboundaryspots=numpy.where(area==1)
pixelcount=1
label=1
for k in range(len(leftboundaryspots[0])):
i=leftboundaryspots[0][k]
j=leftboundaryspots[1][k]
area[i][j]=label
pixelcount+=1
if pixelcount==int(avgarea):
pixelcount=1
label+=1
unique,count=numpy.unique(area,return_counts=True)
for i in range(1,len(count)):
if count[i]<avgarea/2:
area=numpy.where(area==unique[i],unique[i-1],area)
'''
maskpara=0.5
possiblecount=int(numpy.count_nonzero(area)/caliavgarea)
distance=ndi.distance_transform_edt(area)
masklength=int((caliavgarea*maskpara)**0.5)-1
local_maxi=peak_local_max(distance,indices=False,footprint=numpy.ones((masklength,masklength)),labels=area)
markers=ndi.label(local_maxi)[0]
unique=numpy.unique(markers)
while(len(unique)-1>possiblecount):
maskpara+=0.1
masklength=int((caliavgarea*maskpara)**0.5)-1
local_maxi=peak_local_max(distance,indices=False,footprint=numpy.ones((masklength,masklength)),labels=area)
markers=ndi.label(local_maxi)[0]
unique=numpy.unique(markers)
while(len(unique)-1<possiblecount):
maskpara-=0.1
masklength=int((caliavgarea*maskpara)**0.5)-1
try:
local_maxi=peak_local_max(distance,indices=False,footprint=numpy.ones((masklength,masklength)),labels=area)
except:
maskpara+=0.1
masklength=int((caliavgarea*maskpara)**0.5)-1
local_maxi=peak_local_max(distance,indices=False,footprint=numpy.ones((masklength,masklength)),labels=area)
markers=ndi.label(local_maxi)[0]
break
markers=ndi.label(local_maxi)[0]
unique=numpy.unique(markers)
localarea=watershed(-distance,markers,mask=area)
return localarea
def manualdivide(area,greatareas):
global exceptions
unique, counts = numpy.unique(area, return_counts=True)
hist=dict(zip(unique,counts))
del hist[0]
meanpixel=sum(counts[1:])/len(counts[1:])
countseed=numpy.asarray(counts[1:])
stdpixel=numpy.std(countseed)
sortedkeys=list(sorted(hist,key=hist.get,reverse=True))
while len(greatareas)>0:
topkey=greatareas.pop(0)
locs=numpy.where(area==topkey)
ulx,uly=min(locs[1]),min(locs[0])
rlx,rly=max(locs[1]),max(locs[0])
subarea=area[uly:rly+1,ulx:rlx+1]
subarea=subarea.astype(float)
tempsubarea=subarea/topkey
newtempsubarea=numpy.where(tempsubarea!=1.,0,1).astype(int)
antitempsubarea=numpy.where((tempsubarea!=1.) & (tempsubarea!=0),subarea,0)
times=len(locs[0])/meanpixel
averagearea=len(locs[0])/times
newsubarea=manualboundarywatershed(newtempsubarea)
labelunique,labcounts=numpy.unique(newsubarea,return_counts=True)
labelunique=labelunique.tolist()
labcounts=labcounts.tolist()
if len(labelunique)>2:
newsubarea=newsubarea*topkey
newlabel=labelunique.pop(-1)
maxlabel=area.max()
add=1
while newlabel>1:
newsubarea=numpy.where(newsubarea==topkey*newlabel,maxlabel+add,newsubarea)
print('new label: '+str(maxlabel+add))
newlabelcount=len(numpy.where(newsubarea==maxlabel+add)[0].tolist())
print('add '+'label: '+str(maxlabel+add)+' count='+str(newlabelcount))
newlabel=labelunique.pop(-1)
add+=1
newsubarea=newsubarea+antitempsubarea.astype(int)
area[uly:rly+1,ulx:rlx+1]=newsubarea
#labels=relabel(labels)
unique, counts = numpy.unique(area, return_counts=True)
hist=dict(zip(unique,counts))
del hist[0]
print('hist length='+str(len(counts)-1))
print('max label='+str(area.max()))
sortedkeys=list(sorted(hist,key=hist.get,reverse=True))
meanpixel=sum(counts[1:])/len(counts[1:])
countseed=numpy.asarray(counts[1:])
        stdpixel=numpy.std(countseed)
from __future__ import print_function
import numpy as np
import unittest
import discretize
TOL = 1e-8
class TestSimpleQuadTree(unittest.TestCase):
def test_counts(self):
nc = 8
h1 = np.random.rand(nc)*nc*0.5 + nc*0.5
h2 = np.random.rand(nc)*nc*0.5 + nc*0.5
h = [hi/np.sum(hi) for hi in [h1, h2]] # normalize
M = discretize.TreeMesh(h)
points = np.array([[0.1, 0.1, 0.3]])
level = np.array([3])
M.insert_cells(points, level)
M.number()
self.assertEqual(M.nhFx, 4)
self.assertEqual(M.nFx, 12)
self.assertTrue(np.allclose(M.vol.sum(), 1.0))
#self.assertTrue(np.allclose(np.r_[M._areaFxFull, M._areaFyFull], M._deflationMatrix('F') * M.area)
def test_getitem(self):
M = discretize.TreeMesh([4, 4])
M.refine(1)
self.assertEqual(M.nC, 4)
self.assertEqual(len(M), M.nC)
self.assertTrue(np.allclose(M[0].center, [0.25, 0.25]))
# actual = [[0, 0], [0.5, 0], [0, 0.5], [0.5, 0.5]]
# for i, n in enumerate(M[0].nodes):
# self.assertTrue(np.allclose(, actual[i])
def test_getitem3D(self):
M = discretize.TreeMesh([4, 4, 4])
M.refine(1)
self.assertEqual(M.nC, 8)
self.assertEqual(len(M), M.nC)
self.assertTrue(np.allclose(M[0].center, [0.25, 0.25, 0.25]))
# actual = [[0, 0, 0], [0.5, 0, 0], [0, 0.5, 0], [0.5, 0.5, 0],
# [0, 0, 0.5], [0.5, 0, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5]]
# for i, n in enumerate(M[0].nodes):
# self.assertTrue(np.allclose(M._gridN[n, :], actual[i])
def test_refine(self):
M = discretize.TreeMesh([4, 4, 4])
M.refine(1)
self.assertEqual(M.nC, 8)
def test_h_gridded_2D(self):
hx, hy = np.ones(4), np.r_[1., 2., 3., 4.]
M = discretize.TreeMesh([hx, hy])
def refinefcn(cell):
xyz = cell.center
d = (xyz**2).sum()**0.5
if d < 3:
return 2
return 1
M.refine(refinefcn)
H = M.h_gridded
test_hx = np.all(H[:, 0] == np.r_[1., 1., 1., 1., 2., 2., 2.])
test_hy = np.all(H[:, 1] == np.r_[1., 1., 2., 2., 3., 7., 7.])
self.assertTrue(test_hx and test_hy)
# def test_h_gridded_updates(self):
# mesh = discretize.TreeMesh([8, 8])
# mesh.refine(1)
#
# H = mesh.h_gridded
# self.assertTrue(np.all(H[:, 0] == 0.5*np.ones(4)))
# self.assertTrue(np.all(H[:, 1] == 0.5*np.ones(4)))
#
# # refine the mesh and make sure h_gridded is updated
# mesh.refine(2)
# H = mesh.h_gridded
# self.assertTrue(np.all(H[:, 0] == 0.25*np.ones(16)))
# self.assertTrue(np.all(H[:, 1] == 0.25*np.ones(16)))
def test_faceDiv(self):
hx, hy = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8]
T = discretize.TreeMesh([hx, hy], levels=2)
T.refine(lambda xc: 2)
# T.plotGrid(show_it=True)
M = discretize.TensorMesh([hx, hy])
self.assertEqual(M.nC, T.nC)
self.assertEqual(M.nF, T.nF)
self.assertEqual(M.nFx, T.nFx)
self.assertEqual(M.nFy, T.nFy)
self.assertEqual(M.nE, T.nE)
self.assertEqual(M.nEx, T.nEx)
self.assertEqual(M.nEy, T.nEy)
self.assertTrue(np.allclose(M.area, T.permuteF*T.area))
self.assertTrue(np.allclose(M.edge, T.permuteE*T.edge))
self.assertTrue(np.allclose(M.vol, T.permuteCC*T.vol))
# plt.subplot(211).spy(M.faceDiv)
# plt.subplot(212).spy(T.permuteCC*T.faceDiv*T.permuteF.T)
# plt.show()
self.assertEqual((M.faceDiv - T.permuteCC*T.faceDiv*T.permuteF.T).nnz, 0)
def test_serialization(self):
hx, hy = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8]
mesh1 = discretize.TreeMesh([hx, hy], levels=2, x0=np.r_[-1, -1])
mesh1.refine(2)
mesh2 = discretize.TreeMesh.deserialize(mesh1.serialize())
self.assertTrue(np.all(mesh1.x0 == mesh2.x0))
self.assertTrue(np.all(mesh1._n == mesh2._n))
self.assertTrue(np.all(mesh1.gridCC == mesh2.gridCC))
mesh1.x0 = np.r_[-2., 2]
mesh2 = discretize.TreeMesh.deserialize(mesh1.serialize())
self.assertTrue(np.all(mesh1.x0 == mesh2.x0))
class TestOcTree(unittest.TestCase):
def test_counts(self):
nc = 8
h1 = np.random.rand(nc)*nc*0.5 + nc*0.5
h2 = np.random.rand(nc)*nc*0.5 + nc*0.5
h3 = np.random.rand(nc)*nc*0.5 + nc*0.5
h = [hi/np.sum(hi) for hi in [h1, h2, h3]] # normalize
M = discretize.TreeMesh(h, levels=3)
points = np.array([[0.2, 0.1, 0.7],
[0.8, 0.4, 0.2]])
levels = np.array([1, 2])
M.insert_cells(points, levels)
M.number()
# M.plotGrid(show_it=True)
self.assertEqual(M.nhFx, 4)
self.assertTrue(M.nFx, 19)
self.assertTrue(M.nC, 15)
self.assertTrue(np.allclose(M.vol.sum(), 1.0))
# self.assertTrue(np.allclose(M._areaFxFull, (M._deflationMatrix('F') * M.area)[:M.ntFx]))
# self.assertTrue(np.allclose(M._areaFyFull, (M._deflationMatrix('F') * M.area)[M.ntFx:(M.ntFx+M.ntFy)])
# self.assertTrue(np.allclose(M._areaFzFull, (M._deflationMatrix('F') * M.area)[(M.ntFx+M.ntFy):])
# self.assertTrue(np.allclose(M._edgeExFull, (M._deflationMatrix('E') * M.edge)[:M.ntEx])
# self.assertTrue(np.allclose(M._edgeEyFull, (M._deflationMatrix('E') * M.edge)[M.ntEx:(M.ntEx+M.ntEy)])
# self.assertTrue(np.allclose(M._edgeEzFull, (M._deflationMatrix('E') * M.edge)[(M.ntEx+M.ntEy):]))
def test_faceDiv(self):
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
M.refine(lambda xc: 2)
# M.plotGrid(show_it=True)
Mr = discretize.TensorMesh([hx, hy, hz])
self.assertEqual(M.nC, Mr.nC)
self.assertEqual(M.nF, Mr.nF)
self.assertEqual(M.nFx, Mr.nFx)
self.assertEqual(M.nFy, Mr.nFy)
self.assertEqual(M.nE, Mr.nE)
self.assertEqual(M.nEx, Mr.nEx)
self.assertEqual(M.nEy , Mr.nEy)
self.assertTrue(np.allclose(Mr.area, M.permuteF*M.area))
self.assertTrue(np.allclose(Mr.edge, M.permuteE*M.edge))
self.assertTrue(np.allclose(Mr.vol, M.permuteCC*M.vol))
A = Mr.faceDiv - M.permuteCC*M.faceDiv*M.permuteF.T
self.assertTrue(np.allclose(A.data, 0))
def test_edgeCurl(self):
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
M.refine(lambda xc:2)
Mr = discretize.TensorMesh([hx, hy, hz])
A = Mr.edgeCurl - M.permuteF*M.edgeCurl*M.permuteE.T
self.assertTrue(len(A.data)==0 or np.allclose(A.data, 0))
def test_faceInnerProduct(self):
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
# hx, hy, hz = [[(1, 4)], [(1, 4)], [(1, 4)]]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
M.refine(lambda xc:2)
# M.plotGrid(show_it=True)
Mr = discretize.TensorMesh([hx, hy, hz])
# print(M.nC, M.nF, M.getFaceInnerProduct().shape, M.permuteF.shape)
A_face = Mr.getFaceInnerProduct() - M.permuteF * M.getFaceInnerProduct() * M.permuteF.T
A_edge = Mr.getEdgeInnerProduct() - M.permuteE * M.getEdgeInnerProduct() * M.permuteE.T
self.assertTrue(len(A_face.data)==0 or np.allclose(A_face.data, 0))
self.assertTrue(len(A_edge.data)==0 or np.allclose(A_edge.data, 0))
def test_VectorIdenties(self):
hx, hy, hz = [[(1, 4)], [(1, 4)], [(1, 4)]]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
Mr = discretize.TensorMesh([hx, hy, hz])
M.refine(2) #Why wasn't this here before?
self.assertTrue(np.allclose((M.faceDiv * M.edgeCurl).data, 0))
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
Mr = discretize.TensorMesh([hx, hy, hz])
M.refine(2)
A1 = M.faceDiv * M.edgeCurl
A2 = Mr.faceDiv * Mr.edgeCurl
self.assertTrue(len(A1.data)==0 or np.allclose(A1.data, 0))
self.assertTrue(len(A2.data)==0 or np.allclose(A2.data, 0))
def test_h_gridded_3D(self):
hx, hy, hz = np.ones(4), np.r_[1., 2., 3., 4.], 2*np.ones(4)
M = discretize.TreeMesh([hx, hy, hz])
def refinefcn(cell):
xyz = cell.center
d = (xyz**2).sum()**0.5
if d < 3:
return 2
return 1
M.refine(refinefcn)
H = M.h_gridded
test_hx = np.all(H[:, 0] == np.r_[1., 1., 1., 1., 1., 1., 1., 1., 2., 2., 2., 2., 2., 2., 2.])
test_hy = np.all(H[:, 1] == np.r_[1., 1., 2., 2., 1., 1., 2., 2., 3., 7., 7., 3., 3., 7., 7.])
test_hz = np.all(H[:, 2] == np.r_[2., 2., 2., 2., 2., 2., 2., 2., 4., 4., 4., 4., 4., 4., 4.])
self.assertTrue(test_hx and test_hy and test_hz)
class Test2DInterpolation(unittest.TestCase):
def setUp(self):
def topo(x):
return np.sin(x*(2.*np.pi))*0.3 + 0.5
def function(cell):
r = cell.center - np.array([0.5]*len(cell.center))
dist1 = np.sqrt(r.dot(r)) - 0.08
dist2 = np.abs(cell.center[-1] - topo(cell.center[0]))
dist = min([dist1, dist2])
# if dist < 0.05:
# return 5
if dist < 0.05:
return 6
if dist < 0.2:
return 5
if dist < 0.3:
return 4
if dist < 1.0:
return 3
else:
return 0
M = discretize.TreeMesh([64, 64], levels=6)
M.refine(function)
self.M = M
def test_fx(self):
r = np.random.rand(self.M.nFx)
P = self.M.getInterpolationMat(self.M.gridFx, 'Fx')
self.assertLess(np.abs(P[:, :self.M.nFx]*r - r).max(), TOL)
def test_fy(self):
r = np.random.rand(self.M.nFy)
P = self.M.getInterpolationMat(self.M.gridFy, 'Fy')
self.assertLess(np.abs(P[:, self.M.nFx:]*r - r).max(), TOL)
class Test3DInterpolation(unittest.TestCase):
def setUp(self):
def function(cell):
r = cell.center - np.array([0.5]*len(cell.center))
dist = np.sqrt(r.dot(r))
if dist < 0.2:
return 4
if dist < 0.3:
return 3
if dist < 1.0:
return 2
else:
return 0
M = discretize.TreeMesh([16, 16, 16], levels=4)
M.refine(function)
# M.plotGrid(show_it=True)
self.M = M
def test_Fx(self):
r = np.random.rand(self.M.nFx)
P = self.M.getInterpolationMat(self.M.gridFx, 'Fx')
self.assertLess(np.abs(P[:, :self.M.nFx]*r - r).max(), TOL)
def test_Fy(self):
r = np.random.rand(self.M.nFy)
P = self.M.getInterpolationMat(self.M.gridFy, 'Fy')
        self.assertLess(np.abs(P[:, self.M.nFx:(self.M.nFx+self.M.nFy)]*r - r).max(), TOL)
from abc import abstractmethod
from collections import OrderedDict
import esm
import multiprocessing as mlp
from numbers import Number
import numpy as np
import os
from pathlib import Path, PosixPath, WindowsPath
import pandas as pd
import pyrosetta
import pytorch_lightning as pl
import re
import shutil
import sys
import torch
import torch.nn.functional as F
import torch_geometric as torchg
from tqdm import tqdm
from typing import List, Union, Optional, Generator, Dict, Tuple, Callable, Set, Type, Iterable, Any
import json
import warnings
AnyNum = Union[int, float]
AnyPath = Union[str, PosixPath, WindowsPath]
AtomType = pyrosetta.rosetta.core.conformation.Atom
ResidueType = pyrosetta.rosetta.core.conformation.Residue
ConformationType = pyrosetta.rosetta.core.conformation.Conformation
PoseType = pyrosetta.rosetta.core.pose.Pose
AtomIDType = pyrosetta.rosetta.core.id.AtomID
EdgeType = List[int]
VERBOSE = 0
CLONE_RMSD = 1e-7
AA_ALPHABETS = list('ACDEFGHIKLMNPQRSTVWY')
PYROSETTA_INIT = False
ESM_MODEL = None
ESM_ALPHABET = None
ESM_BATCH_CONVERTER = None
CUDA_AVAILABLE = torch.cuda.is_available()
def is_dummy_mut(mut_name: str) -> bool:
return mut_name[0] == mut_name[-1]
def aa2index(aa: str) -> int:
aa = aa.capitalize()
assert aa in AA_ALPHABETS, f'{aa} is not supported.'
return AA_ALPHABETS.index(aa)
def seq2index(seq: Iterable) -> List[int]:
return [aa2index(aa) for aa in seq]
def seq2onehot(seq: Iterable) -> torch.Tensor:
index = torch.tensor(seq2index(seq))
return F.one_hot(index, num_classes=len(AA_ALPHABETS))
def read_pssm(pssm: AnyPath, return_type: str = 'DataFrame', relative: bool = False):
if return_type not in ('DataFrame', 'Array', 'Tensor', 'OrderedDict'):
raise ValueError('Only DataFrame, Array, Tensor and OrderedDict supported.')
pssm = Path(pssm)
    df = pd.read_csv(pssm, sep=r'\s+', skiprows=3, skipfooter=5, engine='python')
df.loc[len(df)] = df.columns
df = df.iloc[:, :22]
df.columns = ['resid', 'wt'] + list('ARNDCQEGHILKMFPSTWYV')
df = df[['resid', 'wt'] + AA_ALPHABETS]
df = df.astype({aa: float for aa in AA_ALPHABETS})
df = df.astype({'resid': int}).sort_values('resid')
df = df.set_index('resid')
if relative:
for i, row in df.copy().iterrows():
wt_v = row[row['wt']]
df.loc[i, AA_ALPHABETS] = row[AA_ALPHABETS] - wt_v
if return_type == 'DataFrame':
return df
elif return_type == 'Array':
return df[AA_ALPHABETS].values
elif return_type == 'Tensor':
return torch.from_numpy(df[AA_ALPHABETS].values)
else:
pssm = OrderedDict()
for resid in df.index:
wt_aa = df.loc[resid, 'wt']
for aa in AA_ALPHABETS:
pssm[f'{wt_aa}{resid}{aa}'] = df.loc[resid, aa]
return pssm
def pssm1D(seq: Iterable, pssm=None, return_type='Array', **kwargs):
"""Obtain pssm given sequence."""
if pssm is None:
pssm = read_pssm(return_type=return_type, **kwargs)
pssm_values = [pssm[i, aa2index(aa)] for i, aa in enumerate(seq)]
return torch.tensor(pssm_values).unsqueeze(-1)
def pssm2D(*args, return_type='Tensor', **kwargs):
"""Alias of read_pssm."""
return read_pssm(*args, return_type=return_type, **kwargs)
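# Illustrative sketch (not part of the original code): reading a PSI-BLAST PSSM
# in the formats provided above. 'example.pssm' is a placeholder path.
def _demo_read_pssm(pssm_path: AnyPath = 'example.pssm'):
    df = read_pssm(pssm_path, return_type='DataFrame')          # per-residue table
    scores = pssm2D(pssm_path)                                   # (n_res, 20) Tensor
    wt_scores = pssm1D(df['wt'].tolist(), pssm=scores.numpy())   # wild-type column only
    return df, scores, wt_scores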
def get_esm_representations(data, layers: Optional[List[int]] = None, model_name='esm1b_t33_650M_UR50S'):
# import esm and create object only when necessary to save memory
# but globals is a bad habit
model = globals()['ESM_MODEL']
alphabet = globals()['ESM_ALPHABET']
batch_converter = globals()['ESM_BATCH_CONVERTER']
if model is None or alphabet is None or batch_converter is None:
model, alphabet = getattr(esm.pretrained, model_name)()
batch_converter = alphabet.get_batch_converter()
model = model.eval()
if CUDA_AVAILABLE:
model = model.cuda()
globals()['ESM_MODEL'] = model
globals()['ESM_ALPHABET'] = alphabet
globals()['ESM_BATCH_CONVERTER'] = batch_converter
if layers is None:
layers = [33]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
with torch.no_grad():
if CUDA_AVAILABLE:
batch_tokens = batch_tokens.cuda()
results = model(batch_tokens, repr_layers=layers)
representations = [results['representations'][layer_i].squeeze()[1:-1] for layer_i in layers]
representations = torch.cat(representations, dim=1).squeeze()
return representations.cpu()
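# Illustrative sketch (not part of the original code): per-residue ESM-1b
# embeddings for a single made-up sequence.
def _demo_esm_representations() -> torch.Tensor:
    data = [('example_protein', 'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ')]
    reps = get_esm_representations(data, layers=[33])
    # For esm1b_t33_650M_UR50S the result has shape (sequence_length, 1280).
    return reps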
def get_directory_by_suffix(src_dir: AnyPath, suffixes: List, deep: bool = False) -> Generator[Path, None, None]:
"""Return files matching suffix (with .) underneath a directory (optionally recursive)."""
src_dir = Path(src_dir)
if deep:
for rootdir, subdirs, files in os.walk(src_dir):
for file in files:
if Path(file).suffix in suffixes:
yield Path(rootdir) / file
else:
if len(suffixes) == 1:
return src_dir.glob(f'*{suffixes[0]}')
else:
for file in src_dir.glob('*'):
if file.suffix in suffixes:
yield file
def get_directory_pdb(src_dir: Path, deep: bool = False) -> Generator[Path, None, None]:
"""Return all pdb (Path) underneath a directory (optionally recursive)."""
return get_directory_by_suffix(src_dir, ['.pdb'], deep)
def get_pyrosetta_flags(flags: Optional[AnyPath] = None, params_dir: Optional[AnyPath] = None,
verbose_level: int = 300 if VERBOSE else 0):
"""Return pyrosetta initialize flags with ligand parameters and verbose level in string.
If the ligand name clashes with existing ones in database, consider to rename/comment out
in database/chemical/residue_type_sets/fa_standard/residue_types.txt
"""
if not flags:
flags = 'predataset/flags'
if not params_dir:
params_dir = 'predataset/Ligand_params'
flags = Path(flags)
with flags.open('r') as fopen:
flags_str = fopen.read()
params_dir = Path(params_dir)
params = params_dir.glob('*.params')
params = sorted(map(str, params))
if not params:
raise FileNotFoundError(f'No params under {params_dir}')
flags_str += '\n-extra_res_fa ' + ' '.join(params)
flags_str += f'\n-out::level {verbose_level}'
return flags_str
def create_dir(path: AnyPath, overwrite: bool = False) -> bool:
"""Create (and overwrite) path directory."""
path = Path(path)
if overwrite:
shutil.rmtree(path)
path.mkdir(parents=True, exist_ok=True)
return True
def pdb2pose_atoms(pdb: AnyPath) -> Tuple[PoseType, Dict[AtomIDType, AtomType]]:
if not Path(pdb).exists():
raise FileNotFoundError(f'{pdb} not found in graph processing.')
pose = pyrosetta.pose_from_pdb(str(pdb))
atoms = {}
for residue in pose.residues:
resid = residue.seqpos()
for atom_idx in range(1, residue.natoms() + 1):
atom = residue.atom(atom_idx)
atom_id = pyrosetta.rosetta.core.id.AtomID(atom_idx, resid)
atoms[atom_id] = atom
return pose, atoms
def atom_id2atom_name(atom_id: AtomIDType, pose: PoseType) -> str:
residue = pose.residue(atom_id.rsd())
atom_name = residue.atom_name(atom_id.atomno())
atom_name = atom_name.strip()
return atom_name
def sort_atom_ids(atom_ids: Iterable[AtomIDType]) -> List[AtomIDType]:
"""Sort AtomIDs by (residue id, atom number)"""
def atom_id2int(atom_id):
return atom_id.rsd(), atom_id.atomno()
return sorted(atom_ids, key=atom_id2int)
def _parameter2config(instance: Any, parameters: Dict[str, Any]) -> Dict[str, Any]:
class_name = instance.__class__.__name__
return {'class_name': class_name, 'parameters': parameters}
class BaseNodeFilter:
"""Base class for node (atom) filter.
Only filters virtual atoms.
"""
def __init__(self, name: Optional[str] = None):
if name is None:
name = self.__class__.__name__
self.name = name
@property
def config(self) -> Dict[str, Any]:
parameters = {'name': self.name}
return _parameter2config(self, parameters)
def filter_func(self, pose: PoseType, atom_id: AtomIDType, atom_name: str) -> bool:
return True
def filter_virt(self, pose: PoseType, atom_id: AtomIDType, atom_name: str) -> bool:
"""Filter out virtual atoms."""
name_split = re.findall(r'[^\W\d_]+|\d+', atom_name)
if len(name_split) >= 2:
if 'V' == name_split[-2]:
return False
return True
def filter(self, pose: PoseType, atoms: Dict[AtomIDType, AtomType]) -> Dict[AtomIDType, AtomType]:
"""Filter by arbitrary method. Filter virtual atoms regardless."""
new_atoms = {}
for atom_id, atom in atoms.items():
atom_name = atom_id2atom_name(atom_id, pose)
if self.filter_func(pose, atom_id, atom_name) and self.filter_virt(pose, atom_id, atom_name):
new_atoms[atom_id] = atom
return new_atoms
# TODO filter by chain names
class ChainNodeFilter(BaseNodeFilter):
"""Filter node by chain"""
def __init__(self, chain_names: Union[Tuple, List], name: Optional[str] = None):
if not chain_names:
raise ValueError('chain_names is empty.')
self.chain_names = list(chain_names).copy()
super().__init__(name=name)
@property
def config(self) -> Dict[str, Dict]:
parameters = {'name': self.name, 'chain_names': self.chain_names.copy()}
return _parameter2config(self, parameters)
def filter_func(self, pose: PoseType, atom_id: AtomIDType, atom_name: str) -> bool:
"""Filter node by chain_names"""
raise NotImplementedError # how to get chain name instead of chain id in pyrosetta?
# TODO filter by backbone atoms
# class BackboneNodeFilter(BaseNodeFilter)
# TODO filter by residue name (3 letters)
# class ResidueNameFilter(BaseNodeFilter)
class AtomNameNodeFilter(BaseNodeFilter):
"""Filter node by atom name"""
def __init__(self, atom_name_pass: List = None, name: Optional[str] = None):
if type(atom_name_pass) != list:
            raise TypeError('atom_name_pass must be a list.')
self.atom_name_pass = atom_name_pass.copy()
super().__init__(name=name)
@property
def config(self) -> Dict[str, Dict]:
parameters = {'name': self.name, 'atom_name_pass': self.atom_name_pass.copy()}
return _parameter2config(self, parameters)
def filter_func(self, pose: PoseType, atom_id: AtomIDType, atom_name: str) -> bool:
"""Filter node by atom_name_pass"""
return atom_name.strip() in self.atom_name_pass
class CompositeNodeFilter(BaseNodeFilter):
"""Composite of NodeFilter(s)."""
def __init__(self, components: Optional[Iterable[Type[BaseNodeFilter]]] = None,
intersection: bool = True, name: Optional[str] = None):
"""Initialize with name and any number of NodeFilter(s).
Args:
components: Iterable of NodeFilters
intersection: Filter if it does not fulfill all node filter criteria
"""
if not components:
raise ValueError('No filter received.')
if not all(issubclass(filter.__class__, BaseNodeFilter) for filter in components):
raise TypeError('Some filter(s) is not subclass of BaseNodeFilter.')
super().__init__(name=name)
self.components = components
self.intersection = intersection
@property
def config(self) -> List[Dict]:
config = [component.config for component in self.components]
parameters = {'name': self.name, 'intersection': self.intersection}
subconfig = _parameter2config(self, parameters)
config.append(subconfig)
return config
def filter_func(self, pose: PoseType, atom_id: AtomIDType, atom_name: str) -> bool:
"""Filter by union/intersection of components"""
bools = (component.filter_func(pose, atom_id, atom_name) for component in self.components)
return all(bools) if self.intersection else any(bools)
class BaseNodeFeaturizer:
"""Base class for node featurization."""
def __init__(self, name: Optional[str] = None):
self.name = name
@property
def config(self) -> Dict[str, Dict]:
raise NotImplementedError
@abstractmethod
def featurize(self, pose: PoseType, atom_id: AtomIDType) -> List[Number]:
raise NotImplementedError
# TODO featurize by residue name (3 letters)
# class ResidueNodeFeaturizer(BaseNodeFeaturizer)
class AtomtypeNodeFeaturizer(BaseNodeFeaturizer):
"""Featurize node by atom type"""
def __init__(self, atomtypes: Optional[Dict[str, list]] = None, atom_cats: Optional[Dict[str, int]] = None,
name: Optional[str] = None):
"""Initialize with atom types and atom type categories.
As a safety precaution, all atom types must be contained in atom categories. Provide
custom atomtypes and atom_cats for 'AnythingElse' atom type if necessary. See example
below.
Args:
atomtypes: atom type containing a set of atom names. For example,
{'C': {'C', 'CA', 'CB', 'CD', 'CD1'}, 'O': {'O', 'OD1', 'OD2'}}
atom_cats: atom type mapping to numerical index. INDEX SHOULD START AT 1.
For example,
{'C': 1, 'H': 2, 'O': 3, 'N': 4, 'S': 5, 'MG': 6}
"""
self._atom_cats = None
self._atomtypes = None
self._atomname2cat = None
if atom_cats is None:
atom_cats = {'C': 1, 'H': 2, 'O': 3, 'N': 4, 'S': 5, 'MG': 6}
        assert 0 not in atom_cats.values(), '0 is not allowed in atom_cats indices.'
self.atom_cats = atom_cats
if atomtypes is None:
atomtypes = {
'N': ['N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NV', 'NZ'],
'S': ['SD', 'SG'],
'O': ['O', 'OD1', 'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT'],
'C': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2', 'CH2', 'CZ',
'CZ2', 'CZ3'],
'H': ['1H', '1HA', '1HB', '1HD', '1HD1', '1HD2', '1HE', '1HE2', '1HG', '1HG1', '1HG2', '1HH1', '1HH2',
'1HZ', '2H', '2HA', '2HB', '2HD', '2HD1', '2HD2', '2HE', '2HE2', '2HG', '2HG1', '2HG2', '2HH1',
'2HH2', '2HZ', '3H', '3HB', '3HD1', '3HD2', '3HE', '3HG1', '3HG2', '3HZ', 'H', 'HA', 'HB', 'HD1',
'HD2', 'HE', 'HE1', 'HE2', 'HE3', 'HG', 'HG1', 'HH', 'HH2', 'HZ', 'HZ2', 'HZ3'],
'MG': ['MG']
} # JSON serializable
self.atomtypes = atomtypes
super().__init__(name=name)
@property
def config(self) -> Dict[str, Dict]:
parameters = {
'name': self.name,
'atom_cats': self.atom_cats,
'atomtypes': self.atomtypes
}
return _parameter2config(self, parameters)
def _update_atomname2cat(self) -> bool:
"""Update atomname2cat upon change in atom_cats and atomtypes"""
mis_atomtypes = set()
if not self._atomtypes:
return False
self._atomname2cat = {}
for key, values in self._atomtypes.items():
if key not in self._atom_cats:
mis_atomtypes.add(key)
for value in values:
self._atomname2cat[value] = self._atom_cats[key]
if mis_atomtypes:
raise KeyError(f'{mis_atomtypes} not found in atom_cats.')
return True
@property
def atom_cats(self) -> Dict[str, int]:
return self._atom_cats.copy()
@atom_cats.setter
def atom_cats(self, atom_cats: Dict[str, int]):
values = list(atom_cats.values())
if len(values) != len(set(values)):
raise ValueError('Duplicate atom_cats indices are not allowed.')
self._atom_cats = atom_cats.copy()
self._update_atomname2cat()
@property
def atomtypes(self) -> Dict[str, Dict]:
return self._atomtypes.copy()
@atomtypes.setter
def atomtypes(self, atomtypes: Dict[str, Dict]):
self._atomtypes = atomtypes.copy()
self._update_atomname2cat()
@property
def n_atom_types(self) -> int:
return len(self.atom_cats)
@property
def atomname2cat(self) -> Dict[str, int]:
return self._atomname2cat.copy()
def featurize(self, pose: PoseType, atom_id: AtomIDType) -> List[Number]:
"""Annotate node by atom category in one-hot encoding"""
residue = pose.residue(atom_id.rsd())
atom_name = residue.atom_name(atom_id.atomno())
atom_name = atom_name.strip()
atom_cat = self._atomname2cat[atom_name]
# index must start at 1
encoding = [1 if atom_cat == i else 0
for i in range(1, len(self.atom_cats) + 1)]
return encoding
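# Illustrative sketch (not part of the original code): one-hot atom-type node
# features for every atom of a pose. The pdb path is a placeholder and pyrosetta
# is assumed to have been initialized elsewhere; atom names missing from the
# default atom-type tables would raise a KeyError.
def _demo_atomtype_node_features(pdb: AnyPath = 'example.pdb') -> torch.Tensor:
    pose, atoms = pdb2pose_atoms(pdb)
    featurizer = AtomtypeNodeFeaturizer()
    feats = [featurizer.featurize(pose, atom_id) for atom_id in sort_atom_ids(atoms)]
    return torch.tensor(feats, dtype=torch.float)  # (n_atoms, n_atom_types)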
class SeqEmbNodeFeaturizer(BaseNodeFeaturizer):
"""Featurize node by (external) sequence embedding."""
def __init__(self, emb_dir: AnyPath, name: Optional[str] = None):
"""Initialize with directory holding sequence embeddings.
WARNING: MAKE SURE PDB NUMBERING MATCHES THAT ON RCSB FASTA!
All embeddings must be named in format '{pdb_code}.pt'. Each of them should contain
a tensor of the sequence embedding in shape (seq_dim, emb_dim).
Args:
emb_dir: Embedding directory
"""
emb_dir = Path(emb_dir)
if not emb_dir.exists():
raise FileNotFoundError(f'{emb_dir} not found.')
super().__init__(name=name)
self.emb_dir = emb_dir
self.pt_dict = {pt.stem: pt for pt in emb_dir.glob('*.pt')}
if not self.pt_dict:
raise FileNotFoundError('No embedding pt file found underneath directory.')
warnings.warn('Make sure pdb numbering matches rcsb fasta!') # leap of faith!
def get_emb(self, pdb_name: str):
"""Get embedding by pdb name"""
pt = self.pt_dict[pdb_name]
emb = torch.load(str(pt))
assert len(emb.shape) == 2, 'Sequence embedding shape should be in (seq_dim, emb_dim).'
return emb
@property
def config(self) -> Dict[str, Dict]:
parameters = {
'name': self.name,
'emb_dir': str(self.emb_dir),
}
return _parameter2config(self, parameters)
def featurize(self, pose: PoseType, atom_id: AtomIDType) -> List[Number]:
"""Annotate node by sequence embedding."""
pdb = pose.pdb_info().name()
pdb_name = Path(pdb).stem
emb = self.get_emb(pdb_name)
rsd = atom_id.rsd()
resid = pose.pdb_info().pose2pdb(rsd)
resid = int(resid.split()[0])
return list(emb[resid-1])
class CompositeNodeFeaturizer(BaseNodeFeaturizer):
"""Composite of NodeFeaturizer(s)."""
def __init__(self, components: Optional[List[Type[BaseNodeFeaturizer]]] = None, name: Optional[str] = None):
"""Initialize with name and any number of NodeFeaturizer(s).
Args:
components: (Ordered) list of NodeFeaturizers
intersection: Edge determination by intersection of featurizers
"""
if not components:
raise ValueError('No featurizer received.')
if not all(issubclass(featurizer.__class__, BaseNodeFeaturizer) for featurizer in components):
raise TypeError('Some featurizer(s) is not subclass of NodeFeaturizer.')
super().__init__(name)
self.components = list(components) # mutable and ordered
self.n_features = None
@property
def config(self) -> List[Dict]:
config = [component.config for component in self.components]
parameters = {'name': self.name}
subconfig = _parameter2config(self, parameters)
config.append(subconfig)
return config
def featurize(self, pose: PoseType, atom_id: AtomIDType) -> List[Number]:
"""Annotate in accordance to sub-NodeFeaturizers and append outputs."""
features = []
for component in self.components:
feature = component.featurize(pose, atom_id)
features += feature
if self.n_features:
assert len(features) == self.n_features, \
f'Irregular feature length. (storage: {self.n_features} v.s. current {len(features)})'
else:
self.n_features = len(features)
return features
class BaseEdgeFeaturizer:
"""Base class of edge featurization (including edge determination)."""
def __init__(self, is_edge_only: bool = False, name: Optional[str] = None):
"""
Initialize and optionally allow edge determination only (empty feature list).
"""
if name is None:
name = self.__class__.__name__
self.name = name
self.is_edge_only = is_edge_only
@property
def config(self) -> Dict[str, Dict]:
raise NotImplementedError
@abstractmethod
def featurize(self, pose: PoseType, atom_id1: AtomIDType, atom_id2: AtomIDType) -> Tuple[bool, List[Number]]:
raise NotImplementedError
class BondedEdgeFeaturizer(BaseEdgeFeaturizer):
"""Featurize edge by chemical bond formation."""
def __init__(self, is_edge_only: bool = False, name: Optional[str] = None):
super().__init__(is_edge_only=is_edge_only, name=name)
@property
def config(self) -> Dict[str, Dict]:
parameters = {'name': self.name, 'is_edge_only': self.is_edge_only}
return _parameter2config(self, parameters)
def featurize(self, pose: PoseType, atom_id1: AtomIDType, atom_id2: AtomIDType) -> Tuple[bool, List[Number]]:
"""Annotate edge with [1] if chemically bonded. Otherwise [0]."""
is_edge = pose.conformation().is_bonded(atom_id1, atom_id2)
if self.is_edge_only:
return is_edge, []
feature = [1] if is_edge else [0]
return is_edge, feature
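# Illustrative sketch (not part of the original code): check whether the first
# two atoms of a pose are chemically bonded. The pdb path is a placeholder and
# pyrosetta is assumed to have been initialized elsewhere.
def _demo_bonded_edge(pdb: AnyPath = 'example.pdb'):
    pose, atoms = pdb2pose_atoms(pdb)
    atom_ids = sort_atom_ids(atoms)
    featurizer = BondedEdgeFeaturizer()
    is_edge, feature = featurizer.featurize(pose, atom_ids[0], atom_ids[1])
    return is_edge, feature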
class DistanceEdgeFeaturizer(BaseEdgeFeaturizer):
"""Featurize edge by atom separation distance."""
def __init__(self, is_edge_only: bool = False, max_distance: Number = 0., sigma: float = 0., name: Optional[str] = None):
"""Initialize with max separation distance."""
super().__init__(is_edge_only=is_edge_only, name=name)
self.max_distance = max_distance
self.sigma = sigma
@property
def config(self) -> Dict[str, Dict]:
parameters = {'name': self.name, 'is_edge_only': self.is_edge_only, 'sigma': self.sigma,
'max_distance': self.max_distance}
return _parameter2config(self, parameters)
def featurize(self, pose: PoseType, atom_id1: AtomIDType, atom_id2: AtomIDType) -> Tuple[bool, List[Number]]:
"""Annotate edge with [1] if nodes (atoms) are closer than max_distance. Otherwise [0]."""
xyz1 = pose.xyz(atom_id1)
xyz2 = pose.xyz(atom_id2)
xyz1 = np.asarray(xyz1)
xyz2 = np.asarray(xyz2)
if self.sigma:
            xyz1 += np.random.normal(0, self.sigma / 3 ** 0.5, 3)
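        # NOTE: the remainder of this method is missing from this excerpt; the
        # lines below are a hedged reconstruction based on the docstring above
        # (the original may differ, e.g. it may also perturb xyz2 when sigma is set).
        distance = float(np.linalg.norm(xyz1 - xyz2))
        is_edge = distance <= self.max_distance
        if self.is_edge_only:
            return is_edge, []
        feature = [1] if is_edge else [0]
        return is_edge, feature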
# Class A is wTx >0
# Class B is wTx <=0
import numpy
import matplotlib.pyplot as plt
samplesize = 50
# Generating All random data for class A and Class B
xclassA = numpy.random.uniform(-1,1, size=samplesize)
yclassA = numpy.random.uniform(-1,1, size=samplesize)
xclassB = numpy.random.uniform(-1,1, size=samplesize)
yclassB = numpy.random.uniform(-1,1, size=samplesize)
ones = numpy.ones(samplesize)
xyclassA = numpy.c_[xclassA, yclassA,ones]
xyclassB = numpy.c_[xclassB, yclassB,ones]
# Creating w list
wList = [[1, 1 ,0], [-1,-1,0],[0,0.5,0],[1,-1,5],[1.0,1.0,0.3]]
# Calculating the accuracy
overallAccuracy = []
for w in wList :
correct = [0 ,0]
for cord in xyclassA:
        if(numpy.dot(w,cord) > 0):
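            # NOTE: the rest of this snippet is missing from this excerpt; the
            # lines below are a hedged sketch of the accuracy bookkeeping it
            # presumably performs (class A counts as correct when w.x > 0,
            # class B when w.x <= 0, with samplesize points per class).
            correct[0] += 1
    for cord in xyclassB:
        if(numpy.dot(w,cord) <= 0):
            correct[1] += 1
    overallAccuracy.append((correct[0] + correct[1]) / (2.0 * samplesize))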
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the catalog module.
"""
from astropy.coordinates import SkyCoord
from astropy.modeling.models import Gaussian2D
from astropy.table import QTable
import astropy.units as u
from numpy.testing import assert_allclose, assert_equal, assert_raises
import numpy as np
import pytest
from ..catalog import SourceCatalog
from ..core import SegmentationImage
from ..detect import detect_sources
from ...aperture import CircularAperture, EllipticalAperture
from ...datasets import make_gwcs, make_wcs, make_noise_image
from ...utils._optional_deps import HAS_GWCS, HAS_MATPLOTLIB, HAS_SCIPY # noqa
@pytest.mark.skipif('not HAS_SCIPY')
class TestSourceCatalog:
def setup_class(self):
xcen = 51.
ycen = 52.7
major_sigma = 8.
minor_sigma = 3.
theta = np.pi / 6.
g1 = Gaussian2D(111., xcen, ycen, major_sigma, minor_sigma,
theta=theta)
g2 = Gaussian2D(50, 20, 80, 5.1, 4.5)
g3 = Gaussian2D(70, 75, 18, 9.2, 4.5)
g4 = Gaussian2D(111., 11.1, 12.2, major_sigma, minor_sigma,
theta=theta)
g5 = Gaussian2D(81., 61, 42.7, major_sigma, minor_sigma, theta=theta)
g6 = Gaussian2D(107., 75, 61, major_sigma, minor_sigma, theta=-theta)
g7 = Gaussian2D(107., 90, 90, 4, 2, theta=-theta)
yy, xx = np.mgrid[0:101, 0:101]
self.data = (g1(xx, yy) + g2(xx, yy) + g3(xx, yy) + g4(xx, yy)
+ g5(xx, yy) + g6(xx, yy) + g7(xx, yy))
threshold = 27.
self.segm = detect_sources(self.data, threshold, npixels=5)
self.error = make_noise_image(self.data.shape, mean=0, stddev=2.,
seed=123)
self.background = np.ones(self.data.shape) * 5.1
self.mask = np.zeros(self.data.shape, dtype=bool)
self.mask[0:30, 0:30] = True
self.wcs = make_wcs(self.data.shape)
self.cat = SourceCatalog(self.data, self.segm, error=self.error,
background=self.background, mask=self.mask,
wcs=self.wcs, localbkg_width=24)
unit = u.nJy
self.unit = unit
self.cat_units = SourceCatalog(self.data << unit, self.segm,
error=self.error << unit,
background=self.background << unit,
mask=self.mask, wcs=self.wcs,
localbkg_width=24)
@pytest.mark.parametrize('with_units', (True, False))
def test_catalog(self, with_units):
props1 = ('background_centroid', 'background_mean', 'background_sum',
'bbox', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx',
'cxy', 'cyy', 'ellipticity', 'elongation', 'fwhm',
'equivalent_radius', 'gini', 'kron_radius', 'maxval_xindex',
'maxval_yindex', 'minval_xindex', 'minval_yindex',
'perimeter', 'sky_bbox_ll', 'sky_bbox_lr', 'sky_bbox_ul',
'sky_bbox_ur', 'sky_centroid_icrs', 'local_background',
'segment_flux', 'segment_fluxerr', 'kron_flux',
'kron_fluxerr')
props2 = ('centroid', 'covariance', 'covariance_eigvals',
'cutout_centroid', 'cutout_maxval_index',
'cutout_minval_index', 'inertia_tensor', 'maxval_index',
'minval_index', 'moments', 'moments_central', 'background',
'background_ma', 'convdata', 'convdata_ma', 'data',
'data_ma', 'error', 'error_ma', 'segment', 'segment_ma')
props = tuple(self.cat.default_columns) + props1 + props2
if with_units:
cat1 = self.cat_units.copy()
cat2 = self.cat_units.copy()
else:
cat1 = self.cat.copy()
cat2 = self.cat.copy()
# test extra properties
cat1.circular_photometry(5.0, name='circ5')
cat1.kron_photometry((2.0, 1.0), name='kron2')
cat1.fluxfrac_radius(0.5, name='r_hl')
segment_snr = cat1.segment_flux / cat1.segment_fluxerr
cat1.add_extra_property('segment_snr', segment_snr)
props = list(props)
props.extend(cat1.extra_properties)
idx = 1
# evaluate (cache) catalog properties before slice
obj = cat1[idx]
for prop in props:
assert_equal(getattr(cat1, prop)[idx], getattr(obj, prop))
# slice catalog before evaluating catalog properties
obj = cat2[idx]
obj.circular_photometry(5.0, name='circ5')
obj.kron_photometry((2.0, 1.0), name='kron2')
obj.fluxfrac_radius(0.5, name='r_hl')
segment_snr = obj.segment_flux / obj.segment_fluxerr
obj.add_extra_property('segment_snr', segment_snr)
for prop in props:
assert_equal(getattr(obj, prop), getattr(cat1, prop)[idx])
@pytest.mark.parametrize('with_units', (True, False))
def test_catalog_detection_cat(self, with_units):
"""
Test aperture-based properties with an input detection catalog.
"""
error = 2.0 * self.error
data2 = self.data + error
if with_units:
cat1 = self.cat_units.copy()
cat2 = SourceCatalog(data2 << self.unit, self.segm,
error=error << self.unit,
background=self.background << self.unit,
mask=self.mask, wcs=self.wcs,
localbkg_width=24, detection_cat=None)
cat3 = SourceCatalog(data2 << self.unit, self.segm,
error=error << self.unit,
background=self.background << self.unit,
mask=self.mask, wcs=self.wcs,
localbkg_width=24, detection_cat=cat1)
else:
cat1 = self.cat.copy()
cat2 = SourceCatalog(data2, self.segm, error=error,
background=self.background, mask=self.mask,
wcs=self.wcs, localbkg_width=24,
detection_cat=None)
cat3 = SourceCatalog(data2, self.segm, error=error,
background=self.background, mask=self.mask,
wcs=self.wcs, localbkg_width=24,
detection_cat=cat1)
        assert_equal(cat1.kron_radius, cat3.kron_radius)
import numpy as np
import pandas as pd
from classy import Class
import pickle
import sys,os
import astropy
from astropy.cosmology import Planck15
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
from scipy import interpolate
from scipy import integrate
from scipy import special
from scipy.signal import argrelextrema
class CoflexTwopoint:
def __init__(self, coflex_power, survey, bin_combo):
self.l_list = coflex_power['ell']
self.P_F_list = coflex_power['P_F']
self.P_kappa_F_list = coflex_power['P_kappa_F']
self.survey = str(survey)
self.bin_combo = str(bin_combo)
def getTwoPoint(self):
# First, interpolate all arrays so that they can be turned into callable functions
self.interpolateArrays()
        # .. Get theta_list
        theta_flexflex_list = self.theta_flexflex_list()
        # .. Get two point correlation functions
# .. .. F-F autocorrelation
xi_FF_plus = self.two_point_corr_flexflex(theta_flexflex_list, 'FF_plus')
xi_FF_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'FF_minus')
# .. .. F-G cross-correlation. Note: xi_FG_plus = -xi_FF_minus
xi_FG_plus = [-xi_FF_minus[i] for i in range(len(xi_FF_minus))]
xi_FG_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'FG_minus')
# .. .. G-G cross correlation. Note: xi_GG_plus = xi_FF_plus
xi_GG_plus = xi_FF_plus
xi_GG_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'GG_minus')
# .. Export flexion-flexion correlation functions to .pkl file
col_list = ['theta', 'xi_FF_plus', 'xi_FF_minus', 'xi_FG_plus', 'xi_FG_minus', 'xi_GG_plus', 'xi_GG_minus']
arrs = [theta_flexflex_list, xi_FF_plus, xi_FF_minus, xi_FG_plus, xi_FG_minus, xi_GG_plus, xi_GG_minus]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/flexion-flexion_two_point_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
# Shear-flexion correlations:
# .. Get theta_list
theta_shearflex_list = self.theta_shearflex_list()
# .. Get two point correlation functions
# .. .. gam-F cross-correlation
xi_gamF_plus = self.two_point_corr_shearflex(theta_shearflex_list, 'gamF_plus')
xi_gamF_minus = self.two_point_corr_shearflex(theta_shearflex_list, 'gamF_minus')
# .. .. G-gam cross-correlation. Note: xi_Ggam_plus = xi_gamF_minus
xi_Ggam_plus = xi_gamF_plus
xi_Ggam_minus = self.two_point_corr_shearflex(theta_shearflex_list, 'Ggam_minus')
# .. Export flexion-flexion correlation functions to .pkl file
col_list = ['theta', 'xi_gamF_plus', 'xi_gamF_minus', 'xi_Ggam_plus', 'xi_Ggam_minus']
arrs = [theta_shearflex_list, xi_gamF_plus, xi_gamF_minus, xi_Ggam_plus, xi_Ggam_minus]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/shear-flexion_two_point_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
def theta_flexflex_list(self, theta_min=1, theta_max=100, N_theta=100):
"""
List of theta values for real-space cosmic flexion correlation functions
        Input angle values are in units of arcseconds.
"""
# Create logspace list of angular scale, in units of arcseconds
theta_min = np.log10(theta_min)
theta_max = np.log10(theta_max)
theta_list = np.logspace(theta_min,theta_max,N_theta)
dtheta = np.log10(theta_list[1])-np.log10(theta_list[0])
bin_low_list = 10**(np.log10(theta_list)-dtheta/2)
bin_high_list = 10**(np.log10(theta_list)+dtheta/2)
theta_max = np.log10(bin_high_list[-1])
theta_list = np.logspace(theta_min,theta_max,N_theta)
theta_list *= u.arcsec
return theta_list
def theta_shearflex_list(self, theta_min=1/60, theta_max=10., N_theta=100):
"""
List of theta values for real-space cosmic shear-flexion correlation functions
        Input angle values are in units of arcminutes.
"""
# Create logspace list of angular scale, in units of arcseconds
theta_min = np.log10(theta_min)
theta_max = np.log10(theta_max)
theta_list = np.logspace(theta_min,theta_max,N_theta)
dtheta = np.log10(theta_list[1])-np.log10(theta_list[0])
        bin_low_list = 10**(np.log10(theta_list)-dtheta/2)
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utils of GPT2 Modules.
"""
import collections
import json
import os
import re
import warnings
from abc import ABC
from typing import Any, Dict
import tensorflow as tf
import numpy as np
from texar.tf.modules.pretrained.pretrained_base import PretrainedMixin
__all__ = [
"PretrainedGPT2Mixin",
]
_GPT2_PATH = "https://storage.googleapis.com/gpt-2/models/"
_CHECKPOINT_FILES = [
"checkpoint", "encoder.json", "hparams.json", "vocab.bpe",
"model.ckpt.data-00000-of-00001", "model.ckpt.index", "model.ckpt.meta"]
class PretrainedGPT2Mixin(PretrainedMixin, ABC):
r"""A mixin class to support loading pre-trained checkpoints for modules
that implement the GPT2 model.
The GPT2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by `Radford et al.` from OpenAI. It is a unidirectional Transformer model
pre-trained using the vanilla language modeling objective on a large corpus.
The available GPT2 models are as follows:
* ``gpt2-small``: Small version of GPT-2, 124M parameters.
* ``gpt2-medium``: Medium version of GPT-2, 355M parameters.
* ``gpt2-large``: Large version of GPT-2, 774M parameters.
* ``gpt2-xl``: XL version of GPT-2, 1558M parameters.
We provide the following GPT2 classes:
* :class:`~texar.tf.modules.GPT2Encoder` for text encoding.
* :class:`~texar.tf.modules.GPT2Decoder` for text generation and
decoding.
* :class:`~texar.tf.modules.GPT2Classifier` for text classification and
sequence tagging.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
"""
_IS_DECODE = False
_MODEL_NAME = "GPT2"
_MODEL2URL = {
'gpt2-small': [_GPT2_PATH + f"124M/{file}"
for file in _CHECKPOINT_FILES],
'gpt2-medium': [_GPT2_PATH + f"355M/{file}"
for file in _CHECKPOINT_FILES],
'gpt2-large': [_GPT2_PATH + f"774M/{file}"
for file in _CHECKPOINT_FILES],
'gpt2-xl': [_GPT2_PATH + f"1558M/{file}"
for file in _CHECKPOINT_FILES],
}
# Raise warning for the deprecated pre-trained model names
class MyDict(dict):
def __contains__(self, key):
if key == '117M':
warnings.warn("Pre-trained model name '117M' is deprecated, "
"use 'gpt2-small' instead.", UserWarning)
return True
elif key == '345M':
warnings.warn("Pre-trained model name '345M' is deprecated, "
"use 'gpt2-medium' instead.", UserWarning)
return True
else:
return super().__contains__(key)
_DEPRECATED_MODEL2URL = {
'117M': [_GPT2_PATH + f"124M/{file}" for file in _CHECKPOINT_FILES],
'345M': [_GPT2_PATH + f"355M/{file}" for file in _CHECKPOINT_FILES],
}
_MODEL2URL.update(_DEPRECATED_MODEL2URL)
_MODEL2URL = MyDict(_MODEL2URL) # type: ignore
def _transform_config(self, pretrained_model_name: str,
cache_dir: str) -> Dict[str, Any]:
info = list(os.walk(cache_dir))
root, _, files = info[0]
config_path = None
for file in files:
if file.endswith('hparams.json'):
config_path = os.path.join(root, file)
if config_path is None:
raise ValueError(f"Cannot find the config file in {cache_dir}")
with open(config_path) as f:
config_gpt = json.loads(f.read())
hidden_dim = config_gpt["n_embd"]
configs = {
"vocab_size": config_gpt["n_vocab"],
"context_size": config_gpt["n_ctx"],
"embedding_size": config_gpt["n_embd"], "embed": {
"dim": hidden_dim,
},
"position_size": config_gpt["n_ctx"],
"position_embed": {
"dim": hidden_dim
}
}
module_name = "decoder" if self._IS_DECODE else "encoder"
configs.update({module_name: {
"dim": hidden_dim,
"num_blocks": config_gpt["n_layer"],
"embedding_dropout": 0,
"residual_dropout": 0,
"multihead_attention": {
"use_bias": True,
"num_units": hidden_dim,
"num_heads": config_gpt["n_head"],
"output_dim": hidden_dim,
},
"initializer": {
"type": "variance_scaling_initializer",
"kwargs": {
'factor': 1.0,
'mode': 'FAN_AVG',
'uniform': True
},
},
"poswise_feedforward": {
"layers": [
{
"type": "Dense",
"kwargs": {
'name': 'intermediate',
'activation': 'gelu',
"units": hidden_dim * 4,
"use_bias": True,
}
},
{
"type": "Dense",
"kwargs": {
'activation': None,
'name': 'output',
"units": hidden_dim,
"use_bias": True,
}
}
],
},
}})
return configs
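    # For reference (a rough sketch, not taken from the original file): with the
    # "gpt2-small" checkpoint, whose hparams.json has n_vocab=50257, n_ctx=1024,
    # n_embd=768, n_head=12 and n_layer=12, this method returns approximately
    #   {"vocab_size": 50257, "context_size": 1024, "embedding_size": 768,
    #    "embed": {"dim": 768}, "position_size": 1024, "position_embed": {"dim": 768},
    #    "decoder": {"dim": 768, "num_blocks": 12,
    #                "multihead_attention": {"num_units": 768, "num_heads": 12, ...},
    #                "poswise_feedforward": {... "units": 3072 ...}, ...}}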
def _init_from_checkpoint(self, pretrained_model_name, cache_dir,
scope_name, load_output_layer=True, **kwargs):
r"""Initialize model parameters from weights stored in the pre-trained
checkpoint.
Args:
pretrained_model_name (str): Name of the pre-trained model.
cache_dir (str): Path to the cache directory.
scope_name (str): Scope name of the model.
load_output_layer (bool): If `False`, will not load weights of the
output layer. Set this argument to `False` when loading weights
into a GPT2 encoder. Defaults to `True`.
"""
init_checkpoint = os.path.abspath(os.path.join(cache_dir,
'model.ckpt'))
ckpt = tf.train.load_checkpoint(init_checkpoint)
ckpt_params = {key: ckpt.get_tensor(key) for key in
ckpt.get_variable_to_shape_map().keys()}
tvars = tf.trainable_variables()
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
if load_output_layer:
global_tensor_map = {
'model/wte': scope_name + '/word_embeddings/w',
'model/wpe': scope_name + '/position_embeddings/w',
'model/ln_f/b': scope_name + '/decoder/beta',
'model/ln_f/g': scope_name + '/decoder/gamma',
}
layer_tensor_map = {
"ln_1/b": scope_name + '/layer_{}/beta',
"ln_1/g": scope_name + '/layer_{}/gamma',
"ln_2/b": scope_name + '/layer_{}/past_poswise_ln/beta',
"ln_2/g": scope_name + '/layer_{}/past_poswise_ln/gamma',
"mlp/c_fc/b": scope_name + '/decoder/layer_{}'
'/ffn/intermediate/bias',
"mlp/c_fc/w": scope_name + '/decoder/layer_{}'
'/ffn/intermediate/kernel',
"mlp/c_proj/b": scope_name + '/decoder/layer_{}/ffn/output/'
'bias',
"mlp/c_proj/w": scope_name + '/decoder/layer_{}/ffn/output/'
'kernel',
"attn/c_attn/b": None,
"attn/c_attn/w": None,
"attn/c_proj/b": scope_name + '/decoder/layer_{}'
'/self_attention/self/output/'
'bias',
"attn/c_proj/w": scope_name + '/decoder/layer_{}'
'/self_attention/self/output/'
'kernel',
}
else:
global_tensor_map = {
'model/wte': scope_name + '/word_embeddings/w',
'model/wpe': scope_name + '/position_embeddings/w',
'model/ln_f/b': scope_name + '/encoder/LayerNorm/beta',
'model/ln_f/g': scope_name + '/encoder/LayerNorm/gamma',
}
layer_tensor_map = {
"ln_1/b": scope_name + '/encoder/layer_{}/LayerNorm/beta',
"ln_1/g": scope_name + '/encoder/layer_{}/LayerNorm/gamma',
"ln_2/b": scope_name + '/encoder/layer_{}/output/'
'LayerNorm/beta',
"ln_2/g": scope_name + '/encoder/layer_{}/output/'
'LayerNorm/gamma',
"mlp/c_fc/b": scope_name + '/encoder/layer_{}'
'/ffn/intermediate/bias',
"mlp/c_fc/w": scope_name + '/encoder/layer_{}'
'/ffn/intermediate/kernel',
"mlp/c_proj/b": scope_name + '/encoder/layer_{}/ffn/output/'
'bias',
"mlp/c_proj/w": scope_name + '/encoder/layer_{}/ffn/output/'
'kernel',
"attn/c_attn/b": None,
"attn/c_attn/w": None,
"attn/c_proj/b": scope_name + '/encoder/layer_{}'
'/attention/self/output/bias',
"attn/c_proj/w": scope_name + '/encoder/layer_{}'
'/attention/self/output/kernel',
}
for name, array in ckpt_params.items():
if name in global_tensor_map:
v_name = global_tensor_map[name]
pointer = name_to_variable[v_name]
pointer._initializer_op = tf.assign(pointer._variable, array)
else:
name_tmp = name.split("/")
layer_no = name_tmp[1][1:]
name = "/".join(name_tmp[2:])
if name in layer_tensor_map:
if name == "attn/c_attn/b":
if load_output_layer:
K = name_to_variable[
scope_name + '/decoder/layer_' + layer_no +
'/self_attention/self/key/bias']
Q = name_to_variable[
scope_name + '/decoder/layer_' + layer_no +
'/self_attention/self/query/bias']
V = name_to_variable[
scope_name + '/decoder/layer_' + layer_no +
'/self_attention/self/value/bias']
else:
K = name_to_variable[
scope_name + '/encoder/layer_' + layer_no +
'/attention/self/key/bias']
Q = name_to_variable[
scope_name + '/encoder/layer_' + layer_no +
'/attention/self/query/bias']
V = name_to_variable[
scope_name + '/encoder/layer_' + layer_no +
'/attention/self/value/bias']
index_d = array.shape[-1] // 3
Q_w = array[:index_d]
K_w = array[index_d: 2 * index_d]
V_w = array[2 * index_d:]
K._initializer_op = tf.assign(K._variable, K_w)
Q._initializer_op = tf.assign(Q._variable, Q_w)
V._initializer_op = tf.assign(V._variable, V_w)
elif name == "attn/c_attn/w":
if load_output_layer:
K = name_to_variable[
scope_name + '/decoder/layer_' + layer_no +
'/self_attention/self/key/kernel']
Q = name_to_variable[
scope_name + '/decoder/layer_' + layer_no +
'/self_attention/self/query/kernel']
V = name_to_variable[
scope_name + '/decoder/layer_' + layer_no +
'/self_attention/self/value/kernel']
else:
K = name_to_variable[
scope_name + '/encoder/layer_' + layer_no +
'/attention/self/key/kernel']
Q = name_to_variable[
scope_name + '/encoder/layer_' + layer_no +
'/attention/self/query/kernel']
V = name_to_variable[
scope_name + '/encoder/layer_' + layer_no +
'/attention/self/value/kernel']
index_d = array.shape[-1] // 3
                        Q_w = np.transpose(array[0, :, :index_d])
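                        # --- editorial sketch, not from the original file: the
                        # source is truncated here. By analogy with the bias
                        # branch above, the kernel split presumably continues
                        # roughly as follows (treat this as an assumption):
                        K_w = np.transpose(array[0, :, index_d: 2 * index_d])
                        V_w = np.transpose(array[0, :, 2 * index_d:])
                        K._initializer_op = tf.assign(K._variable, K_w)
                        Q._initializer_op = tf.assign(Q._variable, Q_w)
                        V._initializer_op = tf.assign(V._variable, V_w)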
import copy
import warnings
from collections.abc import Iterable, Iterator
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
from stingray.exceptions import StingrayError
from stingray.gti import bin_intervals_from_gtis, check_gtis, cross_two_gtis
from stingray.largememory import createChunkedSpectra, saveData
from stingray.utils import genDataPath, rebin_data, rebin_data_log, simon
from .events import EventList
from .lightcurve import Lightcurve
from .utils import show_progress
# location of factorial moved between scipy versions
try:
from scipy.misc import factorial
except ImportError:
from scipy.special import factorial
try:
from pyfftw.interfaces.scipy_fft import fft, fftfreq
except ImportError:
warnings.warn("pyfftw not installed. Using standard scipy fft")
from scipy.fft import fft, fftfreq
__all__ = [
"Crossspectrum", "AveragedCrossspectrum", "coherence", "time_lag",
"cospectra_pvalue", "normalize_crossspectrum"
]
def normalize_crossspectrum(unnorm_power, tseg, nbins, nphots1, nphots2, norm="none", power_type="real"):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
nbins : int
Number of bins in the light curve
nphots1 : int
Number of photons in the light curve no. 1
nphots2 : int
Number of photons in the light curve no. 2
Other parameters
----------------
norm : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`
(absolute rms)
power_type : str
        One of `'real'` (real part), `'all'` (all complex powers), `'absolute'`
        (absolute value)
Returns
-------
    power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
# The "effective" counts/bin is the geometrical mean of the counts/bin
# of the two light curves. Same goes for counts/second in meanrate.
log_nphots1 = np.log(nphots1)
log_nphots2 = np.log(nphots2)
actual_nphots = np.float64(np.sqrt(np.exp(log_nphots1 + log_nphots2)))
if power_type == "all":
c_num = unnorm_power
elif power_type == "real":
c_num = unnorm_power.real
elif power_type == "absolute":
c_num = np.absolute(unnorm_power)
else:
raise ValueError("`power_type` not recognized!")
if norm.lower() == 'leahy':
power = c_num * 2. / actual_nphots
elif norm.lower() == 'frac':
meancounts1 = nphots1 / nbins
meancounts2 = nphots2 / nbins
actual_mean = np.sqrt(meancounts1 * meancounts2)
assert actual_mean > 0.0, \
"Mean count rate is <= 0. Something went wrong."
c = c_num / float(nbins ** 2.)
power = c * 2. * tseg / (actual_mean ** 2.0)
elif norm.lower() == 'abs':
meanrate = np.sqrt(nphots1 * nphots2) / tseg
power = c_num * 2. * meanrate / actual_nphots
elif norm.lower() == 'none':
power = unnorm_power
else:
raise ValueError("Value for `norm` not recognized.")
return power
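# Editorial usage sketch (not part of the original module): a minimal call to
# ``normalize_crossspectrum`` on a toy Poisson light curve with an assumed
# 0.1 s bin time. Wrapped in a helper so nothing runs at import time.
def _example_normalize_crossspectrum():
    counts = np.random.poisson(100, 1024)
    unnorm = np.absolute(np.fft.fft(counts)) ** 2
    nphots = counts.sum()
    # Leahy normalization of a power spectrum (both "light curves" identical)
    return normalize_crossspectrum(unnorm, tseg=1024 * 0.1, nbins=1024,
                                   nphots1=nphots, nphots2=nphots,
                                   norm='leahy')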
def normalize_crossspectrum_gauss(
unnorm_power, mean_flux, var, dt, N, norm="none", power_type="real"):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
mean_flux: float
The mean flux of the light curve (if a cross spectrum, the geometrical
mean of the flux in the two channels)
var: float
The variance of the light curve (if a cross spectrum, the geometrical
mean of the variance in the two channels)
dt: float
The sampling time of the light curve
N: int
The number of bins in the light curve
Other parameters
----------------
norm : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`
(absolute rms)
power_type : str
        One of `'real'` (real part), `'all'` (all complex powers), `'absolute'`
        (absolute value)
Returns
-------
    power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
Examples
--------
>>> lc_c = np.random.poisson(10000, 10000)
>>> lc_c_var = 10000
>>> lc = lc_c / 17.3453
>>> lc_var = (100 / 17.3453)**2
>>> pds_c = np.absolute(np.fft.fft(lc_c))**2
>>> pds = np.absolute(np.fft.fft(lc))**2
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), lc_c_var, 0.1, len(lc_c), norm='leahy')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='leahy')
>>> np.allclose(norm, norm_c)
True
>>> np.isclose(np.mean(norm[1:]), 2, atol=0.1)
True
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='frac')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='frac')
>>> np.allclose(norm, norm_c)
True
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='abs')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='abs')
>>> np.allclose(norm / np.mean(lc)**2, norm_c / np.mean(lc_c)**2)
True
>>> np.isclose(np.mean(norm_c[2:]), 2 * np.mean(lc_c * 0.1), rtol=0.1)
True
"""
# The "effective" counts/bin is the geometrical mean of the counts/bin
# of the two light curves. Same goes for counts/second in meanrate.
if power_type == "all":
c_num = unnorm_power
elif power_type == "real":
c_num = unnorm_power.real
elif power_type == "absolute":
c_num = np.absolute(unnorm_power)
else:
raise ValueError("`power_type` not recognized!")
common_factor = 2 * dt / N
rate_mean = mean_flux * dt
if norm.lower() == 'leahy':
norm = 2 / var / N
elif norm.lower() == 'frac':
norm = common_factor / rate_mean**2
elif norm.lower() == 'abs':
norm = common_factor
elif norm.lower() == 'none':
norm = 1
else:
raise ValueError("Value for `norm` not recognized.")
return norm * c_num
def _averaged_cospectra_cdf(xcoord, n):
"""
Function calculating the cumulative distribution function for
averaged cospectra, Equation 19 of Huppenkothen & Bachetti (2018).
Parameters
----------
xcoord : float or iterable
The cospectral power for which to calculate the CDF.
n : int
The number of averaged cospectra
Returns
-------
cdf : float
The value of the CDF at `xcoord` for `n` averaged cospectra
"""
if np.size(xcoord) == 1:
xcoord = [xcoord]
cdf = np.zeros_like(xcoord)
for i, x in enumerate(xcoord):
prefac_bottom1 = factorial(n - 1)
for j in range(n):
prefac_top = factorial(n - 1 + j)
prefac_bottom2 = factorial(
n - 1 - j) * factorial(j)
prefac_bottom3 = 2.0 ** (n + j)
prefac = prefac_top / (prefac_bottom1 * prefac_bottom2 *
prefac_bottom3)
gf = -j + n
first_fac = scipy.special.gamma(gf)
if x >= 0:
second_fac = scipy.special.gammaincc(gf, n * x) * first_fac
fac = 2.0 * first_fac - second_fac
else:
fac = scipy.special.gammaincc(gf, -n * x) * first_fac
cdf[i] += (prefac * fac)
if np.size(xcoord) == 1:
return cdf[i]
else:
continue
return cdf
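# Editorial sketch (not from the original source): evaluating the averaged
# cospectra CDF on a small grid of powers for, e.g., n = 5 averaged cospectra.
# The grid values are invented for illustration.
def _example_averaged_cospectra_cdf():
    xgrid = np.linspace(-2.0, 2.0, 5)
    return _averaged_cospectra_cdf(xgrid, n=5)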
def cospectra_pvalue(power, nspec):
"""
This function computes the single-trial p-value that the power was
observed under the null hypothesis that there is no signal in
the data.
Important: the underlying assumption that make this calculation valid
is that the powers in the power spectrum follow a Laplace distribution,
and this requires that:
1. the co-spectrum is normalized according to [Leahy 1983]_
2. there is only white noise in the light curve. That is, there is no
aperiodic variability that would change the overall shape of the power
spectrum.
Also note that the p-value is for a *single trial*, i.e. the power
currently being tested. If more than one power or more than one power
spectrum are being tested, the resulting p-value must be corrected for the
number of trials (Bonferroni correction).
Mathematical formulation in [Huppenkothen 2017]_.
Parameters
----------
power : float
The squared Fourier amplitude of a spectrum to be evaluated
nspec : int
The number of spectra or frequency bins averaged in ``power``.
This matters because averaging spectra or frequency bins increases
the signal-to-noise ratio, i.e. makes the statistical distributions
of the noise narrower, such that a smaller power might be very
significant in averaged spectra even though it would not be in a single
power spectrum.
Returns
-------
pval : float
The classical p-value of the observed power being consistent with
the null hypothesis of white noise
References
----------
* .. [Leahy 1983] https://ui.adsabs.harvard.edu/#abs/1983ApJ...266..160L/abstract
* .. [Huppenkothen 2017] http://adsabs.harvard.edu/abs/2018ApJS..236...13H
"""
if not np.all(np.isfinite(power)):
raise ValueError("power must be a finite floating point number!")
# if power < 0:
# raise ValueError("power must be a positive real number!")
if not np.isfinite(nspec):
raise ValueError("nspec must be a finite integer number")
if not np.isclose(nspec % 1, 0):
raise ValueError("nspec must be an integer number!")
if nspec < 1:
raise ValueError("nspec must be larger or equal to 1")
elif nspec == 1:
lapl = scipy.stats.laplace(0, 1)
pval = lapl.sf(power)
elif nspec > 50:
exp_sigma = np.sqrt(2) / np.sqrt(nspec)
gauss = scipy.stats.norm(0, exp_sigma)
pval = gauss.sf(power)
else:
pval = 1. - _averaged_cospectra_cdf(power, nspec)
return pval
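# Editorial usage sketch (not part of the original module): single-trial
# p-value for a Leahy-normalized cospectral power of 8.0 averaged over 10
# spectra. The numbers are made up for illustration.
def _example_cospectra_pvalue():
    return cospectra_pvalue(8.0, nspec=10)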
def coherence(lc1, lc2):
"""
Estimate coherence function of two light curves.
For details on the definition of the coherence, see Vaughan and Nowak,
1996 [#]_.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
The first light curve data for the channel of interest.
lc2: :class:`stingray.Lightcurve` object
The light curve data for reference band
Returns
-------
coh : ``np.ndarray``
The array of coherence versus frequency
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
return cs.coherence()
def time_lag(lc1, lc2):
"""
Estimate the time lag of two light curves.
Calculate time lag and uncertainty.
Equation from Bendat & Piersol, 2011 [bendat-2011]_.
Returns
-------
lag : np.ndarray
The time lag
lag_err : np.ndarray
The uncertainty in the time lag
References
----------
.. [bendat-2011] https://www.wiley.com/en-us/Random+Data%3A+Analysis+and+Measurement+Procedures%2C+4th+Edition-p-9780470248775
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
lag = cs.time_lag()
return lag
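# Editorial usage sketch (not from the original source): ``coherence`` and
# ``time_lag`` act on two evenly sampled ``Lightcurve`` objects; the toy
# counts below are invented purely for illustration.
def _example_coherence_and_lag():
    times = np.arange(0, 100, 0.1)
    lc1 = Lightcurve(times, np.random.poisson(50, times.size), dt=0.1)
    lc2 = Lightcurve(times, np.random.poisson(50, times.size), dt=0.1)
    return coherence(lc1, lc2), time_lag(lc1, lc2)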
class Crossspectrum(object):
"""
Make a cross spectrum from a (binned) light curve.
You can also make an empty :class:`Crossspectrum` object to populate with your
own Fourier-transformed data (this can sometimes be useful when making
binned power spectra). Stingray uses the scipy.fft standards for the sign
of the Nyquist frequency.
Parameters
----------
data1: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The first light curve data for the channel/band of interest.
data2: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The light curve data for the reference band.
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``
The normalization of the (real part of the) cross spectrum.
power_type: string, optional, default ``real``
Parameter to choose among complete, real part and magnitude of the cross spectrum.
fullspec: boolean, optional, default ``False``
        If False, keep only the positive frequencies; if True, keep all of them.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data1``, but no
:class:`stingray.events.EventList` objects allowed
lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data2``, but no
:class:`stingray.events.EventList` objects allowed
dt: float
The time resolution of the light curve. Only needed when constructing
light curves in the case where ``data1``, ``data2`` are
:class:`EventList` objects
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra (complex numbers)
power_err: numpy.ndarray
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
        binning, or averaging more than one spectrum). Note that for a single
realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross-spectra amplitudes in each bin.
n: int
The number of data points/time bins in one segment of the light
curves.
nphots1: float
The total number of photons in light curve 1
nphots2: float
The total number of photons in light curve 2
"""
def __init__(self, data1=None, data2=None, norm='none', gti=None,
lc1=None, lc2=None, power_type="real", dt=None, fullspec=False):
if isinstance(norm, str) is False:
raise TypeError("norm must be a string")
if norm.lower() not in ["frac", "abs", "leahy", "none"]:
raise ValueError("norm must be 'frac', 'abs', 'leahy', or 'none'!")
self.norm = norm.lower()
# check if input data is a Lightcurve object, if not make one or
# make an empty Crossspectrum object if lc1 == ``None`` or lc2 == ``None``
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if data1 is None or data2 is None:
if data1 is not None or data2 is not None:
raise TypeError("You can't do a cross spectrum with just one "
"light curve!")
else:
self.freq = None
self.power = None
self.power_err = None
self.df = None
self.nphots1 = None
self.nphots2 = None
self.m = 1
self.n = None
return
if (isinstance(data1, EventList) or isinstance(data2, EventList)) and \
dt is None:
raise ValueError("If using event lists, please specify the bin "
"time to generate lightcurves.")
if not isinstance(data1, EventList):
lc1 = data1
else:
lc1 = data1.to_lc(dt)
if not isinstance(data2, EventList):
lc2 = data2
elif isinstance(data2, EventList) and data2 is not data1:
lc2 = data2.to_lc(dt)
elif data2 is data1:
lc2 = lc1
self.gti = gti
self.lc1 = lc1
self.lc2 = lc2
self.power_type = power_type
self.fullspec = fullspec
self._make_crossspectrum(lc1, lc2, fullspec)
# These are needed to calculate coherence
self._make_auxil_pds(lc1, lc2)
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
if lc1 is not lc2 and isinstance(lc1, Lightcurve):
self.pds1 = Crossspectrum(lc1, lc1, norm='none')
self.pds2 = Crossspectrum(lc2, lc2, norm='none')
def _make_crossspectrum(self, lc1, lc2, fullspec=False):
"""
Auxiliary method computing the normalized cross spectrum from two
light curves. This includes checking for the presence of and
applying Good Time Intervals, computing the unnormalized Fourier
cross-amplitude, and then renormalizing using the required
normalization. Also computes an uncertainty estimate on the cross
spectral powers.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
fullspec: boolean, default ``False``
Return full frequency array (True) or just positive frequencies (False)
"""
# make sure the inputs work!
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
if self.lc2.mjdref != self.lc1.mjdref:
raise ValueError("MJDref is different in the two light curves")
# Then check that GTIs make sense
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
if self.gti.shape[0] != 1:
raise TypeError("Non-averaged Cross Spectra need "
"a single Good Time Interval")
lc1 = lc1.split_by_gti()[0]
lc2 = lc2.split_by_gti()[0]
# total number of photons is the sum of the
# counts in the light curve
self.meancounts1 = lc1.meancounts
self.meancounts2 = lc2.meancounts
self.nphots1 = np.float64(np.sum(lc1.counts))
self.nphots2 = np.float64(np.sum(lc2.counts))
self.err_dist = 'poisson'
if lc1.err_dist == 'poisson':
self.var1 = lc1.meancounts
else:
self.var1 = np.mean(lc1.counts_err) ** 2
self.err_dist = 'gauss'
if lc2.err_dist == 'poisson':
self.var2 = lc2.meancounts
else:
self.var2 = np.mean(lc2.counts_err) ** 2
self.err_dist = 'gauss'
if lc1.n != lc2.n:
raise StingrayError("Light curves do not have same number "
"of time bins per segment.")
# If dt differs slightly, its propagated error must not be more than
# 1/100th of the bin
if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise StingrayError("Light curves do not have same time binning "
"dt.")
# In case a small difference exists, ignore it
lc1.dt = lc2.dt
self.dt = lc1.dt
self.n = lc1.n
# the frequency resolution
self.df = 1.0 / lc1.tseg
# the number of averaged periodograms in the final output
# This should *always* be 1 here
self.m = 1
# make the actual Fourier transform and compute cross spectrum
self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2, fullspec)
# If co-spectrum is desired, normalize here. Otherwise, get raw back
# with the imaginary part still intact.
self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)
if lc1.err_dist.lower() != lc2.err_dist.lower():
simon("Your lightcurves have different statistics."
"The errors in the Crossspectrum will be incorrect.")
elif lc1.err_dist.lower() != "poisson":
simon("Looks like your lightcurve statistic is not poisson."
"The errors in the Powerspectrum will be incorrect.")
if self.__class__.__name__ in ['Powerspectrum',
'AveragedPowerspectrum']:
self.power_err = self.power / np.sqrt(self.m)
elif self.__class__.__name__ in ['Crossspectrum',
'AveragedCrossspectrum']:
# This is clearly a wild approximation.
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any inconsistencies.")
unnorm_power_err = np.sqrt(2) / np.sqrt(self.m) # Leahy-like
unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))
unnorm_power_err += np.zeros_like(self.power)
self.power_err = \
self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)
else:
self.power_err = np.zeros(len(self.power))
def _fourier_cross(self, lc1, lc2, fullspec=False):
"""
Fourier transform the two light curves, then compute the cross spectrum.
Computed as CS = lc1 x lc2* (where lc2 is the one that gets
complex-conjugated). The user has the option to either get just the
positive frequencies or the full spectrum.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
            One light curve to be Fourier transformed. This is the band of
interest or channel of interest.
lc2: :class:`stingray.Lightcurve` object
Another light curve to be Fourier transformed.
This is the reference band.
fullspec: boolean. Default is False.
            If True, return the whole array of frequencies; if False, return
            only the positive frequencies.
Returns
-------
        freqs: numpy.ndarray
            The frequencies at which the cross spectrum is sampled
        cross: numpy.ndarray
            The unnormalized (complex) cross spectrum
"""
fourier_1 = fft(lc1.counts) # do Fourier transform 1
fourier_2 = fft(lc2.counts) # do Fourier transform 2
freqs = scipy.fft.fftfreq(lc1.n, lc1.dt)
cross = np.multiply(fourier_1, np.conj(fourier_2))
if fullspec is True:
return freqs, cross
else:
return freqs[freqs > 0], cross[freqs > 0]
def rebin(self, df=None, f=None, method="mean"):
"""
Rebin the cross spectrum to a new frequency resolution ``df``.
Parameters
----------
df: float
The new frequency resolution
Other Parameters
----------------
f: float
the rebin factor. If specified, it substitutes df with ``f*self.df``
Returns
-------
        bin_cs : :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that called this method. For example, if this method is called
from :class:`AveragedPowerspectrum`, it will return an object of class
:class:`AveragedPowerspectrum`, too.
"""
if f is None and df is None:
raise ValueError('You need to specify at least one between f and '
'df')
elif f is not None:
df = f * self.df
# rebin cross spectrum to new resolution
binfreq, bincs, binerr, step_size = \
rebin_data(self.freq, self.power, df, self.power_err,
method=method, dx=self.df)
# make an empty cross spectrum object
# note: syntax deliberate to work with subclass Powerspectrum
bin_cs = copy.copy(self)
# store the binned periodogram in the new object
bin_cs.freq = binfreq
bin_cs.power = bincs
bin_cs.df = df
bin_cs.n = self.n
bin_cs.norm = self.norm
bin_cs.nphots1 = self.nphots1
bin_cs.power_err = binerr
if hasattr(self, 'unnorm_power'):
_, binpower_unnorm, _, _ = \
rebin_data(self.freq, self.unnorm_power, df,
method=method, dx=self.df)
bin_cs.unnorm_power = binpower_unnorm
if hasattr(self, 'cs_all'):
cs_all = []
for c in self.cs_all:
cs_all.append(c.rebin(df=df, f=f, method=method))
bin_cs.cs_all = cs_all
if hasattr(self, 'pds1'):
bin_cs.pds1 = self.pds1.rebin(df=df, f=f, method=method)
if hasattr(self, 'pds2'):
bin_cs.pds2 = self.pds2.rebin(df=df, f=f, method=method)
try:
bin_cs.nphots2 = self.nphots2
except AttributeError:
if self.type == 'powerspectrum':
pass
else:
raise AttributeError(
'Spectrum has no attribute named nphots2.')
bin_cs.m = np.rint(step_size * self.m)
return bin_cs
def _normalize_crossspectrum(self, unnorm_power, tseg):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
Returns
-------
        power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
if self.err_dist == 'poisson':
return normalize_crossspectrum(
unnorm_power, tseg, self.n, self.nphots1, self.nphots2, self.norm,
self.power_type)
return normalize_crossspectrum_gauss(
unnorm_power, np.sqrt(self.meancounts1 * self.meancounts2),
np.sqrt(self.var1 * self.var2),
dt=self.dt,
N=self.n,
norm=self.norm,
power_type=self.power_type)
def rebin_log(self, f=0.01):
"""
Logarithmic rebin of the periodogram.
The new frequency depends on the previous frequency
modified by a factor f:
.. math::
d\\nu_j = d\\nu_{j-1} (1+f)
Parameters
----------
f: float, optional, default ``0.01``
parameter that steers the frequency resolution
Returns
-------
new_spec : :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that called this method. For example, if this method is called
            from :class:`AveragedPowerspectrum`, it will return an object of class
            :class:`AveragedPowerspectrum`, too.
"""
binfreq, binpower, binpower_err, nsamples = \
rebin_data_log(self.freq, self.power, f,
y_err=self.power_err, dx=self.df)
# the frequency resolution
df = np.diff(binfreq)
# shift the lower bin edges to the middle of the bin and drop the
# last right bin edge
binfreq = binfreq[:-1] + df / 2
new_spec = copy.copy(self)
new_spec.freq = binfreq
new_spec.power = binpower
new_spec.power_err = binpower_err
new_spec.m = nsamples * self.m
if hasattr(self, 'unnorm_power'):
_, binpower_unnorm, _, _ = \
rebin_data_log(self.freq, self.unnorm_power, f, dx=self.df)
new_spec.unnorm_power = binpower_unnorm
if hasattr(self, 'pds1'):
new_spec.pds1 = self.pds1.rebin_log(f)
if hasattr(self, 'pds2'):
new_spec.pds2 = self.pds2.rebin_log(f)
if hasattr(self, 'cs_all'):
cs_all = []
for c in self.cs_all:
cs_all.append(c.rebin_log(f))
new_spec.cs_all = cs_all
return new_spec
def coherence(self):
""" Compute Coherence function of the cross spectrum.
Coherence is defined in Vaughan and Nowak, 1996 [#]_.
It is a Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Returns
-------
coh : numpy.ndarray
Coherence function
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
# this computes the averaged power spectrum, but using the
# cross spectrum code to avoid circular imports
return self.unnorm_power.real / (self.pds1.power.real *
self.pds2.power.real)
def _phase_lag(self):
"""Return the fourier phase lag of the cross spectrum."""
return np.angle(self.unnorm_power)
def time_lag(self):
"""
        Calculate the Fourier time lag of the cross spectrum. The time lag is
        calculated using the center of the frequency bins.
"""
if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:
ph_lag = self._phase_lag()
return ph_lag / (2 * np.pi * self.freq)
else:
raise AttributeError("Object has no attribute named 'time_lag' !")
def plot(self, labels=None, axis=None, title=None, marker='-', save=False,
filename=None):
"""
Plot the amplitude of the cross spectrum vs. the frequency using ``matplotlib``.
Parameters
----------
labels : iterable, default ``None``
            A list or tuple with ``xlabel`` and ``ylabel`` as strings.
axis : list, tuple, string, default ``None``
Parameter to set axis properties of the ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
            acceptable argument for the ``matplotlib.pyplot.axis()`` method.
title : str, default ``None``
The title of the plot.
marker : str, default '-'
Line style and color of the plot. Line styles and colors are
combined in a single format string, as in ``'bo'`` for blue
circles. See ``matplotlib.pyplot.plot`` for more options.
save : boolean, optional, default ``False``
If ``True``, save the figure with specified filename.
filename : str
File name of the image to save. Depends on the boolean ``save``.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
plt.figure('crossspectrum')
plt.plot(self.freq,
np.abs(self.power),
marker,
color='b',
label='Amplitude')
plt.plot(self.freq,
np.abs(self.power.real),
marker,
color='r',
alpha=0.5,
label='Real Part')
plt.plot(self.freq,
np.abs(self.power.imag),
marker,
color='g',
alpha=0.5,
label='Imaginary Part')
if labels is not None:
try:
plt.xlabel(labels[0])
plt.ylabel(labels[1])
except TypeError:
simon("``labels`` must be either a list or tuple with "
"x and y labels.")
raise
except IndexError:
simon("``labels`` must have two labels for x and y "
"axes.")
# Not raising here because in case of len(labels)==1, only
# x-axis will be labelled.
plt.legend(loc='best')
if axis is not None:
plt.axis(axis)
if title is not None:
plt.title(title)
if save:
if filename is None:
plt.savefig('spec.png')
else:
plt.savefig(filename)
else:
plt.show(block=False)
def classical_significances(self, threshold=1, trial_correction=False):
"""
Compute the classical significances for the powers in the power
spectrum, assuming an underlying noise distribution that follows a
        chi-square distribution with 2M degrees of freedom, where M is the
number of powers averaged in each bin.
Note that this function will *only* produce correct results when the
following underlying assumptions are fulfilled:
1. The power spectrum is Leahy-normalized
2. There is no source of variability in the data other than the
periodic signal to be determined with this method. This is important!
If there are other sources of (aperiodic) variability in the data, this
method will *not* produce correct results, but instead produce a large
number of spurious false positive detections!
3. There are no significant instrumental effects changing the
statistical distribution of the powers (e.g. pile-up or dead time)
By default, the method produces ``(index,p-values)`` for all powers in
the power spectrum, where index is the numerical index of the power in
        question. If a ``threshold`` is set, then only powers with p-values
        *below* that threshold are returned, together with their respective indices. If
``trial_correction`` is set to ``True``, then the threshold will be corrected
for the number of trials (frequencies) in the power spectrum before
being used.
Parameters
----------
threshold : float, optional, default ``1``
The threshold to be used when reporting p-values of potentially
significant powers. Must be between 0 and 1.
Default is ``1`` (all p-values will be reported).
trial_correction : bool, optional, default ``False``
A Boolean flag that sets whether the ``threshold`` will be corrected
by the number of frequencies before being applied. This decreases
the ``threshold`` (p-values need to be lower to count as significant).
Default is ``False`` (report all powers) though for any application
        where ``threshold`` is set to something meaningful, this should also
be applied!
Returns
-------
pvals : iterable
A list of ``(index, p-value)`` tuples for all powers that have p-values
lower than the threshold specified in ``threshold``.
"""
if not self.norm == "leahy":
raise ValueError("This method only works on "
"Leahy-normalized power spectra!")
if np.size(self.m) == 1:
# calculate p-values for all powers
            # note: the zeroth power, which mostly encodes the number of photons, is included here
pv = np.array([cospectra_pvalue(power, self.m)
for power in self.power])
else:
pv = np.array([cospectra_pvalue(power, m)
for power, m in zip(self.power, self.m)])
# if trial correction is used, then correct the threshold for
# the number of powers in the power spectrum
if trial_correction:
threshold /= self.power.shape[0]
        # the indices below refer directly to positions in the power array
indices = np.where(pv < threshold)[0]
pvals = np.vstack([pv[indices], indices])
return pvals
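# Editorial usage sketch (not part of the original module): building a
# Crossspectrum from two toy light curves and rebinning it, both linearly and
# logarithmically. All numbers are invented for illustration.
def _example_crossspectrum():
    times = np.arange(0, 50, 0.05)
    lc_a = Lightcurve(times, np.random.poisson(20, times.size), dt=0.05)
    lc_b = Lightcurve(times, np.random.poisson(20, times.size), dt=0.05)
    cs = Crossspectrum(lc_a, lc_b, norm='leahy')
    cs_lin = cs.rebin(df=0.5)      # linear rebin to 0.5 Hz resolution
    cs_log = cs.rebin_log(f=0.02)  # logarithmic rebin
    return cs_lin, cs_log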
class AveragedCrossspectrum(Crossspectrum):
"""
Make an averaged cross spectrum from a light curve by segmenting two
light curves, Fourier-transforming each segment and then averaging the
resulting cross spectra.
Parameters
----------
data1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
A light curve from which to compute the cross spectrum. In some cases, this would
be the light curve of the wavelength/energy/frequency band of interest.
data2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
A second light curve to use in the cross spectrum. In some cases, this would be
the wavelength/energy/frequency reference band to compare the band of interest with.
segment_size: float
The size of each segment to average. Note that if the total
duration of each :class:`Lightcurve` object in ``lc1`` or ``lc2`` is not an
integer multiple of the ``segment_size``, then any fraction left-over
        at the end of the time series will be lost, since keeping it would
        introduce artifacts.
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``
The normalization of the (real part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
dt : float
The time resolution of the light curve. Only needed when constructing
        light curves in the case where ``data1`` or ``data2`` are :class:`EventList` objects
power_type: string, optional, default ``real``
Parameter to choose among complete, real part and magnitude of
the cross spectrum.
silent : bool, default False
Do not show a progress bar when generating an averaged cross spectrum.
Useful for the batch execution of many spectra
lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data1``, but no
:class:`stingray.events.EventList` objects allowed
lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data2``, but no
:class:`stingray.events.EventList` objects allowed
fullspec: boolean, optional, default ``False``
If True, return the full array of frequencies, otherwise return just the
positive frequencies.
large_data : bool, default False
Use only for data larger than 10**7 data points!! Uses zarr and dask for computation.
save_all : bool, default False
Save all intermediate PDSs used for the final average. Use with care.
This is likely to fill up your RAM on medium-sized datasets, and to
slow down the computation when rebinning.
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra
power_err: numpy.ndarray
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
        binning, or averaging power spectra). Note that for a single
realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross spectra
n: int
The number of time bins per segment of light curve
nphots1: float
The total number of photons in the first (interest) light curve
nphots2: float
The total number of photons in the second (reference) light curve
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
They are calculated by taking the common GTI between the
two light curves
"""
def __init__(self, data1=None, data2=None, segment_size=None, norm='none',
gti=None, power_type="real", silent=False, lc1=None, lc2=None,
dt=None, fullspec=False, large_data=False, save_all=False):
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if segment_size is None and data1 is not None:
raise ValueError("segment_size must be specified")
if segment_size is not None and not np.isfinite(segment_size):
raise ValueError("segment_size must be finite!")
if large_data and data1 is not None and data2 is not None:
if isinstance(data1, EventList):
input_data = 'EventList'
elif isinstance(data1, Lightcurve):
input_data = 'Lightcurve'
chunks = int(np.rint(segment_size // data1.dt))
segment_size = chunks * data1.dt
else:
raise ValueError(
f'Invalid input data type: {type(data1).__name__}')
dir_path1 = saveData(data1, persist=False, chunks=chunks)
dir_path2 = saveData(data2, persist=False, chunks=chunks)
data_path1 = genDataPath(dir_path1)
data_path2 = genDataPath(dir_path2)
spec = createChunkedSpectra(input_data,
'AveragedCrossspectrum',
data_path=list(data_path1 +
data_path2),
segment_size=segment_size,
norm=norm,
gti=gti,
power_type=power_type,
silent=silent,
dt=dt)
for key, val in spec.__dict__.items():
setattr(self, key, val)
return
self.type = "crossspectrum"
self.segment_size = segment_size
self.power_type = power_type
self.fullspec = fullspec
self.show_progress = not silent
self.dt = dt
self.save_all = save_all
if isinstance(data1, EventList):
lengths = data1.gti[:, 1] - data1.gti[:, 0]
good = lengths >= segment_size
data1.gti = data1.gti[good]
data1 = list(data1.to_lc_list(dt))
if isinstance(data2, EventList):
lengths = data2.gti[:, 1] - data2.gti[:, 0]
good = lengths >= segment_size
data2.gti = data2.gti[good]
data2 = list(data2.to_lc_list(dt))
Crossspectrum.__init__(self, data1, data2, norm, gti=gti,
power_type=power_type, dt=dt, fullspec=fullspec)
return
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
is_event = isinstance(lc1, EventList)
is_lc = isinstance(lc1, Lightcurve)
is_lc_iter = isinstance(lc1, Iterator)
is_lc_list = isinstance(lc1, Iterable) and not is_lc_iter
# A way to say that this is actually not a power spectrum
if self.type != "powerspectrum" and \
(lc1 is not lc2) and (is_event or is_lc or is_lc_list):
self.pds1 = AveragedCrossspectrum(lc1, lc1,
segment_size=self.segment_size,
norm='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, fullspec=self.fullspec,
save_all=self.save_all)
self.pds2 = AveragedCrossspectrum(lc2, lc2,
segment_size=self.segment_size,
norm='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, fullspec=self.fullspec,
save_all=self.save_all)
def _make_segment_spectrum(self, lc1, lc2, segment_size, silent=False):
"""
Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for
each.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
segment_size : ``numpy.float``
Size of each light curve segment to use for averaging.
Other parameters
----------------
silent : bool, default False
Suppress progress bars
Returns
-------
        cs_all : list of :class:`Crossspectrum` objects
            A list of cross spectra calculated independently from each light curve segment
        nphots1_all, nphots2_all : ``numpy.ndarray`` for each of ``lc1`` and ``lc2``
Two lists containing the number of photons for all segments calculated from ``lc1`` and ``lc2``.
"""
assert isinstance(lc1, Lightcurve)
assert isinstance(lc2, Lightcurve)
if lc1.tseg != lc2.tseg:
simon("Lightcurves do not have same tseg. This means that the data"
"from the two channels are not completely in sync. This "
"might or might not be an issue. Keep an eye on it.")
# If dt differs slightly, its propagated error must not be more than
# 1/100th of the bin
if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise ValueError("Light curves do not have same time binning dt.")
# In case a small difference exists, ignore it
lc1.dt = lc2.dt
current_gtis = cross_two_gtis(lc1.gti, lc2.gti)
lc1.gti = lc2.gti = current_gtis
lc1.apply_gtis()
lc2.apply_gtis()
if self.gti is None:
self.gti = current_gtis
else:
if not np.allclose(self.gti, current_gtis):
self.gti = np.vstack([self.gti, current_gtis])
check_gtis(current_gtis)
cs_all = []
nphots1_all = []
nphots2_all = []
start_inds, end_inds = \
bin_intervals_from_gtis(current_gtis, segment_size, lc1.time,
dt=lc1.dt)
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any inconsistencies.")
local_show_progress = show_progress
if not self.show_progress or silent:
local_show_progress = lambda a: a
for start_ind, end_ind in \
local_show_progress(zip(start_inds, end_inds)):
time_1 = copy.deepcopy(lc1.time[start_ind:end_ind])
counts_1 = copy.deepcopy(lc1.counts[start_ind:end_ind])
counts_1_err = copy.deepcopy(lc1.counts_err[start_ind:end_ind])
time_2 = copy.deepcopy(lc2.time[start_ind:end_ind])
counts_2 = copy.deepcopy(lc2.counts[start_ind:end_ind])
counts_2_err = copy.deepcopy(lc2.counts_err[start_ind:end_ind])
if np.sum(counts_1) == 0 or np.sum(counts_2) == 0:
warnings.warn(
"No counts in interval {}--{}s".format(time_1[0],
time_1[-1]))
continue
gti1 = np.array([[time_1[0] - lc1.dt / 2,
time_1[-1] + lc1.dt / 2]])
gti2 = np.array([[time_2[0] - lc2.dt / 2,
time_2[-1] + lc2.dt / 2]])
lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
err_dist=lc1.err_dist,
gti=gti1,
dt=lc1.dt, skip_checks=True)
lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
err_dist=lc2.err_dist,
gti=gti2,
dt=lc2.dt, skip_checks=True)
with warnings.catch_warnings(record=True) as w:
cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm,
power_type=self.power_type, fullspec=self.fullspec)
cs_all.append(cs_seg)
nphots1_all.append(np.sum(lc1_seg.counts))
            nphots2_all.append(np.sum(lc2_seg.counts))
from __future__ import print_function
import emcee
from multiprocessing import Pool
import numpy as np
import corner
import matplotlib.pyplot as plt
import sys
import scipy.optimize as op
from rbvfit.rb_vfit import rb_veldiff as rb_veldiff
from rbvfit import rb_setline as rb
import pdb
def plot_model(wave_obs,fnorm,enorm,fit,model,outfile= False,xlim=[-600.,600.],verbose=False):
    # This model only works if there are no nuisance parameters
theta_prime=fit.best_theta
value1=fit.low_theta
value2=fit.high_theta
n_clump=model.nclump
n_clump_total=np.int(len(theta_prime)/3)
ntransition=model.ntransition
zabs=model.zabs
samples=fit.samples
model_mcmc=fit.model
wave_list=np.zeros( len(model.lambda_rest_original),)
# Use the input lambda rest list to plot correctly
for i in range(0,len(wave_list)):
s=rb.rb_setline(model.lambda_rest_original[i],'closest')
wave_list[i]=s['wave']
wave_rest=wave_obs/(1+zabs[0])
best_N = theta_prime[0:n_clump_total]
best_b = theta_prime[n_clump_total:2 * n_clump_total]
best_v = theta_prime[2 * n_clump_total:3 * n_clump_total]
low_N = value1[0:n_clump_total]
low_b = value1[n_clump_total:2 * n_clump_total]
low_v = value1[2 * n_clump_total:3 * n_clump_total]
high_N = value2[0:n_clump_total]
high_b = value2[n_clump_total:2 * n_clump_total]
high_v = value2[2 * n_clump_total:3 * n_clump_total]
#Now extracting individual fitted components
best_fit, f1 = model.model_fit(theta_prime, wave_obs)
fig, axs = plt.subplots(ntransition, sharex=True, sharey=False,figsize=(12,18 ),gridspec_kw={'hspace': 0})
BIGGER_SIZE = 18
plt.rc('font', size=BIGGER_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
index = np.random.randint(0, high=len(samples), size=100)
if ntransition == 1:
        # When there are no nuisance parameters
#Now loop through each transition and plot them in velocity space
vel=rb_veldiff(wave_list[0],wave_rest)
axs.step(vel, fnorm, 'k-', linewidth=1.)
axs.step(vel, enorm, color='r', linewidth=1.)
        # Plotting a random sample of outputs extracted from the posterior distribution
for ind in range(len(index)):
axs.plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
axs.set_ylim([0, 1.6])
axs.set_xlim(xlim)
axs.plot(vel, best_fit, color='b', linewidth=3)
axs.plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
# plot individual components
for dex in range(0,np.shape(f1)[1]):
axs.plot(vel, f1[:, dex], 'g:', linewidth=3)
for iclump in range(0,n_clump):
axs.plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
text1=r'$logN \;= '+ np.str('%.2f' % best_N[iclump]) +'^{ + ' + np.str('%.2f' % (best_N[iclump]-low_N[iclump]))+'}'+ '_{ -' + np.str('%.2f' % (high_N[iclump]-best_N[iclump]))+'}$'
axs.text(best_v[iclump],1.2,text1,
fontsize=14,rotation=90, rotation_mode='anchor')
            text2=r'$b ='+np.str('%.0f' % best_b[iclump]) +'^{ + ' + np.str('%.0f' % (best_b[iclump]-low_b[iclump]))+'}'+ '_{ -' + np.str('%.0f' % (high_b[iclump]-best_b[iclump]))+'}$'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 08:54:30 2020
@author: yuhanyao
"""
from copy import deepcopy
from helper import phys
import collections
import numpy as np
from helper.specread import gaplinelist
from helper.mcmcfit import mylinear_fit
from lmfit.models import LinearModel
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
def get_vvyy(dt4, wv, binning = 1):
v4 = (dt4['wave_rest'] - wv)/wv * phys.c /1e+5
y4 = dt4['spec_obs0']
if binning != 1:
yy6 = deepcopy(y4)
vv6 = deepcopy(v4)
rest = len(yy6)%binning
if rest!=0:
vv6 = vv6[:(-1)*rest]
yy6 = yy6[:(-1)*rest]
nnew = int(len(yy6) / binning)
yy6_new = yy6.reshape(nnew, binning)
yy6_new = np.sum(yy6_new, axis=1)
y4 = yy6_new / binning
vv6_new = vv6.reshape(nnew, binning)
vv6_new = np.sum(vv6_new, axis=1)
v4 = vv6_new / binning
yy4 = np.repeat(y4, 2, axis=0)
v4diff = np.diff(v4)
v4diff_left = np.hstack([v4diff[0], v4diff])
v4diff_right = np.hstack([v4diff, v4diff[-1]])
vv4 = np.repeat(v4, 2, axis=0)
vv4[::2] -= v4diff_left/2
vv4[1::2] += v4diff_right/2
return vv4, yy4
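# Editorial usage sketch (not from the original file): ``get_vvyy`` expects a
# table-like object with 'wave_rest' and 'spec_obs0' columns; a plain dict of
# invented arrays is enough to illustrate the call.
def _example_get_vvyy():
    dt_demo = {'wave_rest': np.linspace(6500., 6600., 200),
               'spec_obs0': np.random.normal(1.0, 0.05, 200)}
    return get_vvyy(dt_demo, wv=6563., binning=2)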
def add_tick(ax, wv, NIII_list, t1, t2):
if type(NIII_list) != np.float64:
vs = np.zeros(len(NIII_list))
for i in range(len(NIII_list)):
wvnew = NIII_list[i]
v = (wvnew - wv)/wv * phys.c /1e+5
vs[i] = v
ax.plot([v,v], [t1, t2], 'k-', linewidth = 0.8, color = "k")
#ax.plot([min(vs), max(vs)], [t2, t2], 'k-', linewidth = 0.8, color = "k")
else:
wvnew = NIII_list
v = (wvnew - wv)/wv * phys.c /1e+5
ax.plot([v,v], [t1, t2], 'k-', linewidth = 0.8, color = "k")
def plot_mask_gal_lines(ax2, wave, flux, plotfinal = False, returnfinal=False,
finalcolor= "k"):
yaolist = gaplinelist(z=0)
H_list = yaolist['H_list']
HeI_list = yaolist['HeI_list']
OIII_list = yaolist['OIII_list']
OII_list = yaolist['OII_list']
OI_list = yaolist['OI_list']
SIII_list = yaolist['SIII_list']
SII_list = yaolist['SII_list']
NII_list = yaolist['NII_list']
CaII_list = yaolist['CaII_list']
NeIII_list = yaolist['NeIII_list']
ArIII_list = yaolist["ArIII_list"]
color = "mistyrose"
ix_retain = np.ones(len(wave), dtype=bool)
# ok
for i in range(len(H_list)):
wv = H_list[i]
if i<3:
ix = abs(wave - wv) < 10
else:
ix = abs(wave - wv) < 12
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
for wv in OIII_list[1:]:
ix = abs(wave - wv) < 14
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
for wv in NII_list:
ix = abs(wave - wv) < 10
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
for wv in ArIII_list:
ix = abs(wave - wv) < 10
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
for i in range(len(SII_list)):
wv = SII_list[i]
if i==0:
ix = abs(wave - wv) < 14
else:
ix = abs(wave - wv) < 10
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
for wv in NeIII_list:
ix = abs(wave - wv) < 10
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
for wv in OII_list:
ix = abs(wave - wv) < 12
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
for i in range(len(SIII_list)):
wv = SIII_list[i]
if i==0:
ix = abs(wave - wv) < 6
else:
ix = abs(wave - wv) < 16
ix_retain = ix_retain & (~ix)
ax2.plot(wave[ix], flux[ix], color=color, zorder=5)
wave = wave[ix_retain]
flux = flux[ix_retain]
if plotfinal==True:
ax2.plot(wave, flux, color=finalcolor, zorder=6, linewidth = 0.9)
if returnfinal==True:
return wave, flux
def gaus(x, a, A, x0, sigma):
return a + A * np.exp(-(x-x0)**2/(2*sigma**2))
def parabola(x, a, A, x0):
return a + A * (x-x0)**2
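# Editorial sketch (not part of the original file): fitting the ``gaus`` model
# above to a synthetic Gaussian profile with ``curve_fit``; every value below
# is invented for illustration.
def _example_fit_gaus():
    x = np.linspace(-5000., 5000., 200)
    y = gaus(x, 0.0, 0.8, -1000., 800.) + np.random.normal(0., 0.02, x.size)
    popt, pcov = curve_fit(gaus, x, y, p0=[0., 0.5, -500., 1000.])
    return popt, np.sqrt(np.diag(pcov))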
###### measure absorption minimum velocity
def measure_abs_velocity(wave,
flux,
line_info = None,
sigma_guess = 2000,
line_center = -6500,
line_bound_width = 1000,
plotfig=False):
if line_info == None:
# He I 5875
line_info = {'line_shoulder_left': (-12600, -9800),
'line_shoulder_right': (-1300, 1800),
'line_fit': (-8000, -3500)}
line_shoulder_left = line_info['line_shoulder_left']
line_shoulder_right = line_info['line_shoulder_right']
line_range = (line_shoulder_left[1], line_shoulder_right[0])
line_fit = line_info["line_fit"]
ind_shoulder = np.any([
np.all([wave > line_shoulder_left[0],
wave < line_shoulder_left[1]], axis=0),
np.all([wave > line_shoulder_right[0],
wave < line_shoulder_right[1]], axis=0)], axis=0)
wave_shoulder = wave[ind_shoulder]
flux_shoulder = flux[ind_shoulder]
ind_range = np.logical_and(wave > line_range[0], wave < line_range[1])
wave_range = wave[ind_range]
flux_range = flux[ind_range]
ind_fit = np.logical_and(wave > line_fit[0], wave < line_fit[1])
wave_fit = wave[ind_fit]
flux_fit = flux[ind_fit]
mod_linear = LinearModel(prefix='mod_linear_')
par_linear = mod_linear.guess(flux_shoulder, x=wave_shoulder)
out_linear = mod_linear.fit(flux_shoulder,
par_linear,
x=wave_shoulder,
method='leastsq')
cont_shoulder = out_linear.best_fit
noise_std = np.std(flux_shoulder / cont_shoulder)
cont_range = mod_linear.eval(out_linear.params, x=wave_range)
cont_fit = mod_linear.eval(out_linear.params, x=wave_fit)
norm_fit = (flux_fit / cont_fit-1.)*(-1)
a_fixed = 0.
a_width = 0.05
A_guess = max(norm_fit) - a_fixed
bounds = ((a_fixed-a_width, 0.2*A_guess, line_center-line_bound_width*2, sigma_guess/5),
(a_fixed+a_width, 5*A_guess, line_center+line_bound_width*2, sigma_guess*5))
popt1, pcov1 = curve_fit(gaus, wave_fit, norm_fit,
p0=[a_fixed, A_guess, line_center, sigma_guess],
bounds=bounds)
print ("line width = %.2f +- %.2f km/s"%(popt1[-1], np.sqrt(pcov1[-1,-1])))
print ("line center = %.2f +- %.2f km/s"%(popt1[2], np.sqrt(pcov1[2,2])))
line_center = popt1[2]
new_width = popt1[-1] * 4 # four times the sigma
wvnew = np.linspace(line_center-new_width, line_center+new_width, 300)
flnew = gaus(wvnew, *popt1)
if plotfig == True:
plt.figure(figsize = (6,6))
ax1 = plt.subplot(211)
ax1.plot(wave_shoulder, flux_shoulder, 'b-')
ax1.plot(wave_range, cont_range, 'g-')
ax1.plot(wave_range, flux_range, 'r-', alpha = 0.2)
ax1.plot(wave_fit, flux_fit, 'r-')
ax2 = plt.subplot(212)
ax2.plot(wave_fit, norm_fit, 'k-')
ax2.plot(wvnew, flnew)
a_fixed = min(flux_fit)
A_guess = (max(flux_fit) - min(flux_fit)) / 2000**2
bounds = ((a_fixed-a_width, 0.2*A_guess, line_center-line_bound_width*2),
(a_fixed+a_width, 5*A_guess, line_center+line_bound_width*2))
popt1, pcov1 = curve_fit(parabola, wave_fit, flux_fit,
p0=[a_fixed, A_guess, line_center],
bounds=bounds)
print ("line center = %.2f +- %.2f km/s"%(popt1[2], np.sqrt(pcov1[2,2])))
line_center = popt1[2]
###### measure the equivalent width
def measure_line_index(wave,
flux,
flux_err =None,
line_info=None,
num_refit_=100,
plotfig=False):
if line_info == None:
# He II 4686
line_info = {'line_range': (4666, 4706),
'line_shoulder_left': (4545, 4620),
'line_shoulder_right': (4726, 4800)}
try:
# 0. do some input check
# 0.1> check line_info
line_info_keys = line_info.keys()
assert 'line_range' in line_info_keys
assert 'line_shoulder_left' in line_info_keys
assert 'line_shoulder_right' in line_info_keys
# 0.2> check line range/shoulder in spectral range
assert np.min(wave) <= line_info['line_shoulder_left'][0]
assert np.max(wave) >= line_info['line_shoulder_right'][0]
# 1. get line information
# line_center = line_info['line_center'] # not used
line_range = line_info['line_range']
line_shoulder_left = line_info['line_shoulder_left']
line_shoulder_right = line_info['line_shoulder_right']
# 2. data preparation
wave = np.array(wave)
flux = np.array(flux)
        if flux_err is None:
flux_err = np.ones(wave.shape)
# 3. estimate the local continuum
# 3.1> shoulder wavelength range
ind_shoulder = np.any([
np.all([wave > line_shoulder_left[0],
wave < line_shoulder_left[1]], axis=0),
np.all([wave > line_shoulder_right[0],
wave < line_shoulder_right[1]], axis=0)], axis=0)
wave_shoulder = wave[ind_shoulder]
flux_shoulder = flux[ind_shoulder]
flux_err_shoulder = flux_err[ind_shoulder]
# 3.2> integrated/fitted wavelength range
ind_range = np.logical_and(wave > line_range[0], wave < line_range[1])
wave_range = wave[ind_range]
flux_range = flux[ind_range]
# flux_err_range = flux_err[ind_range] # not used
# mask_shoulder = mask[ind_shoulder] # not used
# 4. linear model
mod_linear = LinearModel(prefix='mod_linear_')
par_linear = mod_linear.guess(flux_shoulder, x=wave_shoulder)
# ############################################# #
# to see the parameter names: #
# model_linear.param_names #
# {'linear_fun_intercept', 'linear_fun_slope'} #
# ############################################# #
out_linear = mod_linear.fit(flux_shoulder,
par_linear,
x=wave_shoulder,
method='leastsq')
# 5. estimate continuum
cont_shoulder = out_linear.best_fit
noise_std = np.std(flux_shoulder / cont_shoulder)
cont_range = mod_linear.eval(out_linear.params, x=wave_range)
resi_range = 1 - flux_range / cont_range
if plotfig==True:
plt.figure(figsize = (6,4))
            ix = (wave > line_shoulder_left[0]) & (wave < line_shoulder_right[1])
plt.plot(wave[ix], flux[ix], color="k", alpha=0.1)
plt.plot(wave_shoulder, flux_shoulder, 'b-')
plt.plot(wave_range, cont_range, 'g-')
plt.plot(wave_range, flux_range, 'r-')
        # 6.1 Integrated EW
# estimate EW_int
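        # Equivalent width: EW = integral of (1 - F/F_c) over wavelength.
        # Below, the residual (1 - flux/continuum) computed above is dotted
        # with a per-pixel wavelength step, taken as the mean of the forward
        # and backward differences of the wavelength grid.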
wave_diff = np.diff(wave_range)
wave_step = np.mean(np.vstack([np.hstack([wave_diff[0], wave_diff]),
np.hstack([wave_diff, wave_diff[-1]])]),
axis=0)
EW_int = np.dot(resi_range, wave_step)
# estimate EW_int_err
if num_refit_ is not None and num_refit_>0:
EW_int_err = np.std(np.dot(
(resi_range.reshape(1, -1).repeat(num_refit_, axis=0) +
| np.random.randn(num_refit_, resi_range.size) | numpy.random.randn |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import numpy as np
import random
import unittest
import logging
import warnings
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass
from paddle.fluid.dygraph.container import Sequential
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU
from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D
from paddle.fluid.dygraph.nn import Pool2D
from paddle.fluid.log_helper import get_logger
from paddle.fluid.dygraph import nn
paddle.enable_static()
os.environ["CPU_NUM"] = "1"
if core.is_compiled_with_cuda():
fluid.set_flags({"FLAGS_cudnn_deterministic": True})
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
def get_vaild_warning_num(warning, w):
num = 0
for i in range(len(w)):
if warning in str(w[i].message):
num += 1
return num
def StaticLenet(data, num_classes=10, classifier_activation='softmax'):
conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
conv1 = fluid.layers.conv2d(
data,
num_filters=6,
filter_size=3,
stride=1,
padding=1,
param_attr=conv2d_w1_attr,
bias_attr=False)
batch_norm1 = layers.batch_norm(conv1)
relu1 = layers.relu(batch_norm1)
pool1 = fluid.layers.pool2d(
relu1, pool_size=2, pool_type='max', pool_stride=2)
conv2 = fluid.layers.conv2d(
pool1,
num_filters=16,
filter_size=5,
stride=1,
padding=0,
param_attr=conv2d_w2_attr,
bias_attr=conv2d_b2_attr)
batch_norm2 = layers.batch_norm(conv2)
prelu1 = layers.prelu(batch_norm2, mode='all')
pool2 = fluid.layers.pool2d(
prelu1, pool_size=2, pool_type='max', pool_stride=2)
fc1 = fluid.layers.fc(input=pool2,
size=120,
param_attr=fc_w1_attr,
bias_attr=fc_b1_attr)
leaky_relu1 = layers.leaky_relu(fc1, alpha=0.01)
fc2 = fluid.layers.fc(input=leaky_relu1,
size=84,
param_attr=fc_w2_attr,
bias_attr=fc_b2_attr)
sigmoid1 = layers.sigmoid(fc2)
fc3 = fluid.layers.fc(input=sigmoid1,
size=num_classes,
param_attr=fc_w3_attr,
bias_attr=fc_b3_attr)
softmax1 = layers.softmax(fc3, use_cudnn=True)
return softmax1
class ImperativeLenet(fluid.dygraph.Layer):
def __init__(self, num_classes=10):
super(ImperativeLenet, self).__init__()
conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
self.features = Sequential(
Conv2D(
in_channels=1,
out_channels=6,
kernel_size=3,
stride=1,
padding=1,
weight_attr=conv2d_w1_attr,
bias_attr=False),
BatchNorm2D(6),
ReLU(),
Pool2D(
pool_size=2, pool_type='max', pool_stride=2),
Conv2D(
in_channels=6,
out_channels=16,
kernel_size=5,
stride=1,
padding=0,
weight_attr=conv2d_w2_attr,
bias_attr=conv2d_b2_attr),
BatchNorm2D(16),
PReLU(),
MaxPool2D(
kernel_size=2, stride=2))
self.fc = Sequential(
Linear(
in_features=400,
out_features=120,
weight_attr=fc_w1_attr,
bias_attr=fc_b1_attr),
LeakyReLU(),
Linear(
in_features=120,
out_features=84,
weight_attr=fc_w2_attr,
bias_attr=fc_b2_attr),
Sigmoid(),
Linear(
in_features=84,
out_features=num_classes,
weight_attr=fc_w3_attr,
bias_attr=fc_b3_attr),
Softmax())
def forward(self, inputs):
x = self.features(inputs)
x = fluid.layers.flatten(x, 1)
x = self.fc(x)
return x
class TestImperativeOutSclae(unittest.TestCase):
def test_out_scale_acc(self):
def _build_static_lenet(main, startup, is_test=False, seed=1000):
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
main.random_seed = seed
startup.random_seed = seed
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
prediction = StaticLenet(img)
if not is_test:
loss = fluid.layers.cross_entropy(
input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
else:
avg_loss = prediction
return img, label, avg_loss
reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=32, drop_last=True)
weight_quantize_type = 'abs_max'
activation_quantize_type = 'moving_average_abs_max'
param_init_map = {}
seed = 1000
lr = 0.001
dynamic_out_scale_list = []
static_out_scale_list = []
# imperative train
_logger.info(
"--------------------------dynamic graph qat--------------------------"
)
imperative_out_scale = ImperativeQuantAware(
weight_quantize_type=weight_quantize_type,
activation_quantize_type=activation_quantize_type)
with fluid.dygraph.guard():
np.random.seed(seed)
fluid.default_main_program().random_seed = seed
fluid.default_startup_program().random_seed = seed
lenet = ImperativeLenet()
fixed_state = {}
for name, param in lenet.named_parameters():
p_shape = param.numpy().shape
p_value = param.numpy()
if name.endswith("bias"):
value = np.zeros_like(p_value).astype('float32')
else:
value = np.random.normal(
loc=0.0, scale=0.01, size=np.product(p_shape)).reshape(
p_shape).astype('float32')
fixed_state[name] = value
param_init_map[param.name] = value
lenet.set_dict(fixed_state)
imperative_out_scale.quantize(lenet)
adam = AdamOptimizer(
learning_rate=lr, parameter_list=lenet.parameters())
dynamic_loss_rec = []
lenet.train()
for batch_id, data in enumerate(reader()):
x_data = np.array([x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(-1, 1)
img = fluid.dygraph.to_variable(x_data)
label = fluid.dygraph.to_variable(y_data)
out = lenet(img)
loss = fluid.layers.cross_entropy(out, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
lenet.clear_gradients()
dynamic_loss_rec.append(avg_loss.numpy()[0])
if batch_id % 100 == 0:
_logger.info('{}: {}'.format('loss', avg_loss.numpy()))
lenet.eval()
param_save_path = "test_save_quantized_model/lenet.pdparams"
save_dict = lenet.state_dict()
paddle.save(save_dict, param_save_path)
path = "./dynamic_outscale_infer_model/lenet"
dynamic_save_dir = "./dynamic_outscale_infer_model"
imperative_out_scale.save_quantized_model(
layer=lenet,
path=path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 1, 28, 28], dtype='float32')
])
_logger.info(
"--------------------------static graph qat--------------------------"
)
static_loss_rec = []
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
main = fluid.Program()
infer = fluid.Program()
startup = fluid.Program()
static_img, static_label, static_loss = _build_static_lenet(
main, startup, False, seed)
infer_img, _, infer_pre = _build_static_lenet(infer, startup, True,
seed)
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
opt = AdamOptimizer(learning_rate=lr)
opt.minimize(static_loss)
scope = core.Scope()
with fluid.scope_guard(scope):
exe.run(startup)
for param in main.all_parameters():
if "batch_norm" in param.name:
param_name = param.name.replace("norm", "norm2d")
elif 'prelu' in param.name:
param_name = param.name.replace("prelu", 'p_re_lu')
else:
param_name = param.name
param_tensor = scope.var(param.name).get_tensor()
param_tensor.set(param_init_map[param_name], place)
main_graph = IrGraph(core.Graph(main.desc), for_test=False)
infer_graph = IrGraph(core.Graph(infer.desc), for_test=True)
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
activation_quantize_type=activation_quantize_type,
weight_quantize_type=weight_quantize_type,
quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'])
transform_pass.apply(main_graph)
transform_pass.apply(infer_graph)
outscale_pass = OutScaleForTrainingPass(scope=scope, place=place)
outscale_pass.apply(main_graph)
build_strategy = fluid.BuildStrategy()
build_strategy.fuse_all_reduce_ops = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=static_loss.name, build_strategy=build_strategy)
feeder = fluid.DataFeeder(
feed_list=[static_img, static_label], place=place)
with fluid.scope_guard(scope):
for batch_id, data in enumerate(reader()):
loss_v, = exe.run(binary,
feed=feeder.feed(data),
fetch_list=[static_loss])
static_loss_rec.append(loss_v[0])
if batch_id % 100 == 0:
_logger.info('{}: {}'.format('loss', loss_v))
scale_inference_pass = OutScaleForInferencePass(scope=scope)
scale_inference_pass.apply(infer_graph)
save_program = infer_graph.to_program()
static_save_dir = "./static_outscale_infer_model"
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
dirname=static_save_dir,
feeded_var_names=[infer_img.name],
target_vars=[infer_pre],
executor=exe,
main_program=save_program,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX)
rtol = 1e-05
atol = 1e-08
for i, (loss_d,
loss_s) in enumerate(zip(dynamic_loss_rec, static_loss_rec)):
diff = np.abs(loss_d - loss_s)
if diff > (atol + rtol * np.abs(loss_s)):
_logger.info(
"diff({}) at {}, dynamic loss = {}, static loss = {}".
format(diff, i, loss_d, loss_s))
break
self.assertTrue(
np.allclose(
np.array(dynamic_loss_rec),
| np.array(static_loss_rec) | numpy.array |
# -*- coding: utf-8 -*-
# This program is licenced under an MIT license. Full licence is at the end of
# this file.
"""
Hydrogenic.py
A module for representing hydrogenic electron orbitals.
Angular and radial functions are accessible separately, as well as radius
which encloses 90 percent of the electron density integrating along radial
lines.
Note that to be compatible with scipy.special.sph_harm, spherical coordinates
are designated according to the physics/chemistry convention rather than the
mathematics convention. i.e. theta is the angle of the projection on the xy
plane and takes values from 0 to 2*pi. phi is the angle with the z axis and
takes values from 0 to pi.
@author: <NAME>
"""
from __future__ import division
from scipy.special import sph_harm as sh
import numpy as np
from numpy import cos, sin, sqrt, pi, exp
sqrt2 = sqrt(2)
sqrt3 = sqrt(3)
sqrt5 = sqrt(5)
sqrt6 = sqrt(6)
sqrt7 = sqrt(7)
sqrt15 = sqrt(15)
sqrt21 = sqrt(21)
sqrt24 = sqrt(24)
sqrt27 = sqrt(27)
sqrt30 = sqrt(30)
sqrt35 = sqrt(35)
sqrt105 = sqrt(105)
sqrtpi = sqrt(pi)
d_dict = {'z^2': lambda theta, phi: sqrt5/sqrtpi/4*(3*cos(phi)*cos(phi)-1),
'xz': lambda theta, phi: sqrt15/sqrtpi/2*cos(theta)*sin(phi)*cos(phi),
'yz': lambda theta, phi: sqrt15/sqrtpi/2*sin(theta)*sin(phi)*cos(phi),
'x^2-y^2': lambda theta, phi: sqrt15/sqrtpi/4*cos(2*theta)*sin(phi)*sin(phi),
'xy': lambda theta, phi: sqrt15/sqrtpi/4*sin(2*theta)*sin(phi)*sin(phi),
2: lambda theta, phi: sqrt15/sqrt2/sqrtpi/4*exp(2j*theta)*sin(phi)*sin(phi),
1: lambda theta, phi: -sqrt15/sqrt2/sqrtpi/2*exp(1j*theta)*sin(phi)*cos(phi),
0: lambda theta, phi: sqrt5/sqrtpi/4*(3*cos(phi)*cos(phi)-1),
-1: lambda theta, phi: sqrt15/sqrt2/sqrtpi/2*exp(-1j*theta)* | sin(phi) | numpy.sin |
'''
If you find this useful, please give a thumbs up!
Thanks!
- Claire & Alhan
https://github.com/alhankeser/kaggle-petfinder
'''
# External libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split
from mlxtend.feature_selection import SequentialFeatureSelector as sfs
from sklearn.metrics import make_scorer
# from sklearn.metrics import accuracy_score
# from sklearn.metrics import confusion_matrix
import scipy.stats as stats
import math
import time
import traceback
import warnings
import os
import zipfile
import shutil
import sys
import json
# Options
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 200)
warnings.filterwarnings(action="ignore")
class Explore:
def get_dtype(cls, include_type=[], exclude_type=[]):
df = cls.get_df('train')
df.drop(columns=[cls.target_col], inplace=True)
return df.select_dtypes(include=include_type, exclude=exclude_type)
def get_non_numeric(cls):
return cls.get_dtype(exclude_type=['float64', 'int', 'float32'])
def get_numeric(cls):
return cls.get_dtype(exclude_type=['object', 'category'])
def get_categorical(cls, as_df=False):
return cls.get_dtype(include_type=['object'])
def get_correlations(cls, method='spearman'):
df = cls.get_df('train')
corr_mat = df.corr(method=method)
corr_mat.sort_values(cls.target_col, inplace=True)
corr_mat.drop(cls.target_col, inplace=True)
return corr_mat[[cls.target_col]]
def get_skewed_features(cls, df, features, skew_threshold=0.4):
feat_skew = pd.DataFrame(
{'skew': df[features].apply(lambda x: stats.skew(x))})
skewed = feat_skew[abs(feat_skew['skew']) > skew_threshold].index
return skewed.values
def show_boxplot(cls, x, y, **kwargs):
sns.boxplot(x=x, y=y)
x = plt.xticks(rotation=90)
def plot_categorical(cls, df, cols):
target = cls.target_col
categorical = pd.melt(df, id_vars=[target],
value_vars=cols)
grouped = categorical.groupby(['value', 'variable'],
as_index=False)[target]\
.mean().rename(columns={target: target + '_Mean'})
categorical = pd.merge(categorical, grouped, how='left',
on=['variable', 'value'])\
.sort_values(target + '_Mean')
facet_grid = sns.FacetGrid(categorical, col="variable",
col_wrap=3, size=5,
sharex=False, sharey=False,)
facet_grid = facet_grid.map(cls.show_boxplot, "value", target)
plt.savefig('boxplots.png')
class Clean:
def sample_ros(cls, df):
if df.name == 'train':
X = df.drop(cls.target_col, axis=1)
y = df[cls.target_col]
ros = RandomOverSampler(sampling_strategy='minority',
random_state=1)
X_ros, y_ros = ros.fit_sample(X, y)
df = pd.DataFrame(list(X_ros),
columns=df.drop(cls.target_col, axis=1)
.columns)
df[cls.target_col] = list(y_ros)
return df
def sample(cls, df, target_val_sets):
if df.name == 'train':
for target_val_set in target_val_sets:
df_class_0 = df[df[cls.target_col] == target_val_set[0]]
count_1 = df[cls.target_col].value_counts()[target_val_set[1]]
df_class_0_sampled = df_class_0.sample(count_1,
replace='True',
random_state=1)
df = pd.merge(df.drop(df_class_0.index),
df_class_0_sampled, how='outer')
return df
def keep_only_keep(cls, df):
to_drop = set(df.columns.values) - set(cls.keep)
if df.name == 'train':
to_drop = to_drop - set([cls.target_col])
to_drop = list(to_drop)
df.drop(to_drop, axis=1, inplace=True)
return df
def remove_outliers(cls, df):
if df.name == 'train':
# GrLivArea (1299 & 524)
# df.drop(df[(df['GrLivArea'] > 4000) &
# (df[cls.target_col] < 300000)].index,
# inplace=True)
pass
return df
def fill_by_type(cls, x, col):
if pd.isna(x):
if col.dtype == 'object':
return 0
return 0
return x
def fill_na(cls, df):
for col in df.columns:
df[col] = df[col].apply(lambda x: cls.fill_by_type(x, df[col]))
return df
def get_encoding_lookup(cls, cols):
df = cls.get_df('train')
target = cls.target_col
suffix = '_E'
result = pd.DataFrame()
for cat_feat in cols:
cat_feat_target = df[[cat_feat, target]].groupby(cat_feat)
cat_feat_encoded_name = cat_feat + suffix
order = pd.DataFrame()
order['val'] = df[cat_feat].unique()
order.index = order.val
order.drop(columns=['val'], inplace=True)
order[target + '_mean'] = cat_feat_target[[target]].median()
order['feature'] = cat_feat
order['encoded_name'] = cat_feat_encoded_name
order = order.sort_values(target + '_mean')
order['num_val'] = range(1, len(order)+1)
result = result.append(order)
result.reset_index(inplace=True)
return result
def get_scaled_categorical(cls, encoding_lookup):
scaled = encoding_lookup.copy()
target = cls.target_col
for feature in scaled['feature'].unique():
values = scaled[scaled['feature'] == feature]['num_val'].values
medians = scaled[
scaled['feature'] == feature][target + '_mean'].values
for median in medians:
scaled_value = ((values.min() + 1) *
(median / medians.min()))-1
scaled.loc[(scaled['feature'] == feature) &
(scaled[target + '_mean'] == median),
'num_val'] = scaled_value
return scaled
def encode_with_lookup(cls, df, encoding_lookup):
for encoded_index, encoded_row in encoding_lookup.iterrows():
feature = encoded_row['feature']
encoded_name = encoded_row['encoded_name']
value = encoded_row['val']
encoded_value = encoded_row['num_val']
df.loc[df[feature] == value, encoded_name] = encoded_value
return df
def encode_onehot(cls, df, cols):
df = pd.concat([df, pd.get_dummies(df[cols], drop_first=True)], axis=1)
return df
def encode_categorical(cls, df, cols=[], method='one_hot'):
if len(cols) == 0:
cols = cls.get_categorical().columns.values
if method == 'target_mean':
encoding_lookup = cls.get_encoding_lookup(cols)
encoding_lookup = cls.get_scaled_categorical(encoding_lookup)
df = cls.encode_with_lookup(df, encoding_lookup)
if method == 'one_hot':
if len(set(cols) - set(cls.get_dtype(include_type=['object'])
.columns.values)) > 0:
for col in cols:
df[col] = df[col].apply(lambda x: str(x))
df = cls.encode_onehot(df, cols)
df.drop(cols, axis=1, inplace=True)
return df
def fix_zero_infinity(cls, x):
if (x == 0) or math.isinf(x):
return 0
return x
def normalize_features(cls, df, cols=[]):
if len(cols) == 0:
cols = cls.get_numeric().columns.values
for col in cols:
if col in df.columns:
df[col] = df[col].apply(lambda x:
np.log1p(x).astype('float64'))
df[col] = df[col].apply(lambda x: cls.fix_zero_infinity(x))
return df
def scale_quant_features(cls, df, cols):
scaler = StandardScaler()
scaler.fit(df[cols])
scaled = scaler.transform(df[cols])
for i, col in enumerate(cols):
df[col] = scaled[:, i]
return df
def drop_ignore(cls, df):
for col in cls.ignore:
try:
df.drop(col, axis=1, inplace=True)
except Exception:
pass
return df
def drop_low_corr(cls, df, threshold=0.12):
to_drop = pd.DataFrame(columns=['drop'])
corr_mat = cls.get_correlations()
target = cls.target_col
to_drop['drop'] = corr_mat[(abs(corr_mat[target]) <= threshold)].index
df.drop(to_drop['drop'], axis=1, inplace=True)
return df
class Engineer:
def get_image_data(cls, json_path):
image_data = False
if os.path.isfile(json_path):
with open(json_path) as f:
try:
image_data = pd.DataFrame(
json.load(f)['labelAnnotations'])
except Exception:
pass
return image_data
def calculate_photo_scores(cls, df, x, match='exact',
start=1, stop=2):
try:
pet_id = x
pet_type = df[df['PetID'] == pet_id]['Type'].values[0]
pet_type_dict = {1: 'dog', 2: 'cat'}
pet_type = pet_type_dict[pet_type]
scores = []
score = 0
i = start
while (i > 0) & (i < stop):
json_path = path + '/input/train_metadata/'\
+ pet_id + '-' + str(i) + '.json'
image_data = cls.get_image_data(json_path)
try:
if match == 'exact':
scores.append(
image_data[image_data['description'] ==
pet_type]['score'].values[0])
except Exception:
scores.append(.0)
break
i += 1
try:
score = np.array(scores)
except Exception:
pass
except Exception:
print('########## calculate_photo_scores')
print(pet_id)
return score
def rate_first_photo(cls, x):
try:
score = x['AllPhotoScores'][0]
except Exception:
return 'Not Great'
pet_type = x['Type']
if pet_type == 1:
good_threshold = 0.96
if pet_type == 2:
good_threshold = 0.99
if score > good_threshold:
return 'Good'
if (score < good_threshold) & (score > .5):
return 'Okay'
return 'Not Great'
def rate_secondary_good_photos(cls, x):
count = 0
pet_type = x['Type']
scores = x['AllPhotoScores']
if pet_type == 1:
good_threshold = 0.96
if pet_type == 2:
good_threshold = 0.99
try:
scores = scores[1:]
count = len(scores[scores > good_threshold])
except Exception:
pass
if count > 2:
return 'Good'
if count > 0:
return 'Okay'
return 'Not Great'
def get_photo_scores(cls, df):
try:
df['AllPhotoScores'] = df['PetID']\
.apply(lambda x:
cls.calculate_photo_scores(df,
x, match='exact',
start=1, stop=99))
df['FirstPhotoScore'] = df[['Type', 'AllPhotoScores']]\
.apply(lambda x: cls.rate_first_photo(x), axis=1)
df['SecondaryPhotoScore'] = df[['AllPhotoScores', 'Type']]\
.apply(lambda x: cls.rate_secondary_good_photos(x), axis=1)
except Exception:
print('########## get_photo_scores')
print(df.head())
return df
def get_top_rescuers(cls, x, top_rescuers):
if x in top_rescuers:
return x
return False
def rescuer(cls, df):
top_rescuers = list(df['RescuerID'].value_counts().index[:5])
df['Big_Rescuer'] = df['RescuerID']\
.apply(lambda x: cls.get_top_rescuers(x, top_rescuers))
return df
def fee(cls, df):
df.loc[df['Fee'] > 0, 'Has_Fee'] = True
df.loc[df['Fee'] == 0, 'Has_Fee'] = False
return df
def photo(cls, df):
df.loc[df['PhotoAmt'] > 1, 'Has_2Photos'] = True
df.loc[df['PhotoAmt'] < 2, 'Has_2Photos'] = False
# df.loc[df['VideoAmt'] > 0, 'Has_Video'] = True
# df.loc[df['VideoAmt'] == 0, 'Has_Video'] = False
return df
def simplify_name_length(cls, x):
length = len(str(x))
if length < 3:
return 'short'
# if length < 20:
# return 'medium'
# if length > 19:
# return 'long'
return 'long'
def name_length(cls, df):
df['NameLength'] = df['Name']\
.apply(lambda x: cls.simplify_name_length(x))
return df
def get_name_groups(cls, df):
names = {}
names_by_count = df[df['Type'] == 1]['Name']\
.value_counts().index.tolist()
top5 = [a.lower() for a in names_by_count[:5]]
top30 = [a.lower() for a in names_by_count[:30]]
rest = [a.lower() for a in names_by_count[:]]
names['dog'] = {
'top5': top5,
'top30': top30,
'rest': rest
}
names_by_count = df[df['Type'] == 2]['Name']\
.value_counts().index.tolist()
top5 = [a.lower() for a in names_by_count[:5]]
top30 = [a.lower() for a in names_by_count[:30]]
rest = [a.lower() for a in names_by_count[:]]
names['cat'] = {
'top5': top5,
'top30': top30,
'rest': rest
}
return names
def simplify_names(cls, x, names):
x = str(x)
x = x.lower()
if 'nan' in x:
return 'NAN'
if x in names['top5']:
return 'top5'
# if x in names['top30']:
# return 'top30'
# if '&' in x:
# return 'and'
if x in names['rest']:
return 'rest'
def names(cls, df):
names = cls.get_name_groups(df)
df.loc[df['Type'] == 1, 'NameGroup'] = df[df['Type'] == 1]['Name']\
.apply(lambda x: cls.simplify_names(x, names['dog']))
df.loc[df['Type'] == 2, 'NameGroup'] = df[df['Type'] == 2]['Name']\
.apply(lambda x: cls.simplify_names(x, names['cat']))
return df
def color(cls, df):
df.loc[(df['Color3'] > 0) | (df['Color2'] > 0),
'Mixed_Color'] = True
df.loc[(df['Color3'] == 0) | (df['Color2'] == 0),
'Mixed_Color'] = False
return df
def simplify_quantity(cls, df):
bins = (0, 1, 10, 100)
group_names = ['solo', 'litter', 'herd']
categories = pd.cut(df['Quantity'], bins, labels=group_names)
return categories
def quantity(cls, df):
df.loc[df['Quantity'] == 0, 'Is_Solo'] = True
df.loc[df['Quantity'] > 0, 'Is_Solo'] = False
return df
def gender(cls, df):
df.loc[(df['Gender'] == 3) &
(df['Quantity'] == 2), 'Gender'] = 1.5
df.loc[(df['Gender'] == 3) &
(df['Quantity'] > 2), 'Gender'] = 0
return df
def breed(cls, df):
# df.loc[df['Breed2'] > 0, 'Mixed_Breed'] = True
# df.loc[df['Breed2'] == 0, 'Mixed_Breed'] = False
df.loc[df['Breed1'] == 307, 'Mixed_Breed'] = True
df.loc[df['Breed1'] != 307, 'Mixed_Breed'] = False
return df
def numerize_features(cls, df, cols):
train, test = cls.get_dfs()
df_combined = pd.concat([train[cols], test[cols]])
train.drop(cls.target_col, axis=1, inplace=True)
for feature in cols:
le = LabelEncoder()
df_combined[feature] = df_combined[feature].apply(lambda x: str(x))
df[feature] = df[feature].apply(lambda x: str(x))
le = le.fit(df_combined[feature])
df[feature] = le.transform(df[feature])
return df
def simplify_ages(cls, df, animal):
if animal == 'dog':
bins = (-1, 0, 2, 256)
group_names = ['baby', 'child', 'adult']
categories = pd.cut(df[df['Type'] == 1]['Age'], bins,
labels=group_names)
if animal == 'cat':
bins = (-1, 4, 256)
group_names = ['baby', 'adult']
categories = pd.cut(df[df['Type'] == 2]['Age'], bins,
labels=group_names)
return categories
def age(cls, df):
df.loc[df['Type'] == 1, 'AgeGroup'] = cls.simplify_ages(df, 'dog')
df.loc[df['Type'] == 2, 'AgeGroup'] = cls.simplify_ages(df, 'cat')
df.drop('Age', axis=1, inplace=True)
return df
def sum_features(cls, df, col_sum):
for col_set in col_sum:
f_name = '__'.join(col_set[:])
df[f_name] = df[[*col_set]].sum(axis=1)
df.drop(col_set, axis=1, inplace=True)
return df
def combine_features(cls, row, col_set):
result = ''
for col in col_set:
if result != '':
result += '_'
result += str(row[col])
return result
def combine(cls, df, col_sets):
for col_set in col_sets:
f_name = '__'.join(col_set[:])
df[f_name] = df.apply(lambda x: cls.combine_features(x, col_set),
axis=1)
df.drop(col_set, axis=1, inplace=True)
return df
def multiply_features(cls, df, feature_sets):
for feature_set in feature_sets:
# multipled_name = '_x_'.join(feature_set[:])
# df.drop(feature_set, axis=1, inplace=True)
pass
return df
class Model:
def forward_selection(cls, df, features_count=1):
if df.name == 'train':
qwk_scorer = make_scorer(cls.quadratic_weighted_kappa,
greater_is_better=True)
model = RandomForestClassifier(n_estimators=100, n_jobs=-1)
X = df.drop('AdoptionSpeed', axis=1)
y = df['AdoptionSpeed']
X_train, X_test,\
y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=42)
y_train = y_train.ravel()
y_test = y_test.ravel()
sfs1 = sfs(model,
k_features=3,
forward=True,
floating=False,
verbose=2,
scoring=qwk_scorer,
cv=5)
sfs1 = sfs1.fit(X_train, y_train)
best_cols = list(sfs1.k_feature_idx_)
return best_cols
def confusion_matrix(cls, rater_a, rater_b,
min_rating=None, max_rating=None):
"""
https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
Returns the confusion matrix between rater's ratings
"""
assert(len(rater_a) == len(rater_b))
if min_rating is None:
min_rating = min(rater_a + rater_b)
if max_rating is None:
max_rating = max(rater_a + rater_b)
num_ratings = int(max_rating - min_rating + 1)
conf_mat = [[0 for i in range(num_ratings)]
for j in range(num_ratings)]
for a, b in zip(rater_a, rater_b):
conf_mat[a - min_rating][b - min_rating] += 1
return conf_mat
def histogram(cls, ratings, min_rating=None, max_rating=None):
"""
https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
Returns the counts of each type of rating that a rater made
"""
if min_rating is None:
min_rating = min(ratings)
if max_rating is None:
max_rating = max(ratings)
num_ratings = int(max_rating - min_rating + 1)
hist_ratings = [0 for x in range(num_ratings)]
for r in ratings:
hist_ratings[r - min_rating] += 1
return hist_ratings
def quadratic_weighted_kappa(cls, rater_a, rater_b,
min_rating=0, max_rating=4):
"""
https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
Calculates the quadratic weighted kappa
quadratic_weighted_kappa calculates the quadratic weighted kappa
value, which is a measure of inter-rater agreement between two raters
that provide discrete numeric ratings. Potential values range from -1
(representing complete disagreement) to 1 (representing complete
agreement). A kappa value of 0 is expected if all agreement is due to
chance.
quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b
each correspond to a list of integer ratings. These lists must have the
same length.
The ratings should be integers, and it is assumed that they contain
the complete range of possible ratings.
quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating
is the minimum possible rating, and max_rating is the maximum possible
rating
"""
rater_a = np.array(rater_a, dtype=int)
rater_b = | np.array(rater_b, dtype=int) | numpy.array |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer, make_classification
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from sklearn.metrics import RocCurveDisplay, plot_roc_curve
@pytest.fixture(scope="module")
def data():
return load_iris(return_X_y=True)
@pytest.fixture(scope="module")
def data_binary(data):
X, y = data
return X[y < 2], y[y < 2]
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("drop_intermediate", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
@pytest.mark.parametrize(
"constructor_name, default_name",
[
("from_estimator", "LogisticRegression"),
("from_predictions", "Classifier"),
],
)
def test_roc_curve_display_plotting(
pyplot,
response_method,
data_binary,
with_sample_weight,
drop_intermediate,
with_strings,
constructor_name,
default_name,
):
"""Check the overall plotting behaviour."""
X, y = data_binary
pos_label = None
if with_strings:
y = np.array(["c", "b"])[y]
pos_label = "c"
if with_sample_weight:
rng = np.random.RandomState(42)
sample_weight = rng.randint(1, 4, size=(X.shape[0]))
else:
sample_weight = None
lr = LogisticRegression()
lr.fit(X, y)
y_pred = getattr(lr, response_method)(X)
y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, 1]
if constructor_name == "from_estimator":
display = RocCurveDisplay.from_estimator(
lr,
X,
y,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
pos_label=pos_label,
alpha=0.8,
)
else:
display = RocCurveDisplay.from_predictions(
y,
y_pred,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
pos_label=pos_label,
alpha=0.8,
)
fpr, tpr, _ = roc_curve(
y,
y_pred,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
pos_label=pos_label,
)
assert_allclose(display.roc_auc, auc(fpr, tpr))
assert_allclose(display.fpr, fpr)
assert_allclose(display.tpr, tpr)
assert display.estimator_name == default_name
    import matplotlib as mpl  # noqa
assert isinstance(display.line_, mpl.lines.Line2D)
assert display.line_.get_alpha() == 0.8
assert isinstance(display.ax_, mpl.axes.Axes)
assert isinstance(display.figure_, mpl.figure.Figure)
expected_label = f"{default_name} (AUC = {display.roc_auc:.2f})"
assert display.line_.get_label() == expected_label
expected_pos_label = 1 if pos_label is None else pos_label
expected_ylabel = f"True Positive Rate (Positive label: {expected_pos_label})"
expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})"
assert display.ax_.get_ylabel() == expected_ylabel
assert display.ax_.get_xlabel() == expected_xlabel
@pytest.mark.parametrize(
"clf",
[
LogisticRegression(),
make_pipeline(StandardScaler(), LogisticRegression()),
make_pipeline(
make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
),
],
)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_roc_curve_display_complex_pipeline(pyplot, data_binary, clf, constructor_name):
"""Check the behaviour with complex pipeline."""
X, y = data_binary
if constructor_name == "from_estimator":
with pytest.raises(NotFittedError):
RocCurveDisplay.from_estimator(clf, X, y)
clf.fit(X, y)
if constructor_name == "from_estimator":
display = RocCurveDisplay.from_estimator(clf, X, y)
name = clf.__class__.__name__
else:
display = RocCurveDisplay.from_predictions(y, y)
name = "Classifier"
assert name in display.line_.get_label()
assert display.estimator_name == name
@pytest.mark.parametrize(
"roc_auc, estimator_name, expected_label",
[
(0.9, None, "AUC = 0.90"),
(None, "my_est", "my_est"),
(0.8, "my_est2", "my_est2 (AUC = 0.80)"),
],
)
def test_roc_curve_display_default_labels(
pyplot, roc_auc, estimator_name, expected_label
):
"""Check the default labels used in the display."""
fpr = np.array([0, 0.5, 1])
tpr = np.array([0, 0.5, 1])
disp = RocCurveDisplay(
fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=estimator_name
).plot()
assert disp.line_.get_label() == expected_label
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_plot_roc_curve_pos_label(pyplot, response_method, constructor_name):
# check that we can provide the positive label and display the proper
# statistics
X, y = load_breast_cancer(return_X_y=True)
    # create a highly imbalanced dataset
idx_positive = np.flatnonzero(y == 1)
idx_negative = | np.flatnonzero(y == 0) | numpy.flatnonzero |
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List
import numpy as np
from scipy.linalg import sqrtm
from piquasso.api.config import Config
from piquasso.api.errors import InvalidState, InvalidParameter, PiquassoException
from piquasso.api.state import State
from piquasso._math.functions import gaussian_wigner_function
from piquasso._math.linalg import (
is_symmetric,
is_positive_semidefinite,
)
from piquasso._math.symplectic import symplectic_form, xp_symplectic_form
from piquasso._math.combinatorics import get_occupation_numbers
from piquasso._math.transformations import from_xxpp_to_xpxp_transformation_matrix
from .probabilities import (
DensityMatrixCalculation,
DisplacedDensityMatrixCalculation,
NondisplacedDensityMatrixCalculation,
)
class GaussianState(State):
r"""Class to represent a Gaussian state."""
def __init__(self, d: int, config: Config = None) -> None:
"""
Args:
d (int): The number of modes.
"""
super().__init__(config=config)
self._d = d
self.reset()
def __len__(self) -> int:
return self._d
@property
def d(self) -> int:
return len(self)
def reset(self) -> None:
r"""Resets the state to a vacuum."""
vector_shape = (self.d,)
matrix_shape = vector_shape * 2
self._m = np.zeros(vector_shape, dtype=complex)
self._G = np.zeros(matrix_shape, dtype=complex)
self._C = np.zeros(matrix_shape, dtype=complex)
@classmethod
def _from_representation(
cls,
*,
m: np.ndarray,
G: np.ndarray,
C: np.ndarray,
config: Config,
) -> "GaussianState":
obj = cls(d=len(m), config=config)
obj._m = m
obj._G = G
obj._C = C
return obj
def __eq__(self, other: object) -> bool:
if not isinstance(other, GaussianState):
return False
return (
np.allclose(self._C, other._C)
and np.allclose(self._G, other._G)
and | np.allclose(self._m, other._m) | numpy.allclose |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 14:05, 28/01/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
### Reading all results files to find True pareto-fronts (Reference Fronts)
from time import time
from pathlib import Path
from copy import deepcopy
from config import Config, OptExp, OptParas
from pandas import read_csv, DataFrame, to_numeric
from numpy import array, zeros, vstack, hstack, min, max, mean, std
from utils.io_util import load_tasks, load_nodes
from utils.metric_util import *
from utils.visual.scatter import visualize_front_3d
def inside_loop(my_model, n_trials, n_timebound, epoch, fe, end_paras):
for pop_size in OptExp.POP_SIZE:
if Config.TIME_BOUND_KEY:
path_results = f'{Config.RESULTS_DATA}/{n_timebound}s/task_{my_model["problem"]["n_tasks"]}/{Config.METRICS}/{my_model["name"]}/{n_trials}'
else:
path_results = f'{Config.RESULTS_DATA}/no_time_bound/task_{my_model["problem"]["n_tasks"]}/{Config.METRICS}/{my_model["name"]}/{n_trials}'
name_paras = f'{epoch}_{pop_size}_{end_paras}'
file_name = f'{path_results}/experiment_results/{name_paras}-results.csv'
df = read_csv(file_name, usecols=["Power", "Latency", "Cost"])
return df.values
def getting_results_for_task(models):
matrix_fit = zeros((1, 6))
for n_task in OptExp.N_TASKS:
for my_model in models:
tasks = load_tasks(f'{Config.INPUT_DATA}/tasks_{n_task}.json')
problem = deepcopy(my_model['problem'])
problem["tasks"] = tasks
problem["n_tasks"] = n_task
problem["shape"] = [len(problem["clouds"]) + len(problem["fogs"]), n_task]
my_model['problem'] = problem
for n_trials in range(OptExp.N_TRIALS):
if Config.TIME_BOUND_KEY:
for n_timebound in OptExp.TIME_BOUND_VALUES:
if Config.MODE == "epoch":
for epoch in OptExp.EPOCH:
end_paras = f"{epoch}"
df_matrix = inside_loop(my_model, n_trials, n_timebound, epoch, None, end_paras)
df_name = array([[n_task, my_model["name"], n_trials], ] * len(df_matrix))
                                matrix = hstack((df_name, df_matrix))
matrix_fit = vstack((matrix_fit, matrix))
else:
if Config.MODE == "epoch":
for epoch in OptExp.EPOCH:
end_paras = f"{epoch}"
df_matrix = inside_loop(my_model, n_trials, None, epoch, None, end_paras)
df_name = array([[n_task, my_model["name"], n_trials], ] * len(df_matrix))
matrix = hstack((df_name, df_matrix))
matrix_fit = vstack((matrix_fit, matrix))
return matrix_fit[1:]
starttime = time()
clouds, fogs, peers = load_nodes(f'{Config.INPUT_DATA}/nodes_2_8_5.json')
problem = {
"clouds": clouds,
"fogs": fogs,
"peers": peers,
"n_clouds": len(clouds),
"n_fogs": len(fogs),
"n_peers": len(peers),
}
models = [
{"name": "NSGA-II", "class": "BaseNSGA_II", "param_grid": OptParas.NSGA_II, "problem": problem},
{"name": "NSGA-III", "class": "BaseNSGA_III", "param_grid": OptParas.NSGA_III, "problem": problem},
{"name": "MO-ALO", "class": "BaseMO_ALO", "param_grid": OptParas.MO_ALO, "problem": problem},
{"name": "MO-SSA", "class": "BaseMO_SSA", "param_grid": OptParas.MO_SSA, "problem": problem},
]
## Load all results of all trials
matrix_results = getting_results_for_task(models)
# df_full = DataFrame(matrix_results, columns=["Task", "Model", "Trial", "Fit1", "Fit2", "Fit3"])
data = {'Task': matrix_results[:, 0],
'Model': matrix_results[:, 1],
'Trial': matrix_results[:, 2],
'Fit1': matrix_results[:, 3],
'Fit2': matrix_results[:, 4],
'Fit3': matrix_results[:, 5],
}
df_full = DataFrame(data)
df_full["Task"] = to_numeric(df_full["Task"])
df_full["Trial"] = to_numeric(df_full["Trial"])
df_full["Fit1"] = to_numeric(df_full["Fit1"])
df_full["Fit2"] = to_numeric(df_full["Fit2"])
df_full["Fit3"] = to_numeric(df_full["Fit3"])
for n_task in OptExp.N_TASKS:
performance_results = []
performance_results_mean = []
## Find matrix results for each problem
df_task = df_full[df_full["Task"] == n_task]
matrix_task = df_task[['Fit1', 'Fit2', 'Fit3']].values
hyper_point = max(matrix_task, axis=0)
## Find non-dominated matrix for each problem
reference_fronts = zeros((1, 3))
dominated_list = find_dominates_list(matrix_task)
for idx, value in enumerate(dominated_list):
if value == 0:
reference_fronts = vstack((reference_fronts, matrix_task[idx]))
reference_fronts = reference_fronts[1:]
## For each model and each trial, calculate its performance metrics
for model in models:
er_list = zeros(OptExp.N_TRIALS)
gd_list = zeros(OptExp.N_TRIALS)
igd_list = zeros(OptExp.N_TRIALS)
ste_list = zeros(OptExp.N_TRIALS)
hv_list = zeros(OptExp.N_TRIALS)
har_list = zeros(OptExp.N_TRIALS)
for trial in range(OptExp.N_TRIALS):
df_result = df_task[ (df_task["Model"] == model["name"]) & (df_task["Trial"] == trial) ]
pareto_fronts = array(df_result.values[:, 3:], dtype=float)
er = error_ratio(pareto_fronts, reference_fronts)
gd = generational_distance(pareto_fronts, reference_fronts)
igd = inverted_generational_distance(pareto_fronts, reference_fronts)
ste = spacing_to_extent(pareto_fronts)
hv = hyper_volume(pareto_fronts, reference_fronts, hyper_point, 100)
har = hyper_area_ratio(pareto_fronts, reference_fronts, hyper_point, 100)
performance_results.append([n_task, model["name"], trial, er, gd, igd, ste, hv, har])
er_list[trial] = er
gd_list[trial] = gd
igd_list[trial] = igd
ste_list[trial] = ste
hv_list[trial] = hv
har_list[trial] = har
er_min, er_max, er_mean, er_std, er_cv = min(er_list), max(er_list), mean(er_list), std(er_list), std(er_list)/mean(er_list)
gd_min, gd_max, gd_mean, gd_std, gd_cv = min(gd_list), max(gd_list), mean(gd_list), std(gd_list), std(gd_list)/mean(gd_list)
igd_min, igd_max, igd_mean, igd_std, igd_cv = min(igd_list), max(igd_list), mean(igd_list), std(igd_list), std(igd_list)/mean(igd_list)
ste_min, ste_max, ste_mean, ste_std, ste_cv = min(ste_list), max(ste_list), mean(ste_list), std(ste_list), std(ste_list)/mean(ste_list)
hv_min, hv_max, hv_mean, hv_std, hv_cv = min(hv_list), max(hv_list), mean(hv_list), std(hv_list), std(hv_list) / | mean(hv_list) | numpy.mean |
#%%
from scipy.integrate import quad
from itertools import combinations
import numpy as np
#----------------------------------------------------------------------
def f(x,i,j):
return (1-(1-x)*np.exp(x))**(i-j)*(2-np.e-x+np.exp(x))**j
def f1(x):
return 1-(1-x)*np.exp(x)
def I(A,i,j):
return f(A,i,j)-quad(f,A,1,args=(i,j))[0]
def solution(n,error=10**(-8)):
res=np.zeros((n,n))
for i in range(1,n):
for j in range(i+1):
h,l=1,0
while h-l>error:
m=(h+l)/2
if I(m,i,j)>0:
h=m
else:
l=m
res[n-i-1,j]=l
return res
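# Illustrative usage (not part of the original script): solution(n) returns an
# n x n table of thresholds obtained by bisecting I(A, i, j) = 0. The strategy
# classes below read it as threshold[rank, num_naive]; for example,
#     solution(4)[0, 0]
# is the cutoff used by the first of four players when all later players are
# assumed to play the equilibrium strategy.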
#-------------------------------------------------------------------------------------
class RandomStrategy:
"""this strategy is for testing
"""
def __init__(self,random_type,switch=True):
        self.algo_id='random strategy '+str(random_type)+(' with ' if switch else ' without ')+'switch'
self.type=random_type
self.count=0
self.switch=switch
def para(self,i,num_players):
self.num=num_players
self.p=0
return self
def calibration(self,i,order,history,result,turn_reward):
if self.type==0:
self.p=np.random.sample()
elif self.type==1:
self.p=np.random.uniform(max(result),1)
elif self.type==2 and self.count==0:
self.p=0
elif self.type==3:
self.p=0.5
if self.switch and self.count%100000==0:
self.type=np.random.randint(4)
self.count+=1
def decision(self,hand):
if hand<self.p:
return True
return False
#---------------------------------------------------------------------------------------------------------
class NaiveStrategy:
""" Used for testing, this strategy can also be used as bench mark
"""
def __init__(self):
pass
def para(self,i,num_players):
self.p=0
return self
def calibration(self,i,order,history,result,turn_reward):
self.p=max(result)
def decision(self,hand):
if hand<self.p:
return True
return False
#-------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------
class NashEquilibrium:
""" Nash equilibrium scaled by factor
"""
def __init__(self,factor=1):
self.factor=factor
def para(self,i,num_players):
self.threshold=solution(num_players)[:,0]*self.factor
return self
def calibration(self,i,order,history,result,turn_reward):
self.p=max(max(result),self.threshold[i])
def decision(self,hand):
if hand<self.p:
return True
return False
#--------------------------------------------------------------------------------------------------------
class AdaptiveNasheqilibrium:
""" This strategy make the assumption that a player either plays Nash Equilibrium
or play a threshold uniformly chosen in $[0,1]$. More specifically, if rationality
assumption is violated, the algorithm will mark it as uniform threshold player.
"""
def __init__(self,confidence_level=1500):
self.confidence_level=confidence_level
self.count=0
def para(self,i,num_players):
self.num_players=num_players
self.id=i
self.threshold=solution(num_players)
self.profiles=np.zeros(num_players,dtype=int)
self.record=[]
return self
def calibration(self,rank,order,history,result,turn_reward):
self.count+=1
if len(history)>0:
his_order,his_res,his_cards=history[-1][1:]
for i in range(1,self.num_players):
if his_order[i]!=self.id:
if max(his_cards[i])>=max(his_res[:i]):
self.profiles[his_order[i]]+=1
else:
self.profiles[his_order[i]]=-self.confidence_level
#self.record.append((self.count,i,his_order[i],his_cards,his_res))
        num_naive=0
        for i in range(rank+1,self.num_players):
            if self.profiles[order[i]]<0:
                num_naive+=1
        self.p=max(self.threshold[rank,num_naive],max(result))
def decision(self,hand):
if hand<self.p:
return True
return False
#-----------------------------------------------------------------------------------------------------------
class ModelfreeStrategy:
""" This strategy is to estimate $L$ as opposed to $K$
the estimate of $L(t)$ for small $t$ is not as accurate as for that of larger $t$
since there is less chance for the previous score to be small. We can fix that to some extent by extrapolation.
however the estimate of $L$ for small $t$ does not matter
too much as the max is likely to be obtained for larger $t$ where the estimates are relatively accurate.
"""
def __init__(self,size=1000,initial_count=2,extrapolation_decay_factor=0.8,extrapolation_range=5):
self.m=size # discretization size
self.init_count=initial_count
self.decay_factor=0.9
self.e=np.exp(np.arange(self.m)/self.m)
self.range=extrapolation_range
self.xp=extrapolation_decay_factor**abs(np.arange(-extrapolation_range,extrapolation_range+1))
def para(self,i,num_players):
self.num_players=num_players
self.id=i
self.P={}# profiles
for j in range(num_players):
if j!=self.id:
self.P[j]=[np.zeros((num_players,self.m)),np.zeros((num_players,self.m))+self.init_count]
#self.P[j][:]=self.g
return self
def calibration(self,rank,order,history,result,turn_reward):
if len(history)>0:
position,scores=history[-1][1:3]# only need last turn's results for positions and scores
t=0
for i in range(self.num_players):
player_id,T=position[i],int(t*self.m)
if position[i]!=self.id:
if T<self.m/3:
l,h=max(0,T-self.range),T+self.range+1
self.P[player_id][1][i,l:h]+=self.xp[l-T+self.range:h-T+self.range]
self.P[player_id][0][i,l:h]+=((scores[i]<t)-self.P[player_id][0][i,l:h])/self.P[player_id][1][i,l:h]
else:
self.P[player_id][1][i,T]+=1
self.P[player_id][0][i,T]+=((scores[i]<t)-self.P[player_id][0][i,T])/self.P[player_id][1][i,T]
t=max(t,scores[i])
self.p=max(result)
if rank<self.num_players-1:
M=np.ones(self.m)
for j in range(rank+1,self.num_players):
M*=self.P[order[j]][0][j]
A=np.argmax(np.cumsum(M[::-1])[::-1]*self.e)+np.random.random_sample()
self.p=max(self.p,A/self.m)
def decision(self,hand):
if hand<self.p:
return True
return False
#------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------
class Profile:
""" this is the profile for AdaptiveStrategy, which is families of distributions.
To enhance the performance, I run two tests: lower bound assumption and point strategy assumption.
lower bound means rationality assumption holds,i.e. the threshold will be greater than the max previous score
point bound means the opponents' strategies are not randomized, that is, it's always a single number.
"""
def __init__(self,i,num_players,G,init_w,gridsize=1000,discount=0.99,pt_threshold=2500,lowbd_threshold=1500,cooldown=10000):
self.Id=i
self.pt_threshold=pt_threshold
self.lowbd_threshold=lowbd_threshold
self.grid=gridsize
self.discount=discount
self.G=G
self.weight=[np.zeros(self.grid)+init_w for _ in range(num_players)]
self.dist=[np.zeros((self.grid,self.grid))+1/self.grid for _ in range(num_players)]
self.init_sh=np.sum(self.G,axis=1)/self.grid
self.sh=[np.copy(self.init_sh) for _ in range(num_players)]
a=np.zeros((self.grid,2))
a[:,1]=1
self.bounds=[np.copy(a) for _ in range(num_players)]
self.lowbd_established=[False]*num_players
self.pt_established=[False]*num_players
self.acceptable_error=2/self.grid
self.lowbd_count=np.zeros(num_players,dtype=int)
self.pt_count=np.zeros(num_players,dtype=int)
self.cooldownturns=cooldown
self.cooldown=3*cooldown
#self.record=[]
def update(self,rank,t,a,b):
self.cooldown-=1
if t<=b:
self.lowbd_count[rank]+=1
if not (self.lowbd_established[rank] or self.pt_established[rank]) and self.lowbd_count[rank]>self.lowbd_threshold:
self.establish_lowbd(rank)
elif self.lowbd_established[rank]:
self.lowbd_breached(rank)
A=int(self.grid*t)
l,h=self.bounds[rank][A]
if a<h+self.acceptable_error and b>l-self.acceptable_error:
self.pt_count[rank]+=1
self.bounds[rank][A]=max(l,a),min(b,h)
if not self.pt_established[rank] and self.pt_count[rank]>self.pt_threshold:
self.establish_pt(rank)
elif self.pt_established[rank]:
self.pt_breached(rank)
x=int(a*self.grid)
y=min(int(self.grid*b)+1,self.grid)
        if self.pt_established[rank]:
            self.pt_fit(rank,A)
        elif self.lowbd_established[rank]:
self.lowbd_fit(rank,A,y)
else:
for k in range(-3,4):
A1,x1,y1=A+k,x+k,y+k
if 0<=A1<self.grid and 0<=x1 and y1<self.grid:
self.fit(rank,A1,x1,y1,0.9**abs(k))
if self.cooldown>0:
return True
else:
return False
def lowbd_breached(self,rank):
self.lowbd_established[rank]=False
self.lowbd_count[rank]=0
self.sh[rank][:]=self.init_sh
self.dist[rank][:]=1/self.grid
self.cooldown=self.cooldownturns
#self.record.append(-1)
def establish_lowbd(self,rank):
self.lowbd_established[rank]=True
for A in range(self.grid):
z=sum(self.dist[rank][A,A:])
self.dist[rank][A,:A]=0
self.dist[rank][A][A:]/=z
self.sh[rank][A]=sum(self.dist[rank][A,A:]*self.G[A,A:])
#self.record.append(1)
def establish_pt(self,rank):
self.pt_established[rank]=True
for A in range(self.grid):
self.pt_fit(rank,A)
# self.record.append(2)
def pt_breached(self,rank):
self.pt_established[rank]=False
#self.record.append((-2,self.bounds[rank]))
        self.bounds[rank][:,0]=0
        self.bounds[rank][:,1]=1
self.pt_count[rank]=0
self.sh[rank]=self.init_sh
self.dist[rank][:]=1/self.grid
self.cooldown=self.cooldownturns
def pt_fit(self,rank,A):
l,h=self.bounds[rank][A]
x=max(0,int((l-self.acceptable_error)*self.grid))
y=min(int((self.acceptable_error+h)*self.grid)+1,self.grid)
self.dist[rank][A][:]=0
self.dist[rank][A][x:y]=1/(y-x)
self.sh[rank][A]=sum(self.init_sh[x:y])*self.grid/(y-x)
def lowbd_fit(self,rank,A,y):
self.fit(rank,A,A,y)
def fit(self,rank,A,x,y,extraplation_factor=1):
Z=sum(self.dist[rank][A,x:y])*self.discount
w=np.zeros(self.grid)+self.weight[rank][A]
delta=extraplation_factor*sum(self.dist[rank][A][x:y]*self.G[A][x:y])/Z
self.sh[rank][A]*=self.weight[rank][A]
self.sh[rank][A]+=delta
self.weight[rank][A]+=extraplation_factor/self.discount
self.sh[rank][A]/=self.weight[rank][A]
w[x:y]+=extraplation_factor/Z
w/=self.weight[rank][A]
self.dist[rank][A]*=w
def getsh(self,rank):
return self.sh[rank]
def getprofile(self,rank):
return self.dist[rank]
class AdaptiveStrategy:
"""This was the model free strategy, made before I
realized that the conditions can be imposed on $L$ instead of the strategy $K$.
"""
def __init__(self,girdsize=100,discount=0.9,init_w=10,pt_threshold=1500,lowbd_threshold=1000,cooldown=4000):
self.grid=girdsize
self.discount=discount
self.init_w=init_w
self.cooldown=cooldown
self.pt_threshold=pt_threshold
self.lowbd_threshold=lowbd_threshold
temp=(np.arange(self.grid)+0.1)/self.grid
self.exp_temp=np.exp(temp)
res=1-(1-temp)*self.exp_temp
self.G=np.zeros((self.grid,self.grid))
for t in range(self.grid):
self.G[t]=res
self.G[t,:t]=(t/self.grid-temp[:t])*self.exp_temp[:t]
def para(self,Id,num_players):
self.Id=Id
self.nashequilibrum=solution(num_players)
self.profiles=[Profile(i,num_players,self.G,self.init_w,self.grid,self.discount,self.pt_threshold,self.lowbd_threshold,self.cooldown) for i in range(num_players)]
self.profiles[Id]=None
self.num_players=num_players
return self
def calibration(self,i,order,history,result,turn_reward):
self.iscooldown=False
if len(history)>0:
od,res,cards=history[-1][1:]
for j in range(self.num_players):
if od[j]!=self.Id:
maxindex=np.argmax(cards[j])
a=cards[j,maxindex-1]
b=min(1,cards[j,maxindex])
if j==0:
t=0
else:
t=max(res[:j])
if self.profiles[od[j]].update(j,t,a,b):
self.iscooldown=True
if self.iscooldown:
num=0
for j in range(i+1,self.num_players):
                if not (self.profiles[order[j]].pt_established[j] or self.profiles[order[j]].lowbd_established[j]):
num+=1
self.p=max(max(result),self.nashequilibrum[i,num])
elif i==self.num_players-1:
self.p=max(result)
else:
W=np.ones(self.grid)
for j in range(i+1,self.num_players):
W*=self.profiles[order[j]].getsh(j)
self.p=np.argmax(np.cumsum(W[::-1])[::-1]*self.exp_temp)+0.5
self.p/=self.grid
self.p=max(self.p,max(result))
def decision(self,hand):
if hand<self.p:
return True
return False
#--------------------------------------------------------------------------------------------------------------
class ContextualBandits:
""" This is the classic algorithm for k-armed bandit problem, with some modifications to improve the performance.
"""
def __init__(self,rl_type,resource_limit=3,size=1000,exploration=0.15,
init_reward=2,xp_dacay_rate=0.9,a=0.1,c=2,baseline=4):
self.resource_limit=resource_limit # how many positions into the rank we want to record
self.count=0
self.type=rl_type
self.last_choice=None
self.exploration=exploration
self.xp_decay=xp_dacay_rate
self.a=a # learning rate for gradient descent
self.baseline=baseline
self.init_reward=init_reward
self.m=size
self.c=c
def para(self,i,num_players):
self.id=i
self.num_players=num_players
self.dic={}
self.N=self.code()
self.bandits_rewards=np.zeros((self.N,self.m))+self.init_reward # row major for our purpose for speed
self.bandits_num=np.zeros((self.N,self.m),dtype=int)+1
if self.type==2:
self.H=np.zeros((self.N,self.m))+self.baseline
self.dist= | np.zeros((self.N,self.m)) | numpy.zeros |
import numpy as np
from sigfeat.feature.common import Index
from sigfeat.feature.common import WindowedSignal
from sigfeat.source.array import ArraySource
from sigfeat.feature.common import centroid
from sigfeat.feature.common import flatness
from sigfeat.feature.common import flux
from sigfeat.feature.common import rolloff
from sigfeat.feature.common import crest_factor
from sigfeat.feature.common import zero_crossing_count
from sigfeat.feature.common import moments
def test_index():
idx = Index()
res = idx.process(((1, 2, 3), 100), {})
assert res == 100
def test_windowed_signal():
block = np.ones(10)
sc = ArraySource(block, samplerate=1)
wsf = WindowedSignal(window='hanning', size=10)
wsf.on_start(sc)
res = wsf.process((block, 0), {})
assert np.allclose(res, wsf.w)
block = np.ones((10, 2))
sc = ArraySource(block, samplerate=1, blocksize=10)
wsf = WindowedSignal(window='blackman')
wsf.on_start(sc)
res = wsf.process((block, 0), {})
assert np.allclose(res, wsf.w)
sc = ArraySource(block, samplerate=1, blocksize=10)
wsf = WindowedSignal(window='rect')
wsf.on_start(sc)
res = wsf.process((block, 0), {})
assert np.allclose(res, wsf.w)
def test_centroid():
x = np.zeros((9, 2)) + 1e-20
x[3, 0] = 1.0
x[4, 1] = 2.0
    i = (np.arange(len(x)) * np.ones_like(x)
# Tests of the quasiisothermaldf module
from __future__ import print_function, division
import numpy
#fiducial setup uses these
from galpy.potential import MWPotential, vcirc, omegac, epifreq, verticalfreq
from galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAA= actionAngleAdiabatic(pot=MWPotential,c=True)
aAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)
def test_pvRvT_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for adiabatic actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to meanvT for adiabatic actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for adiabatic actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for adiabatic actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for adiabatic actions'
return None
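# Editor's note: the mean/sigma/correlation computations above are repeated almost
# verbatim in the tests below. The helper sketched here is NOT part of galpy or of
# the original tests; it is only an illustration of the shared arithmetic (weighted
# first and second moments of a pdf tabulated on a (v1, v2) grid).
def _grid_moments(p, t1, t2):
    """Return (m1, m2, s1, s2, corr) for a pdf p tabulated on grids t1, t2."""
    norm= numpy.sum(p)
    m1= numpy.sum(t1*p)/norm
    m2= numpy.sum(t2*p)/norm
    s1= numpy.sqrt(numpy.sum(t1**2.*p)/norm-m1**2.)
    s2= numpy.sqrt(numpy.sum(t2**2.*p)/norm-m2**2.)
    corr= (numpy.sum(t1*t2*p)/norm-m1*m2)/s1/s2
    return m1, m2, s1, s2, corr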
def test_pvRvT_staeckel():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
return None
def test_pvRvT_staeckel_diffngl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
#ngl=10
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=10) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
    #ngl=40
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=40) for vt in vTs] for vr in vRs])
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
#ngl=11, shouldn't work
try:
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=11) for vt in vTs] for vr in vRs])
except ValueError: pass
    else: raise AssertionError('pvRvT w/ ngl=odd did not raise ValueError')
return None
def test_pvTvz_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vTs= numpy.linspace(0.,1.5,51)
vzs= numpy.linspace(-1.,1.,21)
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z) for vt in vTs] for vz in vzs])
tvT= numpy.tile(vTs,(len(vzs),1))
tvz= numpy.tile(vzs,(len(vTs),1)).T
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for adiabatic actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to meanvT for adiabatic actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for adiabatic actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for adiabatic actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for adiabatic actions'
return None
def test_pvTvz_staeckel():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vzs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z) for vt in vTs] for vz in vzs])
tvz= numpy.tile(vzs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vzs),1))
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'
return None
def test_pvTvz_staeckel_diffngl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vzs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
#ngl=10
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=10) for vt in vTs] for vz in vzs])
tvz= numpy.tile(vzs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vzs),1))
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'
    #ngl=40
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=40) for vt in vTs] for vz in vzs])
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'
#ngl=11, shouldn't work
try:
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=11) for vt in vTs] for vz in vzs])
except ValueError: pass
    else: raise AssertionError('pvTvz w/ ngl=odd did not raise ValueError')
return None
def test_pvRvz_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vzs= numpy.linspace(-1.,1.,21)
pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z) for vz in vzs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vzs),1)).T
tvz= numpy.tile(vzs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)
mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)
    svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz
from __future__ import print_function
import numpy as np
import itertools
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
from pytest import warns as assert_warns
from scipy.spatial import SphericalVoronoi, distance
from scipy.spatial import _spherical_voronoi as spherical_voronoi
from scipy.spatial.transform import Rotation
from scipy.optimize import linear_sum_assignment
TOL = 1E-10
class TestSphericalVoronoi(object):
def setup_method(self):
self.points = np.array([
[-0.78928481, -0.16341094, 0.59188373],
[-0.66839141, 0.73309634, 0.12578818],
[0.32535778, -0.92476944, -0.19734181],
[-0.90177102, -0.03785291, -0.43055335],
[0.71781344, 0.68428936, 0.12842096],
[-0.96064876, 0.23492353, -0.14820556],
[0.73181537, -0.22025898, -0.6449281],
[0.79979205, 0.54555747, 0.25039913]]
)
# Issue #9386
self.hemisphere_points = np.array([
[0.88610999, -0.42383021, 0.18755541],
[0.51980039, -0.72622668, 0.4498915],
[0.56540011, -0.81629197, -0.11827989],
[0.69659682, -0.69972598, 0.15854467]])
# Issue #8859
phi = np.linspace(0, 2 * np.pi, 10, endpoint=False) # azimuth angle
theta = np.linspace(0.001, np.pi * 0.4, 5) # polar angle
theta = theta[np.newaxis, :].T
phiv, thetav = np.meshgrid(phi, theta)
phiv = np.reshape(phiv, (50, 1))
thetav = np.reshape(thetav, (50, 1))
x = np.cos(phiv) * np.sin(thetav)
y = np.sin(phiv) * np.sin(thetav)
z = np.cos(thetav)
self.hemisphere_points2 = np.concatenate([x, y, z], axis=1)
def test_constructor(self):
center = np.array([1, 2, 3])
radius = 2
s1 = SphericalVoronoi(self.points)
# user input checks in SphericalVoronoi now require
# the radius / center to match the generators so adjust
# accordingly here
s2 = SphericalVoronoi(self.points * radius, radius)
s3 = SphericalVoronoi(self.points + center, center=center)
s4 = SphericalVoronoi(self.points * radius + center, radius, center)
assert_array_equal(s1.center, np.array([0, 0, 0]))
assert_equal(s1.radius, 1)
assert_array_equal(s2.center, np.array([0, 0, 0]))
assert_equal(s2.radius, 2)
assert_array_equal(s3.center, center)
assert_equal(s3.radius, 1)
assert_array_equal(s4.center, center)
assert_equal(s4.radius, radius)
def test_vertices_regions_translation_invariance(self):
sv_origin = SphericalVoronoi(self.points)
center = np.array([1, 1, 1])
sv_translated = SphericalVoronoi(self.points + center, center=center)
assert_equal(sv_origin.regions, sv_translated.regions)
assert_array_almost_equal(sv_origin.vertices + center,
sv_translated.vertices)
def test_vertices_regions_scaling_invariance(self):
sv_unit = SphericalVoronoi(self.points)
sv_scaled = SphericalVoronoi(self.points * 2, 2)
assert_equal(sv_unit.regions, sv_scaled.regions)
assert_array_almost_equal(sv_unit.vertices * 2,
sv_scaled.vertices)
def test_old_radius_api(self):
sv_unit = SphericalVoronoi(self.points, radius=1)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`radius` is `None`")
sv = SphericalVoronoi(self.points, None)
assert_array_almost_equal(sv_unit.vertices, sv.vertices)
def test_old_radius_api_warning(self):
with assert_warns(DeprecationWarning):
sv = SphericalVoronoi(self.points, None)
def test_sort_vertices_of_regions(self):
sv = SphericalVoronoi(self.points)
unsorted_regions = sv.regions
sv.sort_vertices_of_regions()
assert_equal(sorted(sv.regions), sorted(unsorted_regions))
def test_sort_vertices_of_regions_flattened(self):
expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1],
[4, 8, 7, 5, 6], [9, 11, 10], [2, 7, 5],
[1, 4, 8, 11, 9], [0, 3, 10, 9, 1]])
expected = list(itertools.chain(*sorted(expected)))
sv = SphericalVoronoi(self.points)
sv.sort_vertices_of_regions()
actual = list(itertools.chain(*sorted(sv.regions)))
assert_array_equal(actual, expected)
def test_sort_vertices_of_regions_dimensionality(self):
points = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0.5, 0.5, 0.5, 0.5]])
with pytest.raises(TypeError, match="three-dimensional"):
sv = spherical_voronoi.SphericalVoronoi(points)
sv.sort_vertices_of_regions()
def test_num_vertices(self):
# for any n >= 3, a spherical Voronoi diagram has 2n - 4
# vertices; this is a direct consequence of Euler's formula
        # as explained by Dinis and Mamede (2010) Proceedings of the
# 2010 International Symposium on Voronoi Diagrams in Science
# and Engineering
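        # Sketch of that argument (comment added for clarity): every vertex of
        # the spherical Voronoi diagram is the circumcenter of a facet of the
        # convex hull (Delaunay triangulation) of the n generators. Euler's
        # formula for the triangulation gives V - E + F = 2 with V = n; every
        # facet is a triangle and every edge is shared by two facets, so
        # E = 3F/2 and therefore F = 2n - 4 facets, i.e. 2n - 4 Voronoi vertices.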
sv = SphericalVoronoi(self.points)
expected = self.points.shape[0] * 2 - 4
actual = sv.vertices.shape[0]
assert_equal(actual, expected)
def test_voronoi_circles(self):
sv = spherical_voronoi.SphericalVoronoi(self.points)
for vertex in sv.vertices:
distances = distance.cdist(sv.points, np.array([vertex]))
closest = np.array(sorted(distances)[0:3])
assert_almost_equal(closest[0], closest[1], 7, str(vertex))
assert_almost_equal(closest[0], closest[2], 7, str(vertex))
def test_duplicate_point_handling(self):
# an exception should be raised for degenerate generators
# related to Issue# 7046
self.degenerate = np.concatenate((self.points, self.points))
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.degenerate)
def test_incorrect_radius_handling(self):
# an exception should be raised if the radius provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
radius=0.98)
def test_incorrect_center_handling(self):
# an exception should be raised if the center provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
center=[0.1, 0, 0])
def test_single_hemisphere_handling(self):
# Test solution of Issues #9386, #8859
for points in [self.hemisphere_points, self.hemisphere_points2]:
sv = SphericalVoronoi(points)
triangles = sv._tri.points[sv._tri.simplices]
dots = np.einsum('ij,ij->i', sv.vertices, triangles[:, 0])
circumradii = np.arccos(np.clip(dots, -1, 1))
assert np.max(circumradii) > np.pi / 2
def test_rank_deficient(self):
# rank-1 input cannot be triangulated
points = np.array([[-1, 0, 0], [1, 0, 0]])
with pytest.raises(ValueError, match="Rank of input points"):
sv = spherical_voronoi.SphericalVoronoi(points)
@pytest.mark.parametrize("n", [8, 15, 21])
@pytest.mark.parametrize("radius", [0.5, 1, 2])
@pytest.mark.parametrize("center", [(0, 0, 0), (1, 2, 3)])
def test_geodesic_input(self, n, radius, center):
U = Rotation.random(random_state=0).as_matrix()
thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)
        points = np.vstack([np.sin(thetas), np.cos(thetas), np.zeros(n)
# example of preparing the horses and zebra dataset
from os import listdir
from numpy import asarray
from numpy import vstack
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from numpy import savez_compressed
# load all images in a directory into memory
def load_images(path, size=(256,256)):
data_list = list()
# enumerate filenames in directory, assume all are images
for filename in listdir(path):
# load and resize the image
pixels = load_img(path + filename, target_size=size)
# convert to numpy array
pixels = img_to_array(pixels)
# store
data_list.append(pixels)
return asarray(data_list)
# dataset path
path = 'Unmodified Dataset/'
# load dataset A
dataA1 = load_images(path + 'trainA/')
dataAB = load_images(path + 'testA/')
dataA = vstack((dataA1, dataAB))
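# The original script is truncated at this point. A typical continuation (shown only
# as an illustrative sketch -- the filenames and variable names below are assumptions,
# not part of the original) loads the B domain the same way and saves both arrays:
print('Loaded dataA: ', dataA.shape)
# load dataset B
dataB1 = load_images(path + 'trainB/')
dataB2 = load_images(path + 'testB/')
dataB = vstack((dataB1, dataB2))
print('Loaded dataB: ', dataB.shape)
# save both arrays as a single compressed numpy archive
savez_compressed('horse2zebra_256.npz', dataA, dataB)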
# REFS Some (most) of those functions come from the keras library (https://github.com/fchollet/keras)
# Some are modified to add output images and output centerline
# keras.preprocessing.image: flip_axis, random_channel_shift, apply_transform, transform_matrix_offset_center, ApplyRandomTransformations
import time
import numpy as np
import random
import scipy as sp
import scipy.interpolate
import scipy.ndimage
import scipy.ndimage.interpolation
from NnetsX import IS_CHANNELS_FIRST
# from File import SavePickle
INTENSITY_FACTOR = 0.2
VECTOR_FIELD_SIGMA = 5. # in pixel
ROTATION_FACTOR = 10 # degree
TRANSLATION_FACTOR = 0.2 # proportion of the image size
SHEAR_FACTOR = 2*np.pi/180 # in radian
ZOOM_FACTOR = 0.1
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_channel_shift(x, intensity, channel_index=0):
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
shift = np.random.uniform(-intensity, intensity) # TODO add a choice if we want the same shift for all channels
channel_images = [np.clip(x_channel + shift, min_x, max_x)
for x_channel in x]
# channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
# for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [sp.ndimage.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
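# Note (comment added for clarity): composing offset_matrix @ matrix @ reset_matrix
# first moves the origin to the image centre, applies the transform there, then moves
# the origin back, so rotations, shears and zooms pivot about the centre of the image
# rather than its top-left corner.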
def ApplyRandomTransformations(_x, _y, _pts, _trans, _rot, _zoom, _shear, _elastix, _row_index=1, _col_index=2, _channel_index=0, _fill_mode='constant', _cval=0.):
if _elastix != 0:
sigma = _elastix # in pixel
kernelSize = 3
sizeAll = kernelSize + 2
imgShape = (_x.shape[1], _x.shape[2])
# create the indices of the 5x5 vector field (fieldPts.shape = (25,2))
fieldPts = np.mgrid[0.:1.:complex(sizeAll), 0.:1.:complex(sizeAll)].swapaxes(0,2).swapaxes(0,1).reshape((sizeAll*sizeAll, 2))
# create the displacement (x and y) of the 5x5 vector field (border have no displacement so it's 0) (displacementX.shape = (25))
displacementX = np.zeros((sizeAll*sizeAll))
displacementY = np.zeros((sizeAll*sizeAll))
for i in range(0, sizeAll*sizeAll):
if fieldPts[i][0] != 0. and fieldPts[i][0] != 1. \
and fieldPts[i][1] != 0. and fieldPts[i][1] != 1.:
displacementX[i] = np.random.normal(0, sigma, 1)
displacementY[i] = np.random.normal(0, sigma, 1)
# transform the indice of the 5x5 vector field in the image coordinate system (TODO WARNING works only with square images)
fieldPts = fieldPts*imgShape[0] # TODO check if it's not imgShape[0] - 1?
# create the indices of all pixels in the image (gridX.shape = (1024,1024))
gridX, gridY = np.mgrid[0.:(imgShape[0] - 1):complex(imgShape[0]), 0.:(imgShape[1] - 1):complex(imgShape[1])]
# interpolate the vector field for every pixels in the image (dxGrid.shape = (1024,1024))
dxGrid = scipy.interpolate.griddata(fieldPts, displacementX, (gridX, gridY), method='cubic')
dyGrid = scipy.interpolate.griddata(fieldPts, displacementY, (gridX, gridY), method='cubic')
# apply the displacement on every pixels (indices = [indices.shape[0] = 1024*1024, indices.shape[1] = 1024*1024])
indices = np.reshape(gridY + dyGrid, (-1, 1)), np.reshape(gridX + dxGrid, (-1, 1))
for chan in range(_x.shape[0]):
_x[chan] = scipy.ndimage.interpolation.map_coordinates(_x[chan], indices, order=2, mode='reflect').reshape(imgShape)
_x[chan] = np.clip(_x[chan], 0., 1.)
if _y is not None:
for chan in range(_y.shape[0]):
_y[chan] = scipy.ndimage.interpolation.map_coordinates(_y[chan], indices, order=2, mode='reflect').reshape(imgShape)
_y[chan] = np.clip(_y[chan], 0., 1.)
#if _pts is not None:
matrix = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
if _rot != 0:
theta = np.pi/180*np.random.uniform(-_rot, _rot)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
matrix = np.dot(matrix, rotation_matrix)
if _trans != 0:
ty = np.random.uniform(-_trans, _trans)*_x.shape[_row_index]
tx = np.random.uniform(-_trans, _trans)*_x.shape[_col_index]
translation_matrix = np.array([[1, 0, ty],
[0, 1, tx],
[0, 0, 1]])
matrix = np.dot(matrix, translation_matrix)
if _shear != 0:
shear = np.random.uniform(-_shear, _shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
matrix = np.dot(matrix, shear_matrix)
if _zoom != 0:
zx, zy = np.random.uniform(1 - _zoom, 1 + _zoom, 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
matrix = np.dot(matrix, zoom_matrix)
h, w = _x.shape[_row_index], _x.shape[_col_index]
transformMatrix = transform_matrix_offset_center(matrix, h, w)
_x = apply_transform(_x, transformMatrix, _channel_index, _fill_mode, _cval)
if _y is not None:
_y = apply_transform(_y, transformMatrix, _channel_index, _fill_mode, _cval)
if _pts is not None:
matrix = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
if _rot != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
matrix = np.dot(matrix, rotation_matrix)
if _trans != 0:
translation_matrix = np.array([[1, 0, -tx],
[0, 1, -ty],
[0, 0, 1]])
matrix = np.dot(translation_matrix, matrix)
if _shear != 0:
shear_matrix = np.array([[np.cos(shear), 0, 0],
                                [-np.sin(shear)
import unittest
import numpy as np
from spectralcluster import autotune
from spectralcluster import configs
from spectralcluster import constraint
from spectralcluster import laplacian
from spectralcluster import refinement
from spectralcluster import spectral_clusterer
from spectralcluster import utils
RefinementName = refinement.RefinementName
ThresholdType = refinement.ThresholdType
SymmetrizeType = refinement.SymmetrizeType
LaplacianType = laplacian.LaplacianType
ConstraintName = constraint.ConstraintName
IntegrationType = constraint.IntegrationType
EigenGapType = utils.EigenGapType
ICASSP2018_REFINEMENT_SEQUENCE = configs.ICASSP2018_REFINEMENT_SEQUENCE
class TestSpectralClusterer(unittest.TestCase):
"""Tests for the SpectralClusterer class."""
def setUp(self):
super().setUp()
pass
def test_6by2_matrix(self):
matrix = np.array([
[1.0, 0.0],
[1.1, 0.1],
[0.0, 1.0],
[0.1, 1.0],
[0.9, -0.1],
[0.0, 1.2],
])
refinement_options = refinement.RefinementOptions(
gaussian_blur_sigma=0,
p_percentile=0.95,
refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
clusterer = spectral_clusterer.SpectralClusterer(
refinement_options=refinement_options)
labels = clusterer.predict(matrix)
labels = utils.enforce_ordered_labels(labels)
expected = np.array([0, 0, 1, 1, 0, 1])
self.assertTrue(np.array_equal(expected, labels))
def test_1000by6_matrix(self):
matrix = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 400 +
[[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]] * 300 +
[[0.0, 0.0, 2.0, 0.0, 0.0, 0.0]] * 200 +
[[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]] * 100)
noisy = np.random.rand(1000, 6) * 2 - 1
matrix = matrix + noisy * 0.1
refinement_options = refinement.RefinementOptions(
gaussian_blur_sigma=0,
p_percentile=0.2,
refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
clusterer = spectral_clusterer.SpectralClusterer(
refinement_options=refinement_options, stop_eigenvalue=0.01)
labels = clusterer.predict(matrix)
labels = utils.enforce_ordered_labels(labels)
expected = np.array([0] * 400 + [1] * 300 + [2] * 200 + [3] * 100)
self.assertTrue(np.array_equal(expected, labels))
def test_6by2_matrix_eigengap_normalizeddiff(self):
matrix = np.array([
[1.0, 0.0],
[1.1, 0.1],
[0.0, 1.0],
[0.1, 1.0],
[0.9, -0.1],
[0.0, 1.2],
])
refinement_options = refinement.RefinementOptions(
gaussian_blur_sigma=0,
p_percentile=0.95,
refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
clusterer = spectral_clusterer.SpectralClusterer(
refinement_options=refinement_options,
eigengap_type=EigenGapType.NormalizedDiff)
labels = clusterer.predict(matrix)
labels = utils.enforce_ordered_labels(labels)
expected = np.array([0, 0, 1, 1, 0, 1])
self.assertTrue(np.array_equal(expected, labels))
def test_6by2_matrix_normalized_laplacian(self):
matrix = np.array([
[1.0, 0.0],
[1.1, 0.1],
[0.0, 1.0],
[0.1, 1.0],
[0.9, -0.1],
[0.0, 1.2],
])
refinement_sequence = []
refinement_options = refinement.RefinementOptions(
p_percentile=0.95, refinement_sequence=refinement_sequence)
clusterer = spectral_clusterer.SpectralClusterer(
max_clusters=2,
refinement_options=refinement_options,
laplacian_type=LaplacianType.GraphCut,
row_wise_renorm=True)
labels = clusterer.predict(matrix)
labels = utils.enforce_ordered_labels(labels)
expected = np.array([0, 0, 1, 1, 0, 1])
self.assertTrue(np.array_equal(expected, labels))
def test_1000by6_matrix_normalized_laplacian(self):
matrix = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 400 +
[[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]] * 300 +
[[0.0, 0.0, 2.0, 0.0, 0.0, 0.0]] * 200 +
[[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]] * 100)
noisy = np.random.rand(1000, 6) * 2 - 1
matrix = matrix + noisy * 0.1
refinement_sequence = []
refinement_options = refinement.RefinementOptions(
p_percentile=0.95, refinement_sequence=refinement_sequence)
clusterer = spectral_clusterer.SpectralClusterer(
max_clusters=4,
refinement_options=refinement_options,
laplacian_type=LaplacianType.GraphCut,
row_wise_renorm=True)
labels = clusterer.predict(matrix)
labels = utils.enforce_ordered_labels(labels)
expected = np.array([0] * 400 + [1] * 300 + [2] * 200 + [3] * 100)
self.assertTrue(np.array_equal(expected, labels))
def test_6by2_matrix_auto_tune(self):
matrix = np.array([
[1.0, 0.0],
[1.1, 0.1],
[0.0, 1.0],
[0.1, 1.0],
[0.9, -0.1],
[0.0, 1.2],
])
refinement_sequence = [RefinementName.RowWiseThreshold]
refinement_options = refinement.RefinementOptions(
thresholding_type=ThresholdType.Percentile,
refinement_sequence=refinement_sequence)
auto_tune = autotune.AutoTune(
p_percentile_min=0.60,
p_percentile_max=0.95,
init_search_step=0.05,
search_level=1)
clusterer = spectral_clusterer.SpectralClusterer(
max_clusters=2,
refinement_options=refinement_options,
autotune=auto_tune,
laplacian_type=LaplacianType.GraphCut,
row_wise_renorm=True)
labels = clusterer.predict(matrix)
labels = utils.enforce_ordered_labels(labels)
expected = np.array([0, 0, 1, 1, 0, 1])
    self.assertTrue(np.array_equal(expected, labels))
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests and integration tests for the ``default.qubit.tf`` device.
"""
from itertools import product
import numpy as np
import pytest
tf = pytest.importorskip("tensorflow", minversion="2.0")
import pennylane as qml
from pennylane.wires import Wires
from pennylane.devices.default_qubit_tf import DefaultQubitTF
from gate_data import (
I,
X,
Y,
Z,
H,
S,
T,
CNOT,
CZ,
SWAP,
CNOT,
Toffoli,
CSWAP,
Rphi,
Rotx,
Roty,
Rotz,
Rot3,
CRotx,
CRoty,
CRotz,
CRot3,
MultiRZ1,
MultiRZ2,
)
np.random.seed(42)
#####################################################
# Test matrices
#####################################################
U = np.array(
[
[0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],
[-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],
]
)
U2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / np.sqrt(3)
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
#####################################################
# Define standard qubit operations
#####################################################
single_qubit = [(qml.S, S), (qml.T, T), (qml.PauliX, X), (qml.PauliY, Y), (qml.PauliZ, Z), (qml.Hadamard, H)]
single_qubit_param = [(qml.PhaseShift, Rphi), (qml.RX, Rotx), (qml.RY, Roty), (qml.RZ, Rotz), (qml.MultiRZ, MultiRZ1)]
two_qubit = [(qml.CZ, CZ), (qml.CNOT, CNOT), (qml.SWAP, SWAP)]
two_qubit_param = [(qml.CRX, CRotx), (qml.CRY, CRoty), (qml.CRZ, CRotz), (qml.MultiRZ, MultiRZ2)]
three_qubit = [(qml.Toffoli, Toffoli), (qml.CSWAP, CSWAP)]
#####################################################
# Fixtures
#####################################################
@pytest.fixture(scope="session")
def init_state():
"""Generates a random initial state"""
def _init_state(n):
"""random initial state"""
state = np.random.random([2 ** n]) + np.random.random([2 ** n]) * 1j
state /= np.linalg.norm(state)
return state
return _init_state
#####################################################
# Device-level integration tests
#####################################################
class TestApply:
"""Test application of PennyLane operations."""
def test_basis_state(self, tol):
"""Test basis state initialization"""
dev = DefaultQubitTF(wires=4)
state = np.array([0, 0, 1, 0])
dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])
res = dev.state
expected = np.zeros([2 ** 4])
expected[np.ravel_multi_index(state, [2] * 4)] = 1
assert isinstance(res, tf.Tensor)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_invalid_basis_state_length(self, tol):
"""Test that an exception is raised if the basis state is the wrong size"""
dev = DefaultQubitTF(wires=4)
state = np.array([0, 0, 1, 0])
with pytest.raises(
ValueError, match=r"BasisState parameter and wires must be of equal length"
):
dev.apply([qml.BasisState(state, wires=[0, 1, 2])])
def test_invalid_basis_state(self, tol):
"""Test that an exception is raised if the basis state is invalid"""
dev = DefaultQubitTF(wires=4)
state = np.array([0, 0, 1, 2])
with pytest.raises(
ValueError, match=r"BasisState parameter must consist of 0 or 1 integers"
):
dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])
def test_qubit_state_vector(self, init_state, tol):
"""Test qubit state vector application"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
dev.apply([qml.QubitStateVector(state, wires=[0])])
res = dev.state
expected = state
assert isinstance(res, tf.Tensor)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_full_subsystem_statevector(self, mocker):
"""Test applying a state vector to the full subsystem"""
dev = DefaultQubitTF(wires=['a', 'b', 'c'])
state = tf.constant([1, 0, 0, 0, 1, 0, 1, 1], dtype=tf.complex128) / 2.
state_wires = qml.wires.Wires(['a', 'b', 'c'])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
assert np.all(tf.reshape(dev._state, [-1]) == state)
spy.assert_not_called()
def test_partial_subsystem_statevector(self, mocker):
"""Test applying a state vector to a subset of wires of the full subsystem"""
dev = DefaultQubitTF(wires=['a', 'b', 'c'])
state = tf.constant([1, 0, 1, 0], dtype=tf.complex128) / np.sqrt(2.)
state_wires = qml.wires.Wires(['a', 'c'])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
res = tf.reshape(tf.reduce_sum(dev._state, axis=(1,)), [-1])
assert np.all(res == state)
spy.assert_called()
def test_invalid_qubit_state_vector_size(self):
"""Test that an exception is raised if the state
vector is the wrong size"""
dev = DefaultQubitTF(wires=2)
state = np.array([0, 1])
with pytest.raises(ValueError, match=r"State vector must be of length 2\*\*wires"):
dev.apply([qml.QubitStateVector(state, wires=[0, 1])])
def test_invalid_qubit_state_vector_norm(self):
"""Test that an exception is raised if the state
vector is not normalized"""
dev = DefaultQubitTF(wires=2)
state = np.array([0, 12])
with pytest.raises(ValueError, match=r"Sum of amplitudes-squared does not equal one"):
dev.apply([qml.QubitStateVector(state, wires=[0])])
def test_invalid_state_prep(self):
"""Test that an exception is raised if a state preparation is not the
first operation in the circuit."""
dev = DefaultQubitTF(wires=2)
state = np.array([0, 12])
with pytest.raises(
qml.DeviceError,
match=r"cannot be used after other Operations have already been applied",
):
dev.apply([qml.PauliZ(0), qml.QubitStateVector(state, wires=[0])])
@pytest.mark.parametrize("op,mat", single_qubit)
def test_single_qubit_no_parameters(self, init_state, op, mat, tol):
"""Test non-parametrized single qubit operations"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [op(wires=0)]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert isinstance(res, tf.Tensor)
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", single_qubit_param)
def test_single_qubit_parameters(self, init_state, op, func, theta, tol):
"""Test parametrized single qubit operations"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [op(theta, wires=0)]
dev.apply(queue)
res = dev.state
expected = func(theta) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_rotation(self, init_state, tol):
"""Test three axis rotation gate"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [qml.Rot(a, b, c, wires=0)]
dev.apply(queue)
res = dev.state
expected = Rot3(a, b, c) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_controlled_rotation(self, init_state, tol):
"""Test three axis controlled-rotation gate"""
dev = DefaultQubitTF(wires=2)
state = init_state(2)
a = 0.542
b = 1.3432
c = -0.654
queue = [qml.QubitStateVector(state, wires=[0, 1])]
queue += [qml.CRot(a, b, c, wires=[0, 1])]
dev.apply(queue)
res = dev.state
expected = CRot3(a, b, c) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_inverse_operation(self, init_state, tol):
"""Test that the inverse of an operation is correctly applied"""
"""Test three axis rotation gate"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [qml.Rot(a, b, c, wires=0).inv()]
dev.apply(queue)
res = dev.state
expected = np.linalg.inv(Rot3(a, b, c)) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("op,mat", two_qubit)
def test_two_qubit_no_parameters(self, init_state, op, mat, tol):
"""Test non-parametrized two qubit operations"""
dev = DefaultQubitTF(wires=2)
state = init_state(2)
queue = [qml.QubitStateVector(state, wires=[0, 1])]
queue += [op(wires=[0, 1])]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("mat", [U, U2])
def test_qubit_unitary(self, init_state, mat, tol):
"""Test application of arbitrary qubit unitaries"""
N = int(np.log2(len(mat)))
dev = DefaultQubitTF(wires=N)
state = init_state(N)
queue = [qml.QubitStateVector(state, wires=range(N))]
queue += [qml.QubitUnitary(mat, wires=range(N))]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("op, mat", three_qubit)
def test_three_qubit_no_parameters(self, init_state, op, mat, tol):
"""Test non-parametrized three qubit operations"""
dev = DefaultQubitTF(wires=3)
state = init_state(3)
queue = [qml.QubitStateVector(state, wires=[0, 1, 2])]
queue += [op(wires=[0, 1, 2])]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", two_qubit_param)
def test_two_qubit_parameters(self, init_state, op, func, theta, tol):
"""Test two qubit parametrized operations"""
dev = DefaultQubitTF(wires=2)
state = init_state(2)
queue = [qml.QubitStateVector(state, wires=[0, 1])]
queue += [op(theta, wires=[0, 1])]
dev.apply(queue)
res = dev.state
expected = func(theta) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_apply_ops_not_supported(self, mocker, monkeypatch):
"""Test that when a version of TensorFlow before 2.3.0 is used, the _apply_ops dictionary is
empty and application of a CNOT gate is performed using _apply_unitary_einsum"""
with monkeypatch.context() as m:
m.setattr("pennylane.devices.default_qubit_tf.SUPPORTS_APPLY_OPS", False)
dev = DefaultQubitTF(wires=3)
assert dev._apply_ops == {}
spy = mocker.spy(DefaultQubitTF, "_apply_unitary_einsum")
queue = [qml.CNOT(wires=[1, 2])]
dev.apply(queue)
spy.assert_called_once()
def test_apply_ops_above_8_wires(self, mocker):
"""Test that when 9 wires are used, the _apply_ops dictionary is empty and application of a
CNOT gate is performed using _apply_unitary_einsum"""
dev = DefaultQubitTF(wires=9)
assert dev._apply_ops == {}
spy = mocker.spy(DefaultQubitTF, "_apply_unitary_einsum")
queue = [qml.CNOT(wires=[1, 2])]
dev.apply(queue)
spy.assert_called_once()
@pytest.mark.xfail(
raises=tf.errors.UnimplementedError,
reason="Slicing is not supported for more than 8 wires",
strict=True,
)
def test_apply_ops_above_8_wires_using_special(self):
"""Test that special apply methods that involve slicing function correctly when using 9
wires"""
dev = DefaultQubitTF(wires=9)
dev._apply_ops = {"CNOT": dev._apply_cnot}
queue = [qml.CNOT(wires=[1, 2])]
dev.apply(queue)
THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
VARPHI = np.linspace(0.02, 1, 3)
@pytest.mark.parametrize("theta, phi, varphi", list(zip(THETA, PHI, VARPHI)))
class TestExpval:
"""Test expectation values"""
# test data; each tuple is of the form (GATE, OBSERVABLE, EXPECTED)
single_wire_expval_test_data = [
(qml.RX, qml.Identity, lambda t, p: np.array([1, 1])),
(qml.RX, qml.PauliZ, lambda t, p: np.array([np.cos(t), np.cos(t) * np.cos(p)])),
(qml.RY, qml.PauliX, lambda t, p: np.array([np.sin(t) * np.sin(p), np.sin(p)])),
(qml.RX, qml.PauliY, lambda t, p: np.array([0, -np.cos(t) * np.sin(p)])),
(
qml.RY,
qml.Hadamard,
lambda t, p: np.array(
[np.sin(t) * np.sin(p) + np.cos(t), np.cos(t) * np.cos(p) + np.sin(p)]
)
/ np.sqrt(2),
),
]
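    # Worked check for one entry above (comment added for clarity): in the
    # (qml.RX, qml.PauliZ) row the circuit is RX(t) on wire 0, RX(p) on wire 1,
    # then CNOT(0, 1). <Z_0> is unchanged by the CNOT (Z on the control commutes
    # with it) and RX(t)|0> gives <Z> = cos(t); in the Heisenberg picture the CNOT
    # maps Z_1 -> Z_0 Z_1, so <Z_1> = cos(t)*cos(p), matching
    # np.array([np.cos(t), np.cos(t) * np.cos(p)]).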
@pytest.mark.parametrize("gate,obs,expected", single_wire_expval_test_data)
def test_single_wire_expectation(self, gate, obs, expected, theta, phi, varphi, tol):
"""Test that identity expectation value (i.e. the trace) is 1"""
dev = DefaultQubitTF(wires=2)
queue = [gate(theta, wires=0), gate(phi, wires=1), qml.CNOT(wires=[0, 1])]
observables = [obs(wires=[i]) for i in range(2)]
for i in range(len(observables)):
observables[i].return_type = qml.operation.Expectation
res = dev.execute(qml.CircuitGraph(queue + observables, {}, Wires([0, 1, 2])))
assert np.allclose(res, expected(theta, phi), atol=tol, rtol=0)
def test_hermitian_expectation(self, theta, phi, varphi, tol):
"""Test that arbitrary Hermitian expectation values are correct"""
dev = DefaultQubitTF(wires=2)
queue = [qml.RY(theta, wires=0), qml.RY(phi, wires=1), qml.CNOT(wires=[0, 1])]
observables = [qml.Hermitian(A, wires=[i]) for i in range(2)]
for i in range(len(observables)):
observables[i].return_type = qml.operation.Expectation
res = dev.execute(qml.CircuitGraph(queue + observables, {}, Wires([0, 1])))
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
ev1 = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
ev2 = ((a - d) * np.cos(theta) * np.cos(phi) + 2 * re_b * np.sin(phi) + a + d) / 2
expected = np.array([ev1, ev2])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_multi_mode_hermitian_expectation(self, theta, phi, varphi, tol):
"""Test that arbitrary multi-mode Hermitian expectation values are correct"""
A = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
dev = DefaultQubitTF(wires=2)
queue = [qml.RY(theta, wires=0), qml.RY(phi, wires=1), qml.CNOT(wires=[0, 1])]
observables = [qml.Hermitian(A, wires=[0, 1])]
for i in range(len(observables)):
observables[i].return_type = qml.operation.Expectation
res = dev.execute(qml.CircuitGraph(queue + observables, {}, Wires([0, 1])))
# below is the analytic expectation value for this circuit with arbitrary
# Hermitian observable A
expected = 0.5 * (
6 * np.cos(theta) * np.sin(phi)
- np.sin(theta) * (8 * np.sin(phi) + 7 * np.cos(phi) + 3)
- 2 * np.sin(phi)
- 6 * np.cos(phi)
- 6
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_paulix_pauliy(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
dev = qml.device("default.qubit.tf", wires=3)
dev.reset()
obs = qml.PauliX(0) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_pauliz_identity(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliZ and Identity works correctly"""
dev = qml.device("default.qubit.tf", wires=3)
dev.reset()
obs = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = np.cos(varphi)*np.cos(phi)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_pauliz_hadamard(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
dev = qml.device("default.qubit.tf", wires=3)
obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)
dev.reset()
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian(self, theta, phi, varphi, tol):
"""Test that a tensor product involving qml.Hermitian works correctly"""
dev = qml.device("default.qubit.tf", wires=3)
dev.reset()
A = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2])
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = 0.5 * (
-6 * np.cos(theta) * (np.cos(varphi) + 1)
- 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
+ 3 * np.cos(varphi) * np.sin(phi)
+ np.sin(phi)
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian_hermitian(self, theta, phi, varphi, tol):
"""Test that a tensor product involving two Hermitian matrices works correctly"""
dev = qml.device("default.qubit.tf", wires=3)
A1 = np.array([[1, 2],
[2, 4]])
A2 = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.Hermitian(A1, wires=[0]) @ qml.Hermitian(A2, wires=[1, 2])
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = 0.25 * (
-30
+ 4 * np.cos(phi) * np.sin(theta)
+ 3 * np.cos(varphi) * (-10 + 4 * np.cos(phi) * np.sin(theta) - 3 * np.sin(phi))
- 3 * np.sin(phi)
- 2 * (5 + np.cos(phi) * (6 + 4 * np.sin(theta)) + (-3 + 8 * np.sin(theta)) * np.sin(phi))
* np.sin(varphi)
+ np.cos(theta)
* (
18
+ 5 * np.sin(phi)
+ 3 * np.cos(varphi) * (6 + 5 * np.sin(phi))
+ 2 * (3 + 10 * np.cos(phi) - 5 * np.sin(phi)) * np.sin(varphi)
)
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian_identity_expectation(self, theta, phi, varphi, tol):
"""Test that a tensor product involving an Hermitian matrix and the identity works correctly"""
dev = qml.device("default.qubit.tf", wires=2)
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
obs = qml.Hermitian(A, wires=[0]) @ qml.Identity(wires=[1])
dev.apply(
[
qml.RY(theta, wires=[0]),
qml.RY(phi, wires=[1]),
qml.CNOT(wires=[0, 1])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
expected = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian_two_wires_identity_expectation(self, theta, phi, varphi, tol):
"""Test that a tensor product involving an Hermitian matrix for two wires and the identity works correctly"""
dev = qml.device("default.qubit.tf", wires=3, analytic=True)
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
Identity = np.array([[1, 0],[0, 1]])
H = np.kron(np.kron(Identity,Identity), A)
obs = qml.Hermitian(H, wires=[2, 1, 0])
dev.apply(
[
qml.RY(theta, wires=[0]),
qml.RY(phi, wires=[1]),
qml.CNOT(wires=[0, 1])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
        expected = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
# Functions for converting ADCP velocities in beam coordinates to instrument- or Earth-coordinates.
# Direct translation of functions in the 'ADCPtools' MATLAB
# package (https://github.com/apaloczy/ADCPtools).
import numpy as np
from scipy.interpolate import interp1d
from .utils import sind, cosd, near, nearfl
######################
#### 4-beam Janus ####
######################
def janus2xyz(b1, b2, b3, b4, theta, r=None, ptch=None, roll=None, binmaptype=None, use3beamsol=True, verbose=True):
"""
USAGE
-----
vx, vy, vz = janus2xyz(b1, b2, b3, b4, theta, r=None, ptch=None, roll=None, binmaptype=None, use3beamsol=True, verbose=True)
theta, ptch, roll must be in RADIANS.
"""
Nz, Nt = b1.shape
if binmaptype is not None:
assert r is not None, "Must provide r if using bin-mapping."
assert ptch is not None, "Must provide pitch if using bin-mapping."
assert roll is not None, "Must provide roll if using bin-mapping."
if verbose:
print('Mapping bins to horizontal planes using *%s* interpolation.'%binmaptype)
b1, b2, b3, b4 = binmap(b1, b2, b3, b4, r, theta, ptch, roll, how=binmaptype)
else:
if verbose:
print('Bin-mapping NOT applied.')
if use3beamsol:
b1, b2, b3, b4 = janus3beamsol(b1, b2, b3, b4)
b1, b2 = b1[..., np.newaxis], b2[..., np.newaxis]
b3, b4 = b3[..., np.newaxis], b4[..., np.newaxis]
B = np.dstack((b1, b2, b3, b4))
uvfac = 1/(2*np.sin(theta))
    wfac = 1/(4*np.cos(theta))
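    # For reference (standard 4-beam Janus geometry; exact sign conventions vary by
    # instrument, so treat this as a sketch rather than the definitive mapping):
    # vx ~ (b1 - b2)*uvfac, vy ~ (b4 - b3)*uvfac, vz ~ -(b1 + b2 + b3 + b4)*wfac,
    # which is what the uvfac and wfac factors above are used for.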
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.utils import macos_version
from coremltools.models.neural_network import flexible_shape_utils
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
        This utility function is used to validate random-distribution layers.
        It compares the first `num_moments` moments of the predicted and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
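    # Sanity check for the L2-normalize expectation above: the input values are
    # [0, 1, 2, 3], whose L2 norm is sqrt(0 + 1 + 4 + 9) = sqrt(14), so the
    # expected output is simply the input divided by sqrt(14).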
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = | np.random.rand(3, 3, 10, 20) | numpy.random.rand |
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pandas as pd
from fanalysis.pca import PCA
class TestPca(unittest.TestCase):
""" Unit tests for the PCA class
"""
def test_pca(self):
""" Test for the fit_transfom operation - Comparison with the
R FactoMiner output
"""
for su in [True, False]:
self._fit_transform_comparison(std_unit = su, n_components = None)
for i in np.arange(-10, 10, 0.5):
self._fit_transform_comparison(std_unit = su, n_components = i)
def test_eigen_(self):
""" Test for the eigen values - Comparison with the R FactoMiner
output
"""
for su in [True, False]:
pca_eig = "pca_eig_scale_unit_true.txt" if su \
else "pca_eig_scale_unit_false.txt"
self._X_Y_comparison("eig_", pca_eig, std_unit = su,
n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("eig_", pca_eig, std_unit = su,
n_components = i)
def test_row_coord_(self):
""" Test for the rows coordinates - Comparison with the
        R FactoMineR output
"""
for su in [True, False]:
pca_row_coord = "pca_row_coord_scale_unit_true.txt" if su \
else "pca_row_coord_scale_unit_false.txt"
self._X_Y_comparison("row_coord_", pca_row_coord, std_unit = su,
n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("row_coord_", pca_row_coord,
std_unit = su, n_components = i)
def test_row_contrib_(self):
""" Test for the rows contributions - Comparison with the
        R FactoMineR output
"""
for su in [True, False]:
pca_row_contrib = "pca_row_contrib_scale_unit_true.txt" if su \
else "pca_row_contrib_scale_unit_false.txt"
self._X_Y_comparison("row_contrib_", pca_row_contrib,
std_unit = su, n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("row_contrib_", pca_row_contrib,
std_unit = su, n_components = i)
def test_row_cos2_(self):
""" Test for the rows cos2 - Comparison with the R FactoMiner
output
"""
for su in [True, False]:
pca_row_cos2 = "pca_row_cos2_scale_unit_true.txt" if su \
else "pca_row_cos2_scale_unit_false.txt"
self._X_Y_comparison("row_cos2_", pca_row_cos2, std_unit = su,
n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("row_cos2_", pca_row_cos2, std_unit = su,
n_components = i)
def test_col_coord_(self):
""" Test for the columns coordinates - Comparison with the
        R FactoMineR output
"""
for su in [True, False]:
pca_col_coord = "pca_col_coord_scale_unit_true.txt" if su \
else "pca_col_coord_scale_unit_false.txt"
self._X_Y_comparison("col_coord_", pca_col_coord, std_unit = su,
n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("col_coord_", pca_col_coord,
std_unit = su, n_components = i)
def test_col_contrib_(self):
""" Test for the columns contributions - Comparison with the
        R FactoMineR output
"""
for su in [True, False]:
pca_col_contrib = "pca_col_contrib_scale_unit_true.txt" if su \
else "pca_col_contrib_scale_unit_false.txt"
self._X_Y_comparison("col_contrib_", pca_col_contrib,
std_unit = su, n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("col_contrib_", pca_col_contrib,
std_unit = su, n_components = i)
def test_col_cos2_(self):
""" Test for the columns cos2 - Comparison with the R FactoMiner
output
"""
for su in [True, False]:
pca_col_cos2 = "pca_col_cos2_scale_unit_true.txt" if su \
else "pca_col_cos2_scale_unit_false.txt"
self._X_Y_comparison("col_cos2_", pca_col_cos2, std_unit = su,
n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("col_cos2_", pca_col_cos2, std_unit = su,
n_components = i)
def test_col_cor_(self):
""" Test for the columns correlations - Comparison with the
        R FactoMineR output
"""
for su in [True, False]:
pca_col_cor = "pca_col_cor_scale_unit_true.txt" if su \
else "pca_col_cor_scale_unit_false.txt"
self._X_Y_comparison("col_cor_", pca_col_cor, std_unit = su,
n_components = None)
for i in np.arange(-10, 10, 0.5):
self._X_Y_comparison("col_cor_", pca_col_cor, std_unit = su,
n_components = i)
def test_row_topandas(self):
""" Test for the row_topandas method - Comparison with the
        R FactoMineR output
"""
for su in [True, False]:
self._row_topandas_comparison(std_unit = su, n_components = None,
row_labels = False)
self._row_topandas_comparison(std_unit = su, n_components = None,
row_labels = True)
for i in np.arange(-10, 10, 0.5):
self._row_topandas_comparison(std_unit = su, n_components = i,
row_labels = False)
self._row_topandas_comparison(std_unit = su, n_components = i,
row_labels = True)
def test_col_topandas(self):
""" Test for the col_topandas method - Comparison with the
        R FactoMineR output
"""
for su in [True, False]:
self._col_topandas_comparison(std_unit = su, n_components = None,
col_labels = False)
self._col_topandas_comparison(std_unit = su, n_components = None,
col_labels = True)
for i in np.arange(-10, 10, 0.5):
self._col_topandas_comparison(std_unit = su, n_components = i,
col_labels = False)
self._col_topandas_comparison(std_unit = su, n_components = i,
col_labels = True)
def _fit(self, std_unit, n_components):
""" This function fits the model to the data """
df = pd.read_table("fanalysis/tests/pca_data.txt", header=0,
index_col=0, delimiter="\t")
M = df.as_matrix()
pca = PCA(std_unit = std_unit, n_components = n_components)
pca.fit(M)
return pca
def _adjust_n_components(self, n_components, eigen_values):
""" This function sets relevant values for n_components """
if (n_components is None):
n_components = eigen_values.shape[1]
elif (n_components >= 0) and (n_components < 1):
i = 0
threshold = 100 * n_components
while eigen_values[2, i] < threshold:
i = i + 1
n_components = i
elif ((n_components >= 1)
and (n_components <= eigen_values.shape[1])
and (isinstance(n_components, int))):
n_components = int(np.trunc(n_components))
elif ((n_components >= 1)
and (n_components <= eigen_values.shape[1])
and (isinstance(n_components, float))):
n_components = int(np.floor(n_components))
else:
n_components = eigen_values.shape[1]
return n_components
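    # Illustrative sketch of the rule above (assuming the eig_ layout produced by
    # FactoMineR: row 0 eigenvalues, row 1 percentage of variance, row 2
    # cumulative percentage):
    #
    #   eig = np.array([[2.5, 1.0, 0.5],
    #                   [62.5, 25.0, 12.5],
    #                   [62.5, 87.5, 100.0]])
    #   self._adjust_n_components(None, eig)  # -> 3 (keep every component)
    #   self._adjust_n_components(0.8, eig)   # -> 1 (count of components whose
    #                                         #    cumulative variance is still below 80%)
    #   self._adjust_n_components(2, eig)     # -> 2 (explicit integer count)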
def _compute_Y(self, X, Y_temp, attr):
""" This function sets the signs of the coordinates to those of
the R FactoMineR output
"""
if ((attr == "row_coord_")
or (attr == "col_coord_")
or (attr == "col_cor_")):
x = X[1, :]
y = Y_temp[1, :]
z = x * y
for i in np.arange(0, z.shape[0]):
z[i] = 1 if z[i] >=0 else -1
return Y_temp * z.reshape(1, -1)
else:
return Y_temp
def _fit_transform_comparison(self, std_unit, n_components=None):
""" This function compares the result of the fit_transform
operation with the R FactoMineR output
"""
if n_components is None:
pca1 = PCA(std_unit = std_unit)
pca2 = PCA(std_unit = std_unit)
else:
pca1 = PCA(std_unit = std_unit, n_components = n_components)
pca2 = PCA(std_unit = std_unit, n_components = n_components)
if std_unit:
eigen_values = np.loadtxt("fanalysis/tests/" +
"pca_eig_scale_unit_true.txt",
delimiter=" ", dtype=float)
n_components = self._adjust_n_components(n_components,
eigen_values)
X = np.loadtxt("fanalysis/tests/" +
"pca_row_coord_scale_unit_true.txt",
delimiter=" ", dtype=float)[:, :n_components]
else:
eigen_values = np.loadtxt("fanalysis/tests/" +
"pca_eig_scale_unit_false.txt",
delimiter=" ", dtype=float)
n_components = self._adjust_n_components(n_components,
eigen_values)
X = np.loadtxt("fanalysis/tests/" +
"pca_row_coord_scale_unit_false.txt",
delimiter=" ", dtype=float)[:, :n_components]
df = pd.read_table("fanalysis/tests/pca_data.txt", header=0,
index_col=0, delimiter="\t")
M = df.as_matrix()
pca1.fit(M)
Y_temp_1 = pca1.transform(M)
Y1 = self._compute_Y(X, Y_temp_1, "row_coord_")
assert_array_almost_equal(X, Y1)
Y_temp_2 = pca2.fit_transform(M)
Y2 = self._compute_Y(X, Y_temp_2, "row_coord_")
assert_array_almost_equal(X, Y2)
def _X_Y_comparison(self, attr, test_file, std_unit, n_components=None):
""" This function compares the fitted values with the
        R FactoMineR output
"""
pca = self._fit(std_unit, n_components)
if std_unit:
eigen_values = np.loadtxt("fanalysis/tests/" +
"pca_eig_scale_unit_true.txt",
delimiter=" ", dtype=float)
#test_file += "scale_unit_true.txt"
else:
eigen_values = np.loadtxt("fanalysis/tests/" +
"pca_eig_scale_unit_false.txt",
delimiter=" ", dtype=float)
#test_file += "scale_unit_false.txt"
n_components = self._adjust_n_components(n_components, eigen_values)
X = np.loadtxt("fanalysis/tests/" + test_file, delimiter=" ",
dtype=float)[:, :n_components]
Y_temp = getattr(pca, attr)
Y = self._compute_Y(X, Y_temp, attr)
| assert_array_almost_equal(X, Y) | numpy.testing.assert_array_almost_equal |
#!/usr/bin/env python
"""
Description: Geometric median function implementation
Date Created: 15-MAY-2020
Last Modified: 17-MAY-2020
"""
from math import log
import numpy as np
from scipy.spatial.distance import cdist
from scipy.stats import multivariate_normal
from scipy.optimize import minimize
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
from warnings import warn
__author__ = "<NAME>"
def geometric_median(points: np.array, weights: np.array = None, convergence_method: str = 'vardi-zhang', convergence_threshold: float = 1e-5, iteration_limit: int = 1000, dist_measure: str = 'euclidean', scale_method=None, solver_method: str = None) -> np.array:
# Raise errors for improper inputs
# points errors
if type(points) is not np.ndarray:
raise TypeError(f"Type of points must be a numpy array; current type is {type(points)}")
if (np.issubdtype(points.dtype, np.integer) or np.issubdtype(points.dtype, np.floating)) is False:
raise TypeError(f"Datatype of points must be int or float; current datatype is {points.dtype}")
if len(points.shape) != 2:
raise ValueError(f"points must be a 2D array; currently shape is {points.shape}")
npoint, ndim = points.shape
if ndim < 1:
raise ValueError(f"value of ndim must be >= 1; currently value is {ndim}")
# weights errors
if weights is None:
weights = np.ones(npoint)
else:
if type(weights) is not np.ndarray:
raise TypeError(f"Type of weights must be a numpy array; current type is {type(points)}")
if (np.issubdtype(weights.dtype, np.integer) or np.issubdtype(weights.dtype, np.floating)) is False:
raise TypeError(f"Datatype of points must be real numbers; current datatype is {points.dtype}")
if len(weights.shape) != 1:
raise ValueError(f"weights must be a 1D array; currently shape is {weights.shape}")
if weights.size != npoint:
raise ValueError(f"There must be the same number of weights as points; currently {weights.size} weights and {npoint} points")
if np.isclose(weights, 0.0).any():
raise ValueError("weights cannot contain any values == 0; recommend removing these points before calculation")
        if weights.sum() <= 0:
            raise ValueError("the sum of the weights must be > 0")
# method errors
valid_convergence_methods = {'minimize': minimize_algorithm, 'weiszfeld': weiszfeld_algorithm, 'vardi-zhang': vardi_zhang_algorithm, 'cohen-lee': cohen_lee_algorithm}
if convergence_method not in valid_convergence_methods:
raise ValueError(f"Invalid convergence method given: {convergence_method} not in {set(valid_convergence_methods.keys())}")
# convergence_threshold errors
if type(convergence_threshold) not in (int, float):
raise TypeError(f"Type of convergence_threshold must be int or float; currently of type {type(convergence_threshold)}")
if convergence_threshold <= 0:
raise ValueError(f"Value of convergence_threshold must be > 0; current value is {convergence_threshold}")
# iteration_limit errors
if type(iteration_limit) is not int:
raise TypeError(f"Type of iteration_limit must be int; current type is {type(iteration_limit)}")
if iteration_limit <= 0:
raise ValueError(f"Value of iteration_limit must be > 0; current value is {iteration_limit}")
# scalar_method errors
if scale_method is not None:
valid_scale_methods = {'min-max': MinMaxScaler, 'max-abs': MaxAbsScaler, 'standard': None}
if scale_method not in valid_scale_methods:
raise ValueError(f"Invalid scaling method given: {scale_method} not in {set(valid_scale_methods.keys())}")
if scale_method == 'standard':
if (weights < 0).any():
raise ValueError("weights cannot contain any values < 0 when using standard scaling")
points_mean = np.average(points, axis=0, weights=weights)
points_var = np.average((points - points_mean[: None]) ** 2, axis=0, weights=weights)
points_std = np.sqrt(points_var)
points = (points - points_mean) / points_std
else:
scaler = valid_scale_methods[scale_method]()
points = scaler.fit_transform(points)
# distance_measure errors
# Caught by scipy.spatial.distance.cdist
# solver_method errors
# Caught by scipy.optimize.minimize
    if convergence_method == 'minimize':
if solver_method == None:
solver_method = 'Nelder-Mead'
result = minimize_algorithm(points, weights, convergence_threshold, iteration_limit, dist_measure, solver_method)
if convergence_method == 'weiszfeld':
result = weiszfeld_algorithm(points, weights, convergence_threshold, iteration_limit, dist_measure)
elif convergence_method == 'vardi-zhang':
result = vardi_zhang_algorithm(points, weights, convergence_threshold, iteration_limit, dist_measure)
# Note: Cohen-lee only works for euclidian distance
# TODO: Find out which methods work for non-euclidean distance measures
# TODO: Consider converting the whole thing to just euclidian
elif convergence_method == 'cohen-lee':
result = cohen_lee_algorithm(points, weights, convergence_threshold, iteration_limit, dist_measure)
if scale_method is not None:
if scale_method == 'standard':
result = (result * points_std) + points_mean
else:
result = scaler.inverse_transform(result.reshape(1, -1))[0]
return result
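# Minimal usage sketch (illustrative values only):
#
#   pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [5.0, 5.0]])
#   center = geometric_median(pts, convergence_method='vardi-zhang')
#
# The returned point minimizes the (weighted) sum of distances to the inputs, so
# the outlier at (5, 5) pulls it far less than it would pull the arithmetic mean.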
def minimize_algorithm(points: np.ndarray, weights: np.array, convergence_threshold: float, iteration_limit: int, dist_measure: str, solver_method: str) -> np.array:
def calc_weighted_distance_score(curr_center):
return (weights * cdist(np.array([curr_center]), points, metric=dist_measure)).sum()
# Find the weighted centroid and set as the initial center
curr_center = (weights[:, None] * points).sum(axis=0) / weights.sum()
optimize_result = minimize(calc_weighted_distance_score, curr_center, method=solver_method, tol=convergence_threshold, options={'maxiter': iteration_limit})
if optimize_result.success:
return optimize_result.x
else:
raise ValueError(optimize_result.message)
def weiszfeld_algorithm(points: np.ndarray, weights: np.array, convergence_threshold: float, iteration_limit: int, dist_measure: str) -> np.array:
# Find the weighted centroid and set as the initial center
prev_center = (weights[:, None] * points).sum(axis=0) / weights.sum()
for _ in range(iteration_limit):
        # Check if the current center is the same as a point; Weiszfeld will stop converging
        # This is a flaw in the algorithm addressed in vardi-zhang
if np.isclose(points, prev_center).all(axis=1).any():
return prev_center
# Calculate the weighted distances from the current center to all points
weighted_distances = cdist(np.array([prev_center]), points, metric=dist_measure)[0] / weights
# Get new center prediction
curr_center = (points / weighted_distances[:, None]).sum(axis=0) / (1.0 / weighted_distances).sum()
# Calculate the distance between the current center and the previous center
move_dist = cdist(np.array([curr_center]), np.array([prev_center]), metric=dist_measure)[0, 0]
if move_dist < convergence_threshold:
return curr_center
prev_center = curr_center
raise ValueError(f"Weiszfelds algorithm not able to converge within {iteration_limit} iterations")
def vardi_zhang_algorithm(points: np.ndarray, weights: np.array, convergence_threshold: float, iteration_limit: int, dist_measure: str) -> np.array:
"""
    Adaptation of the Weiszfeld algorithm that deals with the current center getting 'stuck' on points in the dataset
file:///C:/Users/mattw/Downloads/SSRN-id1690502.pdf
:param points:
:param weights:
:param convergence_threshold:
:param iteration_limit:
:param dist_measure:
:return:
"""
# Find the weighted centroid and set as the initial center
prev_center = (weights[:, None] * points).sum(axis=0) / weights.sum()
for _ in range(iteration_limit):
# Calculate the weighted distances from the current center to all points
weighted_distances = cdist(np.array([prev_center]), points, metric=dist_measure)[0] / weights
# Narrow to only the non-zero weighted distances
non_zero = ~np.isclose(weighted_distances, 0.0)
weighted_distances = weighted_distances[non_zero]
# Implement the process detailed in "A comparison of algorithms for multivariate L1-median"
T = (points[non_zero] / weighted_distances[:, None]).sum(axis=0) / (1.0 / weighted_distances[:, None]).sum()
eta = 1 if np.isclose(points, prev_center).all(axis=1).any() else 0
R = ((points[non_zero] - prev_center) / weighted_distances[:, None]).sum(axis=0)
diff_dist = cdist(np.array([np.zeros_like(R)]), np.array([R]), metric=dist_measure)[0, 0]
if np.isclose(diff_dist, 0.0):
gamma = 1
else:
gamma = min(1, eta / diff_dist)
curr_center = (1 - gamma) * T + gamma * prev_center
# Calculate the distance between the current center and the previous center
move_dist = cdist(np.array([curr_center]), np.array([prev_center]), metric=dist_measure)[0][0]
if move_dist < convergence_threshold:
return curr_center
prev_center = curr_center
raise ValueError(f"Vardi-Zhang algorithm not able to converge within {iteration_limit} iterations")
def cohen_lee_algorithm(points: np.ndarray, weights: np.ndarray, convergence_threshold: float, iteration_limit: int, dist_measure: str) -> np.array:
return accurate_median(points, convergence_threshold)
def accurate_median(points: np.ndarray, convergence_threshold: float):
npoint, ndim = points.shape
x0 = points.mean(axis=0)
f_star_hat = cdist(np.array([x0]), points).sum()
t_i = calc_t_i(1, f_star_hat)
conv_thresh_c = 1 / (10 ** 15 * npoint ** 3 * t_i ** 9 * f_star_hat ** 3)
x_i = line_search(x0, t_i, t_i, 0, conv_thresh_c)
    for i in range(1, int(1000 * log(3000 * npoint / convergence_threshold))):
conv_thresh_v = 1 / (10 ** 8 * npoint ** 2 * t_i ** 2 * f_star_hat ** 2)
lambda_i, u_i = approx_min_eig(x_i, t_i, conv_thresh_v)
conv_thresh_c = 1 / (10 ** 15 * npoint ** 3 * t_i ** 3 * f_star_hat ** 3)
t_i1 = calc_t_i(i + 1, f_star_hat)
x_i = line_search(x_i, t_i, t_i1, u_i, conv_thresh_c)
t_i = t_i1
return x_i
def calc_t_i(i, f_star_hat):
return (1 + 1/600) ** (i - 1) / (400 * f_star_hat)
def approx_min_eig(point: np.ndarray, points: np.ndarray, t: float, convergence_threshold: float):
for i, curr_point in enumerate(points):
t ** 4 * (point - curr_point) * (point - curr_point).T
A = t ** 4 * (-points + point) * (-points + point).T / ((1 + g_t))
pass
def power_method(A, k):
g = A.shape[0]
x = multivariate_normal( | np.array([0, 1]) | numpy.array |
# Objective: learn a Word2Vec model, then build a sentence embedding based on a weighted average of word embeddings.
# References:
# [1] <NAME>, <NAME>, <NAME>, "A Simple but Tough-to-Beat Baseline for Sentence Embeddings", 2016.
# [2] <NAME>, <NAME>, All-but-the-Top: Simple and Effective Postprocessing for Word Representations, 2018.
import logging
import math
import multiprocessing
import random
import numpy as np
import spacy
from gensim.corpora import Dictionary
from gensim.models import Word2Vec
from SIF_embedding import remove_pc
from benchmark_utils import load_benchmarked_app_ids, print_ranking
from hard_coded_ground_truth import compute_retrieval_score, plot_retrieval_scores
from sentence_models import filter_out_words_not_in_vocabulary
from steam_spy_based_ground_truth import compute_retrieval_score_based_on_sharing_genres
from steam_spy_based_ground_truth import compute_retrieval_score_based_on_sharing_tags
from universal_sentence_encoder import perform_knn_search_with_app_ids_as_input
from utils import load_tokens, load_game_names
def retrieve_similar_store_descriptions(compute_from_scratch=True,
use_unit_vectors=False,
alpha=1e-3, # in SIF weighting scheme, parameter in the range [3e-5, 3e-3]
num_removed_components_for_sentence_vectors=0, # in SIF weighting scheme
pre_process_word_vectors=False,
num_removed_components_for_word_vectors=0,
count_words_out_of_vocabulary=True,
use_idf_weights=True,
shuffle_corpus=True,
use_glove_with_spacy=True,
use_cosine_similarity=True,
num_neighbors=10,
no_below=5, # only relevant with Word2Vec
no_above=0.5, # only relevant with Word2Vec
only_print_banners=True):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
game_names, _ = load_game_names(include_genres=False, include_categories=False)
steam_tokens = load_tokens()
documents = list(steam_tokens.values())
if shuffle_corpus:
# Useful for Doc2Vec in 'doc2vec_model.py'. It might be useful for other methods.
random.shuffle(documents)
if compute_from_scratch:
if not use_glove_with_spacy:
# Use self-trained Word2Vec vectors
dct = Dictionary(documents)
print('Dictionary size (before trimming): {}'.format(len(dct)))
dct.filter_extremes(no_below=no_below, no_above=no_above)
print('Dictionary size (after trimming): {}'.format(len(dct)))
model = Word2Vec(documents, workers=multiprocessing.cpu_count())
wv = model.wv
else:
# Use pre-trained GloVe vectors loaded from spaCy
# Reference: https://spacy.io/models/en#en_vectors_web_lg
spacy_model_name = 'en_vectors_web_lg' # either 'en_core_web_lg' or 'en_vectors_web_lg'
nlp = spacy.load(spacy_model_name)
wv = nlp.vocab
if pre_process_word_vectors:
# <NAME>, <NAME>, All-but-the-Top: Simple and Effective Postprocessing for Word Representations,
# in: ICLR 2018 conference.
# Reference: https://openreview.net/forum?id=HkuGJ3kCb
if use_glove_with_spacy:
wv.vectors.data -= np.array(wv.vectors.data).mean(axis=0)
if num_removed_components_for_word_vectors > 0:
wv.vectors.data = remove_pc(wv.vectors.data, npc=num_removed_components_for_word_vectors)
else:
wv.vectors -= np.array(wv.vectors).mean(axis=0)
if num_removed_components_for_word_vectors > 0:
wv.vectors = remove_pc(wv.vectors, npc=num_removed_components_for_word_vectors)
wv.init_sims()
if use_unit_vectors and not use_glove_with_spacy:
# Pre-computations of unit word vectors, which replace the unnormalized word vectors. A priori not required
# here, because another part of the code takes care of it. A fortiori not required when using spaCy.
wv.init_sims(replace=True) # TODO IMPORTANT choose whether to normalize vectors
if not use_glove_with_spacy:
index2word_set = set(wv.index2word)
else:
index2word_set = None
num_games = len(steam_tokens)
word_counter = {}
document_per_word_counter = {}
counter = 0
for app_id in steam_tokens:
counter += 1
if (counter % 1000) == 0:
print('[{}/{}] appID = {} ({})'.format(counter, num_games, app_id, game_names[app_id]))
reference_sentence = steam_tokens[app_id]
if not count_words_out_of_vocabulary:
# This has an impact on the value of 'total_counter'.
reference_sentence = filter_out_words_not_in_vocabulary(reference_sentence, index2word_set, wv)
for word in reference_sentence:
try:
word_counter[word] += 1
except KeyError:
word_counter[word] = 1
for word in set(reference_sentence):
try:
document_per_word_counter[word] += 1
except KeyError:
document_per_word_counter[word] = 1
total_counter = sum(word_counter.values())
# Inverse Document Frequency (IDF)
idf = {}
for word in document_per_word_counter:
idf[word] = math.log((1 + num_games) / (1 + document_per_word_counter[word]))
# Word frequency. Caveat: over the whole corpus!
word_frequency = dict()
for word in word_counter:
word_frequency[word] = word_counter[word] / total_counter
sentence_vector = {}
if not use_glove_with_spacy:
word_vector_length = wv.vector_size
else:
word_vector_length = wv.vectors_length
X = np.zeros([num_games, word_vector_length])
counter = 0
for (i, app_id) in enumerate(steam_tokens.keys()):
counter += 1
if (counter % 1000) == 0:
print('[{}/{}] appID = {} ({})'.format(counter, num_games, app_id, game_names[app_id]))
reference_sentence = steam_tokens[app_id]
num_words_in_reference_sentence = len(reference_sentence)
reference_sentence = filter_out_words_not_in_vocabulary(reference_sentence, index2word_set, wv)
if not count_words_out_of_vocabulary:
# NB: Out-of-vocabulary words are not counted in https://stackoverflow.com/a/35092200
num_words_in_reference_sentence = len(reference_sentence)
weighted_vector = np.zeros(word_vector_length)
for word in reference_sentence:
if use_idf_weights:
weight = idf[word]
else:
weight = (alpha / (alpha + word_frequency[word]))
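                # Reference [1]'s SIF weighting: a word with corpus frequency p(w)
                # contributes with weight a / (a + p(w)), so frequent words are
                # down-weighted relative to rare ones; the IDF branch above plays
                # the same role when use_idf_weights is True.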
# TODO IMPORTANT Why use the normalized word vectors instead of the raw word vectors?
if not use_glove_with_spacy:
if use_unit_vectors:
# Reference: https://github.com/RaRe-Technologies/movie-plots-by-genre
word_vector = wv.vectors_norm[wv.vocab[word].index]
else:
word_vector = wv.vectors[wv.vocab[word].index]
else:
word_vector = wv.get_vector(word)
if use_unit_vectors:
word_vector_norm = wv[word].vector_norm
if word_vector_norm > 0:
word_vector = word_vector / word_vector_norm
weighted_vector += weight * word_vector
if len(reference_sentence) > 0:
sentence_vector[app_id] = weighted_vector / num_words_in_reference_sentence
else:
sentence_vector[app_id] = weighted_vector
X[i, :] = sentence_vector[app_id]
# Reference: https://stackoverflow.com/a/11620982
X = np.where( | np.isfinite(X) | numpy.isfinite |
import numpy as np
import itertools
from enterprise.signals import signal_base
from enterprise.signals import parameter
from enterprise.signals import utils
from scipy.stats import cosine
from scipy.stats import uniform
from astropy import units as u
from astropy.coordinates import SkyCoord
def BasisCommonGP(priorFunction, basisFunction, orfFunction, coefficients=False, combine=True, name=""):
class BasisCommonGP(signal_base.CommonSignal):
signal_type = "common basis"
signal_name = "common"
signal_id = name
basis_combine = combine
_orf = orfFunction(name)
_prior = priorFunction(name)
def __init__(self, psr):
super(BasisCommonGP, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
pname = "_".join([psr.name, name])
self._bases = basisFunction(pname, psr=psr)
self._params, self._coefficients = {}, {}
for par in itertools.chain(
self._prior._params.values(), self._orf._params.values(), self._bases._params.values()
):
self._params[par.name] = par
            rand_state = int(np.abs(np.sum(psr.pos))*10000)
ra = uniform.rvs(loc=0, scale=2*np.pi, random_state=rand_state)
dec = cosine.rvs(loc=0, scale=0.5, random_state=rand_state+11)
newpos = np.array(SkyCoord(ra=ra*u.rad, \
dec=dec*u.rad).cartesian.xyz)
self._psrpos = psr.pos
self._psrpos_scrambled = newpos
if coefficients:
self._construct_basis()
# if we're given an instantiated coefficient vector
# that's what we will use
if isinstance(coefficients, parameter.Parameter):
self._coefficients[""] = coefficients
self._params[coefficients.name] = coefficients
return
chain = itertools.chain(
self._prior._params.values(), self._orf._params.values(), self._bases._params.values()
)
priorargs = {par.name: self._params[par.name] for par in chain}
logprior = parameter.Function(self._get_coefficient_logprior, **priorargs)
size = self._basis.shape[1]
cpar = parameter.GPCoefficients(logprior=logprior, size=size)(pname + "_coefficients")
self._coefficients[""] = cpar
self._params[cpar.name] = cpar
@property
def basis_params(self):
"""Get any varying basis parameters."""
return [pp.name for pp in self._bases.params]
@signal_base.cache_call("basis_params")
def _construct_basis(self, params={}):
self._basis, self._labels = self._bases(params=params)
if coefficients:
def _get_coefficient_logprior(self, c, **params):
# MV: for correlated GPs, the prior needs to use
# the coefficients for all GPs together;
# this may require parameter groups
raise NotImplementedError("Need to implement common prior " + "for BasisCommonGP coefficients")
@property
def delay_params(self):
return [pp.name for pp in self.params if "_coefficients" in pp.name]
@signal_base.cache_call(["basis_params", "delay_params"])
def get_delay(self, params={}):
self._construct_basis(params)
p = self._coefficients[""]
c = params[p.name] if p.name in params else p.value
return | np.dot(self._basis, c) | numpy.dot |
import numpy as np
from math import factorial, sqrt, cos, sin
fact = lambda x: factorial(int(x))
def choose(n, k):
return fact(n)/fact(k)/fact(n-k)
def dmat_entry(j,m_,m,beta):
#real valued. implemented according to wikipedia
partA = sqrt(fact(j+m_)*fact(j-m_)*fact(j+m)*fact(j-m))
partB = 0.
for s in range(max(int(m-m_),0),min(int(j+m),int(j-m_))+1):
temp = (-1.)**s / (fact(j+m-s)*fact(s)*fact(m_-m+s)*fact(j-m_-s))
partB += temp * cos(beta/2)**(2*j+m-m_-2*s) * (sin(beta/2))**(m_-m+2*s)
return partA * partB
def dm(theta,l):
ret=np.zeros((2*l+1,2*l+1))
for m in range(-l,l+1):
for n in range(-l,l+1):
ret[m+l,n+l]=dmat_entry(l,m,n,theta)
return ret
def Dmat_entry(l,m,n,alpha,beta,gamma):
return np.exp(-1j*m*alpha) * dmat_entry(l,m,n,beta) * np.exp(-1j*n*gamma)
def Dm(angles, l=1):
    ret = np.zeros((2*l+1,2*l+1), dtype=complex)
for m in range(-l,l+1):
for n in range(-l,l+1):
ret[m+l,n+l] = Dmat_entry(l,m,n,angles[0],angles[1],angles[2])
#print(ret[m+l,n+l])
return ret
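# Quick sanity sketch (illustrative): at zero Euler angles the Wigner D-matrix
# reduces to the identity, e.g.
#
#   np.allclose(Dm((0.0, 0.0, 0.0), l=1), np.eye(3))   # expected: True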
def _Dm_hardcode(angles, l=1):
alpha, beta, gamma = angles
sin = np.sin
cos = np.cos
sqrt = np.sqrt
exp = np.exp
i = 1j
if l == 0:
        return np.ones((1,1),dtype=complex)
if l == 1:
D = | np.zeros((3,3),dtype=np.complex) | numpy.zeros |
#!/usr/bin/env python3
# This script loads in GWTC-1 samples from LALInference and bilby
# Produces a pp-plot for all parameters
# Evaluates the JS-divergence for each parameter and its uncertainty (via bootstrapping)
import pandas as pd
import json
from tqdm import tqdm
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binom
from scipy.spatial.distance import jensenshannon
from bilby.gw.conversion import generate_tidal_parameters
np.random.seed(seed=150914)
plt.rcParams.update(
{'axes.labelsize': 30,
'font.size': 30,
'legend.fontsize': 30,
'xtick.labelsize': 30,
'ytick.labelsize': 30,
'axes.titlesize' : 30,
'text.usetex': True,
'font.family': "serif",
'font.serif': "Computer Modern Roman",
# 'font.weight':'heavy',
'savefig.dpi': 500
})
fontparams = {'mathtext.fontset': 'stix',
'font.family': 'serif',
'font.serif': "Computer Modern Roman"}
from scipy.stats import gaussian_kde
from pesummary.core.plots.bounded_1d_kde import Bounded_1d_kde
from pesummary.gw.plots.bounds import default_bounds
from collections import namedtuple
bilby_to_latex = dict(
a_1="$a_1$",
a_2="$a_2$",
luminosity_distance="$d_L$", #" [Mpc]",
ra="$\\alpha$",
dec="$\\delta$",
theta_jn="$\\theta_{\\rm JN}$",
tilt_1="$\\theta_1$",
tilt_2="$\\theta_2$",
chirp_mass="$\mathcal{M}$", #" [M$_\odot$]",
mass_ratio="$q$",
mass_1="$m_1$", # [M$_\odot$]",
mass_2="$m_2$", # [M$_\odot$]"
delta_lambda="$\\Delta\\Lambda$",
delta_lambda_tilde="$\\Delta\\tilde{\\Lambda}$",
lambda_tilde="$\\hat{\\Lambda}$"
)
########### Defining functions needed ##############
def load_event_data(event, data_path="/home/isobel.romero-shaw/bilby-gwtc-1-analysis-and-verification/"):
"""
Returns lalinference and bilby posterior samples
for a given event (passes as a string)
"""
bilby_data = "../gwtc-1_analysis_results/downsampled_posterior_samples/"+ event + "_downsampled_posterior_samples.dat"
lalinference_data = data_path + "compare_results_to_lalinference/" + event + "/rejection_lalinference_posterior_samples.dat"
lalinference_posterior_samples = pd.read_csv(lalinference_data, delimiter=' ')
bilby_posterior_samples = pd.read_csv(bilby_data, delimiter=' ')
try:
bilby_posterior_samples['mass_1']
for key in bilby_posterior_samples:
if 'lambda' in key:
print(key)
except KeyError:
# allows reading file if using samples downloaded from PESummary
bilby_posterior_samples = pd.read_csv(bilby_data, delimiter='\t')
if 'lambda_1' in lalinference_posterior_samples.keys():
lalinference_posterior_samples = generate_tidal_parameters(lalinference_posterior_samples)
bilby_posterior_samples = generate_tidal_parameters(bilby_posterior_samples)
return bilby_posterior_samples, lalinference_posterior_samples
def js_bootstrap(key, set_1, set_2, nsamples, ntests):
'''
key: string posterior parameter
set_1: first full posterior samples set
set_2: second full posterior samples set
nsamples: number for downsampling full sample set
ntests: number of iterations over different nsamples realisations
returns: 1 dim array (ntests)
'''
js_array = np.zeros(ntests)
for j in tqdm(range(ntests)):
nsamples = min([nsamples, len(set_1[key]), len(set_2[key])])
lp = np.random.choice(set_1[key], size=nsamples, replace=False)
bp = np.random.choice(set_2[key], size=nsamples, replace=False)
x = np.atleast_2d(np.linspace(np.min([np.min(bp), np.min(lp)]),np.max([np.max(bp), np.max(lp)]),100)).T
xlow = np.min(x)
xhigh = np.max(x)
if key in default_bounds.keys():
bounds = default_bounds[key]
if "low" in bounds.keys():
xlow = bounds["low"]
if "high" in bounds.keys():
if isinstance(bounds["high"], str) and "mass_1" in bounds["high"]:
xhigh = np.max(x)
else:
xhigh = bounds["high"]
set_1_pdf = Bounded_1d_kde(bp, xlow=xlow, xhigh=xhigh)(x)
set_2_pdf = Bounded_1d_kde(lp, xlow=xlow, xhigh=xhigh)(x)
js_array[j] = np.nan_to_num(np.power(jensenshannon(set_1_pdf, set_2_pdf), 2))
return js_array
def calc_median_error(jsvalues, quantiles=(0.16, 0.84)):
quants_to_compute = np.array([quantiles[0], 0.5, quantiles[1]])
quants = np.percentile(jsvalues, quants_to_compute * 100)
summary = namedtuple('summary', ['median', 'lower', 'upper'])
summary.median = quants[1]
summary.plus = quants[2] - summary.median
summary.minus = summary.median - quants[0]
return summary
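# Illustrative sketch: calc_median_error turns a bootstrap array of JS values
# into a median with asymmetric 16th/84th-percentile errors, e.g.
#
#   s = calc_median_error(np.array([0.01, 0.02, 0.03, 0.04, 0.05]))
#   s.median  # -> 0.03; s.minus and s.plus hold the distances to the
#             #    16th and 84th percentiles (~0.014 each here)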
def bin_series_and_calc_cdf(x, y, bins = 100):
"""
    Bin two unequal length series into equal bins
    and calculate their cumulative distribution function
in order to generate pp-plots
"""
boundaries = sorted(x)[::round(len(x)/bins)+1]
labels = [(boundaries[i]+boundaries[i+1])/2 for i in range(len(boundaries)-1)]
# Bin two series into equal bins
try:
xb = pd.cut(x, bins=boundaries, labels=labels)
yb = pd.cut(y, bins=boundaries, labels=labels)
# Get value counts for each bin and sort by bin
xhist = xb.value_counts().sort_index(ascending=True)/len(xb)
yhist = yb.value_counts().sort_index(ascending=True)/len(yb)
# Make cumulative
for ser in [xhist, yhist]:
ttl = 0
for idx, val in ser.iteritems():
ttl += val
ser.loc[idx] = ttl
except ValueError:
xhist = np.linspace(0, 1, 1000)
yhist = | np.linspace(0, 1, 1000) | numpy.linspace |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the available built-in noisy
quantum channels supported by PennyLane, as well as their conventions.
"""
import warnings
import numpy as np
from pennylane.operation import AnyWires, Channel
class AmplitudeDamping(Channel):
r"""AmplitudeDamping(gamma, wires)
Single-qubit amplitude damping error channel.
Interaction with the environment can lead to changes in the state populations of a qubit.
This is the phenomenon behind scattering, dissipation, attenuation, and spontaneous emission.
It can be modelled by the amplitude damping channel, with the following Kraus matrices:
.. math::
K_0 = \begin{bmatrix}
1 & 0 \\
0 & \sqrt{1-\gamma}
\end{bmatrix}
.. math::
K_1 = \begin{bmatrix}
0 & \sqrt{\gamma} \\
0 & 0
\end{bmatrix}
where :math:`\gamma \in [0, 1]` is the amplitude damping probability.
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
gamma (float): amplitude damping probability
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "F"
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
gamma = params[0]
if not 0.0 <= gamma <= 1.0:
raise ValueError("gamma must be between [0,1].")
K0 = np.diag([1, np.sqrt(1 - gamma)])
K1 = np.sqrt(gamma) * np.array([[0, 1], [0, 0]])
return [K0, K1]
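    # Illustrative check (not part of the class API): the Kraus operators above
    # satisfy the completeness relation sum_k K_k^dagger K_k = I, e.g. for
    # gamma = 0.3:
    #
    #   K0, K1 = AmplitudeDamping._kraus_matrices(0.3)
    #   np.allclose(K0.conj().T @ K0 + K1.conj().T @ K1, np.eye(2))  # True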
class GeneralizedAmplitudeDamping(Channel):
r"""GeneralizedAmplitudeDamping(gamma, p, wires)
Single-qubit generalized amplitude damping error channel.
This channel models the exchange of energy between a qubit and its environment
at finite temperatures, with the following Kraus matrices:
.. math::
K_0 = \sqrt{p} \begin{bmatrix}
1 & 0 \\
0 & \sqrt{1-\gamma}
\end{bmatrix}
.. math::
K_1 = \sqrt{p}\begin{bmatrix}
0 & \sqrt{\gamma} \\
0 & 0
\end{bmatrix}
.. math::
K_2 = \sqrt{1-p}\begin{bmatrix}
\sqrt{1-\gamma} & 0 \\
0 & 1
\end{bmatrix}
.. math::
K_3 = \sqrt{1-p}\begin{bmatrix}
0 & 0 \\
\sqrt{\gamma} & 0
\end{bmatrix}
where :math:`\gamma \in [0, 1]` is the probability of damping and :math:`p \in [0, 1]`
is the probability of the system being excited by the environment.
**Details:**
* Number of wires: 1
* Number of parameters: 2
Args:
gamma (float): amplitude damping probability
p (float): excitation probability
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "F"
@property
def num_params(self):
return 2
@classmethod
def _kraus_matrices(cls, *params):
gamma, p = params
if not 0.0 <= gamma <= 1.0:
raise ValueError("gamma must be between [0,1].")
if not 0.0 <= p <= 1.0:
raise ValueError("p must be between [0,1].")
K0 = np.sqrt(p) * np.diag([1, np.sqrt(1 - gamma)])
K1 = np.sqrt(p) * np.sqrt(gamma) * np.array([[0, 1], [0, 0]])
K2 = np.sqrt(1 - p) * np.diag([np.sqrt(1 - gamma), 1])
K3 = np.sqrt(1 - p) * np.sqrt(gamma) * np.array([[0, 0], [1, 0]])
return [K0, K1, K2, K3]
class PhaseDamping(Channel):
r"""PhaseDamping(gamma, wires)
Single-qubit phase damping error channel.
    Interaction with the environment can lead to loss of quantum information without any
changes in qubit excitations. This can be modelled by the phase damping channel, with
the following Kraus matrices:
.. math::
K_0 = \begin{bmatrix}
1 & 0 \\
0 & \sqrt{1-\gamma}
\end{bmatrix}
.. math::
K_1 = \begin{bmatrix}
0 & 0 \\
0 & \sqrt{\gamma}
\end{bmatrix}
where :math:`\gamma \in [0, 1]` is the phase damping probability.
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
gamma (float): phase damping probability
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "F"
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
gamma = params[0]
if not 0.0 <= gamma <= 1.0:
raise ValueError("gamma must be between [0,1].")
K0 = np.diag([1, np.sqrt(1 - gamma)])
K1 = np.diag([0, np.sqrt(gamma)])
return [K0, K1]
class DepolarizingChannel(Channel):
r"""DepolarizingChannel(p, wires)
Single-qubit symmetrically depolarizing error channel.
This channel is modelled by the following Kraus matrices:
.. math::
K_0 = \sqrt{1-p} \begin{bmatrix}
1 & 0 \\
0 & 1
\end{bmatrix}
.. math::
K_1 = \sqrt{p/3}\begin{bmatrix}
0 & 1 \\
1 & 0
\end{bmatrix}
.. math::
K_2 = \sqrt{p/3}\begin{bmatrix}
0 & -i \\
i & 0
\end{bmatrix}
.. math::
K_3 = \sqrt{p/3}\begin{bmatrix}
1 & 0 \\
0 & -1
\end{bmatrix}
where :math:`p \in [0, 1]` is the depolarization probability and is equally
divided in the application of all Pauli operations.
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
p (float): Each Pauli gate is applied with probability :math:`\frac{p}{3}`
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "A"
grad_recipe = ([[1, 0, 1], [-1, 0, 0]],)
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
p = params[0]
if not 0.0 <= p <= 1.0:
raise ValueError("p must be between [0,1]")
K0 = np.sqrt(1 - p) * np.eye(2)
K1 = np.sqrt(p / 3) * np.array([[0, 1], [1, 0]])
K2 = np.sqrt(p / 3) * np.array([[0, -1j], [1j, 0]])
K3 = np.sqrt(p / 3) * np.array([[1, 0], [0, -1]])
return [K0, K1, K2, K3]
class BitFlip(Channel):
r"""BitFlip(p, wires)
Single-qubit bit flip (Pauli :math:`X`) error channel.
This channel is modelled by the following Kraus matrices:
.. math::
K_0 = \sqrt{1-p} \begin{bmatrix}
1 & 0 \\
0 & 1
\end{bmatrix}
.. math::
K_1 = \sqrt{p}\begin{bmatrix}
0 & 1 \\
1 & 0
\end{bmatrix}
where :math:`p \in [0, 1]` is the probability of a bit flip (Pauli :math:`X` error).
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
p (float): The probability that a bit flip error occurs.
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "A"
grad_recipe = ([[1, 0, 1], [-1, 0, 0]],)
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
p = params[0]
if not 0.0 <= p <= 1.0:
raise ValueError("p must be between [0,1]")
K0 = np.sqrt(1 - p) * np.eye(2)
K1 = | np.sqrt(p) | numpy.sqrt |
# Copyright © Simphony Project Contributors
# Licensed under the terms of the MIT License
# (see simphony/__init__.py for details)
import numpy as np
import pytest
from simphony.libraries import siepic
from simphony.simulation import Detector, DifferentialDetector, Laser, Simulation
from simphony.tools import wl2freq
@pytest.fixture
def mzi():
gc_input = siepic.GratingCoupler()
y_splitter = siepic.YBranch()
wg_long = siepic.Waveguide(length=150e-6)
wg_short = siepic.Waveguide(length=50e-6)
y_recombiner = siepic.YBranch()
gc_output = siepic.GratingCoupler()
y_splitter.multiconnect(gc_input, wg_long, wg_short)
y_recombiner.multiconnect(gc_output, wg_short, wg_long)
return (gc_input, gc_output)
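# Note on the fixture above: the two waveguide arms are deliberately unequal
# (150 um vs 50 um), so the gc_input -> gc_output transmission of this
# Mach-Zehnder interferometer is wavelength dependent. A minimal stand-alone
# sketch of the same pattern exercised in the tests below:
#
#   with Simulation(fs=10e9, seed=117) as sim:
#       Laser(power=1e-3, wl=1550e-9).connect(gc_input)
#       Detector().connect(gc_output)
#       data = sim.sample(101)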
@pytest.fixture
def oh():
x1 = siepic.GratingCoupler(name="x1")
s = siepic.GratingCoupler(name="s")
p1 = siepic.GratingCoupler(name="p1")
p2 = siepic.GratingCoupler(name="p2")
lo = siepic.GratingCoupler(name="lo")
x2 = siepic.GratingCoupler(name="x2")
xdc = siepic.BidirectionalCoupler()
lodc = siepic.BidirectionalCoupler()
pdc = siepic.BidirectionalCoupler()
x1_xdc = siepic.Waveguide(length=514e-6)
x2_xdc = siepic.Waveguide(length=514e-6)
s_y = siepic.Waveguide(length=208e-6)
lo_lodc = siepic.Waveguide(length=208e-6)
p1_pdc = siepic.Waveguide(length=81e-6)
p2_pdc = siepic.Waveguide(length=81e-6)
y_xdc = siepic.Waveguide(length=12e-6)
y_pdc = siepic.Waveguide(length=12e-6)
pdc_lodc = siepic.Waveguide(length=12e-6)
xdc_lodc = siepic.Waveguide(length=12e-6)
y = siepic.YBranch()
terminator = siepic.Terminator()
xdc.multiconnect(y_xdc, xdc_lodc, x1_xdc, x2_xdc)
lodc.multiconnect(lo_lodc, terminator, pdc_lodc, xdc_lodc)
pdc.multiconnect(p1_pdc, p2_pdc, y_pdc, pdc_lodc)
y.multiconnect(s_y, y_xdc, y_pdc)
x1.connect(x1_xdc)
s.connect(s_y)
p1.connect(p1_pdc)
p2.connect(p2_pdc)
lo.connect(lo_lodc)
x2.connect(x2_xdc)
return (x1, s, p1, p2, lo, x2)
class TestSimulation:
seed117 = [
0.00017481,
0.01219353,
-0.01773873,
0.02061959,
-0.00290609,
-0.0066712,
0.00846216,
0.00488167,
-0.01002604,
0.00672506,
-0.01299871,
0.0126199,
0.0007396,
0.00115915,
-0.00602,
0.00979,
-0.00520642,
-0.01741927,
-0.0240019,
0.03115938,
-0.00537727,
-0.00066326,
-0.00495342,
0.0002517,
-0.01819794,
-0.00936641,
0.00736962,
-0.01756158,
0.01517604,
0.00298318,
0.00553522,
-0.00281899,
0.01784163,
0.00610215,
-0.00944377,
-0.00967335,
0.03266932,
-0.00754913,
-0.00785714,
0.03044863,
-0.00879942,
0.02543895,
-0.00322589,
-0.00785712,
0.00815186,
-0.01540587,
0.00631346,
0.01470638,
-0.0051735,
0.00150219,
0.01991704,
-0.00193712,
0.01432663,
0.00699449,
0.00281496,
-0.0075551,
0.00341335,
0.01141054,
-0.00696104,
0.00628623,
-0.00156238,
0.00271096,
-0.00631849,
0.00724422,
0.00808875,
0.00742942,
-0.02009245,
0.0071186,
-0.00098557,
-0.01329963,
-0.00692713,
0.01484593,
0.01073398,
0.01623651,
-0.00623136,
-0.01092318,
-0.00766223,
-0.00344117,
0.01897063,
0.01066724,
-0.00842774,
-0.01002413,
0.01600654,
-0.00786538,
0.01610357,
0.01215284,
0.0039726,
0.0194278,
-0.00150813,
-0.00359058,
-0.00125099,
0.01863215,
-0.01533298,
-0.00367189,
0.005698,
-0.00949113,
0.00948224,
-0.00325547,
0.01019897,
0.00419238,
-0.00354101,
]
def test_context(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim1:
assert sim1.circuit is None
l1 = Laser().connect(gc_input)
d1 = Detector().connect(gc_output)
assert l1.circuit == gc_input.circuit == sim1.circuit
assert d1.circuit == gc_output.circuit == sim1.circuit
with Simulation() as _:
assert sim1.circuit is None
assert l1.circuit != gc_input.circuit
assert d1.circuit != gc_output.circuit
def test_sampling(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim1:
Laser().connect(gc_input)
Detector().connect(gc_output)
assert len(sim1.sample(100)[0][0]) == 100
assert len(sim1.sample(101)[0][0]) == 101
def test_seed(self, mzi):
gc_input, gc_output = mzi
with Simulation(seed=117) as sim1:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim1.sample(101)
assert np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
with Simulation(seed=118) as sim2:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim2.sample(101)
assert not np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
with Simulation() as sim3:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim3.sample(101)
assert not np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
with Simulation(seed=117) as sim4:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim4.sample(101)
assert np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
def test_sampling_frequency(self, mzi):
gc_input, gc_output = mzi
data1 = None
with Simulation(fs=10e9, seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data1 = sim.sample(1001)
data2 = None
with Simulation(fs=10e9, seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data2 = sim.sample(1001)
assert np.allclose(data1[0][0], data2[0][0], rtol=0, atol=1e-11)
class TestSingleDetector:
result = 0.00017544
results = [
1.80576404e-04,
1.08063217e-02,
-1.84591717e-02,
2.11631266e-02,
-4.24527434e-03,
-5.53885990e-03,
8.67396297e-03,
5.28644276e-03,
-1.02520694e-02,
8.05882087e-03,
-1.25512983e-02,
1.18939574e-02,
-3.92095769e-06,
3.61245566e-03,
-6.60295137e-03,
9.18355753e-03,
-2.92043587e-03,
-1.80968121e-02,
-2.20941667e-02,
3.09025569e-02,
-5.98374595e-03,
-6.09039074e-05,
-6.12987780e-03,
]
def test_single_sample(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim.sample()
assert np.allclose(data[0][0], [self.result], rtol=0, atol=1e-8)
def test_conversion_gain(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector(conversion_gain=7).connect(gc_output)
data = sim.sample()
assert np.allclose(data[0][0], [self.result * 7], rtol=0, atol=1e-7)
def test_noise(self, mzi):
gc_input, gc_output = mzi
with Simulation(seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector(noise=1e-3).connect(gc_output)
data = sim.sample(23)
assert np.allclose(data[0][0], self.results, rtol=0, atol=1e-8)
class TestDifferentialDetector:
cmrr_x = [
0.00185109,
0.00438826,
-0.00162186,
0.0004025,
0.00595868,
-0.00064218,
0.00427251,
0.00205762,
0.00079248,
-0.00233544,
0.00097263,
0.00339304,
-0.00215764,
0.00184303,
-0.00052858,
0.00076543,
-0.001965,
0.0021287,
0.00238189,
0.00219444,
-0.00612456,
0.00209419,
-0.00035425,
]
cmrr_p = [
-0.00470416,
0.00939843,
0.00632678,
0.00376477,
-0.00135284,
-0.00634382,
-0.00374078,
0.00145949,
-0.0010054,
0.00687253,
-0.00553449,
0.00346154,
-0.00358327,
-0.00438276,
-0.0039282,
-0.00549966,
0.00577782,
-0.00183013,
-0.00431677,
0.00059047,
0.00173069,
0.00035287,
0.00030604,
]
result = [
6.820933398426216e-05,
-2.51779027237116e-06,
7.072712425663332e-05,
7.528059784829445e-05,
-2.2353588319872576e-06,
7.751595668028171e-05,
]
x1results = [
7.25832807e-05,
6.45213384e-03,
-1.16782238e-02,
1.32502362e-02,
-2.92429196e-03,
-3.29487901e-03,
5.40473883e-03,
3.32631865e-03,
-6.47341674e-03,
5.21927531e-03,
-7.78813016e-03,
7.24665505e-03,
-1.74786835e-04,
2.64408006e-03,
-4.26117438e-03,
5.57803566e-03,
-1.45885813e-03,
-1.14445296e-02,
-1.34812942e-02,
1.91818955e-02,
-3.87934796e-03,
2.70018878e-05,
-4.07081299e-03,
]
xresults = [
0.00185101,
0.00642166,
-0.0046529,
0.00386158,
0.00543731,
-0.00180061,
0.0056746,
0.00285391,
-0.00093357,
-0.00122723,
-0.00125639,
0.00549862,
-0.00206215,
0.00200947,
-0.0015768,
0.00239221,
-0.00287553,
-0.0008483,
-0.00170886,
0.00743684,
-0.00706408,
0.00195226,
-0.00122203,
]
x2results = [
7.51125467e-05,
6.59171046e-03,
-1.18798537e-02,
1.34858866e-02,
-2.95698094e-03,
-3.37021160e-03,
5.50204769e-03,
3.38258002e-03,
-6.58734386e-03,
5.29622841e-03,
-7.93593533e-03,
7.39107533e-03,
-1.65775053e-04,
2.65791212e-03,
-4.32937195e-03,
5.69003709e-03,
-1.51750747e-03,
-1.16429994e-02,
-1.37543005e-02,
1.95380050e-02,
-3.94001069e-03,
1.98397377e-05,
-4.12678903e-03,
]
p1results = [
-0.00012833,
0.00777554,
-0.01088095,
0.01399814,
-0.00186632,
-0.00567741,
0.00499696,
0.00325294,
-0.00684533,
0.00448832,
-0.0088845,
0.00729312,
0.00244817,
-0.00056501,
-0.00356172,
0.00697164,
-0.00434339,
-0.01221006,
-0.0154076,
0.02053649,
-0.00297821,
-0.00016577,
-0.00318841,
]
presults = [
-0.00470424,
0.01131443,
0.00347076,
0.00702412,
-0.00184411,
-0.00743536,
-0.00241965,
0.0022098,
-0.00263178,
0.00791674,
-0.0076348,
0.00544554,
-0.00349329,
-0.00422592,
-0.0049159,
-0.00396682,
0.00491986,
-0.00463523,
-0.00817131,
0.00553016,
0.00084541,
0.00021914,
-0.00051163,
]
p2results = [
-0.00012596,
0.00789399,
-0.01105161,
0.01419768,
-0.00189359,
-0.00574133,
0.0050792,
0.00330085,
-0.00694156,
0.00455373,
-0.00900964,
0.00741552,
0.00245592,
-0.00055348,
-0.00361925,
0.00706657,
-0.00439297,
-0.0123776,
-0.01563877,
0.02083776,
-0.00302955,
-0.00017171,
-0.00323547,
]
def test_single_sample(self, oh):
x1, s, p1, p2, lo, x2 = oh
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(lo)
DifferentialDetector().multiconnect(x1, x2)
DifferentialDetector().multiconnect(p1, p2)
x1, x, x2, p1, p, p2 = sim.sample()
assert np.allclose(
[
x1[0][0][0],
x[0][0][0],
x2[0][0][0],
p1[0][0][0],
p[0][0][0],
p2[0][0][0],
],
self.result,
)
def test_conversion_gain(self, oh):
x1, s, p1, p2, lo, x2 = oh
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(lo)
DifferentialDetector(
monitor_conversion_gain=7, rf_conversion_gain=7
).multiconnect(x1, x2)
DifferentialDetector(
monitor_conversion_gain=7, rf_conversion_gain=7
).multiconnect(p1, p2)
x1, x, x2, p1, p, p2 = sim.sample()
assert np.allclose(
[
x1[0][0][0],
x[0][0][0],
x2[0][0][0],
p1[0][0][0],
p[0][0][0],
p2[0][0][0],
],
np.array(self.result) * 7,
)
def test_noise(self, oh):
x1, s, p1, p2, lo, x2 = oh
with Simulation(seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(lo)
DifferentialDetector(monitor_noise=800e-6, rf_noise=4e-3).multiconnect(
x1, x2
)
DifferentialDetector(monitor_noise=800e-6, rf_noise=4e-3).multiconnect(
p1, p2
)
x1, x, x2, p1, p, p2 = sim.sample(23)
assert np.allclose(x1[0][0], self.x1results, rtol=0, atol=1e-7)
assert np.allclose(x[0][0], self.xresults, rtol=0, atol=1e-7)
assert np.allclose(x2[0][0], self.x2results, rtol=0, atol=1e-7)
assert np.allclose(p1[0][0], self.p1results, rtol=0, atol=1e-7)
assert np.allclose(p[0][0], self.presults, rtol=0, atol=1e-7)
assert np.allclose(p2[0][0], self.p2results, rtol=0, atol=1e-7)
def test_cmrr(self, oh):
x1, s, p1, p2, lo, x2 = oh
with Simulation(seed=117) as sim:
Laser(power=1e-3, rin=-145, wl=1550e-9).connect(lo)
DifferentialDetector(
monitor_noise=800e-6, rf_cmrr=60, rf_noise=4e-3
).multiconnect(x1, x2)
DifferentialDetector(
monitor_noise=800e-6, rf_cmrr=60, rf_noise=4e-3
).multiconnect(p1, p2)
_, x, _, _, p, _ = sim.sample(23)
assert np.allclose(x[0][0], self.cmrr_x, rtol=0, atol=1e-7)
assert np.allclose(p[0][0], self.cmrr_p, rtol=0, atol=1e-7)
class TestLaser:
freqs = [
1.87370286e14,
1.87487466e14,
1.87604792e14,
1.87722265e14,
1.87839886e14,
1.87957654e14,
1.88075570e14,
1.88193633e14,
1.88311845e14,
1.88430206e14,
1.88548716e14,
1.88667374e14,
1.88786183e14,
1.88905141e14,
1.89024248e14,
1.89143507e14,
1.89262915e14,
1.89382475e14,
1.89502186e14,
1.89622048e14,
1.89742062e14,
1.89862228e14,
1.89982546e14,
1.90103017e14,
1.90223641e14,
1.90344418e14,
1.90465348e14,
1.90586432e14,
1.90707670e14,
1.90829063e14,
1.90950610e14,
1.91072312e14,
1.91194170e14,
1.91316183e14,
1.91438351e14,
1.91560676e14,
1.91683157e14,
1.91805795e14,
1.91928590e14,
1.92051543e14,
1.92174653e14,
1.92297920e14,
1.92421347e14,
1.92544931e14,
1.92668675e14,
1.92792577e14,
1.92916640e14,
1.93040862e14,
1.93165244e14,
1.93289786e14,
1.93414489e14,
1.93539353e14,
1.93664379e14,
1.93789566e14,
1.93914915e14,
1.94040426e14,
1.94166100e14,
1.94291936e14,
1.94417936e14,
1.94544100e14,
1.94670427e14,
1.94796919e14,
1.94923575e14,
1.95050396e14,
1.95177382e14,
1.95304533e14,
1.95431850e14,
1.95559333e14,
1.95686983e14,
1.95814799e14,
1.95942783e14,
1.96070934e14,
1.96199253e14,
1.96327739e14,
1.96456394e14,
1.96585218e14,
1.96714211e14,
1.96843374e14,
1.96972706e14,
1.97102208e14,
1.97231880e14,
1.97361724e14,
1.97491738e14,
1.97621924e14,
1.97752281e14,
1.97882811e14,
1.98013513e14,
1.98144387e14,
1.98275435e14,
1.98406657e14,
1.98538052e14,
1.98669621e14,
1.98801365e14,
1.98933283e14,
1.99065377e14,
1.99197647e14,
1.99330092e14,
1.99462713e14,
1.99595511e14,
1.99728486e14,
1.99861639e14,
]
powers = [
0.001,
0.0011984,
0.00139679,
0.00159519,
0.00179359,
0.00199198,
0.00219038,
0.00238878,
0.00258717,
0.00278557,
0.00298397,
0.00318236,
0.00338076,
0.00357916,
0.00377756,
0.00397595,
0.00417435,
0.00437275,
0.00457114,
0.00476954,
0.00496794,
0.00516633,
0.00536473,
0.00556313,
0.00576152,
0.00595992,
0.00615832,
0.00635671,
0.00655511,
0.00675351,
0.0069519,
0.0071503,
0.0073487,
0.00754709,
0.00774549,
0.00794389,
0.00814228,
0.00834068,
0.00853908,
0.00873747,
0.00893587,
0.00913427,
0.00933267,
0.00953106,
0.00972946,
0.00992786,
0.01012625,
0.01032465,
0.01052305,
0.01072144,
0.01091984,
0.01111824,
0.01131663,
0.01151503,
0.01171343,
0.01191182,
0.01211022,
0.01230862,
0.01250701,
0.01270541,
0.01290381,
0.0131022,
0.0133006,
0.013499,
0.01369739,
0.01389579,
0.01409419,
0.01429259,
0.01449098,
0.01468938,
0.01488778,
0.01508617,
0.01528457,
0.01548297,
0.01568136,
0.01587976,
0.01607816,
0.01627655,
0.01647495,
0.01667335,
0.01687174,
0.01707014,
0.01726854,
0.01746693,
0.01766533,
0.01786373,
0.01806212,
0.01826052,
0.01845892,
0.01865731,
0.01885571,
0.01905411,
0.01925251,
0.0194509,
0.0196493,
0.0198477,
0.02004609,
0.02024449,
0.02044289,
0.02064128,
0.02083968,
0.02103808,
0.02123647,
0.02143487,
0.02163327,
0.02183166,
0.02203006,
0.02222846,
0.02242685,
0.02262525,
0.02282365,
0.02302204,
0.02322044,
0.02341884,
0.02361723,
0.02381563,
0.02401403,
0.02421242,
0.02441082,
0.02460922,
0.02480762,
0.02500601,
0.02520441,
0.02540281,
0.0256012,
0.0257996,
0.025998,
0.02619639,
0.02639479,
0.02659319,
0.02679158,
0.02698998,
0.02718838,
0.02738677,
0.02758517,
0.02778357,
0.02798196,
0.02818036,
0.02837876,
0.02857715,
0.02877555,
0.02897395,
0.02917234,
0.02937074,
0.02956914,
0.02976754,
0.02996593,
0.03016433,
0.03036273,
0.03056112,
0.03075952,
0.03095792,
0.03115631,
0.03135471,
0.03155311,
0.0317515,
0.0319499,
0.0321483,
0.03234669,
0.03254509,
0.03274349,
0.03294188,
0.03314028,
0.03333868,
0.03353707,
0.03373547,
0.03393387,
0.03413226,
0.03433066,
0.03452906,
0.03472745,
0.03492585,
0.03512425,
0.03532265,
0.03552104,
0.03571944,
0.03591784,
0.03611623,
0.03631463,
0.03651303,
0.03671142,
0.03690982,
0.03710822,
0.03730661,
0.03750501,
0.03770341,
0.0379018,
0.0381002,
0.0382986,
0.03849699,
0.03869539,
0.03889379,
0.03909218,
0.03929058,
0.03948898,
0.03968737,
0.03988577,
0.04008417,
0.04028257,
0.04048096,
0.04067936,
0.04087776,
0.04107615,
0.04127455,
0.04147295,
0.04167134,
0.04186974,
0.04206814,
0.04226653,
0.04246493,
0.04266333,
0.04286172,
0.04306012,
0.04325852,
0.04345691,
0.04365531,
0.04385371,
0.0440521,
0.0442505,
0.0444489,
0.04464729,
0.04484569,
0.04504409,
0.04524248,
0.04544088,
0.04563928,
0.04583768,
0.04603607,
0.04623447,
0.04643287,
0.04663126,
0.04682966,
0.04702806,
0.04722645,
0.04742485,
0.04762325,
0.04782164,
0.04802004,
0.04821844,
0.04841683,
0.04861523,
0.04881363,
0.04901202,
0.04921042,
0.04940882,
0.04960721,
0.04980561,
0.05000401,
0.0502024,
0.0504008,
0.0505992,
0.0507976,
0.05099599,
0.05119439,
0.05139279,
0.05159118,
0.05178958,
0.05198798,
0.05218637,
0.05238477,
0.05258317,
0.05278156,
0.05297996,
0.05317836,
0.05337675,
0.05357515,
0.05377355,
0.05397194,
0.05417034,
0.05436874,
0.05456713,
0.05476553,
0.05496393,
0.05516232,
0.05536072,
0.05555912,
0.05575752,
0.05595591,
0.05615431,
0.05635271,
0.0565511,
0.0567495,
0.0569479,
0.05714629,
0.05734469,
0.05754309,
0.05774148,
0.05793988,
0.05813828,
0.05833667,
0.05853507,
0.05873347,
0.05893186,
0.05913026,
0.05932866,
0.05952705,
0.05972545,
0.05992385,
0.06012224,
0.06032064,
0.06051904,
0.06071743,
0.06091583,
0.06111423,
0.06131263,
0.06151102,
0.06170942,
0.06190782,
0.06210621,
0.06230461,
0.06250301,
0.0627014,
0.0628998,
0.0630982,
0.06329659,
0.06349499,
0.06369339,
0.06389178,
0.06409018,
0.06428858,
0.06448697,
0.06468537,
0.06488377,
0.06508216,
0.06528056,
0.06547896,
0.06567735,
0.06587575,
0.06607415,
0.06627255,
0.06647094,
0.06666934,
0.06686774,
0.06706613,
0.06726453,
0.06746293,
0.06766132,
0.06785972,
0.06805812,
0.06825651,
0.06845491,
0.06865331,
0.0688517,
0.0690501,
0.0692485,
0.06944689,
0.06964529,
0.06984369,
0.07004208,
0.07024048,
0.07043888,
0.07063727,
0.07083567,
0.07103407,
0.07123246,
0.07143086,
0.07162926,
0.07182766,
0.07202605,
0.07222445,
0.07242285,
0.07262124,
0.07281964,
0.07301804,
0.07321643,
0.07341483,
0.07361323,
0.07381162,
0.07401002,
0.07420842,
0.07440681,
0.07460521,
0.07480361,
0.075002,
0.0752004,
0.0753988,
0.07559719,
0.07579559,
0.07599399,
0.07619238,
0.07639078,
0.07658918,
0.07678758,
0.07698597,
0.07718437,
0.07738277,
0.07758116,
0.07777956,
0.07797796,
0.07817635,
0.07837475,
0.07857315,
0.07877154,
0.07896994,
0.07916834,
0.07936673,
0.07956513,
0.07976353,
0.07996192,
0.08016032,
0.08035872,
0.08055711,
0.08075551,
0.08095391,
0.0811523,
0.0813507,
0.0815491,
0.08174749,
0.08194589,
0.08214429,
0.08234269,
0.08254108,
0.08273948,
0.08293788,
0.08313627,
0.08333467,
0.08353307,
0.08373146,
0.08392986,
0.08412826,
0.08432665,
0.08452505,
0.08472345,
0.08492184,
0.08512024,
0.08531864,
0.08551703,
0.08571543,
0.08591383,
0.08611222,
0.08631062,
0.08650902,
0.08670741,
0.08690581,
0.08710421,
0.08730261,
0.087501,
0.0876994,
0.0878978,
0.08809619,
0.08829459,
0.08849299,
0.08869138,
0.08888978,
0.08908818,
0.08928657,
0.08948497,
0.08968337,
0.08988176,
0.09008016,
0.09027856,
0.09047695,
0.09067535,
0.09087375,
0.09107214,
0.09127054,
0.09146894,
0.09166733,
0.09186573,
0.09206413,
0.09226253,
0.09246092,
0.09265932,
0.09285772,
0.09305611,
0.09325451,
0.09345291,
0.0936513,
0.0938497,
0.0940481,
0.09424649,
0.09444489,
0.09464329,
0.09484168,
0.09504008,
0.09523848,
0.09543687,
0.09563527,
0.09583367,
0.09603206,
0.09623046,
0.09642886,
0.09662725,
0.09682565,
0.09702405,
0.09722244,
0.09742084,
0.09761924,
0.09781764,
0.09801603,
0.09821443,
0.09841283,
0.09861122,
0.09880962,
0.09900802,
0.09920641,
0.09940481,
0.09960321,
0.0998016,
0.1,
]
rin_results = [
0.00017534,
0.00019062,
0.00015289,
0.00020163,
0.00017138,
0.00016665,
0.0001857,
0.00018142,
0.00016255,
0.00018353,
0.00015875,
0.0001913,
0.00017599,
0.00017676,
0.00016767,
0.00018743,
0.00016871,
0.00015348,
0.00014547,
0.00021439,
0.00016853,
0.00017464,
0.00016882,
]
def test_wlsweep(self, mzi):
gc_input, gc_output = mzi
with Simulation(seed=117) as sim:
l = Laser(power=1e-3).wlsweep(1500e-9, 1600e-9, 101).connect(gc_input)
Detector().connect(gc_output)
data = sim.sample()
assert len(data) == 101
assert np.allclose(l.freqs, self.freqs)
def test_freqsweep(self, mzi):
gc_input, gc_output = mzi
with Simulation(seed=117) as sim:
l = (
Laser(power=1e-3)
.freqsweep(1.87370286e14, 1.99861639e14, 101)
.connect(gc_input)
)
Detector().connect(gc_output)
data = sim.sample()
assert len(data) == 101
assert np.allclose(l.freqs, self.freqs, rtol=0, atol=1e12)
def test_powersweep(self, mzi):
gc_input, gc_output = mzi
with Simulation(seed=117) as sim:
l = Laser(wl=1550e-9).powersweep(1e-3, 100e-3).connect(gc_input)
Detector().connect(gc_output)
data = sim.sample()
assert len(data[0]) == 500
assert np.allclose(l.powers, self.powers, rtol=0, atol=1e-8)
def test_freqs(self, oh, mzi):
x1, s, p1, p2, lo, x2 = oh
gc_input, gc_output = mzi
with Simulation(seed=117) as sim:
Laser(freq=1.94888531e14, power=1e-3).connect(gc_input)
Detector().connect(gc_output)
data = sim.sample()
with Simulation(seed=117) as sim:
l1 = Laser(power=1e-3).connect(lo)
l2 = Laser(power=1e-3).connect(s)
l1._freqs = wl2freq(
np.array(
[
1500e-9,
1510e-9,
1520e-9,
1530e-9,
1540e-9,
1550e-9,
1560e-9,
1570e-9,
1580e-9,
1590e-9,
1600e-9,
]
)
)
l2._freqs = wl2freq(
np.array([1500e-9, 1520e-9, 1540e-9, 1560e-9, 1580e-9, 1600e-9])
)
DifferentialDetector().multiconnect(x1, x2)
DifferentialDetector().multiconnect(p1, p2)
assert len(l1.freqs) == len(l2.freqs) == 0
data = sim.sample()
assert len(data) == len(l1.freqs) == 6
assert np.equal(l1.freqs, l2.freqs).all()
assert | np.equal(l1.freqs, sim.freqs) | numpy.equal |
import gym
import numpy as np
from abc import abstractmethod
from fault_tolerant_flight_control_drl.agent import SAC
from fault_tolerant_flight_control_drl.tools import AltitudeTask, AttitudeTask, BodyRateTask
from fault_tolerant_flight_control_drl.tools import ReliabilityTask, DisturbanceRejectionAtt
from fault_tolerant_flight_control_drl.tools import plot_response
import importlib
from fault_tolerant_flight_control_drl.tools.math_util import unscale_action, d2r, r2d
from fault_tolerant_flight_control_drl.tools import get_ID
from alive_progress import alive_bar
class Citation(gym.Env):
"""
Citation environment that follows the gym.env interface
Developed to be interfaced with a modified version of the CitAST environment, built with the DASMAT model and owned
by the Delft University of Technology. Follow the 'CitAST for Python' instructions at
https://github.com/kdally/fault-tolerant-flight-control-drl/blob/master/docs/CitAST_for_Python.pdf for installation.
Author: <NAME>
:param evaluation: (bool) If False, the environment will be given training-specific shorter tasks.
If True, the environment is given longer and unseen tasks as part of the evaluation.
:param FDD: (bool) If True, the Fault Detection and Diagnosis module is added which switches from robust to
adaptive control at self.FDD_switch_time.
    :param task: (Task) one of AltitudeTask, AttitudeTask, BodyRateTask, ReliabilityTask, DisturbanceRejectionAtt
:param disturbance: (bool) If True, disturbance forces are added in the environment. Normal disturbance values from
https://doi.org/10.2514/6.2018-1127.
:param sensor_noise: (bool) If True, sensor noise is added to the environment observations based on the sensor noise
estimates of the Cessna Citation 550 given in https://doi.org/10.2514/6.2018-1127.
    :param low_pass: (bool) If True, control inputs are filtered with a first-order low-pass filter.
:param init_alt: (float) Initial flight altitude. One of 2000 or 5000.
:param init_speed: (float) Initial speed. One of 90 or 140.
"""
def __init__(self, evaluation=False, FDD=False, task=AttitudeTask,
disturbance=False, sensor_noise=False, low_pass=False,
init_alt=2000, init_speed=90):
super(Citation, self).__init__()
assert bool((FDD and init_alt == 2000 and init_speed == 90) or not FDD), \
'Failure cases only implemented for initial conditions init_alt == 2000 & init_speed == 90'
self.rate_limits = self.ActionLimits(np.array([[-20, -40, -20], [20, 40, 20]]))
self.deflection_limits = self.ActionLimits(np.array([[-20.05, -37.24, -21.77], [14.9, 37.24, 21.77]]))
self.placeholder_cond = False
self.C_MODEL, self.failure_input = self.get_plant()
self.FDD_switch_time = 60
self.failure_time = 10
self.task = task()
self.task_fun, self.evaluation, self.FDD = self.task.choose_task(evaluation, self.failure_input, FDD)
self.has_sensor_noise = sensor_noise
self.has_disturbance = disturbance
self.enable_low_pass = low_pass
self.time = self.task_fun()[3]
self.dt = self.time[1] - self.time[0]
self.ref_signal = self.task_fun(init_alt=init_alt)[0]
self.track_indices = self.task_fun()[1]
self.obs_indices = self.task_fun()[2]
self.sideslip_factor, self.pitch_factor, self.roll_factor, self.alt_factor = self.adapt_to_failure()
self.observation_space = gym.spaces.Box(-100, 100, shape=(len(self.obs_indices) + 3,), dtype=np.float64)
self.action_space = gym.spaces.Box(-1., 1., shape=(3,), dtype=np.float64)
self.current_deflection = np.zeros(3)
self.agent_path = 'fault_tolerant_flight_control_drl/agent/trained'
self.agents, self.agentID = self.load_agent(FDD) # type: SAC
# self.agents, self.agentID = None, None
self.state = None
self.state_deg = None
self.scale_s = None
self.state_history = None
self.action_history = None
self.error = None
self.step_count = None
self.external_ref_signal = None
def step(self, action_rates: np.ndarray):
self.current_deflection = self.current_deflection + self.scale_a(action_rates) * self.dt
if self.sideslip_factor[self.step_count - 1] == 0.0: self.current_deflection[2] = 0.0
filtered_deflection = self.filter_control_input(self.current_deflection)
if self.time[self.step_count] < self.failure_time and self.evaluation:
self.state = self.C_MODEL.step(
np.hstack([d2r(filtered_deflection + self.add_disturbance()[:, self.step_count]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
self.failure_input[1]]))
else:
self.state = self.C_MODEL.step(
np.hstack([d2r(filtered_deflection + self.add_disturbance()[:, self.step_count]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
self.failure_input[2]]))
self.state_deg = self.state * self.scale_s
self.error = d2r(self.ref_signal[:, self.step_count] -
self.state_deg[self.track_indices] + self.get_sensor_noise()[self.track_indices]) \
* self.scale_error(self.step_count)
self.state_history[:, self.step_count] = self.state_deg
self.action_history[:, self.step_count] = filtered_deflection
self.step_count += 1
done = bool(self.step_count >= self.time.shape[0])
if np.isnan(self.state).sum() > 0:
self.stop_NaNs()
return self.get_obs(), self.get_reward(), done, {'is_success': True}
def reset(self):
self.reset_soft()
self.ref_signal = self.task_fun()[0]
return np.zeros(self.observation_space.shape)
def reset_soft(self):
self.C_MODEL.initialize()
action_trim = np.array(
[0, 0, 0, 0., 0., 0., 0., 0.,
0, 0, self.failure_input[1]])
self.state = self.C_MODEL.step(action_trim)
self.scale_s = np.ones(self.state.shape)
self.scale_s[[0, 1, 2, 4, 5, 6, 7, 8]] = 180 / np.pi
self.state_deg = self.state * self.scale_s
self.state_history = np.zeros((self.state.shape[0], self.time.shape[0]))
self.action_history = np.zeros((self.action_space.shape[0], self.time.shape[0]))
self.error = np.zeros(len(self.track_indices))
self.step_count = 0
self.current_deflection = np.zeros(3)
return | np.zeros(self.observation_space.shape) | numpy.zeros |
import string
import random
from collections import Counter
import pytest
from mock import patch, call
import numpy as np
from eight_mile.bleu import (
n_grams,
count_n_grams,
find_closest,
corpora_lengths,
max_gold_n_gram_counts,
count_matches,
count_possible,
geometric_mean,
brevity_penalty,
_read_references,
_read_lines,
)
def random_str(len_=None, min_=5, max_=21):
if len_ is None:
len_ = np.random.randint(min_, max_)
choices = list(string.ascii_letters + string.digits)
return "".join([np.random.choice(choices) for _ in range(len_)])
def test_find_closest_above():
pred_len = | np.random.randint(10, 20) | numpy.random.randint |
import math
from functools import partial
import numpy as np
import tensorflow as tf
def is_constant(*args):
return all(isinstance(a, (bool, int, float)) for a in args)
def is_numpy(*args):
if is_constant(*args):
return False
return all(isinstance(
a, (bool, int, float, np.ndarray, np.float32, np.int32, np.bool_))
for a in args)
def is_tensor(*args):
return any(isinstance(a, (tf.Tensor, tf.Variable)) for a in args)
def cast(value, dtype):
if is_constant(value):
return dtype(value)
if is_numpy(value):
dtypes = {
float: np.float32,
int: np.int32,
bool: np.bool,
}
return np.cast[dtypes[dtype]](value)
dtypes = {
float: tf.float32,
int: tf.int32,
bool: tf.bool,
}
return tf.cast(value, dtypes[dtype])
def _constants_not_accepted(func):
    raise TypeError('{} does not accept constants as arguments.'.format(func))
def where(bool_expr, true_value=None, false_value=None):
args = [a for a in (bool_expr, true_value, false_value) if a is not None]
if is_constant(*args):
_constants_not_accepted(where)
if is_numpy(*args):
if true_value is None and false_value is None:
return np.where(bool_expr)
return np.where(bool_expr, true_value, false_value)
return tf.where(bool_expr, true_value, false_value)
def nonzero(value):
if is_constant(value):
_constants_not_accepted(nonzero)
if is_numpy(value):
return np.nonzero(value)
raise TypeError(
'Tensorflow does not implement a function to compute non-zero values.')
def sum(value):
if is_constant(value):
        _constants_not_accepted(sum)
if is_numpy(value):
return np.sum(value)
return tf.reduce_sum(value)
def mean(value):
if is_constant(value):
        _constants_not_accepted(mean)
if is_numpy(value):
return np.mean(value)
return tf.reduce_mean(value)
def count(value):
if is_constant(value):
        _constants_not_accepted(count)
if is_numpy(value):
return value.size
return value.shape.num_elements()
def floor(value):
if is_constant(value):
return math.floor(value)
if is_numpy(value):
return np.floor(value)
omap = {'Floor': 'Identity'}
with tf.get_default_graph().gradient_override_map(omap):
return tf.floor(value)
def ceil(value):
if is_constant(value):
return math.ceil(value)
if is_numpy(value):
return | np.ceil(value) | numpy.ceil |
"""Generate a diffusion map embedding
"""
import numpy as np
def compute_diffusion_map(L, alpha=0.5, n_components=None, diffusion_time=0,
skip_checks=False, overwrite=False):
"""Compute the diffusion maps of a symmetric similarity matrix
L : matrix N x N
L is symmetric and L(x, y) >= 0
alpha: float [0, 1]
        Setting alpha=1, the diffusion operator approximates the
Laplace-Beltrami operator. We then recover the Riemannian geometry
of the data set regardless of the distribution of the points. To
describe the long-term behavior of the point distribution of a
system of stochastic differential equations, we can use alpha=0.5
and the resulting Markov chain approximates the Fokker-Planck
diffusion. With alpha=0, it reduces to the classical graph Laplacian
normalization.
n_components: int
The number of diffusion map components to return. Due to the
spectrum decay of the eigenvalues, only a few terms are necessary to
achieve a given relative accuracy in the sum M^t.
diffusion_time: float >= 0
use the diffusion_time (t) step transition matrix M^t
t not only serves as a time parameter, but also has the dual role of
scale parameter. One of the main ideas of diffusion framework is
that running the chain forward in time (taking larger and larger
powers of M) reveals the geometric structure of X at larger and
larger scales (the diffusion process).
t = 0 empirically provides a reasonable balance from a clustering
perspective. Specifically, the notion of a cluster in the data set
is quantified as a region in which the probability of escaping this
region is low (within a certain time t).
skip_checks: bool
Avoid expensive pre-checks on input data. The caller has to make
sure that input data is valid or results will be undefined.
overwrite: bool
Optimize memory usage by re-using input matrix L as scratch space.
References
----------
[1] https://en.wikipedia.org/wiki/Diffusion_map
[2] <NAME>.; <NAME>. (2006). "Diffusion maps". Applied and
Computational Harmonic Analysis 21: 5-30. doi:10.1016/j.acha.2006.04.006
"""
import numpy as np
import scipy.sparse as sps
use_sparse = False
if sps.issparse(L):
use_sparse = True
if not skip_checks:
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
if not _graph_is_connected(L):
raise ValueError('Graph is disconnected')
ndim = L.shape[0]
if overwrite:
L_alpha = L
else:
L_alpha = L.copy()
if alpha > 0:
# Step 2
d = np.array(L_alpha.sum(axis=1)).flatten()
d_alpha = np.power(d, -alpha)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
else:
L_alpha = d_alpha[:, np.newaxis] * L_alpha
L_alpha = L_alpha * d_alpha[np.newaxis, :]
# Step 3
d_alpha = np.power(np.array(L_alpha.sum(axis=1)).flatten(), -1)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
else:
L_alpha = d_alpha[:, np.newaxis] * L_alpha
M = L_alpha
from scipy.sparse.linalg import eigsh, eigs
# Step 4
func = eigs
if n_components is not None:
lambdas, vectors = func(M, k=n_components + 1)
else:
lambdas, vectors = func(M, k=max(2, int(np.sqrt(ndim))))
del M
if func == eigsh:
lambdas = lambdas[::-1]
vectors = vectors[:, ::-1]
else:
lambdas = np.real(lambdas)
vectors = np.real(vectors)
lambda_idx = np.argsort(lambdas)[::-1]
lambdas = lambdas[lambda_idx]
vectors = vectors[:, lambda_idx]
return _step_5(lambdas, vectors, ndim, n_components, diffusion_time)
def _step_5(lambdas, vectors, ndim, n_components, diffusion_time):
"""
This is a helper function for diffusion map computation.
The lambdas have been sorted in decreasing order.
The vectors are ordered according to lambdas.
"""
psi = vectors/vectors[:, [0]]
diffusion_times = diffusion_time
if diffusion_time == 0:
diffusion_times = np.exp(1. - np.log(1 - lambdas[1:])/np.log(lambdas[1:]))
lambdas = lambdas[1:] / (1 - lambdas[1:])
else:
lambdas = lambdas[1:] ** float(diffusion_time)
lambda_ratio = lambdas/lambdas[0]
threshold = max(0.05, lambda_ratio[-1])
n_components_auto = np.amax(np.nonzero(lambda_ratio > threshold)[0])
n_components_auto = min(n_components_auto, ndim)
if n_components is None:
n_components = n_components_auto
embedding = psi[:, 1:(n_components + 1)] * lambdas[:n_components][None, :]
result = dict(lambdas=lambdas, vectors=vectors,
n_components=n_components, diffusion_time=diffusion_times,
n_components_auto=n_components_auto)
return embedding, result
def compute_diffusion_map_psd(
X, alpha=0.5, n_components=None, diffusion_time=0):
"""
This variant requires L to be dense, positive semidefinite and entrywise
positive with decomposition L = dot(X, X.T).
"""
from scipy.sparse.linalg import svds
# Redefine X such that L is normalized in a way that is analogous
# to a generalization of the normalized Laplacian.
d = X.dot(X.sum(axis=0)) ** (-alpha)
X = X * d[:, np.newaxis]
# Decompose M = D^-1 X X^T
# This is like
# M = D^-1/2 D^-1/2 X (D^-1/2 X).T D^1/2
# Substituting U = D^-1/2 X we have
# M = D^-1/2 U U.T D^1/2
# which is a diagonal change of basis of U U.T
# which itself can be decomposed using svd.
d = np.sqrt(X.dot(X.sum(axis=0)))
U = X / d[:, np.newaxis]
if n_components is not None:
u, s, vh = svds(U, k=n_components+1, return_singular_vectors=True)
else:
        k = max(2, int(np.sqrt(X.shape[0])))
u, s, vh = svds(U, k=k, return_singular_vectors=True)
# restore the basis and the arbitrary norm of 1
u = u / d[:, np.newaxis]
u = u / np.linalg.norm(u, axis=0, keepdims=True)
lambdas = s*s
vectors = u
# sort the lambdas in decreasing order and reorder vectors accordingly
lambda_idx = np.argsort(lambdas)[::-1]
lambdas = lambdas[lambda_idx]
vectors = vectors[:, lambda_idx]
return _step_5(lambdas, vectors, X.shape[0], n_components, diffusion_time)
def main():
# run a test
from numpy.testing import assert_allclose
def _nonnegative_corrcoef(X):
return (np.corrcoef(X) + 1) / 2.0
def _factored_nonnegative_corrcoef(X):
X = X - X.mean(axis=1, keepdims=True)
U = X / np.linalg.norm(X, axis=1, keepdims=True)
U = np.hstack([U, np.ones((U.shape[0], 1))])
return U / np.sqrt(2)
X = np.random.randn(100, 20)
L = _nonnegative_corrcoef(X)
U = _factored_nonnegative_corrcoef(X)
assert_allclose(L, U.dot(U.T))
alpha = 0.2
n_components = 7
diffusion_time = 2.0
stuff_a = compute_diffusion_map(L, alpha, n_components, diffusion_time)
embedding_a, result_a = stuff_a
stuff_b = compute_diffusion_map_psd(U, alpha, n_components, diffusion_time)
embedding_b, result_b = stuff_b
# The embeddings should be the same up to coordinate signs.
# In other words, if the x coordinate in one embedding
# is interpreted as the -x coordinate in another embedding,
# then the embeddings are not really different.
assert_allclose(
embedding_a / np.sign(embedding_a[0]),
embedding_b / np.sign(embedding_b[0]))
# Same thing for vectors.
assert_allclose(
result_a['vectors'] / np.sign(result_a['vectors'][0]),
result_b['vectors'] / | np.sign(result_b['vectors'][0]) | numpy.sign |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 11:01:48 2019
@author: cvaneck
This routine will determine the RMSF and related parameters,
giving the following input information. One of:
a file with channel frequencies and weights
OR
a file with channel frequencies (assumes equal weights)
OR
Input values for minimum frequency, maximum frequency, and channel width.
(assumes equal weights and all channels present)
The outputs are a list of relevant RMSF properties, and a plot of the RMSF
shape.
"""
#import sys
import argparse
import numpy as np
from RMutils.util_RM import get_rmsf_planes
from matplotlib import pyplot as plt
C = 2.997924538e8 # Speed of light [m/s]
def main():
"""
Determines what set of input parameters were defined, reads in file or
generates frequency array as appropriate, and passes frequency and weight
arrays to the function that works out the RMSF properties.
"""
descStr = """
Calculate and plot RMSF and report main properties, given a supplied
frequency coverage and optional weights (either as second column of
frequency file, or as separate file)."""
parser = argparse.ArgumentParser(description=descStr,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("freqFile", metavar="freqFile.dat", nargs='?',default=None,
help="ASCII file containing frequencies and optionally weights.")
parser.add_argument("weightFile", metavar="weightFile.dat", nargs='?',
help="Optional ASCII file containing weights.")
parser.add_argument("-f", dest=("freq_parms"),nargs=3,default=None,
help="Generate frequencies (in Hz): minfreq, maxfreq, channel_width",
)
parser.add_argument("-m", dest="phiMax_radm2", type=float, default=None,
help="absolute max Faraday depth sampled [Auto, ~10xFWHM].")
parser.add_argument("-d", dest="dphi_radm2", type=float, default=None,
help="Delta phi [Auto, ~10/FWHM].")
parser.add_argument("-s", dest="plotfile", default=None,
help="Filename to save plot to. [do not save]")
parser.add_argument("-n", dest="plotname", default=None,
help="Name of plot [\"Simulated RMSF\"]")
args = parser.parse_args()
#Check that at least one frequency input has been given:
if args.freqFile == None and args.freq_parms == None:
print("Please supply either a file with frequency values or use the -f flag.")
raise(Exception("No frequency input! Use -h flag for help on inputs."))
# if args.phiMax_radm2 != None:
# if args.phiMax_radm2
#Order of priority: frequency file takes precedence over -i flag.
# weight file takes precedence over 2nd column of frequency file.
if args.freqFile != None:
data=np.genfromtxt(args.freqFile,encoding=None,dtype=None)
if len(data.shape) == 2:
freq_array=data[:,0]
weights_array=data[:,1]
else:
freq_array=data
weights_array=np.ones_like(freq_array)
else:
        #Generate frequency and weight arrays from input values.
freq_array=np.arange(float(args.freq_parms[0]),float(args.freq_parms[1]),
float(args.freq_parms[2]))
weights_array=np.ones_like(freq_array)
if args.weightFile != None:
weights_array=np.genfromtxt(args.weightFile,encoding=None,dtype=None)
if len(weights_array) != len(freq_array):
raise Exception('Weights file does not have same number of channels as frequency source')
determine_RMSF_parameters(freq_array,weights_array,args.phiMax_radm2,args.dphi_radm2,args.plotfile,args.plotname)
def determine_RMSF_parameters(freq_array,weights_array,phi_max,dphi,plotfile=None,plotname=None):
"""
Characterizes an RMSF given the supplied frequency and weight arrays.
Prints the results to terminal and produces a plot.
Inputs:
freq_array: array of frequency values (in Hz)
weights_array: array of channel weights (arbitrary units)
phi_max (float): maximum Faraday depth to compute RMSF out to.
dphi (float): step size in Faraday depth
plotfile (str): file name and path to save RMSF plot.
plotname (str): title of plot
"""
lambda2_array=C**2/freq_array**2
l2_min=np.min(lambda2_array)
l2_max=np.max(lambda2_array)
dl2=np.median(np.abs(np.diff(lambda2_array)))
if phi_max == None:
phi_max = 10*2*np.sqrt(3.0) / (l2_max-l2_min) #~10*FWHM
if dphi == None:
dphi = 0.1*2* | np.sqrt(3.0) | numpy.sqrt |
import numpy as np
import pandas as pd
import copy
THRESHOLD = 15
def get_average_metrics(results):
individual_performance = {}
overall_performance = {}
measure_list = ['recall', 'precision', 'F1',
'accuracy', 're', 'mae',
'maep', 'nde', 'sae']
for appliance in results.keys():
measure_dict = {}
measure_average = {}
# initialization
for measure in measure_list:
measure_dict[measure] = []
overall_performance[measure] = []
# save details
for test_house in results[appliance]['y_test_raw'].keys():
performance = get_all_metrics(results[appliance]['y_test_raw'][test_house],
results[appliance]['pred_test'][test_house])
for measure in performance.keys():
measure_dict[measure].append(performance[measure])
# save mean
for measure in measure_list:
measure_average[measure] = np.mean(measure_dict[measure])
individual_performance[appliance] = measure_average
overall_performance_detail = {}
# initialize
for measure in measure_list:
overall_performance_detail[measure] = []
# save details
for appliance in individual_performance.keys():
for measure in measure_list:
overall_performance_detail[measure].append(individual_performance[appliance][measure])
# save mean
for measure in measure_list:
overall_performance[measure] = np.mean(overall_performance_detail[measure])
individual_performance = pd.DataFrame(individual_performance)
return individual_performance, overall_performance
def get_all_metrics(target, prediction):
threshold = THRESHOLD
results = {'recall': get_recall(target, prediction, threshold),
'precision': get_precision(target, prediction, threshold),
'F1': get_F1(target, prediction, threshold),
'accuracy': get_accuracy(target, prediction, threshold),
're': get_relative_error(target, prediction),
'mae': get_abs_error(target, prediction),
'maep': get_abs_error_positive(target, prediction),
'nde': get_nde(target, prediction),
'sae': get_sae(target, prediction)}
return results
def get_TP(target, prediction, threshold):
'''
    compute the number of true positives
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
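    # Illustrative reading of the parameters (assumption for this sketch): with
    # threshold=15, target and prediction are binarized into on/off states at
    # 15 W before true positives are counted.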
assert (target.shape == prediction.shape)
target = 1 - | np.clip(target, threshold, 0) | numpy.clip |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import torch
import numpy as np
import os.path
import sys
import torch.utils.data as data
from torchvision import datasets, transforms
class iMNIST(datasets.MNIST):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iMNIST, self).__init__(root, task_num, transform=transform,
target_transform=target_transform, download=download)
self.train = train # training set or test set
self.root = root
self.target_transform=target_transform
self.transform=transform
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' + ' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
targets = []
tt = [] # task module labels
td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
data.append(self.data[i])
targets.append(self.class_mapping[self.targets[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if self.train:
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
targets.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = data.copy()
self.targets = targets.copy()
self.tt = tt.copy()
self.td = td.copy()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img.numpy(), mode='L')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 10
self.num_samples = args.samples
self.inputsize = [1,28,28]
mean = (0.1307,)
std = (0.3081,)
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
| np.random.seed(self.seed) | numpy.random.seed |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ModelDatasetTest(test.TestCase):
def testModelMap(self):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(math_ops.matmul)
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
with self.test_session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(100):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
np.max(deltas)))
def testModelParallelMap(self):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(math_ops.matmul, num_parallel_calls=56)
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
with self.test_session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(1000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
np.max(deltas)))
def testModelMapAndBatch(self):
batch_size = 16
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.apply(
batching.map_and_batch(
math_ops.matmul, num_parallel_calls=28, batch_size=batch_size))
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
with self.test_session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(10):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
np.max(deltas)))
def testModelParallelInterleave(self):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(math_ops.matmul)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset, cycle_length=56, num_parallel_calls=56)
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
with self.test_session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(1000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
np.max(deltas)))
def testModelNested(self):
k = 1024 * 1024
a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
dataset = dataset_ops.Dataset.from_tensors((a, b, c)).repeat()
def f1(a, b, c):
x, y = a
return math_ops.matmul(x, y), b, c
def f2(a, b, c):
x, y = b
return a, math_ops.matmul(x, y), c
def f3(a, b, c):
x, y = c
return a, b, math_ops.matmul(x, y)
dataset = dataset.map(f1, num_parallel_calls=32)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset, cycle_length=2)
dataset = dataset.map(f2, num_parallel_calls=16)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset, cycle_length=2)
dataset = dataset.map(f3, num_parallel_calls=10)
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
with self.test_session() as sess:
for _ in range(5):
sess.run(get_next)
for _ in range(100):
start = time.time()
sess.run(get_next)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), | np.mean(deltas) | numpy.mean |
import argparse
import fnmatch
import os
import shutil
import h5py as h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix
import sunrgbd
import wrgbd51
from alexnet_model import AlexNet
from basic_utils import Models, RunSteps
from densenet_model import DenseNet
from main import init_save_dirs
from resnet_models import ResNet
from vgg16_model import VGG16Net
def get_rnn_model(params):
if params.net_model == Models.AlexNet:
model_rnn = AlexNet(params)
elif params.net_model == Models.VGGNet16:
model_rnn = VGG16Net(params)
elif params.net_model == Models.ResNet50 or params.net_model == Models.ResNet101:
model_rnn = ResNet(params)
else: # params.net_model == Models.DenseNet121:
model_rnn = DenseNet(params)
return model_rnn
def calc_scores(l123_preds, test_labels, model_rnn):
model_rnn.test_labels = test_labels
avg_res, true_preds, test_size = model_rnn.calc_scores(l123_preds)
conf_mat = confusion_matrix(test_labels, l123_preds)
return avg_res, true_preds, test_size, conf_mat
def show_sunrgbd_conf_mat(conf_mat):
num_ctgs = len(conf_mat)
cm_sum = np.sum(conf_mat, axis=1, keepdims=True)
cm_perc = conf_mat / cm_sum.astype(float) * 100
columns = sunrgbd.get_class_names(range(num_ctgs))
df_cm = pd.DataFrame(cm_perc, index=columns, columns=columns)
plt.figure(figsize=(20, 15))
sn.set(font_scale=1.4) # for label size
heatmap = sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, cmap='Oranges', fmt=".1f", vmax=100) # font size
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=16)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=16)
# plt.ylabel('True Label')
# plt.xlabel('Predicted Label')
plt.show()
# plt.savefig('sunrgb_confusion_matrix.eps', format='eps', dpi=1000)
def calc_scores_conf_mat(svm_path):
model_rnn = get_rnn_model(params)
l1, l2, l3 = 'layer5', 'layer6', 'layer7'
with h5py.File(svm_path, 'r') as f:
l1_conf_scores = np.asarray(f[l1])
l2_conf_scores = np.asarray(f[l2])
l3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
f.close()
print('Running Layer-[{}+{}+{}] Confidence Average Fusion...'.format(l1, l2, l3))
print('SVM confidence scores of {}, {} and {} are average fused'.format(l1, l2, l3))
print('SVM confidence average fusion')
l123_avr_confidence = np.mean(np.array([l1_conf_scores, l2_conf_scores, l3_conf_scores]), axis=0)
l123_preds = np.argmax(l123_avr_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
print('Fusion result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
show_sunrgbd_conf_mat(conf_mat)
def sunrgbd_combined_scores_conf_mat(rgb_svm_path, depth_svm_path):
model_rnn = get_rnn_model(params)
l1, l2, l3 = 'layer5', 'layer6', 'layer7'
with h5py.File(rgb_svm_path, 'r') as f:
rgb1_conf_scores = np.asarray(f[l1])
rgb2_conf_scores = np.asarray(f[l2])
rgb3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
f.close()
with h5py.File(depth_svm_path, 'r') as f:
depth1_conf_scores = np.asarray(f[l1])
depth2_conf_scores = np.asarray(f[l2])
depth3_conf_scores = np.asarray(f[l3])
f.close()
rgb_l123_sum_confidence = np.sum(np.array([rgb1_conf_scores, rgb2_conf_scores, rgb3_conf_scores]), axis=0)
depth_l123_sum_confidence = np.sum(np.array([depth1_conf_scores, depth2_conf_scores, depth3_conf_scores]), axis=0)
    print('Weighted Average SVM confidence scores of [RGB({}+{}+{})+Depth({}+{}+{})] are taken'
          .format(l1, l2, l3, l1, l2, l3))
print('SVMs confidence weighted fusion')
w_rgb, w_depth = model_rnn.calc_modality_weights((rgb_l123_sum_confidence, depth_l123_sum_confidence))
rgbd_l123_wadd_confidence = np.add(rgb_l123_sum_confidence * w_rgb[:, np.newaxis],
depth_l123_sum_confidence * w_depth[:, np.newaxis])
l123_preds = np.argmax(rgbd_l123_wadd_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
print('Combined Weighted Confidence result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
show_sunrgbd_conf_mat(conf_mat)
def sunrgbd_main(params):
root_path = '../../data/sunrgbd/'
svm_conf_paths = root_path + params.features_root + params.proceed_step + '/svm_confidence_scores/'
rgb_svm_path = svm_conf_paths + params.net_model + '_RGB_JPG.hdf5'
depth_svm_path = svm_conf_paths + params.net_model + '_Depth_Colorized_HDF5.hdf5'
if params.data_type == 'rgb':
calc_scores_conf_mat(rgb_svm_path)
elif params.data_type == 'depth':
calc_scores_conf_mat(depth_svm_path)
else:
sunrgbd_combined_scores_conf_mat(rgb_svm_path, depth_svm_path)
def individual_class_scores(total_conf_mat):
num_ctgs = len(total_conf_mat)
cm_sum = np.sum(total_conf_mat, axis=1, keepdims=True)
cm_perc = total_conf_mat / cm_sum.astype(float) * 100
    individual_scores = cm_perc.diagonal()
categories = wrgbd51.get_class_names(range(num_ctgs))
i = 0
    for category, category_score in zip(categories, individual_scores):
print(f'{category:<15} {category_score:>10.1f}')
def show_wrgbd_conf_mat(conf_mat):
num_ctgs = len(conf_mat)
cm_sum = np.sum(conf_mat, axis=1, keepdims=True)
cm_perc = conf_mat / cm_sum.astype(float) * 100
columns = wrgbd51.get_class_names(range(num_ctgs))
df_cm = pd.DataFrame(cm_perc, index=columns, columns=columns)
plt.figure(figsize=(20, 15))
sn.set(font_scale=1.4) # for label size
heatmap = sn.heatmap(df_cm, annot=True, annot_kws={"size": 10}, cmap='Oranges', fmt=".1f", vmax=100) # font size
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=12)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=12)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.show()
def wrgb_scores_conf_mat(params, svm_conf_paths):
model_rnn = get_rnn_model(params)
if params.data_type == 'rgb':
params.proceed_step = RunSteps.FIX_RECURSIVE_NN
data_type_ex = 'crop'
params.data_type = 'crop'
l1, l2, l3 = model_rnn.get_best_trio_layers()
params.data_type = 'rgb'
else:
params.proceed_step = RunSteps.FINE_RECURSIVE_NN
data_type_ex = 'depthcrop'
params.data_type = 'depthcrop'
l1, l2, l3 = model_rnn.get_best_trio_layers()
params.data_type = 'depths'
all_splits_scores = []
for split in range(1, 11):
conf_file = params.net_model + '_' + data_type_ex + '_split_' + str(split) + '.hdf5'
svm_conf_file_path = svm_conf_paths + conf_file
with h5py.File(svm_conf_file_path, 'r') as f:
l1_conf_scores = np.asarray(f[l1])
l2_conf_scores = np.asarray(f[l2])
l3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
f.close()
# print('Running Layer-[{}+{}+{}] Confidence Average Fusion...'.format(l1, l2, l3))
# print('SVM confidence scores of {}, {} and {} are average fused'.format(l1, l2, l3))
# print('SVM confidence average fusion')
l123_avr_confidence = np.mean(np.array([l1_conf_scores, l2_conf_scores, l3_conf_scores]), axis=0)
l123_preds = np.argmax(l123_avr_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
# print('Fusion result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
all_splits_scores.append((avg_res, true_preds, test_size, conf_mat))
total_avg_res = 0.0
total_true_preds = 0.0
total_test_size = 0.0
total_conf_mat = np.zeros(shape=(51, 51), dtype=float)
for avg_res, true_preds, test_size, conf_mat in all_splits_scores:
total_avg_res += avg_res
total_true_preds += true_preds
total_test_size += test_size
total_conf_mat += conf_mat
print('Average score is {0:.1f}% ({1}/{2})'.format(total_avg_res / 10, total_true_preds, total_test_size))
individual_class_scores(total_conf_mat)
# show_wrgbd_conf_mat(total_conf_mat)
def wrgbd_combined_scores_conf_mat(params, svm_conf_paths):
model_rnn = get_rnn_model(params)
params.proceed_step = RunSteps.FIX_RECURSIVE_NN
rgb_data_type_ex = 'crop'
params.data_type = 'crop'
rgb_l1, rgb_l2, rgb_l3 = model_rnn.get_best_trio_layers()
params.proceed_step = RunSteps.FINE_RECURSIVE_NN
depth_data_type_ex = 'depthcrop'
params.data_type = 'depthcrop'
depth_l1, depth_l2, depth_l3 = model_rnn.get_best_trio_layers()
params.data_type = 'rgbd'
all_splits_scores = []
for split in range(1, 11):
rgb_conf_file = params.net_model + '_' + rgb_data_type_ex + '_split_' + str(split) + '.hdf5'
rgb_svm_conf_file_path = svm_conf_paths + rgb_conf_file
with h5py.File(rgb_svm_conf_file_path, 'r') as f:
rgb1_conf_scores = np.asarray(f[rgb_l1])
rgb2_conf_scores = np.asarray(f[rgb_l2])
rgb3_conf_scores = np.asarray(f[rgb_l3])
test_labels = np.asarray(f['labels'])
f.close()
depth_conf_file = params.net_model + '_' + depth_data_type_ex + '_split_' + str(split) + '.hdf5'
depth_svm_conf_file_path = svm_conf_paths + depth_conf_file
with h5py.File(depth_svm_conf_file_path, 'r') as f:
depth1_conf_scores = | np.asarray(f[depth_l1]) | numpy.asarray |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 20:43:18 2019
@author: blose
"""
#%%
import numpy as np
from tqdm import tqdm
import time
def read_data(filename):
lines = open(filename).read().split('\n')
data = []
for line in lines[:-1]:
data.append(line.split(', '))
data = np.array(data, dtype='object')
return data
def inner_prod(x, w):
return w[0] + sum([i*j for i,j in zip(x,w[1:])])
def predict(x,w):
return 1 if inner_prod(x, w) > 0 else -1
def train_perceptron(X_train, y_train, X_dev, y_dev, epochs):
m, n = X_train.shape
w = np.array([0 for i in range(n+1)])
for epoch in range(epochs):
updates = 0
for i in range(m):
pred = inner_prod(X_train[i], w)
if y_train[i]*pred <= 0:
updates += 1
w[0] = w[0] + y_train[i]
w[1:] = w[1:] + y_train[i]*X_train[i]
y_pred = np.zeros(X_dev.shape[0])
for i in range(X_dev.shape[0]):
y_pred[i] = predict(X_dev[i], w)
print('epoch', epoch, 'updates', updates, \
'('+str(np.round(updates/m*100,2))+'%)', 'dev_err',
np.round(np.mean(y_pred != y_dev)*100,2), '(+:'+str(np.round(100*(y_pred > 0).mean(),2))+'%)')
return w
def train_perceptron_average(X_train, y_train, X_dev, y_dev, epochs):
m, n = X_train.shape
w = np.array([0 for i in range(n+1)])
ws = np.array([0 for i in range(n+1)])
for epoch in range(epochs):
updates = 0
for i in range(m):
pred = inner_prod(X_train[i], w)
if y_train[i]*pred <= 0:
updates += 1
w[0] = w[0] + y_train[i]
w[1:] = w[1:] + y_train[i]*X_train[i]
ws = ws + w
y_pred = np.zeros(X_dev.shape[0])
for i in range(X_dev.shape[0]):
y_pred[i] = predict(X_dev[i], ws)
print('epoch', epoch, 'updates', updates, \
'('+str(np.round(updates/m*100,2))+'%)', 'dev_err',
np.round(np.mean(y_pred != y_dev)*100,2), '(+:'+str(np.round(100*(y_pred > 0).mean(),2))+'%)')
return ws
def knn(X_train, y_train, X_test, n_neighbors = 3, metric='euclidian'):
y_pred = []
if metric =='euclidian':
dist = lambda A,b: np.sqrt(((A - b)**2).sum(axis=1))
elif metric =='manhatan':
dist = lambda A,b: np.abs(A - b).sum(axis=1)
for row in tqdm(range(X_test.shape[0])):
dists = dist(X_train, X_test[row,:])
indx = np.argsort(dists)
most = y_train[indx[:n_neighbors]]
target0 = (most == 0).sum()
target1 = (most == 1).sum()
if target0 >= target1:
y_pred.append(0)
else:
y_pred.append(1)
return np.array(y_pred)
#%%
train = read_data('hw1-data/income.train.txt.5k')
dev = read_data('hw1-data/income.dev.txt')
mapping = {}
encoded = []
k = 0
for col in range(train.shape[1]):
items = np.unique(train[:,col])
thiscol = np.zeros((train.shape[0], items.shape[0]))
for i, item in enumerate(items):
mapping[k] = (item, col)
k += 1
thiscol[train[:,col] == item, i] = 1
encoded.append(thiscol)
encoded = np.concatenate(encoded, axis=1)
X_train = encoded[:, :-2]
y_train = (-1)**encoded[:, -2]
dev_encoded = np.zeros((dev.shape[0], encoded.shape[1]))
for key, val in mapping.items():
for i in range(dev.shape[1]):
dev_encoded[dev[:,i] == val[0], key] = 1
X_dev = dev_encoded[:, :-2]
y_dev = (-1)**dev_encoded[:, -2]
w = train_perceptron(X_train, y_train, X_dev, y_dev, 5)
ws = train_perceptron_average(X_train, y_train, X_dev, y_dev, 5)
indx = np.argsort(ws[1:])
for i in indx[:5]:
print(ws[i+1], mapping[i])
indx = np.argsort(ws[1:])
for i in indx[-5:]:
print(ws[i+1], mapping[i])
print('Bias:', ws[0])
#3.2
start = time.time()
y_pred = knn(X_train, y_train, X_dev, k)
print('KNN Runtime:', time.time()-start)
start = time.time()
ws= train_perceptron_average(X_train, y_train, X_dev, y_dev, 5)
print('Perceptron Runtime:', time.time()-start)
# 4.1
sorted_index = | np.argsort(-y_train) | numpy.argsort |
#!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
import argparse
from datasets.implemented_datasets import *
import wandb
import pandas as pd
import numpy as np
from constants import *
import time
from data.tensorflow_data import TripletsBPRGenerator
from evaluation.topk_evaluator import Evaluator
from igccf_experiments.best_models import get_wandb_project_dict
from models.puresvd import PureSVD
from models.tensorflow.fism import FISM
import tensorflow as tf
from losses.tensorflow_losses import bpr_loss, l2_reg
from models.tensorflow.igccf import IGCCF
from utils import gpu_utils
import os
from utils.early_stopping import EarlyStoppingHandlerTensorFlow
from utils.pandas_utils import remap_column_consecutive, remap_columns_consecutive
PROJECT = "gowalla_dat"
SEEN_USERS_PERCENTAGE = [0.5, 0.6, 0.7, 0.8, 0.9]
ALGORITHM = "PureSVD"
SEEDS = [27, 83, 96, 14]
def best_param():
return {
"lastfm_dat": 25,
"ml1m_dat": 50,
"Amaz_dat": 25,
"gowalla_dat": 2000,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
##########################################
# identifier of WANDB run
##########################################
parser.add_argument("--wandb_project", type=str, default=PROJECT)
parser.add_argument("--alg", type=str, default=ALGORITHM)
args = vars(parser.parse_args())
alg = args["alg"]
wandb_project_dict = get_wandb_project_dict(args["wandb_project"])
##########################################
# Retrieve run parameters
##########################################
api = wandb.Api()
run_identifier = "XXXXXX/{}/{}".format(
args["wandb_project"], wandb_project_dict["igccf"]
)
run_object = api.run(run_identifier)
run_parameters_dict = run_object.config
summary = run_object.summary
##########################################
# Load dataset
##########################################
dataset_dict = eval(wandb_project_dict["dataset"])().load_split(
wandb_project_dict["split_name"]
)
train_df = dataset_dict["train"]
val_df = dataset_dict["val"]
# use both train and validation data
train_df = pd.concat([train_df, val_df])
user_data = {"interactions": train_df}
test_df = dataset_dict["test"]
full_data = pd.concat([train_df, val_df, test_df])
test_evaluator = Evaluator(
cutoff_list=[5, 20], metrics=["Recall", "NDCG"], test_data=test_df
)
for seed in SEEDS:
##########################################
# Split Users
##########################################
# set random seed
| np.random.seed(seed) | numpy.random.seed |
import helpers
import numpy
import pytest
import toughio
write_read = lambda x, **kwargs: helpers.write_read(
"INFILE", x, toughio.write_input, toughio.read_input, **kwargs
)
write_read_tough = lambda x: write_read(
x,
writer_kws={"file_format": "tough"},
reader_kws={"file_format": "tough"},
)
write_read_json = lambda x: write_read(
x,
writer_kws={"file_format": "json"},
reader_kws={"file_format": "json"},
)
@pytest.mark.parametrize(
"write_read, single",
[
(write_read_tough, True),
(write_read_tough, False),
(write_read_json, True),
(write_read_json, False),
],
)
def test_title(write_read, single):
parameters_ref = {
"title": (
helpers.random_string(80)
if single
else [helpers.random_string(80) for _ in range(numpy.random.randint(5) + 2)]
),
}
parameters = write_read(parameters_ref)
assert parameters_ref["title"] == parameters["title"]
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_rocks(write_read):
keys = [
"density",
"porosity",
"permeability",
"conductivity",
"specific_heat",
"compressibility",
"expansivity",
"conductivity_dry",
"tortuosity",
"klinkenberg_parameter",
"distribution_coefficient_3",
"distribution_coefficient_4",
]
parameters_ref = {
"rocks": {
helpers.random_string(5): {key: numpy.random.rand() for key in keys[:5]},
helpers.random_string(5): {
key: numpy.random.rand()
if key != "permeability"
else numpy.random.rand(3)
for key in keys[:5]
},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
}
}
names = list(parameters_ref["rocks"].keys())
parameters_ref["rocks"][names[-1]].update(
{
"relative_permeability": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
}
)
parameters_ref["rocks"][names[-2]].update(
{
"capillarity": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
}
)
parameters_ref["rocks"][names[-3]].update(
{
"relative_permeability": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
"capillarity": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
}
)
parameters = write_read(parameters_ref)
assert sorted(parameters_ref["rocks"].keys()) == sorted(parameters["rocks"].keys())
for k, v in parameters_ref["rocks"].items():
for kk, vv in v.items():
if not isinstance(vv, dict):
assert numpy.allclose(vv, parameters["rocks"][k][kk], atol=1.0e-5)
else:
helpers.allclose_dict(vv, parameters["rocks"][k][kk], atol=1.0e-4)
@pytest.mark.parametrize(
"write_read, rpcap",
[
(write_read_tough, "rp"),
(write_read_tough, "cap"),
(write_read_tough, "both"),
(write_read_json, "rp"),
(write_read_json, "cap"),
(write_read_json, "both"),
],
)
def test_rpcap(write_read, rpcap):
parameters_ref = {"default": {}}
if rpcap in {"rp", "both"}:
parameters_ref["default"]["relative_permeability"] = {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
}
if rpcap in {"cap", "both"}:
parameters_ref["default"]["capillarity"] = {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
}
parameters = write_read(parameters_ref)
for k, v in parameters_ref["default"].items():
helpers.allclose_dict(v, parameters["default"][k], atol=1.0e-4)
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_flac(write_read):
parameters_ref = {
"flac": {
"creep": bool(numpy.random.randint(2)),
"porosity_model": numpy.random.randint(10),
"version": numpy.random.randint(10),
},
"rocks": {
helpers.random_string(5): {
"permeability_model": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
"equivalent_pore_pressure": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
}
for _ in numpy.random.rand(10) + 1
},
}
parameters = write_read(parameters_ref)
helpers.allclose_dict(parameters_ref["flac"], parameters["flac"])
for k, v in parameters_ref["rocks"].items():
for kk, vv in v.items():
helpers.allclose_dict(vv, parameters["rocks"][k][kk], atol=1.0e-4)
@pytest.mark.parametrize(
"write_read, isothermal",
[(write_read_tough, True), (write_read_tough, False)],
)
def test_multi(write_read, isothermal):
import random
from toughio._io.input.tough._common import eos
parameters_ref = {
"eos": random.choice(
[k for k in eos.keys() if k not in {"eos7", "eos8", "eos9"}]
),
"isothermal": isothermal,
}
parameters = write_read(parameters_ref)
multi = [
parameters["n_component"],
parameters["n_component"] + 1,
parameters["n_phase"],
6,
]
multi_ref = eos[parameters_ref["eos"]]
assert multi_ref == multi
assert parameters_ref["isothermal"] == parameters["isothermal"]
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_solvr(write_read):
parameters_ref = {
"solver": {
"method": numpy.random.randint(10),
"z_precond": helpers.random_string(2),
"o_precond": helpers.random_string(2),
"rel_iter_max": numpy.random.rand(),
"eps": numpy.random.rand(),
},
}
parameters = write_read(parameters_ref)
assert parameters_ref["solver"]["method"] == parameters["solver"]["method"]
assert parameters_ref["solver"]["z_precond"] == parameters["solver"]["z_precond"]
assert parameters_ref["solver"]["o_precond"] == parameters["solver"]["o_precond"]
assert numpy.allclose(
parameters_ref["solver"]["rel_iter_max"],
parameters["solver"]["rel_iter_max"],
atol=1.0e-5,
)
assert numpy.allclose(
parameters_ref["solver"]["eps"], parameters["solver"]["eps"], atol=1.0e-5
)
@pytest.mark.parametrize(
"write_read, t_steps, num_pvars",
[
(write_read_tough, numpy.random.rand(), 4),
(write_read_tough, numpy.random.rand(numpy.random.randint(100) + 1), 4),
(write_read_tough, numpy.random.rand(numpy.random.randint(100) + 1), 6),
(write_read_json, numpy.random.rand(), 4),
(write_read_json, numpy.random.rand(numpy.random.randint(100) + 1), 4),
(write_read_json, numpy.random.rand(numpy.random.randint(100) + 1), 6),
],
)
def test_param(write_read, t_steps, num_pvars):
parameters_ref = {
"options": {
"n_iteration": numpy.random.randint(10),
"n_cycle": numpy.random.randint(10),
"n_second": numpy.random.randint(10),
"n_cycle_print": numpy.random.randint(10),
"verbosity": numpy.random.randint(10),
"temperature_dependence_gas": numpy.random.rand(),
"effective_strength_vapor": numpy.random.rand(),
"t_ini": numpy.random.rand(),
"t_max": numpy.random.rand(),
"t_steps": t_steps,
"t_step_max": numpy.random.rand(),
"t_reduce_factor": numpy.random.rand(),
"gravity": numpy.random.rand(),
"mesh_scale_factor": numpy.random.rand(),
"eps1": numpy.random.rand(),
"eps2": numpy.random.rand(),
"w_upstream": numpy.random.rand(),
"w_newton": numpy.random.rand(),
"derivative_factor": numpy.random.rand(),
},
"extra_options": {
k + 1: v for k, v in enumerate(numpy.random.randint(10, size=24))
},
"default": {"initial_condition": numpy.random.rand(num_pvars)},
}
parameters = write_read(parameters_ref)
helpers.allclose_dict(parameters_ref["options"], parameters["options"], atol=1.0e-5)
helpers.allclose_dict(parameters_ref["extra_options"], parameters["extra_options"])
if "initial_condition" in parameters["default"].keys():
assert numpy.allclose(
parameters_ref["default"]["initial_condition"],
parameters["default"]["initial_condition"],
atol=1.0e-5,
)
else:
assert not len(parameters_ref["default"]["initial_condition"])
@pytest.mark.parametrize(
"write_read, num_floats",
[
(write_read_tough, None),
(write_read_tough, 8),
(write_read_json, None),
(write_read_json, 8),
],
)
def test_selec(write_read, num_floats):
parameters_ref = {
"selections": {
"integers": {
k + 1: v for k, v in enumerate(numpy.random.randint(100, size=16))
},
"floats": (
numpy.random.rand(num_floats)
if num_floats is not None and num_floats <= 8
else numpy.random.rand(
numpy.random.randint(100) + 1, numpy.random.randint(8) + 1
)
),
},
}
parameters_ref["selections"]["integers"][1] = (
len(parameters_ref["selections"]["floats"])
if numpy.ndim(parameters_ref["selections"]["floats"]) == 2
else 1
)
parameters = write_read(parameters_ref)
helpers.allclose_dict(
parameters_ref["selections"]["integers"], parameters["selections"]["integers"]
)
if "floats" in parameters["selections"].keys():
assert numpy.allclose(
parameters_ref["selections"]["floats"],
parameters["selections"]["floats"],
atol=1.0e-4,
)
else:
assert parameters_ref["selections"]["integers"][1] == 0
@pytest.mark.parametrize(
"write_read, num_pvars, num_items",
[
(write_read_tough, 4, None),
(write_read_tough, 6, None),
(write_read_tough, 4, 1),
(write_read_tough, 6, 1),
(write_read_json, 4, None),
(write_read_json, 6, None),
],
)
def test_indom(write_read, num_pvars, num_items):
num_items = num_items if num_items else numpy.random.randint(10) + 1
parameters_ref = {
"rocks": {
helpers.random_string(5): {
"initial_condition": numpy.random.rand(num_pvars),
}
for _ in range(num_items)
},
}
parameters = write_read(parameters_ref)
for k, v in parameters_ref["rocks"].items():
assert numpy.allclose(
v["initial_condition"],
parameters["rocks"][k]["initial_condition"],
atol=1.0e-4,
)
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_momop(write_read):
parameters_ref = {
"more_options": {
k + 1: v for k, v in enumerate(numpy.random.randint(10, size=40))
},
}
parameters = write_read(parameters_ref)
helpers.allclose_dict(parameters_ref["more_options"], parameters["more_options"])
@pytest.mark.parametrize(
"write_read, times",
[
(write_read_tough, numpy.random.rand()),
(write_read_tough, numpy.random.rand(numpy.random.randint(100) + 1)),
(write_read_json, numpy.random.rand()),
(write_read_json, numpy.random.rand(numpy.random.randint(100) + 1)),
],
)
def test_times(write_read, times):
parameters_ref = {"times": times}
parameters = write_read(parameters_ref)
assert numpy.allclose(parameters_ref["times"], parameters["times"], atol=1.0e-5)
@pytest.mark.parametrize(
"write_read, oft, n",
[
(write_read_tough, "element_history", 5),
(write_read_tough, "connection_history", 10),
(write_read_tough, "generator_history", 5),
(write_read_json, "element_history", 5),
(write_read_json, "connection_history", 10),
(write_read_json, "generator_history", 5),
],
)
def test_oft(write_read, oft, n):
parameters_ref = {
oft: [helpers.random_string(n) for _ in range(numpy.random.randint(10) + 1)]
}
parameters = write_read(parameters_ref)
assert parameters_ref[oft] == parameters[oft]
@pytest.mark.parametrize(
"write_read, specific_enthalpy, label_length",
[
(write_read_tough, True, 5),
(write_read_json, True, 5),
(write_read_tough, True, 6),
(write_read_json, True, 6),
(write_read_tough, False, 5),
(write_read_json, False, 5),
(write_read_tough, False, 6),
(write_read_json, False, 6),
],
)
def test_gener(write_read, specific_enthalpy, label_length):
n_rnd = numpy.random.randint(100) + 1
parameters_ref = {
"generators": {
helpers.random_label(label_length): {
"name": [
helpers.random_string(5),
helpers.random_string(5),
helpers.random_string(5),
],
"nseq": numpy.random.randint(10, size=3),
"nadd": numpy.random.randint(10, size=3),
"nads": numpy.random.randint(10, size=3),
"type": [
helpers.random_string(4),
helpers.random_string(4),
helpers.random_string(4),
],
"times": [numpy.random.rand(10), None, numpy.random.rand(n_rnd)],
"rates": [
numpy.random.rand(10),
numpy.random.rand(),
numpy.random.rand(n_rnd),
],
"specific_enthalpy": [
numpy.random.rand(10),
numpy.random.rand(),
numpy.random.rand(n_rnd),
]
if specific_enthalpy
else None,
"layer_thickness": numpy.random.rand(3),
},
helpers.random_label(label_length): {
"name": [helpers.random_string(5), helpers.random_string(5)],
"nseq": numpy.random.randint(10, size=2),
"nadd": numpy.random.randint(10, size=2),
"nads": numpy.random.randint(10, size=2),
"type": [helpers.random_string(4), helpers.random_string(4)],
"rates": numpy.random.rand(2),
},
helpers.random_label(label_length): {
"nseq": numpy.random.randint(10),
"nadd": numpy.random.randint(10),
"nads": numpy.random.randint(10),
"type": helpers.random_string(4),
"rates": numpy.random.rand(),
"layer_thickness": | numpy.random.rand() | numpy.random.rand |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 28 21:10:21 2020
@author: pengning
Does the Green's function Arnoldi iteration over a shell domain for spherical waves.
The nice analytical properties of the polynomial representation are lost when using a shell
domain that leaves out the origin, so here we go back to the spatial discretization idea instead.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
from .shell_domain import shell_rho_M, shell_rho_N
import mpmath
from mpmath import mp
def grid_integrate_trap(integrandgrid,diffgrid):
#integrate a spatial grid representation of the integrand using trapezoid rule
return np.sum((integrandgrid[:-1]+integrandgrid[1:])*diffgrid/2.0)
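# Illustrative check (not part of the original module; the grid resolution is an arbitrary
# assumption for this example): grid_integrate_trap is a plain trapezoid rule, so integrating
# r^2 on [0, 1] over a fine grid should come out very close to 1/3.
def _trap_rule_demo():
    rdemo = np.linspace(0.0, 1.0, 1001)
    approx = grid_integrate_trap(rdemo**2, np.diff(rdemo))
    assert abs(approx - 1.0/3.0) < 1e-5
    return approx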
def rgrid_Mmn_normsqr(vecMgrid, rsqrgrid, rdiffgrid):
return np.real(grid_integrate_trap(np.conj(vecMgrid)*vecMgrid*rsqrgrid, rdiffgrid))
def rgrid_Mmn_dot(vecM1grid, vecM2grid, rsqrgrid, rdiffgrid):
return grid_integrate_trap(vecM1grid*vecM2grid*rsqrgrid, rdiffgrid)
def rgrid_Mmn_vdot(vecM1grid, vecM2grid, rsqrgrid, rdiffgrid):
return grid_integrate_trap(np.conj(vecM1grid)*vecM2grid*rsqrgrid, rdiffgrid)
def rgrid_Mmn_plot(vecMgrid, rgrid):
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
ax1.plot(rgrid,np.real(vecMgrid))
ax2.plot(rgrid,np.imag(vecMgrid))
plt.show()
def shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, vecMgrid):
"""
evaluates G(r,r')*vecM(r') over a shell region from R1 to R2
the region coordinates are contained in rsqrgrid, a grid of r^2, and rdiffgrid, the distances between neighboring grid points; these instead of the original rgrid are given so that they only need to be computed once in main Arnoldi method
"""
#rsqrgrid = rgrid**2
#rdiffgrid = np.diff(rgrid)
RgMvecMrsqr_grid = RgMgrid*vecMgrid*rsqrgrid
Im_newvecMgrid = k**3 * grid_integrate_trap(RgMvecMrsqr_grid, rdiffgrid) * RgMgrid
    Re_ImMfactgrid = np.zeros_like(rsqrgrid, dtype=complex)
Re_ImMfactgrid[1:] = k**3 * np.cumsum((RgMvecMrsqr_grid[:-1]+RgMvecMrsqr_grid[1:])*rdiffgrid/2.0)
rev_ImMvecMrsqr_grid = np.flip(ImMgrid*vecMgrid*rsqrgrid) #reverse the grid direction to evaluate integrands of the form kr' to kR2
    Re_RgMfactgrid = np.zeros_like(rsqrgrid, dtype=complex)
Re_RgMfactgrid[:-1] = k**3 * np.flip(np.cumsum( (rev_ImMvecMrsqr_grid[:-1]+rev_ImMvecMrsqr_grid[1:])*np.flip(rdiffgrid)/2.0 ))
Re_newvecMgrid = -ImMgrid*Re_ImMfactgrid - RgMgrid*Re_RgMfactgrid
return Re_newvecMgrid + 1j*Im_newvecMgrid
def shell_Green_grid_Arnoldi_Mmn_oneshot(n,k,R1,R2, invchi, vecnum, gridpts=200):
rgrid = np.linspace(R1,R2,gridpts)
rsqrgrid = rgrid**2
rdiffgrid = np.diff(rgrid)
RgMgrid = sp.spherical_jn(n, k*rgrid) #the argument for radial part of spherical waves is kr
ImMgrid = sp.spherical_yn(n, k*rgrid)
    RgMgrid = RgMgrid.astype(complex)
    ImMgrid = ImMgrid.astype(complex)
vecMgrid = RgMgrid / np.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid,rdiffgrid))
rgrid_Mmn_plot(vecMgrid, rgrid)
unitMvecs = [vecMgrid]
for i in range(1,vecnum):
newvecMgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs[-1])
newvecMgrid[:] = np.real(newvecMgrid)
print('before orthogonalization and normalization:')
rgrid_Mmn_plot(newvecMgrid, rgrid)
for j in range(len(unitMvecs)):
unitMvec = unitMvecs[j]
coeff = rgrid_Mmn_vdot(unitMvec, newvecMgrid, rsqrgrid,rdiffgrid)
newvecMgrid -= coeff*unitMvec
newvecMgrid /= np.sqrt(rgrid_Mmn_normsqr(newvecMgrid, rsqrgrid,rdiffgrid))
rgrid_Mmn_plot(newvecMgrid, rgrid)
print(rgrid_Mmn_vdot(RgMgrid, newvecMgrid, rsqrgrid,rdiffgrid))
unitMvecs.append(newvecMgrid)
    Green = np.zeros((vecnum,vecnum), dtype=complex)
for i in range(vecnum):
for j in range(vecnum):
GMjgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs[j])
Green[i,j] = rgrid_Mmn_vdot(unitMvecs[i],GMjgrid, rsqrgrid,rdiffgrid)
print(Green)
Umat = np.eye(vecnum)*invchi - Green
return Green, Umat
def shell_Green_grid_Arnoldi_Mmn_step(n,k, invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=False):
"""
this method does one more Arnoldi step, given existing Arnoldi vectors in unitMvecs
the last entry in unitMvecs is G*unitMvecs[-2] without orthogonalization and normalization
so len(unitMvecs) = len(Gmat)+1 going in and going out of the method
this is setup for most efficient iteration since G*unitMvec is only computed once
the unitMvecs list is modified on spot; a new enlarged Gmat nparray is returned at the end
"""
#first, begin by orthogonalizing and normalizing unitMvecs[-1]
#use relation U = V^{-1} - G
"""
see comment for analogous method for N waves, shell_Green_grid_Arnoldi_Nmn_step
coef1 = Gmat[-1,-1]
unitMvecs[-1] -= coef1*unitMvecs[-2]
if Gmat.shape[0]>1: #since G has symmetric Arnoldi representation (so tridiagonal), G*M_j has non-zero overlap with M_j and M_{j-1}
coef2 = Gmat[-2,-1]
unitMvecs[-1] -= coef2*unitMvecs[-3]
unitMvecs[-1][:] = np.real(unitMvecs[-1][:])
"""
vecnum = Gmat.shape[0]
for i in range(vecnum):
coef = rgrid_Mmn_vdot(unitMvecs[i], unitMvecs[-1], rsqrgrid,rdiffgrid)
unitMvecs[-1] -= coef*unitMvecs[i]
unitMvecs[-1][:] = np.real(unitMvecs[-1][:])
norm = np.sqrt(rgrid_Mmn_normsqr(unitMvecs[-1], rsqrgrid,rdiffgrid))
unitMvecs[-1] /= norm
if plotVectors:
rgrid_Mmn_plot(unitMvecs[-1], rgrid)
#get new vector
newvecM = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, unitMvecs[-1])
newvecM[:] = np.real(newvecM)
newGmat = | np.zeros((Gmat.shape[0]+1,Gmat.shape[1]+1), dtype=np.complex) | numpy.zeros |
#####################################################################
# This file is part of the 4D Light Field Benchmark. #
# #
# This work is licensed under the Creative Commons #
# Attribution-NonCommercial-ShareAlike 4.0 International License. #
# To view a copy of this license, #
# visit http://creativecommons.org/licenses/by-nc-sa/4.0/. #
#####################################################################
import configparser
import os
import sys
import numpy as np
import imageio
def read_lightfield(data_folder):
params = read_parameters(data_folder)
light_field = np.zeros((params["num_cams_x"], params["num_cams_y"], params["height"], params["width"], 3), dtype=np.uint8)
views = sorted([f for f in os.listdir(data_folder) if f.startswith("input_") and f.endswith(".png")])
for idx, view in enumerate(views):
fpath = os.path.join(data_folder, view)
try:
img = read_img(fpath)
light_field[idx // params["num_cams_x"], idx % params["num_cams_y"], :, :, :] = img
except IOError:
print("Could not read input file: %s" % fpath)
sys.exit()
return light_field
def read_parameters(data_folder):
params = dict()
with open(os.path.join(data_folder, "parameters.cfg"), "r") as f:
parser = configparser.ConfigParser()
        parser.read_file(f)
section = "intrinsics"
params["width"] = int(parser.get(section, 'image_resolution_x_px'))
params["height"] = int(parser.get(section, 'image_resolution_y_px'))
params["focal_length_mm"] = float(parser.get(section, 'focal_length_mm'))
params["sensor_size_mm"] = float(parser.get(section, 'sensor_size_mm'))
params["fstop"] = float(parser.get(section, 'fstop'))
section = "extrinsics"
params["num_cams_x"] = int(parser.get(section, 'num_cams_x'))
params["num_cams_y"] = int(parser.get(section, 'num_cams_y'))
params["baseline_mm"] = float(parser.get(section, 'baseline_mm'))
params["focus_distance_m"] = float(parser.get(section, 'focus_distance_m'))
params["center_cam_x_m"] = float(parser.get(section, 'center_cam_x_m'))
params["center_cam_y_m"] = float(parser.get(section, 'center_cam_y_m'))
params["center_cam_z_m"] = float(parser.get(section, 'center_cam_z_m'))
params["center_cam_rx_rad"] = float(parser.get(section, 'center_cam_rx_rad'))
params["center_cam_ry_rad"] = float(parser.get(section, 'center_cam_ry_rad'))
params["center_cam_rz_rad"] = float(parser.get(section, 'center_cam_rz_rad'))
section = "meta"
params["disp_min"] = float(parser.get(section, 'disp_min'))
params["disp_max"] = float(parser.get(section, 'disp_max'))
params["frustum_disp_min"] = float(parser.get(section, 'frustum_disp_min'))
params["frustum_disp_max"] = float(parser.get(section, 'frustum_disp_max'))
params["depth_map_scale"] = float(parser.get(section, 'depth_map_scale'))
params["scene"] = parser.get(section, 'scene')
params["category"] = parser.get(section, 'category')
params["date"] = parser.get(section, 'date')
params["version"] = parser.get(section, 'version')
params["authors"] = parser.get(section, 'authors').split(", ")
params["contact"] = parser.get(section, 'contact')
return params
def read_depth(data_folder, highres=False):
fpath = os.path.join(data_folder, "gt_depth_%s.pfm" % ("highres" if highres else "lowres"))
try:
data = read_pfm(fpath)
except IOError:
print("Could not read depth file: %s" % fpath)
sys.exit()
return data
def read_disparity(data_folder, highres=False):
fpath = os.path.join(data_folder, "gt_disp_%s.pfm" % ("highres" if highres else "lowres"))
try:
data = read_pfm(fpath)
except IOError:
print("Could not read disparity file: %s" % fpath)
sys.exit()
return data
def read_img(fpath):
#from scipy import misc
#data = misc.imread(fpath)
data = imageio.imread(fpath)
return data
def write_hdf5(data, fpath):
import h5py
h = h5py.File(fpath, 'w')
for key, value in data.items():
h.create_dataset(key, data=value)
h.close()
def write_pfm(data, fpath, scale=1, file_identifier="Pf", dtype="float32"):
# PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
data = np.flipud(data)
height, width = np.shape(data)[:2]
values = np.ndarray.flatten(np.asarray(data, dtype=dtype))
endianess = data.dtype.byteorder
print(endianess)
if endianess == '<' or (endianess == '=' and sys.byteorder == 'little'):
scale *= -1
with open(fpath, 'wb') as file:
file.write((file_identifier).encode())
file.write(('\n%d %d\n' % (width, height)).encode())
file.write(('%d\n' % scale).encode())
file.write(values)
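# Round-trip sketch (illustrative only, not part of the original benchmark tools): write a small
# random disparity map with write_pfm and load it back with read_pfm defined below. The file name
# "_demo.pfm" is an assumption made for this example, and read_pfm is assumed to be available in full.
def _pfm_roundtrip_demo():
    disp = np.random.rand(8, 16).astype(np.float32)
    write_pfm(disp, "_demo.pfm")
    restored = read_pfm("_demo.pfm")
    assert np.allclose(disp, restored, atol=1e-6)
    return restored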
def read_pfm(fpath, expected_identifier="Pf"):
# PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
with open(fpath, 'rb') as f:
# header
identifier = _get_next_line(f)
if identifier != expected_identifier:
raise Exception('Unknown identifier. Expected: "%s", got: "%s".' % (expected_identifier, identifier))
try:
line_dimensions = _get_next_line(f)
dimensions = line_dimensions.split(' ')
width = int(dimensions[0].strip())
height = int(dimensions[1].strip())
except:
raise Exception('Could not parse dimensions: "%s". '
'Expected "width height", e.g. "512 512".' % line_dimensions)
try:
line_scale = _get_next_line(f)
scale = float(line_scale)
assert scale != 0
if scale < 0:
endianness = "<"
else:
endianness = ">"
except:
            raise Exception('Could not parse max value / endianness information: "%s". '
'Should be a non-zero number.' % line_scale)
try:
data = np.fromfile(f, "%sf" % endianness)
data = np.reshape(data, (height, width))
data = np.flipud(data)
with | np.errstate(invalid="ignore") | numpy.errstate |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
This is a unit test. If you would like to further develop pahmc_ode_gpu, you
should visit here frequently.
"""
import os
from pathlib import Path
from numba import cuda, jit
import numpy as np
os.chdir(Path.cwd().parent)
from pahmc_ode_gpu import cuda_lib_dynamics
os.chdir(Path.cwd()/'unit_tests')
"""Prepare data, as well as variables to be compared to."""
name = 'lorenz96'
D = 200
M = 2000
X = np.random.uniform(-8.0, 8.0, (D,M))
par = np.array([8.17])
stimulus = np.random.uniform(-1.0, 1.0, (D,M))
# these functions have been tested in pahmc_ode_cpu
@jit(nopython=True)
def cpu_field(X, par, stimulus):
(D, M) = np.shape(X)
vecfield = | np.zeros((D,M)) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 20:03:50 2019
Finds Vg1 and Vg2 values above a threshold, determined by the ratio of the areas
of a Gaussian fit of the intensity histogram to the total area of the intensities
@author: <NAME>
"""
import numpy as np
import scipy.signal as ss
import scipy.optimize as opt
from scipy.signal import medfilt2d, savgol_filter
from scipy.ndimage import correlate
from sklearn.neighbors import KDTree
import stability as stab
def hist_data(z):
"""
Finds x and y data from histogram
:param z: input
:return: x and y
"""
data = np.histogram(z, bins='scott')
x = data[1]
x = np.array([(x[i] + x[i + 1]) / 2 for i in range(0, len(x) - 1)])
return x, np.array(data[0])
def gauss(x, *params):
return abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2))
def multi_gaussian(x, *params):
"""
Fits multiple Gaussian distributions, number of which determined by the number of parameters inputted
"""
y = np.zeros_like(x)
index = np.arange(0, len(params), 3)
if index.size > 1:
for i in range(0, len(params) // 3):
mu = params[i]
sig = params[i + len(params) // 3]
amp = params[i + 2 * len(params) // 3]
y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
else:
y = y + abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2))
return y
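# Parameter-packing sketch (illustrative only): for two Gaussians the flat parameter vector is
# ordered as all means, then all widths, then all amplitudes, i.e. (mu1, mu2, sig1, sig2, amp1, amp2).
# The numbers used below are arbitrary assumptions for this example.
def _multi_gaussian_demo():
    x_demo = np.linspace(0.0, 1.0, 200)
    y_demo = multi_gaussian(x_demo, 0.3, 0.7, 0.05, 0.05, 1.0, 0.5)
    # two bumps: a taller one centred at 0.3 and a smaller one centred at 0.7
    return x_demo, y_demo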
def multi_gauss_background(x, *params):
y = np.zeros_like(x)
index = np.arange(0, len(params) - 2, 3)
if index.size > 1:
y = y + params[0] * x + params[1]
for i in range(0, (len(params) - 2) // 3):
mu = params[i + 2]
sig = params[i + 2 + (len(params) - 2) // 3]
amp = params[i + 2 + 2 * (len(params) - 2) // 3]
y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
else:
y = y + params[0] * x + params[1] + abs(params[4]) * np.exp(-(x - params[2]) ** 2 / (2 * params[3] ** 2))
return y
def greedy_guess(guess, x, y):
n = (len(guess) - 2) // 3
m, sig, a = guess[2:n + 2], guess[n + 2:2 * n + 2], guess[2 * n + 2:]
chi = (y - multi_gauss_background(x, *guess)) / multi_gauss_background(x, *guess)
chi = savgol_filter(chi, 3, 2)
m, a = np.append(m, float(x[np.where(chi == np.max(chi))])), np.append(a, float(y[np.where(chi == np.max(chi))]))
sig = np.append(sig, sig[n - 1] / 2)
return np.append(guess[:2], np.append(m, np.append(sig, a)))
def gradient(x, y, z):
"""
Calculates gradient along x and y of intensities to reduce noise
@param x: x vales
@param y: y values
@param z: intensities
@return:
"""
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix
sg = savgol_filter(m_z, 5, 2) + savgol_filter(m_z, 5, 2, axis=0) # Savgol filter acts as a low pass band filter
signal = sg - np.mean(sg) + np.mean(m_z)
return np.reshape(signal, np.shape(x))
def gradient_exp(x, y, z):
"""
Calculates gradient along x and y of intensities to reduce noise
@param x: x vales
@param y: y values
@param z: intensities
@return:
"""
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix
diff = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]]
z_diff = correlate(m_z, diff)
sg = savgol_filter(z_diff, 5, 2) + savgol_filter(z_diff, 5, 2, axis=0) # Savgol filter acts as a low pass band filter
signal = sg - np.mean(sg) + np.mean(m_z)
return np.reshape(signal, np.shape(x))
def filtering(x, y, z):
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x)))) # Transform array into matrix
s = medfilt2d(m_z)
return np.reshape(s, (int(len(x)),))
def normalise(z):
"""
Unity-based normalisation function, such that all values range between 0 and 1
:param z: Raw data that needs normalising
:return: Normalised data
"""
return np.nan_to_num((z - np.min(z)) / (np.max(z) - np.min(z)))
def fit_gauss(z):
intensity = normalise(z)
x, y = hist_data(intensity)
guess = np.append(0, np.append(np.median(y), np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))))
fit_param, cov = opt.curve_fit(multi_gauss_background, x, y, guess)
if fit_param[2] > 0.5:
index = np.where(intensity<fit_param[2]-3*abs(fit_param[3]))
else:
index = np.where(intensity>fit_param[2]+3*abs(fit_param[3]))
return index
def curved_plane(x, y, param):
return param[0]*x + param[1]*x**2 + param[2]*y + param[3]*y**2 + param[4]*x*y + param[5]
def linear_plane(x, y, param):
return param[0]*x + param[1]*y + param[2]
def minimise_plane(param, x, y, z):
return np.sum((z - linear_plane(x, y, param))**2)
def linear(x, z):
return (np.median(z[np.where(x==np.min(x))])-np.median(z[np.where(x==np.max(x))]))/(np.min(x)-np.max(x))
def remove_background(x, y, z):
p = gradient_exp(x, y, z)
param = np.array((linear(x, z), linear(y,z), np.median(p)))
sol = opt.minimize(minimise_plane, param, args=(x, y, p))
p_n = normalise(p - linear_plane(x, y, sol.x))
return p_n*(np.max(z)-np.min(z)) + np.min(z)
def grad_exp(z, val_x, val_y):
val = z.reshape(val_y, val_x)
scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
[-10+0j, 0+ 0j, +10 +0j],
[ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
grad = ss.convolve2d(val, scharr, boundary='symm', mode='same')
index = np.where(np.logical_or(abs(np.angle(grad).flatten())<=0.15, abs(np.angle(grad).flatten())>=np.pi-0.15))
z[index] = 0
return z
def get_klpq_div(p_probs, q_probs):
# Calcualtes the Kullback-Leibler divergence between pi and qi
kl_div = 0.0
for pi, qi in zip(p_probs, q_probs):
kl_div += pi*np.nan_to_num(np.log(pi/qi))
return kl_div
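# Quick illustrative check (not part of the original module): the KL divergence of a distribution
# with itself is zero and grows as q drifts away from p. The toy probabilities below are
# assumptions made only for this example.
def _klpq_demo():
    p = np.array([0.5, 0.3, 0.2])
    q = np.array([0.2, 0.3, 0.5])
    assert np.isclose(get_klpq_div(p, p), 0.0)
    assert get_klpq_div(p, q) > 0.0
    return get_klpq_div(p, q)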
def D_KL(threshold, x, y):
# Finds best fit Gaussian distribution and calculates the corresponding Kullback-Leibler divergence
index = np.where(np.logical_and(x>=threshold[0], x<=threshold[1]))
xs, ys = x[index], y[index]
if np.trapz(ys)>0:
ys = ys/np.trapz(ys)
else:
return np.inf
guess = np.append(np.median(xs[np.where(ys == np.max(ys))]),
np.append(np.std(xs[np.where(ys > np.median(ys))]),
np.max(ys)))
bounds = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(ys)), (np.max(x)+np.std(x), np.max(x)-np.min(x), 10*np.max(ys)))
fit_param, cov = opt.curve_fit(gauss, xs, ys, guess, bounds=bounds)
    return get_klpq_div(ys+10**-7, gauss(xs, *fit_param)+10**-7) # Add small epsilon to ensure that we don't divide by zero
def minimise_DKL(x, y):
# Estimate first guess and boundaries to use:
guess = np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))
b = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(y)), (np.max(x)+np.std(x), np.max(x)-np.min(x), np.max(y)*10))
fit_param, cov = opt.curve_fit(gauss, x, y, guess, bounds=b)
x0 = [fit_param[0]-2*fit_param[1], fit_param[0]+2*fit_param[1]]
bound = ((np.min(x), fit_param[0]-fit_param[1]), (fit_param[0]+fit_param[1], np.max(x)))
# Find optimal bound solutions
sol = opt.minimize(D_KL, x0, jac=None, method='L-BFGS-B', options={'eps':1/len(x)}, args=(x, y), bounds=bound)
return sol.x
def threshold_DKL(z):
intensity = normalise(z)
x, y = hist_data(intensity)
y = y**0.5 # Broadens peak to allow to identify finer structure in the intensity
threshold = minimise_DKL(x, y)
if abs(np.max(z))>abs(np.min(z)):
index = np.where(intensity>=threshold[1])
else:
index = np.where(intensity<=threshold[0])
return index
def threshold(z, val):
if abs(np.max(z))>abs(np.min(z)):
v = abs(np.min(z))*0.9
else:
v = -abs(np.max(z))*0.9
val = np.append(val, v)
v = np.mean(abs(val))
m = np.where(np.logical_or(z > v, z < -v))
return m, val
def intense(z, index):
x, y = hist_data(z)
guess = np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))
fit_param, cov = opt.curve_fit(gauss, x, y, guess)
return z[index]-fit_param[0]
def threshold_experimental(vg1, vg2, i, q):
i_g, q_g = remove_background(vg1, vg2, i), remove_background(vg1, vg2, q)
m_i, m_q = threshold_DKL(i_g), threshold_DKL(q_g)
index = np.unique(np.append(m_i, m_q))
intensity = normalise(abs(intense(i, index)))+normalise(abs(intense(q, index)))
return vg1[index], vg2[index], intensity, i_g, q_g, index
def threshold_theoretical(vg1, vg2, i):
i_g = gradient(vg1, vg2, i)
x, y = hist_data(i_g)
x = normalise(x)
fit_param = [np.median(x[np.where(y == np.max(y))]), np.std(x[np.where(y > np.median(y))]), np.max(y)]
try:
fit_one, _ = opt.curve_fit(multi_gaussian, x, y, fit_param)
ind = np.where(x > fit_one[0] + fit_one[1])
ys = y[ind] - multi_gaussian(x[ind], *fit_one)
guess = [fit_one[0], np.median(x[ind][np.where(ys == np.max(ys))]),
fit_one[1], np.std(x[np.where(y > np.median(ys))]),
fit_one[2], np.max(ys)]
try:
fit_param, cov = opt.curve_fit(multi_gaussian, x, y, guess)
error = np.sqrt(np.diag(cov))
if error[1] * 10 > error[0]:
index = np.where(normalise(i) > fit_param[1])
else:
index = np.where(normalise(i) > 0.4)
except:
val = np.min(x[np.where(x > fit_one[0] + fit_one[1])])
index = np.where(normalise(i) > val)
except:
index = np.where(normalise(i) > 0.4)
return vg1[index], vg2[index], i[index], x, y, fit_param
def averaging_xy(x, y, intensity, leaf, n_neighbours):
"""
Uses KDTree to find n_neighbours and then calculates a weighted mean, resulting in thinning the data
:param x: threshold x values
:param y: threshold y values
:param intensity: corresponding intensities
:param leaf: determines how many neighbouring points to check, leaf > n_neighbours
:param n_neighbours: number of neighbours to average through
:return: thinned x and y values
"""
data = np.transpose(np.vstack([x, y]))
xs, ys, zs = [], [], []
tree = KDTree(data, leaf_size=leaf) # Finds relation between points
for i in range(0, len(data)):# // n_neighbours):
# Figure out which are the neighbouring points
# dist, ind = tree.query(np.reshape(data[i * n_neighbours, :], (1, -1)), k=n_neighbours)
dist, ind = tree.query(np.reshape(data[i, :], (1, -1)), k=n_neighbours)
# takes weighted average of x and y values of given point
x_m, y_m = np.average(x[ind], weights=intensity[ind]), np.average(y[ind], weights=intensity[ind])
z_m = np.average(intensity[ind])
xs, ys, zs = np.append(xs, x_m), np.append(ys, y_m), np.append(zs, z_m)
return xs, ys, zs
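# Illustrative usage sketch (not part of the original module): thin a noisy cloud of threshold
# points by averaging each point with its nearest neighbours, weighted by intensity. The leaf and
# n_neighbours values below are arbitrary assumptions (leaf should exceed n_neighbours).
def _averaging_xy_demo():
    x_demo = np.random.rand(200)
    y_demo = np.random.rand(200)
    z_demo = np.random.rand(200)
    xs, ys, zs = averaging_xy(x_demo, y_demo, z_demo, leaf=40, n_neighbours=5)
    assert len(xs) == len(x_demo)  # one averaged point is returned per input point
    return xs, ys, zs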
def thinning(Vg1, Vg2, i_g, q_g, ind):
val_x, val_y = len(np.unique(Vg1)), len( | np.unique(Vg2) | numpy.unique |
import torch
import torch.nn as nn
import numpy as np
import torch.distributions as TD
import scipy
import scipy.linalg
from copy import deepcopy
from multipledispatch import dispatch
from collections.abc import Iterable
import sdepy
from .em import batchItoEuler
from .em_proxrec import torchBatchItoEulerProxrec
class OU_distrib_modeler:
'''
This class models distribution X(t) of OrnsteinUhlenbeck process
dX(t) = - \grad \frac{1}{2}(x - b)^T A (x - b) dt + \sqrt{2 \beta^{-1}} d W(t)
b is n-dim vector
A is (n \cross n) invertible symmetric matrix
\beta is a positive scalar parameters
W(t) is standart n-dim Wiener process
'''
def _U_rotate(self, M):
if len(M.shape) == 1:
return self.U @ np.diag(M) @ self.U.conj().T
return self.U @ M @ self.U.conj().T
def __init__(self, A, b, beta):
if isinstance(A, torch.Tensor):
A = A.detach().cpu()
if isinstance(b, torch.Tensor):
b = b.detach().cpu()
self.A = np.asarray(A)
self.b = np.asarray(b)
self.beta = beta
assert self.A.shape[0] == self.A.shape[1], 'matrix A must be square'
# assert np.allclose(self.A.T, self.A, 1e-13), 'matrix A must be symmetric'
self.A = 0.5*(self.A + self.A.T)
assert self.b.shape[0] == self.A.shape[0], 'b an A dimensions must coincide'
assert | np.linalg.matrix_rank(self.A, tol=1e-6) | numpy.linalg.matrix_rank |
#%%
import pytest
from numpy.testing import assert_allclose
import numpy as np
import natural_bm.backend.theano_backend as BTH
import natural_bm.backend.numpy_backend as BNP
from natural_bm.backend.common import floatx, set_floatx
#%% Define checks
def check_dtype(var, dtype):
assert var.dtype == dtype
def check_single_tensor_operation(function_name, input_shape, **kwargs):
val = np.random.random(input_shape) - 0.5
xth = BTH.variable(val)
xnp = BNP.variable(val)
_zth = getattr(BTH, function_name)(xth, **kwargs)
zth = BTH.eval(_zth)
znp = BNP.eval(getattr(BNP, function_name)(xnp, **kwargs))
assert zth.shape == znp.shape
assert_allclose(zth, znp, atol=1e-05)
def check_two_tensor_operation(function_name, x_input_shape,
y_input_shape, **kwargs):
xval = np.random.random(x_input_shape) - 0.5
xth = BTH.variable(xval)
xnp = BNP.variable(xval)
yval = np.random.random(y_input_shape) - 0.5
yth = BTH.variable(yval)
ynp = BNP.variable(yval)
_zth = getattr(BTH, function_name)(xth, yth, **kwargs)
zth = BTH.eval(_zth)
znp = BNP.eval(getattr(BNP, function_name)(xnp, ynp, **kwargs))
assert zth.shape == znp.shape
assert_allclose(zth, znp, atol=1e-05)
def check_composed_tensor_operations(first_function_name, first_function_args,
second_function_name, second_function_args,
input_shape):
''' Creates a random tensor t0 with shape input_shape and compute
t1 = first_function_name(t0, **first_function_args)
t2 = second_function_name(t1, **second_function_args)
with both Theano and TensorFlow backends and ensures the answers match.
'''
val = np.random.random(input_shape) - 0.5
xth = BTH.variable(val)
xnp = BNP.variable(val)
yth = getattr(BTH, first_function_name)(xth, **first_function_args)
ynp = getattr(BNP, first_function_name)(xnp, **first_function_args)
zth = BTH.eval(getattr(BTH, second_function_name)(yth, **second_function_args))
znp = BNP.eval(getattr(BNP, second_function_name)(ynp, **second_function_args))
assert zth.shape == znp.shape
assert_allclose(zth, znp, atol=1e-05)
#%%
def test_linear_operations():
check_two_tensor_operation('dot', (4, 2), (2, 4))
check_two_tensor_operation('dot', (4, 2), (5, 2, 3))
check_single_tensor_operation('transpose', (4, 2))
check_single_tensor_operation('reverse', (4, 3, 2), axes=1)
check_single_tensor_operation('reverse', (4, 3, 2), axes=(1, 2))
#%%
def test_linear_algebra_operations():
check_single_tensor_operation('diag', (4, 4))
check_two_tensor_operation('solve', (4, 4), (4,))
check_two_tensor_operation('fill_diagonal', (4, 4), (1,))
check_two_tensor_operation('fill_diagonal', (4, 4), (4,))
#%%
def test_svd():
input_shape = (4, 4)
val = np.random.random(input_shape) - 0.5
xth = BTH.variable(val)
xnp = BNP.variable(val)
Uth, Sth, Vth = BTH.svd(xth)
Unp, Snp, Vnp = BNP.svd(xnp)
Uth = Uth.eval()
Sth = Sth.eval()
Vth = Vth.eval()
assert Uth.shape == Unp.shape
assert Sth.shape == Snp.shape
assert Vth.shape == Vnp.shape
assert_allclose(Uth, Unp, atol=1e-05)
assert_allclose(Sth, Snp, atol=1e-05)
assert_allclose(Vth, Vnp, atol=1e-05)
#%%
def test_shape_operations():
# concatenate
xval = np.random.random((4, 3))
xth = BTH.variable(xval)
xnp = BNP.variable(xval)
yval = np.random.random((4, 2))
yth = BTH.variable(yval)
ynp = BNP.variable(yval)
zth = BTH.eval(BTH.concatenate([xth, yth], axis=-1))
znp = BNP.eval(BNP.concatenate([xnp, ynp], axis=-1))
assert zth.shape == znp.shape
assert_allclose(zth, znp, atol=1e-05)
check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
check_single_tensor_operation('permute_dimensions', (4, 2, 3),
pattern=(2, 0, 1))
check_single_tensor_operation('repeat', (4, 1), n=3)
check_single_tensor_operation('flatten', (4, 1))
check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
'squeeze', {'axis': 2},
(4, 3, 1, 1))
#%%
def test_repeat_elements():
reps = 3
for ndims in [1, 2, 3]:
shape = np.arange(2, 2 + ndims)
arr = np.arange(np.prod(shape)).reshape(shape)
arr_th = BTH.variable(arr)
arr_np = BNP.variable(arr)
for rep_axis in range(ndims):
np_rep = np.repeat(arr, reps, axis=rep_axis)
th_z = BTH.repeat_elements(arr_th, reps, axis=rep_axis)
th_rep = BTH.eval(th_z)
bnp_rep = BNP.eval(
BNP.repeat_elements(arr_np, reps, axis=rep_axis))
assert th_rep.shape == np_rep.shape
assert bnp_rep.shape == np_rep.shape
assert_allclose(np_rep, th_rep, atol=1e-05)
assert_allclose(np_rep, bnp_rep, atol=1e-05)
#%%
def test_tile():
shape = (3, 4)
arr = np.arange(np.prod(shape)).reshape(shape)
arr_th = BTH.variable(arr)
arr_np = BNP.variable(arr)
n = (2, 1)
th_z = BTH.tile(arr_th, n)
th_rep = BTH.eval(th_z)
np_rep = BNP.eval(BNP.tile(arr_np, n))
assert_allclose(np_rep, th_rep, atol=1e-05)
#%%
def test_value_manipulation():
val = np.random.random((4, 2))
xth = BTH.variable(val)
xnp = BNP.variable(val)
# get_value
valth = BTH.get_value(xth)
valnp = BNP.get_value(xnp)
assert valnp.shape == valth.shape
assert_allclose(valth, valnp, atol=1e-05)
# set_value
BTH.set_value(xth, val)
valth = BTH.get_value(xth)
assert valnp.shape == val.shape
assert_allclose(valth, val, atol=1e-05)
#%%
def test_elementwise_operations():
check_single_tensor_operation('max', (4, 2))
check_single_tensor_operation('max', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('min', (4, 2))
check_single_tensor_operation('min', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('min', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('mean', (4, 2))
check_single_tensor_operation('mean', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('mean', (4, 2, 3), axis=-1, keepdims=True)
check_single_tensor_operation('mean', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('std', (4, 2))
check_single_tensor_operation('std', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('std', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('prod', (4, 2))
check_single_tensor_operation('prod', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('prod', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('cumsum', (4, 2))
check_single_tensor_operation('cumsum', (4, 2), axis=1)
check_single_tensor_operation('cumprod', (4, 2))
check_single_tensor_operation('cumprod', (4, 2), axis=1)
check_single_tensor_operation('argmax', (4, 2))
check_single_tensor_operation('argmax', (4, 2), axis=1)
check_single_tensor_operation('argmin', (4, 2))
check_single_tensor_operation('argmin', (4, 2), axis=1)
check_single_tensor_operation('square', (4, 2))
check_single_tensor_operation('abs', (4, 2))
check_single_tensor_operation('sqrt', (4, 2))
check_single_tensor_operation('exp', (4, 2))
check_single_tensor_operation('log', (4, 2))
check_single_tensor_operation('round', (4, 2))
check_single_tensor_operation('sign', (4, 2))
check_single_tensor_operation('pow', (4, 2), a=3)
check_single_tensor_operation('clip', (4, 2), min_value=0.4,
max_value=0.6)
# two-tensor ops
check_two_tensor_operation('equal', (4, 2), (4, 2))
check_two_tensor_operation('not_equal', (4, 2), (4, 2))
check_two_tensor_operation('greater', (4, 2), (4, 2))
check_two_tensor_operation('greater_equal', (4, 2), (4, 2))
check_two_tensor_operation('less', (4, 2), (4, 2))
check_two_tensor_operation('less_equal', (4, 2), (4, 2))
check_two_tensor_operation('maximum', (4, 2), (4, 2))
check_two_tensor_operation('minimum', (4, 2), (4, 2))
#%%
@pytest.mark.parametrize('x_np,axis,keepdims', [
(np.array([1.1, 0.8, 0.9]), 0, False),
(np.array([[1.1, 0.8, 0.9]]), 0, False),
(np.array([[1.1, 0.8, 0.9]]), 1, False),
(np.array([[1.1, 0.8, 0.9]]), -1, False),
(np.array([[1.1, 0.8, 0.9]]), 1, True),
(np.array([[1.1], [1.2]]), 0, False),
(np.array([[1.1], [1.2]]), 1, False),
(np.array([[1.1], [1.2]]), -1, False),
(np.array([[1.1], [1.2]]), -1, True),
(np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), None, False),
(np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), 0, False),
(np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), 1, False),
(np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), -1, False),
])
@pytest.mark.parametrize('B', [BTH, BNP], ids=["BTH", "BNP"])
def test_logsumexp(x_np, axis, keepdims, B):
'''
    Check if logsumexp works properly for values close to one.
'''
x = B.variable(x_np)
assert_allclose(B.eval(B.logsumexp(x, axis=axis, keepdims=keepdims)),
np.log(np.sum(np.exp(x_np), axis=axis, keepdims=keepdims)),
rtol=1e-5)
#%%
@pytest.mark.parametrize('B', [BTH, BNP], ids=["BTH", "BNP"])
def test_logsumexp_optim(B):
'''
Check if optimization works.
'''
x_np = np.array([1e+4, 1e-4])
assert_allclose(B.eval(B.logsumexp(B.variable(x_np), axis=0)),
1e4,
rtol=1e-5)
#%%
def test_switch():
val = np.random.random()
xth = BTH.variable(val)
xth = BTH.ifelse(xth >= 0.5, xth * 0.1, xth * 0.2)
xnp = BNP.variable(val)
xnp = BNP.ifelse(xnp >= 0.5, xnp * 0.1, xnp * 0.2)
zth = BTH.eval(xth)
znp = BNP.eval(xnp)
assert zth.shape == znp.shape
assert_allclose(zth, znp, atol=1e-05)
#%%
def test_nn_operations():
check_single_tensor_operation('sigmoid', (4, 2))
#%%
def test_random_normal():
mean = 0.
std = 1.
rand = BNP.eval(BNP.random_normal((1000, 1000), mean=mean, stddev=std))
assert rand.shape == (1000, 1000)
assert np.abs(np.mean(rand) - mean) < 0.01
assert np.abs(np.std(rand) - std) < 0.01
rand = BTH.eval(BTH.random_normal((1000, 1000), mean=mean, stddev=std))
assert rand.shape == (1000, 1000)
assert np.abs(np.mean(rand) - mean) < 0.01
assert np.abs(np.std(rand) - std) < 0.01
#%%
def test_random_uniform():
min_val = -1.
max_val = 1.
rand = BNP.eval(BNP.random_uniform((1000, 1000), min_val, max_val))
assert rand.shape == (1000, 1000)
assert np.abs(np.mean(rand)) < 0.01
assert np.max(rand) <= max_val
assert np.min(rand) >= min_val
rand = BTH.eval(BTH.random_uniform((1000, 1000), min_val, max_val))
assert rand.shape == (1000, 1000)
assert np.abs(np.mean(rand)) < 0.01
assert np.max(rand) <= max_val
assert np.min(rand) >= min_val
#%%
def test_random_binomial():
p = 0.5
rand = BNP.eval(BNP.random_binomial((1000, 1000), p))
assert rand.shape == (1000, 1000)
assert np.abs(np.mean(rand) - p) < 0.01
assert np.max(rand) == 1
assert np.min(rand) == 0
rand = BTH.eval(BTH.random_binomial((1000, 1000), p))
assert rand.shape == (1000, 1000)
assert np.abs(np.mean(rand) - p) < 0.01
assert np.max(rand) == 1
assert np.min(rand) == 0
#%%
def test_one_hot():
input_length = 10
num_classes = 20
batch_size = 30
indices = | np.random.randint(0, num_classes, size=(batch_size, input_length)) | numpy.random.randint |
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
kernel= | np.ones((5,5),np.uint8) | numpy.ones |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
def cum_prod(nptype):
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
x0 = np.random.rand(2, 3, 4, 4).astype(nptype)
axis0 = 3
x1 = np.random.rand(2, 3, 4, 4).astype(nptype)
axis1 = 3
x2 = np.random.rand(2, 3, 1, 4).astype(nptype)
axis2 = 2
x3 = np.random.rand(2, 3, 1, 4).astype(nptype)
axis3 = 2
x4 = np.random.rand(2, 3, 4, 4).astype(nptype)
axis4 = 1
x5 = np.random.rand(2, 3).astype(nptype)
axis5 = 1
x6 = np.random.rand(1, 1, 1, 1).astype(nptype)
axis6 = 0
class CumProd(nn.Cell):
def __init__(self, nptype):
super(CumProd, self).__init__()
self.x0 = Tensor(x0)
self.axis0 = axis0
self.x1 = Tensor(x1)
self.axis1 = axis1
self.x2 = Tensor(x2)
self.axis2 = axis2
self.x3 = Tensor(x3)
self.axis3 = axis3
self.x4 = Tensor(x4)
self.axis4 = axis4
self.x5 = Tensor(x5)
self.axis5 = axis5
self.x6 = Tensor(x6)
self.axis6 = axis6
@ms_function
def construct(self):
return (P.CumProd()(self.x0, self.axis0),
P.CumProd()(self.x1, self.axis1),
P.CumProd()(self.x2, self.axis2),
P.CumProd()(self.x3, self.axis3),
P.CumProd()(self.x4, self.axis4),
P.CumProd()(self.x5, self.axis5),
P.CumProd()(self.x6, self.axis6))
cumprod = CumProd(nptype)
output = cumprod()
expect0 = np.cumprod(x0, axis=axis0)
diff0 = abs(output[0].asnumpy() - expect0)
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output[0].shape == expect0.shape
expect1 = np.cumprod(x1, axis=axis1)
diff1 = abs(output[1].asnumpy() - expect1)
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output[1].shape == expect1.shape
expect2 = np.cumprod(x2, axis=axis2)
diff2 = abs(output[2].asnumpy() - expect2)
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert output[2].shape == expect2.shape
expect3 = np.cumprod(x3, axis=axis3)
diff3 = abs(output[3].asnumpy() - expect3)
error3 = np.ones(shape=expect3.shape) * 1.0e-5
assert np.all(diff3 < error3)
assert output[3].shape == expect3.shape
expect4 = np.cumprod(x4, axis=axis4)
diff4 = abs(output[4].asnumpy() - expect4)
error4 = np.ones(shape=expect4.shape) * 1.0e-5
assert np.all(diff4 < error4)
assert output[4].shape == expect4.shape
expect5 = np.cumprod(x5, axis=axis5)
diff5 = abs(output[5].asnumpy() - expect5)
error5 = np.ones(shape=expect5.shape) * 1.0e-5
assert np.all(diff5 < error5)
assert output[5].shape == expect5.shape
expect6 = | np.cumprod(x6, axis=axis6) | numpy.cumprod |
import numpy as np
import torch
from .base import BaseModule
from ..utils import init_weight, repeat_interleave
class LSTM(BaseModule):
"""An LSTM module with experimental support for multi-sample handling of
the state (i.e. in case of an IQN layer before the LSTM)"""
def __init__(self, inp_shape, num_units, multi_sample_merge_mode="inner"):
"""Initialize an LSTM torch module
Args:
inp_shape: The input shape (Will be flattened to 1D in any case)
num_units: Number of hidden units in the LSTM. The hidden-state
size per timestep will be num_units*2*4B
multi_sample_merge_mode: In case of a multi-sample batch (e.g.
IQN), how to handle the LSTM input/output state (Which is
single-sample)
Options are:
- 'outer': Repeat the state at the start of the sequence and
merge back (using mean) at the end
- 'inner': repeat->merge the state on every timestep within the
sequence such that every timestep in the sequence has the
same input state for each sample (This allows the merge/mean
to participate in the backprop and may allow the model to
learn a more mean-friendly state representation)
- Alternatively, the need for this option can be avoided
altogether by injecting the IQN layer after the LSTM layer
Note this option is only relevant when there is an IQN layer
before the LSTM layer, and only for training/bootstrapping
(Acting does 1 timestep at a time therefore both options are
equivalent)
"""
super(LSTM, self).__init__()
self.inp_size = | np.prod(inp_shape) | numpy.prod |
import torch
import numpy as np
import gc
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import json, os
from datetime import datetime
from models.cifar10_models import *
from models.mnist_models import *
def accuracy(out, y):
preds = out.argmax(dim=1, keepdim=True).squeeze()
correct = preds.eq(y).sum().item()
return correct
def set_seed(seed):
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| np.random.seed(seed) | numpy.random.seed |
import numpy as np
import pygame as pg
CALC_PRECISION = 10
class Physical(pg.sprite.Sprite):
def __init__(self, game, spritepath: str):
pg.sprite.Sprite.__init__(self, game.all_sprites)
image = pg.image.load(spritepath).convert_alpha()
self.image = pg.transform.scale(image, (50, 50))
self._originalimage = self.image
self.rect = self.image.get_rect()
self.old_rect = self.rect
self.pos = (0, 0)
self.theta = 0
@classmethod
def rotateaxis(cls, xy, heading):
        '''
        Rotate the 2-vector xy by heading (in radians): returns
        (x*cos - y*sin, x*sin + y*cos), rounded to CALC_PRECISION decimal places.
        '''
basis = np.array([(np.cos(heading), -np.sin(heading)),
(np.sin(heading), np.cos(heading))])
calc = xy*basis
return np.around(np.sum(calc, axis=1), decimals=CALC_PRECISION)
@classmethod
def getangle(cls, x1, x2, y1, y2):
dx = x2 - x1
dy = y2 - y1
if dx < 0:
if dy < 0:
heading = | np.arctan(dy/dx) | numpy.arctan |
from ctypes import *
import numpy as np
from OpenGL import GL,GLU
def computeFacesAndNormals(v, faceList):
# Compute normals
faces = np.asarray([v[i] for i in faceList])
va = faces[:,0]
vb = faces[:,1]
vc = faces[:,2]
diffB = vb - va
diffC = vc - va
vn = np.asarray([np.cross(db,dc) for db,dc in zip(diffB,diffC)])
vn = vn / np.sqrt(np.sum(np.square(vn),-1)).reshape((-1,1))
length = np.sqrt(np.sum(np.square(vn),-1))
vn = np.repeat(vn.reshape((-1,1,3)),3,1)
return faces, vn
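# Worked example (illustrative only, not part of the original module): a single right triangle in
# the z=0 plane should yield a unit normal along +z, repeated for each of its three vertices.
# The vertex coordinates are assumptions made for this example.
def _face_normal_demo():
    v = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]])
    faces, vn = computeFacesAndNormals(v, np.array([[0, 1, 2]]))
    assert faces.shape == (1, 3, 3) and vn.shape == (1, 3, 3)
    assert np.allclose(vn, [0.0, 0.0, 1.0])
    return faces, vn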
class RenderObject(object):
def __init__(self, v, vn, t, dynamic=False):
self.v = v.astype('float32').reshape(-1)
self.vn = vn.astype('float32').reshape(-1)
self.t = t.astype('float32').reshape(-1)
self.s = np.ones(self.v.shape[0]).astype('float32')
if dynamic:
self.draw = GL.GL_DYNAMIC_DRAW
else:
self.draw = GL.GL_STATIC_DRAW
self.initialized = False
self.visible = True
def isInitialized(self):
return self.initialized
def setVisibility(self, visibility):
self.visible = visibility
def initializeMesh(self):
shadow = np.ones(self.v.shape[0]).astype('float32')
null = c_void_p(0)
self.vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self.vao)
# Vertex
self.vbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, null)
vertices = self.v.reshape(-1)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(vertices)*4, (c_float*len(vertices))(*vertices), self.draw)
# Normal
self.nbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.nbo)
GL.glEnableVertexAttribArray(1)
GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, null)
normals = self.vn.reshape(-1)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(normals)*4, (c_float*len(normals))(*normals), self.draw)
# Vertex color
self.cbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.cbo)
GL.glEnableVertexAttribArray(2)
GL.glVertexAttribPointer(2, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, null)
textures = self.t.reshape(-1)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(textures)*4, (c_float*len(textures))(*textures), self.draw)
self.line_idx = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.line_idx)
vList = self.v.reshape((-1,3))
n = len(vList)
self.lineIdx = np.asarray([[i,i+1,i+1,i+2,i+2,i] for i in range(0,n-1,3)]).reshape(-1).astype('int32')
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, len(self.lineIdx)*4, (c_int*len(self.lineIdx))(*self.lineIdx), GL.GL_STATIC_DRAW)
GL.glBindVertexArray(0)
GL.glDisableVertexAttribArray(0)
GL.glDisableVertexAttribArray(1)
GL.glDisableVertexAttribArray(2)
GL.glDisableVertexAttribArray(3)
GL.glDisableVertexAttribArray(4)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
self.initialized = True
def reloadMesh(self, v=None, vn=None, t=None):
if v is not None:
vertices = v.reshape(-1).astype('float32')
self.v = vertices
if self.initialized:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(vertices)*4, vertices, self.draw)
if vn is not None:
normals = vn.astype('float32').reshape(-1)
self.vn = vn
if self.initialized:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.nbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(normals)*4, normals, self.draw)
if t is not None:
textures = t.astype('float32').reshape(-1)
self.t = t
if self.initialized:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.cbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(textures)*4, textures, self.draw)
def smoothNormals(self, fList):
vn = self.vn.reshape((-1,3))
fList = fList.reshape(-1)
vn = np.stack([ | np.bincount(fList,vn[:,i]) | numpy.bincount |