prompt (string, length 15 to 655k) | completion (string, length 3 to 32.4k) | api (string, length 8 to 52) |
---|---|---|
'''
Scrape Robospect output and do some processing of the results
'''
import os
import sys
import glob
import logging
import pandas as pd
import numpy as np
import matplotlib
from astropy.io.fits import getdata
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from . import *
class Scraper():
'''
Scrape all the equivalent width info from the Robospect *robolines files
'''
def __init__(self,
subdir=config_red["data_dirs"]["DIR_ROBO_OUTPUT"],
file_scraped_info=config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["SCRAPED_EW_ALL_DATA"],
orig_spec_list = config_red["data_dirs"]["DIR_SRC"] + config_red["file_names"]["LIST_SPEC_PHASE"],
verbose=False):
'''
INPUTS:
subdir:
file_scraped_info:
orig_spec_list: the file containing the original file names of the spectra
'''
# directory containing the *.fits.robolines
# files with the EW info
self.stem = '.' ## ##
# subdirectory containing the *.c.dat files
self.subdir = subdir ## ##
# get list of filenames without the path
## ## note the string being sought here is specific to RW's synthetic spectra; this is a weakness here and needs to be fixed later!
file_list_long = glob.glob(self.subdir+'/'+'*robolines')
file_list_unsorted = [os.path.basename(x) for x in file_list_long]
self.file_list = sorted(file_list_unsorted)
# read in original file names
input_list = pd.read_csv(orig_spec_list)
self.orig_spec_list = input_list["orig_spec_file_name"]
# EW info will get scraped into this
self.write_out_filename = file_scraped_info
# return tables of EW data?
self.verbose = verbose
def __call__(self):
def line_order_check(line_centers):
'''
Sanity check: are the lines listed in order?
N.b. This checks the wavelengths using the given line list
values (and not the fitted centers)
'''
logging.info('Verifying line centers...')
logging.info(line_centers[0])
glitch_count = int(0) # flag for bookkeeping
if ((line_centers[0] < 3933.660-10) or
(line_centers[0] > 3933.660+10)): # CaIIK
logging.warning('CaIIK line center does not match!')
glitch_count = int(1) # flag for bookkeeping
if ((line_centers[1] < 3970.075-10) or
(line_centers[1] > 3970.075+10)): # H-epsilon (close to CaIIH)
logging.warning('H-epsilon line center (close to CaIIH) does not match!')
glitch_count = int(1) # flag for bookkeeping
if ((line_centers[2] < 4101.7100-10) or
(line_centers[2] > 4101.7100+10)): # H-delta
logging.warning('H-delta line center does not match!')
glitch_count = int(1) # flag for bookkeeping
if ((line_centers[3] < 4340.472-10) or
(line_centers[3] > 4340.472+10)): # H-gamma
logging.warning('H-gamma line center does not match!')
glitch_count = int(1) # flag for bookkeeping
if ((line_centers[4] < 4861.290-10) or
(line_centers[4] > 4861.290+10)): # H-beta
logging.warning('H-beta line center does not match!')
glitch_count = int(1) # flag for bookkeeping
if (glitch_count == int(0)):
logging.info('CaIIK, H-eps, H-del, H-gam, H-beta line centers are consistent')
return
df_master = pd.DataFrame() # initialize
# loop over all filenames of realizations of empirical spectra, extract line data
#import ipdb; ipdb.set_trace()
for t in range(0, len(self.file_list)):
# read in Robospect output
logging.info("--------------------")
logging.info("Reading in Robospect output from directory")
logging.info(self.subdir)
'''
The following parses lines from Robospect *robolines output files,
which look like the following, as of the v0.76 tag of Robospect:
## Units
##AA [ AA AA None] [ AA AA None] [ AA AA None] mAA mAA None None None None
## Headers
##wave_0 [ gaussianMu gaussianSigma gaussianAmp] [ uncertaintyMu uncertaintySigma uncertaintyAmp] [ priorMu priorSigma priorAmp] EQW uncertaintyEQW chiSqr flags blendGroup comment
3933.6600 [ 3933.618556 1.636451 -0.338310] [ 0.043767 0.045441 0.008054] [ 3934.427147 1.754001 0.384793] 1.387738 0.127230 0.004045 0x10020 0 CaII-K
3970.0750 [ 3969.912002 6.497202 -0.626854] [ 0.245555 0.237816 0.023196] [ 3971.262223 4.535872 0.781687] 10.208984 1.331932 0.117392 0x10020 0 H-eps
4101.7100 [ 4101.728498 6.829899 -0.596311] [ 0.335244 0.327236 0.025288] [ 4102.885050 4.878668 0.734648] 10.208852 1.637334 0.220112 0x10020 0 H-del
4340.4720 [ 4340.374387 7.365172 -0.557777] [ 0.395447 0.378434 0.025443] [ 4340.943149 4.961159 0.689719] 10.297539 1.773505 0.300238 0x10020 0 H-gam
4861.2900 [ 4861.316520 7.570797 -0.505060] [ 0.441626 0.426212 0.025690] [ 4861.746895 4.898021 0.635582] 9.584604 1.822847 0.377350 0x10020 0 H-beta
'''
df = pd.read_csv(self.subdir+'/'+self.file_list[t],
skiprows=19,
delim_whitespace=True,
index_col=False,
usecols=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],
names= ["wavel_stated_center","[1","wavel_found_center","gaussianSigma","gaussianAmp",
"[2","uncertaintyMu","uncertaintySigma","uncertaintyAmp",
"[3","priorMu","priorSigma","priorAmp","EQW","uncertaintyEQW",
"chiSqr","flags","blendGroup","line_name"])
# remove dummy columns
df = df.drop(columns=["[1","[2","[3"])
# remove Robospect delimiter strings from columns and cast contents as floats
logging.info("Parsing " + self.file_list[t])
try:
# this will fail if there are infs in the EWs
df["gaussianAmp"] = df["gaussianAmp"].str.replace("]","")
df["gaussianAmp"] = df["gaussianAmp"].astype(np.float)
df["uncertaintyAmp"] = df["uncertaintyAmp"].str.replace("]","")
df["uncertaintyAmp"] = df["uncertaintyAmp"].astype(np.float)
df["priorAmp"] = df["priorAmp"].str.replace("]","")
df["priorAmp"] = df["priorAmp"].astype(np.float)
except Exception:
# skip this file
logging.error("Parsing error! " + self.file_list[t])
continue
# check lines are in the right order
# if they are not, a warning is printed in the log
line_order_check(df['wavel_found_center'])
# add two cols on the left: the filename, and the name of the line
#s_length = len(df['mean']) # number of lines (should be 5)
# file names
df['robolines_file_name'] = pd.Series(self.file_list[t],
index=df.index)
# names of empirical spectra realizations (multiple ones
# correspond to one empirical spectrum)
# remove .robolines extension
df['realization_spec_file_name'] = pd.Series(self.file_list[t].split(".robolines")[0],
index=df.index)
# names of the absorption lines
df['line_name'] = ['CaIIK', 'Heps', 'Hdel', 'Hgam', 'Hbet']
# print progress
logging.info('Out of '+str(len(self.file_list))+' files, '+str(t+1)+' scraped...')
# if this is the first list, start a master copy from it to concatenate stuff to it
if (t == 0):
df_master = df.copy()
else:
df_master = pd.concat([df_master, df])
del df # clear variable
# write to csv, while resetting the indices
# note THIS TABLE INCLUDES ALL DATA, GOOD AND BAD
#df_master_reset = df_master.reset_index(drop=True).copy()
# this is effectively the same, but gets written out
df_master.reset_index(drop=True).to_csv(self.write_out_filename,index=False)
logging.info("Table of ALL EW info written to " + str(self.write_out_filename))
#if self.verbose:
# return df_master_reset, df_master_reset_drop_bad_spectra
return
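# A minimal usage sketch (assumes config_red's default paths exist and point at Robospect output):
# scraper = Scraper(verbose=True)
# scraper()  # scrapes all *robolines files and writes the combined EW table to SCRAPED_EW_ALL_DATA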
def add_synthetic_meta_data(input_list = config_red["data_dirs"]["DIR_SRC"] + config_red["file_names"]["LIST_SPEC_PHASE"],
read_in_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"],
write_out_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["RESTACKED_EW_DATA_W_METADATA"]):
'''
For the generation of a calibration, this reads in a file with spectrum file
names and other info like Fe/H, and adds everything to the table with EWs
INPUTS:
input_list: file name of list containing original spectrum names and meta-data
read_in_filename: file name of table containing EW data including Balmer lines and their errors
write_out_filename: file name with everything together to write out
'''
# read in metadata
input_data_arr = pd.read_csv(input_list)
# read in EW data
all_data = pd.read_csv(read_in_filename)
# add rows of meta-data table to EW data table, based on matchings of original spectrum file names
combined_data = all_data.merge(input_data_arr,how="left",on="orig_spec_file_name")
# write out
combined_data.to_csv(write_out_filename,index=False)
logging.info("Table of EW info with meta-data written to " + str(write_out_filename))
return
def quality_check(
read_in_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["SCRAPED_EW_ALL_DATA"],
write_out_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["SCRAPED_EW_DATA_GOOD_ONLY"]):
'''
This reads in all the scraped EW data in raw form, removes spectra that have fits
which are bad based on multiple criteria, and writes out another data_table
INPUTS:
read_in_filename: file name of the table with ALL scraped data from Robospect
write_out_filename: file name of the table with spectra with any bad line fits removed
'''
# read in data
all_data = pd.read_csv(read_in_filename)
# make new column for 'good' (G) or 'bad' (B) based on the below criteria
# (initialize all as 'G')
all_data["quality"] = "G"
# impose criteria for pruning of data
# Criterion 1. Remove all rows with a Robospect flag ending with something other than zero
# (i.e., Robospect found the fit to be bad)
# make an array consisting of the last character in each spectrum's flag
red_flag_array = ([u[-1] for u in all_data["flags"]])
# consider bad flags to be of any flag with a nonzero last character
where_red_flag = np.where(np.array(red_flag_array) != '0')
# identify the synthetic spectrum names which have at least one line with a bad fit
bad_robo_spectra = all_data["realization_spec_file_name"][np.squeeze(where_red_flag)]
# remove duplicate names
bad_robo_spectra_uniq = bad_robo_spectra.drop_duplicates()
# flag as bad the spectra with those names
all_data["quality"][all_data["realization_spec_file_name"].isin(bad_robo_spectra_uniq)] = "B"
# Criterion 2. Remove rows where the line centers are not right, using steps similar to above
# (specifically, if measured line center is more than 10 A away from perfect center)
where_bad_line_center = np.where(np.abs(np.subtract(all_data["wavel_found_center"],all_data["wavel_stated_center"])) > 10)
bad_line_center_spectra = all_data["realization_spec_file_name"][np.squeeze(where_bad_line_center,axis=0)] # squeeze necessary to preserve finite size
bad_line_center_spectra_uniq = bad_line_center_spectra.drop_duplicates()
all_data["quality"][all_data["realization_spec_file_name"].isin(bad_line_center_spectra_uniq)] = "B"
# Criterion 3. Remove rows with EWs which are clearly unrealistically large which slipped through other checks
# (this is particularly an issue with the CaIIK line, which is close to CaIIH)
# set cutoff at 18 A, based on inspection of >200 Robospect plots of fits to
# synthetic spectra; all those with CaIIK EW > 18 are clearly not fit right,
# and all those with EW < 18 look acceptable -E.S.
where_bad_CaIIK = np.where(np.logical_and(all_data["line_name"] == "CaIIK", all_data["EQW"] > 18))
bad_CaIIK_spectra = all_data["realization_spec_file_name"][np.squeeze(where_bad_CaIIK)]
bad_CaIIK_spectra_uniq = bad_CaIIK_spectra.drop_duplicates()
all_data["quality"][all_data["realization_spec_file_name"].isin(bad_CaIIK_spectra_uniq)] = "B"
# Criterion 4. Remove bad phases (for empirical data)
'''
min_good, max_good = phase_regions()
#[...]
#good_indices = np.intersect1d(good_phase, good_metal)
#[...]
'''
# Last step: Write only the rows with a good ("G") flag to file
# (note that if AT LEAST one absorption line was found to be bad, ALL the
# data corresponding to that spectrum is removed)
pruned_data = all_data[all_data.quality == "G"]#.reset_index()
pruned_data.to_csv(write_out_filename,index=False)
logging.info("--------------------------")
logging.info('Scraped Robospect output written to')
logging.info(write_out_filename)
#import ipdb; ipdb.set_trace()
return pruned_data
def error_scatter_ew(df_pass):
'''
Adds a column of errors, as calculated using the method of taking the
scatter in measured EWs of different lines (as opposed to taking Robospec's
errors at face value)
'''
# get list of original file names with no repeats
orig_file_array = np.array((df_pass["orig_spec_file_name"].drop_duplicates()))
# add new columns of nans
df_pass["err_EW_Hbeta_from_EW_variation"] = np.nan
df_pass["err_EW_Hgamma_from_EW_variation"] = np.nan
df_pass["err_EW_Hdelta_from_EW_variation"] = np.nan
df_pass["err_EW_Heps_from_EW_variation"] = np.nan
df_pass["err_EW_CaIIK_from_EW_variation"] = np.nan
for orig_file_name_num in range(0,len(orig_file_array)):
# mask all rows that do not correspond to the original spectrum
this_orig_spec = orig_file_array[orig_file_name_num]
df_masked = df_pass.where(df_pass["orig_spec_file_name"] == this_orig_spec)
# find stdev of EWs, as measured for all realizations of those file names
'''
df_masked["err_EW_Hbeta_from_EW_variation"] = np.nanstd(df_masked["EW_Hbeta"])
df_masked["err_EW_Hgamma_from_EW_variation"] = np.nanstd(df_masked["EW_Hgamma"])
df_masked["err_EW_Hdelta_from_EW_variation"] = np.nanstd(df_masked["EW_Hdelta"])
df_masked["err_EW_Heps_from_EW_variation"] = np.nanstd(df_masked["EW_Heps"])
'''
# insert into columns of input table
try:
idx = df_pass.index[df_pass["orig_spec_file_name"] == this_orig_spec] # indices
df_pass.loc[idx, "err_EW_Hbeta_from_EW_variation"] = np.nanstd(df_masked["EW_Hbeta"])
df_pass.loc[idx, "err_EW_Hgamma_from_EW_variation"] = np.nanstd(df_masked["EW_Hgamma"])
df_pass.loc[idx, "err_EW_Hdelta_from_EW_variation"] = np.nanstd(df_masked["EW_Hdelta"])
df_pass.loc[idx, "err_EW_Heps_from_EW_variation"] = np.nanstd(df_masked["EW_Heps"])
df_pass.loc[idx, "err_EW_CaIIK_from_EW_variation"] = np.nanstd(df_masked["EW_CaIIK"])
except Exception:
print("Anomaly in finding scatter in EW measurements in " + str(this_orig_spec))
return df_pass
def generate_net_balmer(read_in_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["RESTACKED_EW_DATA_GOOD_ONLY"],
write_out_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["RESTACKED_EW_DATA_W_NET_BALMER"]):
'''
Takes stacked spectra data and adds a column representing a net Balmer line,
and populates another column for the error (based on propagation of the Robo
errors of constituent lines)
INPUTS:
read_in_filename: name of the file with stacked EW data from Robospect, and only including 'good' data
write_out_filename: name of the file to be written out; identical to the file read in,
except that additional columns contain info on a net Balmer line
OUTPUTS:
(writes out csv with net Balmer line EWs)
[m, err_m, b, err_b], [m_1to1, err_m_1to1, b_1to1, err_b_1to1], df_poststack: info used in test functions
'''
# read in
df_poststack = pd.read_csv(read_in_filename)
# to generate a net Balmer line, make a rescaling of Hgamma
# based on Hdelta
logging.info("Making a net Balmer line")
# fit a straight line to all the Hgam vs Hdel
EW_Hdelta = df_poststack["EW_Hdelta"].values.astype(float) # Hdel
EW_Hgamma = df_poststack["EW_Hgamma"].values.astype(float) # Hgam
# better names for clarity below
err_Hgamma = df_poststack["err_EW_Hgamma_from_robo"].values
# safety check that both pairs of coordinates used for the fit are simultaneously finite
# (otherwise a skipped 'nan' may cause a phase shift between the two series)
idx_good = np.logical_and(np.isfinite(EW_Hdelta),np.isfinite(EW_Hgamma))
# polyfit (x, y)
print("fyi EW_Hdelta")
print(EW_Hdelta)
print(len(EW_Hdelta[idx_good]))
print("fyi EW_Hgamma")
print(EW_Hgamma)
print(len(EW_Hgamma[idx_good]))
coeff, cov = np.polyfit(EW_Hdelta[idx_good], EW_Hgamma[idx_good], 1, full=False, cov=True)
m = coeff[0]
b = coeff[1]
err_m = np.sqrt(np.diag(cov))[0]
err_b = np.sqrt(np.diag(cov))[1]
# generate a rescaled Hgamma, call it rHgam; this is what will become the
# 'net Balmer' line
EW_rHgam = np.divide(np.subtract(EW_Hgamma, b), m)
# find the corresponding error
piece1 = np.add(np.power(err_Hgamma,2),np.power(err_b,2)).astype(float)
piece2 = np.power(np.subtract(EW_Hgamma,b),2)
piece3 = np.divide(np.power(err_m,2),np.power(m,2))
#err_rHgam = np.multiply(EW_rHgam,np.sqrt(np.subtract(np.divide(piece1,piece2),piece3)))
# approximate the error for now (note: the prefactor below is EW_Hgamma rather than EW_rHgam)
err_rHgam = EW_Hgamma*np.sqrt(np.add(np.divide(piece1,piece2),piece3))
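# (propagation note: piece1/piece2 is the relative variance of (EW_Hgamma - b) and piece3 that of the slope m,
# so a full propagation for EW_rHgam = (EW_Hgamma - b)/m would be EW_rHgam*np.sqrt(piece1/piece2 + piece3))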
# test: a line of best fit to the Hdelta and rHgamma should be a 1-to-1 line
idx_good_test = np.logical_and(np.isfinite(EW_Hdelta),np.isfinite(EW_rHgam))
coeff_test, cov_test =
|
np.polyfit(EW_Hdelta[idx_good_test], EW_rHgam[idx_good_test], 1, full=False, cov=True)
|
numpy.polyfit
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import numpy as np
import tensorflow as tf
import config as cfg
import cv2
from scipy import misc
import random
def image_resizing(imgs, label, depths):
'''
Randomly resize the images and labels by a factor between 0.75 and 1.25 in either height or width
:param imgs: stacked input images
:param label: segmentation labels
:param depths: depth maps (not resized here)
:return: resized imgs and label
'''
scale = tf.cast(np.random.uniform(0.75, 1.25), dtype=tf.float32)
img_h = tf.shape(imgs)[0]
img_w = tf.shape(imgs)[1]
h_scale = tf.to_int32(tf.to_float(img_h) * scale)
w_scale = tf.to_int32(tf.to_float(img_w) * scale)
if np.random.uniform(0, 1) < 0.5:
h_new = h_scale
w_new = img_w
else:
h_new = img_h
w_new = w_scale
new_shape = tf.stack([h_new, w_new])
img_d = tf.image.resize_images(imgs, new_shape)
label_d = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
label_d = tf.squeeze(label_d, squeeze_dims=[0])
return img_d, label_d
def image_scaling(imgs, label, depths, intrinsics):
"""
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: Training image to scale.
label: Segmentation mask to scale.
mask: 3 layer(top, mid, bot) mask to scale.
boundary: boundary mask to scale.
scale: ECP: [0.38, 1.0] [0.58, 1.25] [0.75, 1.5]
eTRIMS:[0.33, 0.75] [0.5, 1.0] [0.66, 1.25]
"""
# # fixed scales: not used, because the scale would then be fixed at a single value
# scales = [0.75, 0.87, 1.0, 1.15, 1.3, 1.45, 1.6, 1.75]
# sc = random.sample(scales, 1)
# print(sc)
# scale = tf.convert_to_tensor(sc, dtype=tf.float32)
# random scales range(0.75, 1.75)
scale = tf.random_uniform([1], minval=cfg.minScale, maxval=cfg.maxScale, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(imgs)[0]), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(imgs)[1]), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
imgs = tf.image.resize_images(imgs, new_shape)
label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
label = tf.squeeze(label, squeeze_dims=[0])
depths =tf.image.resize_images(depths, new_shape)
intrinsics = intrinsics * scale
r3 = tf.constant([0., 0., 1.], shape=[1, 1, 3])
r3 = tf.tile(r3, [cfg.seq_num, 1, 1])
intrinsics = tf.concat([intrinsics[:, 0:2, :], r3], axis=1)
return imgs, label, depths, intrinsics
def image_mirroring(imgs, label, depths):
"""
Randomly mirrors the images.
Args:
img: Training image to mirror.
label: Segmentation mask to mirror.
mask: 3 layer mask to mirror.
boundary: boundary mask to mirror.
"""
distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0]
mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
mirror = tf.boolean_mask([0, 1, 2], mirror)
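# 'mirror' is [1] (the width axis) with probability 0.5 and [] otherwise, so tf.reverse below
# either flips images, labels, and depths left-right together or leaves them all unchanged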
imgs = tf.reverse(imgs, mirror)
label = tf.reverse(label, mirror)
depths = tf.reverse(depths, mirror)
return imgs, label, depths
def random_crop_and_pad_image_label_depth(images, label, depths, crop_h, crop_w, ignore_label=255): # img:[h, w, 3*n] label:[h,w,n]
"""
Randomly crop and pads the input images.
Args:
image: Training image to crop/ pad.
label: Segmentation mask to crop/ pad.
mask: 3 layer mask to crop/pad.
boundary: boundary mask to crop/pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
ignore_label: Label to ignore during the training.
"""
label = tf.cast(label, dtype=tf.float32)
label = label - ignore_label # Needs to be subtracted and later added due to 0 padding.
depths = tf.cast(depths, dtype=tf.float32)
combined = tf.concat(axis=2, values=[images, label, depths])
image_shape = tf.shape(images)
combined_pad = tf.image.pad_to_bounding_box(combined, 0, 0, tf.maximum(crop_h, image_shape[0]), # top-left
tf.maximum(crop_w, image_shape[1]))
last_image_dim = images.get_shape()[-1]
combined_crop = tf.random_crop(combined_pad, [crop_h, crop_w, int(5 * cfg.seq_num)])
img_crop = combined_crop[:, :, :last_image_dim]
label_crop = combined_crop[:, :, last_image_dim:last_image_dim + cfg.seq_num]
label_crop = label_crop + ignore_label
label_crop = tf.cast(label_crop, dtype=tf.uint8)
depth_crop = combined_crop[:, :, (last_image_dim + cfg.seq_num):(last_image_dim + cfg.seq_num + cfg.seq_num)]
# Set static shape so that tensorflow knows shape at compile time.
img_crop.set_shape((crop_h, crop_w, last_image_dim))
label_crop.set_shape((crop_h, crop_w, cfg.seq_num))
depth_crop.set_shape((crop_h, crop_w, cfg.seq_num))
return img_crop, label_crop, depth_crop
def get_image_and_labels(images, label, depths, crop_h, crop_w):
# Set static shape so that tensorflow knows shape at compile time.
# # For other 512 x 512
# new_shape = tf.squeeze(tf.stack([crop_h, crop_w]))
# image = tf.image.resize_images(image, new_shape)
# label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
# label = tf.squeeze(label, squeeze_dims=[0])
images.set_shape((crop_h, crop_w, int(3*cfg.seq_num)))
label.set_shape((crop_h, crop_w, cfg.seq_num))
depths.set_shape((crop_h, crop_w, cfg.seq_num))
return images, label, depths
def random_brightness_contrast_hue_satu(img):
'''
Randomly adjust brightness, contrast, hue, and saturation
:param img:
:return:
'''
if np.random.uniform(0, 1) < 0.5:
distorted_image = tf.image.random_brightness(img, max_delta=32./255.)
distorted_image = tf.image.random_saturation(distorted_image, lower=0.5, upper=1.5)
distorted_image = tf.image.random_hue(distorted_image, max_delta=0.2)
distorted_image = tf.image.random_contrast(distorted_image, lower=0.5, upper=1.5)
else:
distorted_image = tf.image.random_brightness(img, max_delta=32./255.)
distorted_image = tf.image.random_contrast(distorted_image, lower=0.5, upper=1.5)
distorted_image = tf.image.random_saturation(distorted_image, lower=0.5, upper=1.5)
distorted_image = tf.image.random_hue(distorted_image, max_delta=0.2)
image = distorted_image
return image
def read_labeled_image_list(data_dir, data_list):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/label
/path/to/depth /path/to/pose /path/to/intrinsics'.
Returns:
Five lists with all file names for images, labels, depths, poses, and intrinsics, respectively.
"""
f = open(data_list, 'r')
images = []
labels = []
depths = []
poses = []
intrinsicses = []
for line in f:
try:
image, label, depth, pose, intrinsics = line.strip("\n").split(' ')
except ValueError: # Adhoc for test.
image = label = depth = pose = intrinsics = line.strip("\n")
images.append(data_dir + image)
labels.append(data_dir + label)
depths.append(data_dir + depth)
poses.append(data_dir + pose)
intrinsicses.append(data_dir + intrinsics)
return images, labels, depths, poses, intrinsicses
def block_patch(img_stack, margin=50):
new_stack = []
k = int(cfg.seq_num / 2)
for i in range(cfg.seq_num):
img = img_stack[:, :, int(i * 3):int((i + 1) * 3)]
if i == k:
# if i != k:
# prob = np.random.uniform(0, 1)
# if i != k and prob < 0.5:
shape = img.get_shape().as_list()
# create patch in random size
pad_size = tf.random_uniform([2], minval=int(shape[1] / 5), maxval=int(shape[1] / 3), dtype=tf.int32)
patch = tf.zeros([pad_size[0], pad_size[1], shape[-1]], dtype=tf.float32)
h_ = tf.random_uniform([1], minval=margin, maxval=shape[0] - pad_size[0] - margin, dtype=tf.int32)[0]
w_ = tf.random_uniform([1], minval=margin, maxval=shape[1] - pad_size[1] - margin, dtype=tf.int32)[0]
padding = [[h_, shape[0] - h_ - pad_size[0]], [w_, shape[1] - w_ - pad_size[1]], [0, 0]]
padded = tf.pad(patch, padding, "CONSTANT", constant_values=1)
img = tf.multiply(img, padded) # inpainted region is 0
new_stack.append(img)
return tf.concat(new_stack, axis=2)
def block_img_label(img_stack, label_stack, margin=10):
new_img_stack = []
new_label_stack = []
k = int(cfg.seq_num / 2)
for i in range(cfg.seq_num):
img = img_stack[:, :, int(i * 3):int((i + 1) * 3)]
label = tf.cast(label_stack[:, :, i:i + 1], tf.float32)
# if i == 10:
# if i != k:
prob = np.random.uniform(0, 1)
if prob < 1.0:
# if i != k and prob < 0.5:
shape = img.get_shape().as_list()
shape_label = label.get_shape().as_list()
# create patch in random size
pad_size = tf.random_uniform([2], minval=int(shape[1] / 5), maxval=int(shape[1] / 3), dtype=tf.int32) # [1/5, 1/3]
patch = tf.zeros([pad_size[0], pad_size[1], shape[-1]], dtype=tf.float32)
patch_label = tf.zeros([pad_size[0], pad_size[1], shape_label[-1]], dtype=tf.float32)
# Control the locations of block (height)
prob2 = np.random.uniform(0,1)
if prob2 < 0:
h_margin = np.random.randint(int(shape[0] / 2), int(shape[0] / 3 * 2 - margin))
# print(i, h_margin)
h_ = tf.random_uniform([1], minval=h_margin, maxval=shape[0] - pad_size[0], dtype=tf.int32)[0]
else:
h_ = tf.random_uniform([1], minval=margin, maxval=shape[0] - pad_size[0] - margin, dtype=tf.int32)[0]
w_ = tf.random_uniform([1], minval=margin, maxval=shape[1] - pad_size[1] - margin, dtype=tf.int32)[0]
padding = [[h_, shape[0] - h_ - pad_size[0]], [w_, shape[1] - w_ - pad_size[1]], [0, 0]]
padded = tf.pad(patch, padding, "CONSTANT", constant_values=1)
padded_label = tf.pad(patch_label, padding, "CONSTANT", constant_values=1)
img = tf.multiply(img, padded) # inpainted region is 0
label = tf.multiply(label, padded_label) # inpainted region is 0
label.set_shape((shape_label[0], shape_label[1], 1))
new_img_stack.append(img)
new_label_stack.append(tf.cast(label, tf.uint8))
return tf.concat(new_img_stack, axis=2), tf.concat(new_label_stack, axis=2)
def block_img_label_mul(img_stack, label_stack, margin=10):
new_img_stack = []
new_label_stack = []
k = int(cfg.seq_num / 2)
for m in range(2):
for i in range(cfg.seq_num):
img = img_stack[:, :, int(i * 3):int((i + 1) * 3)]
label = tf.cast(label_stack[:, :, i:i + 1], tf.float32)
prob = np.random.uniform(0, 1)
if prob < 1.0:
shape = img.get_shape().as_list()
shape_label = label.get_shape().as_list()
# create patch in random size
# pad_size = [shape[1] // 5, shape[1] // 5]
pad_size = tf.random_uniform([2], minval=int(shape[1] / 5), maxval=int(shape[1] / 3),
dtype=tf.int32) # [1/5, 1/3]
patch = tf.zeros([pad_size[0], pad_size[1], shape[-1]], dtype=tf.float32)
patch_label = tf.zeros([pad_size[0], pad_size[1], shape_label[-1]], dtype=tf.float32)
h_ = tf.random_uniform([1], minval=margin, maxval=shape[0] - pad_size[0] - margin, dtype=tf.int32)[0]
w_ = tf.random_uniform([1], minval=margin, maxval=shape[1] - pad_size[1] - margin, dtype=tf.int32)[0]
padding = [[h_, shape[0] - h_ - pad_size[0]], [w_, shape[1] - w_ - pad_size[1]], [0, 0]]
padded = tf.pad(patch, padding, "CONSTANT", constant_values=1)
padded_label = tf.pad(patch_label, padding, "CONSTANT", constant_values=1)
img = tf.multiply(img, padded) # inpainted region is 0
label = tf.multiply(label, padded_label) # inpainted region is 0
label.set_shape((shape_label[0], shape_label[1], 1))
new_img_stack.append(img)
new_label_stack.append(tf.cast(label, tf.uint8))
img_stack = tf.concat(new_img_stack, axis=2)
label_stack = tf.concat(new_label_stack, axis=2)
new_img_stack = []
new_label_stack = []
return img_stack, label_stack
def block_img_label_fix_size(img_stack, label_stack, margin=10):
new_img_stack = []
new_label_stack = []
new_occ_stack = []
k = int(cfg.seq_num / 2)
for i in range(cfg.seq_num):
img = img_stack[:, :, int(i * 3):int((i + 1) * 3)]
label = tf.cast(label_stack[:, :, i:i + 1], tf.float32)
# if i != k:
prob =
|
np.random.uniform(0, 1)
|
numpy.random.uniform
|
from ML_functions import max_f, logsense
from density_funcs import *
import numpy as np
#import matplotlib.pyplot as plt
def basic_constraints(n = 100):
M = np.logspace(-4, 7, num = 20)
fmd = np.zeros_like(M)
lfmd = np.zeros_like(M)
i = 0
for m in M:
lfmd[i] = max_f(m, n, 1.0, rho_NFW) #use the new function
fmd[i] = np.power(10, lfmd[i])
print(m, lfmd[i], fmd[i])
i = i+1
#plt.loglog(M,fmd/logsense(M,1e-2,0.5))
#plt.savefig('test.png')
np.savetxt('test.txt', fmd)
#return fmd
def baryon_errors(error):
M = np.logspace(-4, 7, num = 20)
m_array = np.array([])
n_20_bar_array =
|
np.array([])
|
numpy.array
|
#
# Copyright (C) 2018-2019 Pico Technology Ltd. See LICENSE file for terms.
#
# PS2000 Series (A API) STREAMING MODE EXAMPLE
# This example demonstrates how to call the ps2000a driver API functions in order to open a device,
# set up 2 channels, and collect streamed data (1 buffer).
# This data is then plotted as mV against time in ns.
import ctypes
import numpy as np
from picosdk.ps2000a import ps2000a as ps
import matplotlib
import matplotlib.pyplot as plt
from picosdk.functions import adc2mV, assert_pico_ok
import time
matplotlib.style.use('ggplot')
# Create chandle and status ready for use
chandle = ctypes.c_int16()
status = {}
# Open PicoScope 2000 Series device
# Returns handle to chandle for use in future API functions
status["openunit"] = ps.ps2000aOpenUnit(ctypes.byref(chandle), None)
assert_pico_ok(status["openunit"])
channels = ('PS2000A_CHANNEL_A', 'PS2000A_CHANNEL_B')
enabled = 1
disabled = 0
analogue_offset = 0.0
# Set up channel A
# handle = chandle
# channel = PS2000A_CHANNEL_A = 0
# enabled = 1
# coupling type = PS2000A_DC = 1
# range = PS2000A_10V (channel_range)
# analogue offset = 0 V
channel_range = ps.PS2000A_RANGE['PS2000A_10V']
status["setChA"] = ps.ps2000aSetChannel(chandle,
ps.PS2000A_CHANNEL['PS2000A_CHANNEL_A'],
enabled,
ps.PS2000A_COUPLING['PS2000A_DC'],
channel_range,
analogue_offset)
assert_pico_ok(status["setChA"])
# Set up channel B
# handle = chandle
# channel = PS2000A_CHANNEL_B = 1
# enabled = 1
# coupling type = PS2000A_DC = 1
# range = PS2000A_10V (channel_range)
# analogue offset = 0 V
status["setChB"] = ps.ps2000aSetChannel(chandle,
ps.PS2000A_CHANNEL['PS2000A_CHANNEL_B'],
enabled,
ps.PS2000A_COUPLING['PS2000A_DC'],
channel_range,
analogue_offset)
assert_pico_ok(status["setChB"])
class PicoScope():
def __init__(self, picoscope, channels, channel_range, coupling):
self.picoscope = picoscope
self.channels = channels
self.enabled = 1
self.analogue_offset = 0.0
self.coupling = coupling
self.channel_range = channel_range
def init(self, chandle):
self.picoscope.ps2000aOpenUnit(ctypes.byref(chandle), None)
channel_range = self.picoscope.PS2000A_RANGE[self.channel_range]
for i, channel in enumerate(self.channels):
self.picoscope.ps2000aSetChannel(
chandle,
self.picoscope.PS2000A_CHANNEL[channel],
self.enabled,
self.picoscope.PS2000A_COUPLING[self.coupling],
channel_range,
self.analogue_offset
)
picoscope = PicoScope(ps, channels, 'PS2000A_10V', 'PS2000A_DC')
picoscope.init(chandle)
# Size of capture
sizeOfOneBuffer = 500
numBuffersToCapture = 10
totalSamples = sizeOfOneBuffer * numBuffersToCapture
# Create buffers ready for assigning pointers for data collection
bufferAMax = np.zeros(shape=sizeOfOneBuffer, dtype=np.int16)
bufferBMax = np.zeros(shape=sizeOfOneBuffer, dtype=np.int16)
memory_segment = 0
# Set data buffer location for data collection from channel A
# handle = chandle
# source = PS2000A_CHANNEL_A = 0
# pointer to buffer max = ctypes.byref(bufferAMax)
# pointer to buffer min = ctypes.byref(bufferAMin)
# buffer length = maxSamples
# segment index = 0
# ratio mode = PS2000A_RATIO_MODE_NONE = 0
status["setDataBuffersA"] = ps.ps2000aSetDataBuffers(chandle,
ps.PS2000A_CHANNEL['PS2000A_CHANNEL_A'],
bufferAMax.ctypes.data_as(ctypes.POINTER(ctypes.c_int16)),
None,
sizeOfOneBuffer,
memory_segment,
ps.PS2000A_RATIO_MODE['PS2000A_RATIO_MODE_NONE'])
assert_pico_ok(status["setDataBuffersA"])
# Set data buffer location for data collection from channel B
# handle = chandle
# source = PS2000A_CHANNEL_B = 1
# pointer to buffer max = ctypes.byref(bufferBMax)
# pointer to buffer min = ctypes.byref(bufferBMin)
# buffer length = maxSamples
# segment index = 0
# ratio mode = PS2000A_RATIO_MODE_NONE = 0
status["setDataBuffersB"] = ps.ps2000aSetDataBuffers(chandle,
ps.PS2000A_CHANNEL['PS2000A_CHANNEL_B'],
bufferBMax.ctypes.data_as(ctypes.POINTER(ctypes.c_int16)),
None,
sizeOfOneBuffer,
memory_segment,
ps.PS2000A_RATIO_MODE['PS2000A_RATIO_MODE_NONE'])
assert_pico_ok(status["setDataBuffersB"])
interval = 128
# Begin streaming mode:
sampleInterval = ctypes.c_int32(interval)
#sampleUnits = ps.PS2000A_TIME_UNITS['PS2000A_US']
sampleUnits = ps.PS2000A_TIME_UNITS['PS2000A_NS']
# We are not triggering:
maxPreTriggerSamples = 0
autoStopOn = 0
# No downsampling:
downsampleRatio = 1
status["runStreaming"] = ps.ps2000aRunStreaming(chandle,
ctypes.byref(sampleInterval),
sampleUnits,
maxPreTriggerSamples,
totalSamples,
autoStopOn,
downsampleRatio,
ps.PS2000A_RATIO_MODE['PS2000A_RATIO_MODE_NONE'],
sizeOfOneBuffer)
assert_pico_ok(status["runStreaming"])
actualSampleInterval = sampleInterval.value
# sample units were set to PS2000A_NS above, so the interval is already in ns
actualSampleIntervalNs = actualSampleInterval
print("Capturing at sample interval %s ns" % actualSampleIntervalNs)
# We need a big buffer, not registered with the driver, to keep our complete capture in.
bufferCompleteA = np.zeros(shape=totalSamples, dtype=np.int16)
bufferCompleteB = np.zeros(shape=totalSamples, dtype=np.int16)
nextSample = 0
autoStopOuter = False
wasCalledBack = False
histogram_data = []
def plot_histogram(data, x = [], ax = None, c = 0, time_str = "", start_time_str = "", line = None, x_vec = None, y1_data = None, pause_time = 0.1, num_of_bins = 50):
fig = None
if ax is None:
plt.ion()
fig, ax = plt.subplots(2)
#fig.subplots_adjust(top = 0.85)
line, = ax[1].plot(x_vec, y1_data, '-o', alpha = 0.8)
ax[1].set_xlabel('Running time')
ax[1].set_ylabel('Time interval (ns)')
plt.show()
else:
plt.pause(pause_time)
ax[0].cla()
x.append(data)
n, bins, patches = ax[0].hist(x, num_of_bins, density = True)
ax[0].set_xlabel('Time interval (ns)')
ax[0].set_ylabel('Counts (' + str(c) + ")")
ax[0].set_title("Coincident gammas in detectors A and B\nExperiment " + start_time_str + " took " + time_str)
if fig is None:
# after the figure, axis, and line are created, we only need to update the y-data
line.set_ydata(y1_data)
# adjust limits if new data goes beyond bounds
if np.min(y1_data) <= line.axes.get_ylim()[0] or np.max(y1_data) >= line.axes.get_ylim()[1]:
plt.ylim([np.min(y1_data) - np.std(y1_data), np.max(y1_data) + np.std(y1_data)])
return x, ax, line
class Buffer():
def __init__(self, picoscope, buffer_size, buffer_channels, total_samples, memory_segment):
self.picoscope = picoscope
self.buffer_size = buffer_size
self.total_samples = total_samples
self.memory_segment = memory_segment
self.buffer_channels = buffer_channels
self.buffer_completes = []
self.buffer_maxes = []
def init_buffers(self, chandle):
self.buffer_completes = [np.zeros(shape = self.total_samples, dtype = np.int16) for _ in self.buffer_channels]
self.buffer_maxes = [np.zeros(shape = self.buffer_size, dtype = np.int16) for _ in self.buffer_channels]
for i, channel in enumerate(self.buffer_channels):
self.picoscope.ps2000aSetDataBuffers(
chandle,
self.picoscope.PS2000A_CHANNEL[channel],
self.buffer_maxes[i].ctypes.data_as(ctypes.POINTER(ctypes.c_int16)),
None,
self.buffer_size,
self.memory_segment,
self.picoscope.PS2000A_RATIO_MODE['PS2000A_RATIO_MODE_NONE'])
def complete(self, next_sample, end_destination, start_index, end_source):
for i, channel in enumerate(self.buffer_channels):
self.buffer_completes[i][next_sample:end_destination] = self.buffer_maxes[i][start_index:end_source]
buffer = Buffer(
ps,
sizeOfOneBuffer,
('PS2000A_CHANNEL_A', 'PS2000A_CHANNEL_B'),
totalSamples,
memory_segment
)
buffer.init_buffers(chandle)
class Streaming():
def __init__(self, picoscope, buffer):
self.picoscope = picoscope
self.buffer = buffer
self.auto_stop = False
self.was_called_back = False
self.next_sample = 0
self.callback = self.picoscope.StreamingReadyType(self.streaming_callback)
def streaming_callback(self, handle, noOfSamples, startIndex, overflow, triggerAt, triggered, autoStop, param):
self.was_called_back = True
self.buffer.complete(self.next_sample, self.next_sample + noOfSamples, startIndex, startIndex + noOfSamples)
self.next_sample += noOfSamples
if autoStop:
self.auto_stop = True
def acquire(self, chandle):
while self.next_sample < self.buffer.total_samples and not self.auto_stop:
self.was_called_back = False
self.picoscope.ps2000aGetStreamingLatestValues(chandle, self.callback, None)
if not self.was_called_back:
# If we weren't called back by the driver, this means no data is ready.
# Sleep for a short while before trying again.
time.sleep(0.01)
stream = Streaming(ps, buffer)
def streaming_callback(handle, noOfSamples, startIndex, overflow, triggerAt, triggered, autoStop, param):
global nextSample, autoStopOuter, wasCalledBack
wasCalledBack = True
destEnd = nextSample + noOfSamples
sourceEnd = startIndex + noOfSamples
bufferCompleteA[nextSample:destEnd] = bufferAMax[startIndex:sourceEnd]
bufferCompleteB[nextSample:destEnd] = bufferBMax[startIndex:sourceEnd]
nextSample += noOfSamples
if autoStop:
autoStopOuter = True
# Convert the python function into a C function pointer.
cFuncPtr = ps.StreamingReadyType(streaming_callback)
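# The driver invokes this callback from within ps2000aGetStreamingLatestValues whenever new samples are available.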
i = 1000
bins = []
ax = None
start_time = time.time()
start_time_str = time.strftime("%Y-%m-%d %H:%M:%S")
x_vec =
|
np.linspace(0, 1, 100 + 1)
|
numpy.linspace
|
"""
This module contains functions to create nice plots.
"""
__author__ = 'johannes'
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
class PSTH:
"""
Peri-Stimulus Time Histogram
.. image:: _images/psth_plot.png
:alt: PSTH Plot
:width: 400px
**Example**
The image above was created by the following code:
::
spikes = np.random.randint(0, 2, size=(2, 200))
a = PSTH(spikes)
a.show_plot()
a.save_plot()
plt.show()
"""
def __init__(self, spiketrain, binsize=20):
"""
Initialize the PSTH plot with a spiketrain
:param spiketrain: The spiketrain to be plotted
:type spiketrain: Numpy array
:param binsize: The size of the bins
:return: None
"""
self.spiketrain = spiketrain
self.binsize = binsize
self.fig = None
def show_plot(self, neuron_indices=None):
"""
Shows the PSTH plot.
:param neuron_indices: The indices of the neurons to be plotted
:type neuron_indices: List or Tuple
"""
neurons, timesteps = self.spiketrain.shape
bins = range(0, timesteps + self.binsize, self.binsize)
if not neuron_indices:
print("Plotting all neurons!!")
neuron_indices = range(neurons)
n_plots = len(neuron_indices) # Number of subplots
fig, axes = plt.subplots(nrows=n_plots)
plt.title("Peri Stimulus Time Histogram")
if not isinstance(axes, np.ndarray):
axes = [axes]
for i, axis in enumerate(axes):
neuron_index = neuron_indices[i]
# Histogram
times = np.where(self.spiketrain[neuron_index])[0]
if len(times) >= 1:
axis.hist(times, bins, histtype='bar', stacked=True, fill=True, facecolor='green', alpha=0.5, zorder=0)
# Scatter (Spikes)
y = np.ones(times.shape)
axis.scatter(times, y, c='r', s=20, marker="x", zorder=1)
axis.set_title('Neuron %d' % neuron_index)
axis.set_ylim(bottom=0)
axis.set_xlim(0, timesteps)
plt.tight_layout()
plt.show(block=False)
self.fig = fig
def save_plot(self, filename=None):
"""
Saves the plot.
:param filename: Name of the file. Default: 'plot.png'
:type filename: String
"""
if not self.fig:
self.show_plot()
if not filename:
filename = 'plot.png'
plt.figure(self.fig.number)
plt.savefig(filename)
print('saved psth plot as %s' % filename)
class SpikePlot:
"""
Most Simple SpikePlot
.. image:: _images/spike_plot.png
:alt: SpikePlot
:width: 400px
**Example**
The image above was created by the following code:
::
spikes = np.random.randint(0, 2, size=(10, 200))
a = SpikePlot(spikes)
a.show_plot(neuron_indices=[1,2,6])
a.save_plot()
plt.show()
"""
def __init__(self, spiketrain):
"""
Initialize the Spike plot with a spiketrain
:param spiketrain: The spiketrain to be plotted
:type spiketrain: Numpy array
:return: None
"""
self.spiketrain = spiketrain
self.fig = None
def show_plot(self, neuron_indices=None):
"""
Shows the Spike plot.
:param neuron_indices: The indices of the neurons to be plotted
:type neuron_indices: List or Tuple
"""
neurons, timesteps = self.spiketrain.shape
if not neuron_indices:
print("Plotting all neurons!!")
neuron_indices = range(neurons)
n_plots = len(neuron_indices) # Number of subplots
fig = plt.figure()
plt.title("SpikePlot")
for i in range(len(neuron_indices)):
print(i)
neuron_index = neuron_indices[i]
times = np.where(self.spiketrain[neuron_index])[0]
# Scatter (Spikes)
y = i * np.ones(times.shape)
plt.scatter(times, y, c='r', s=40, marker="|", zorder=1)
plt.xlim(0, timesteps)
plt.ylabel('Neuron')
plt.yticks(range(len(neuron_indices)), neuron_indices)
plt.tight_layout()
plt.show(block=False)
self.fig = fig
def save_plot(self, filename=None):
"""
Saves the plot.
:param filename: Name of the file. Default: 'plot.png'
:type filename: String
"""
if not self.fig:
self.show_plot()
if not filename:
filename = 'plot.png'
plt.figure(self.fig.number)
plt.savefig(filename)
print('saved spike plot as %s' % filename)
class HeatmapAnimation:
def __init__(self, fps=30):
self.values = []
self.ani = None
self.fps = fps
self.vmin = -1
self.vmax = 1
def show_animation(self):
"""
Shows the Animation
:return: pyplot animation object
"""
fig = plt.figure()
ims = []
for value in self.values:
im = plt.imshow(value, vmin=self.vmin, vmax=self.vmax, interpolation='none', cmap=cm.coolwarm)
ims.append([im])
self.ani = animation.ArtistAnimation(fig, ims, interval=1000 / self.fps, repeat_delay=3000)
plt.show(block=False)
return self.ani
def save_animation(self, filename=None):
"""
Saves the animation
:param filename: Name of the file. Default: 'heatmap_plot.mp4'
:type filename: String
:return:
"""
if not filename:
filename = 'heatmap_plot.mp4'
if not self.ani:
self.show_animation()
self.ani.save(filename, writer='mencoder', fps=30)
print("Saved heatmap animation as %s " % filename)
class HintonPlot(object):
"""
Hinton diagrams are useful for visualizing the values of a 2D array (e.g.
a weight matrix): Positive and negative values are represented by white and
black squares, respectively, and the size of each square represents the
magnitude of each value.
Initial idea from <NAME> on the SciPy Cookbook
"""
def __init__(self, matrix, max_weight=None, ax=None):
"""
Draw Hinton diagram for visualizing a weight matrix.
http://matplotlib.org/examples/specialty_plots/hinton_demo.html
"""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))
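# (this rounds the largest |weight| up to the next power of two, so the biggest square still fits in its cell)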
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x,y),w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w))
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
plt.show(block=False)
class Histogram3DPlot(object):
"""
.. image:: _images/threed_histogram_plot.png
:alt: Histogram 3D Plot
:width: 400px
**Example**
The image above was created by the following code:
::
hist = Histogram3DPlot(np.random.random((5, 5)))
plt.show()
"""
def __init__(self, matrix, xlimits=None, ylimits=None, width_factor=0.9, alpha=1.0, color='#00ceaa', ax=None):
fig = plt.figure()
ax1 = fig.add_subplot(111, projection='3d')
xlimits = xlimits if xlimits is not None else (0, 1)
ylimits = ylimits if ylimits is not None else (0, 1)
xsize, ysize = matrix.shape
xpos, ypos = np.meshgrid(np.linspace(xlimits[0], xlimits[1], xsize), np.linspace(ylimits[0], ylimits[1], ysize))
xpos = xpos.flatten()
ypos = ypos.flatten()
zpos =
|
np.zeros(xsize * ysize)
|
numpy.zeros
|
from scipy import signal
import numpy as np
from .base import VHRMethod
class CHROM(VHRMethod):
""" This method is described in the following paper:
"Remote heart rate variability for emotional state monitoring"
by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
methodName = 'CHROM'
def __init__(self, **kwargs):
super(CHROM, self).__init__(**kwargs)
def apply(self, X):
#self.RGB = self.getMeanRGB()
#X = signal.detrend(self.RGB.T)
# calculation of new X and Y
Xcomp = 3*X[0] - 2*X[1]
Ycomp = (1.5*X[0])+X[1]-(1.5*X[2])
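# Xcomp and Ycomp are the CHROM chrominance projections X = 3R - 2G and Y = 1.5R + G - 1.5B of the RGB traces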
# standard deviations
sX = np.std(Xcomp)
sY =
|
np.std(Ycomp)
|
numpy.std
|
import time
from itertools import chain
import numpy as np
import theano
from theano import tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from tqdm import trange
from encoders.token.tokendata import TokenAutoencoderDatasetExtractor
theano.config.floatX = "float32"
from encoders.baseencoder import AbstractEncoder
from data.dataimport import import_data
from data.featuredict import get_empirical_distribution
from deeplearning.layers import GRU, AveragingGRU
from deeplearning.optimization import nesterov_rmsprop_multiple, log_softmax, dropout
from deeplearning.utils import Bunch
class SequenceGruSupervisedEncoderModel:
"""
A sequence GRU supervised encoder
"""
def __init__(self, embedding_size: int, vocabulary_size: int, empirical_distribution, representation_size: int,
hyperparameters: dict, encoder_type: str, name: str = "GRUSequenceSupervisedEncoder",
use_centroid=False):
self.__hyperparameters = hyperparameters
self.__name = name
log_init_noise = self.__hyperparameters["log_init_noise"]
self.__memory_size = representation_size
self.__embedding_size = embedding_size
embeddings = np.random.randn(vocabulary_size, embedding_size) * 10 ** log_init_noise
self.__embeddings = theano.shared(embeddings.astype(theano.config.floatX), name=name + ":embeddings")
self.__name_bias = theano.shared(np.log(empirical_distribution).astype(theano.config.floatX),
name=name + ":name_bias")
encoder_init_state = np.random.randn(representation_size) * 10 ** log_init_noise
self.__encoder_init_state = theano.shared(encoder_init_state.astype(theano.config.floatX),
name=name + ":encoder_init_state")
self.__rng = RandomStreams()
self.__input_sequence = T.ivector(name + ":input_sequence")
self.__output_sequence = T.ivector(name + ":output_sequence")
self.__inverted_output_sequence = self.__output_sequence[::-1]
if encoder_type == 'gru':
self.__encoder = GRU(self.__embeddings, representation_size, embedding_size,
self.__hyperparameters, self.__rng, name=name + ":GRUSequenceEncoder",
use_centroid=use_centroid)
elif encoder_type == 'averaging_gru':
self.__encoder = AveragingGRU(self.__embeddings, representation_size, embedding_size,
self.__hyperparameters, self.__rng,
name=name + ":AveragingGRUSequenceEncoder", use_centroid=use_centroid)
else:
raise Exception("Unrecognized encoder type `%s`, possible options `gru` and `averaging_gru`" % encoder_type)
self.__params = {"embeddings": self.__embeddings,
"encoder_init_state": self.__encoder_init_state}
self.__params.update(self.__encoder.get_params())
self.__standalone_representation = T.dvector(self.__name + ":representation_input")
@property
def rng(self):
return self.__rng
@property
def parameters(self):
return self.__params
@property
def input_sequence_variable(self):
return self.__input_sequence
@property
def output_sequence_variable(self):
return self.__output_sequence
@property
def representation_variable(self):
return self.__standalone_representation
def get_encoding(self):
"""
Return the encoding of the sequence.
"""
encoded_rep = self.__encoder.get_encoding(self.__input_sequence, self.__encoder_init_state)
return encoded_rep
class SequenceGruSupervisedEncoder(AbstractEncoder):
"""
Train an encoder
"""
def __init__(self, training_file, hyperparameters, encoder_type='gru', use_centroid=False):
"""
:param training_file:
:type hyperparameters: dict
:return:
"""
self.__hyperparameters = hyperparameters
self.dataset_extractor = TokenAutoencoderDatasetExtractor(training_file)
empirical_distribution = get_empirical_distribution(self.dataset_extractor.feature_map,
chain(*self.dataset_extractor.get_nonnoisy_samples(
import_data(training_file))))
self.__encoder = SequenceGruSupervisedEncoderModel(self.__hyperparameters["embedding_size"],
len(self.dataset_extractor.feature_map),
empirical_distribution,
self.__hyperparameters["representation_size"],
self.__hyperparameters, encoder_type=encoder_type,
use_centroid=use_centroid)
target_embeddings = np.random.randn(self.__hyperparameters["representation_size"],
self.dataset_extractor.num_equivalence_classes) * 10 ** \
self.__hyperparameters[
"log_init_noise"]
self.__target_embeddings = theano.shared(target_embeddings.astype(theano.config.floatX),
name="target_embeddings")
self.__target_embeddings_dropout = dropout(self.__hyperparameters['dropout_rate'], self.__encoder.rng,
self.__target_embeddings, True)
self.__trained_parameters = None
self.__compiled_methods = None
REQUIRED_HYPERPARAMETERS = {'log_learning_rate', 'rmsprop_rho', 'momentum', 'grad_clip', 'minibatch_size',
'embedding_size', 'representation_size', 'log_init_noise', 'dropout_rate'}
def __get_loss(self, target_class, use_dropout):
encoding = self.__encoder.get_encoding()
target_embeddings = self.__target_embeddings_dropout if use_dropout else self.__target_embeddings
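# project the L2-normalized encoding onto the target-class embeddings and take a log-softmax over classes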
logprobs = log_softmax(T.dot(encoding / encoding.norm(2), target_embeddings).dimshuffle('x', 0))[0]
return logprobs, logprobs[target_class]
def __compile_train_functions(self):
target_class = T.iscalar(name="target_class")
_, ll = self.__get_loss(target_class, True)
wrt_vars = list(self.__encoder.parameters.values()) + [self.__target_embeddings]
grad = T.grad(ll, wrt_vars)
grad_acc = [theano.shared(np.zeros(param.get_value().shape).astype(theano.config.floatX)) for param in wrt_vars] \
+ [theano.shared(0, name="sample_count")]
self.__compiled_methods.grad_accumulate = theano.function(
inputs=[self.__encoder.input_sequence_variable, target_class],
updates=[(v, v + g) for v, g in zip(grad_acc, grad)] + [
(grad_acc[-1], grad_acc[-1] + 1)],
outputs=ll)
normalized_grads = [T.switch(grad_acc[-1] > 0, g / grad_acc[-1].astype(theano.config.floatX), g) for g in
grad_acc[:-1]]
step_updates, ratios = nesterov_rmsprop_multiple(wrt_vars, normalized_grads,
learning_rate=10 ** self.__hyperparameters[
"log_learning_rate"],
rho=self.__hyperparameters["rmsprop_rho"],
momentum=self.__hyperparameters["momentum"],
grad_clip=self.__hyperparameters["grad_clip"],
output_ratios=True)
step_updates.extend([(v, T.zeros(v.shape)) for v in grad_acc[:-1]]) # Set accumulators to 0
step_updates.append((grad_acc[-1], 0))
self.__compiled_methods.grad_step = theano.function(inputs=[], updates=step_updates, outputs=ratios)
def __compile_test_functions(self):
target_class = T.iscalar(name="target_class")
logprobs, ll = self.__get_loss(target_class, False)
self.__compiled_methods.ll_and_logprobs = theano.function(
inputs=[self.__encoder.input_sequence_variable, target_class],
outputs=[ll, logprobs])
self.__compiled_methods.encode = theano.function(inputs=[self.__encoder.input_sequence_variable],
outputs=self.__encoder.get_encoding())
def __compile_if_needed(self):
if self.__compiled_methods is None:
print("Compiling Methods...")
self.__compiled_methods = Bunch()
self.__compile_train_functions()
self.__compile_test_functions()
print("Compilation Finished...")
def train(self, training_file: str, validation_file: str, max_iter: int = 1000, patience: int = 25,
validation_check_limit: int = 1, semantically_equivalent_noise: bool = False,
additional_code_to_run=None) -> tuple:
self.__compile_if_needed()
minibatch_size = self.__hyperparameters["minibatch_size"]
training_data = import_data(training_file)
training_set = list(self.dataset_extractor.get_dataset_for_encoder(training_data, return_num_tokens=True))
validation_set = list(
self.dataset_extractor.get_dataset_for_encoder(import_data(validation_file), return_num_tokens=True))
best_score = float('-inf')
train_x_ent = 0
epochs_not_improved = 0
historic_values = []
trainable_parameters = list(self.__encoder.parameters.values()) + [self.__target_embeddings]
print("Num classes: %s" % self.dataset_extractor.num_equivalence_classes)
def compute_validation_score() -> float:
return compute_score(validation_set)
def compute_score(dataset) -> float:
# Get all encodings
sum_ll = 0.
correct = 0
for data in dataset:
ll, logprobs = self.__compiled_methods.ll_and_logprobs(data[0], data[2])
sum_ll += ll
if
|
np.argmax(logprobs)
|
numpy.argmax
|
#!/usr/bin/python3
# coding: UTF-8
"""
Line tracer physical model
Description
To change the physical model, edit the following parameters and the mtrs2twist() method.
- LF_MOUNT_POS_PRF # placement of the photoreflectors
- LF_WEIGHT # body weight in g
- SHAFT_LENGTH # shaft length in mm (per shaft)
- TIRE_DIAMETER # tire diameter in mm
- COEF_K_P # proportional control coefficient
- PARAMS_MU_CLIN # viscous friction coefficient for linear motion
- PARAMS_NU_CROT # viscous friction coefficient for rotational motion
References
- 三平 満司 (Mitsuji Sampei): "Feedback Control of Nonholonomic Systems," 計測と制御 (Journal of the SICE),
Vol. 36, No. 6, pp. 396-403, 1997
- <NAME>, <NAME>, <NAME> and <NAME>: "Wheeled Mobile Robotics,"
Elsevier, 2017
"Electronic, Information and Communication Design and Drafting," Electronics, Information and Communication Engineering Program, Faculty of Engineering, Niigata University
All rights reserved 2019-2020 (c) Sh<NAME>AMATSU
"""
from mils_line_follower_ctrl import LFController
from mils_line_follower_phrf import LFPhotoReflector
from scipy.integrate import odeint
import numpy as np
import pygame
# Body parameters
#
# Relative coordinates [mm] of the photoreflectors (*)
# measured from the shaft center (+)
#
# --|-- * pr1 (dx1,dy1)
# | * pr2 (dx2,dy2)
# (0,0) + ------------- → x
# ↓ | * pr3 (dx3,dy3)
# y --|-- * pr4 (dx4,dy4)
#
# ((dx1,dy1), (dx2,dy2), (dx3,dy3), (dx4,dy4))
#
LF_MOUNT_POS_PRF = ((120,-60), (100,-20), (100,20), (120,60)) # mm
LF_WEIGHT = 360 # body weight in g (grams)
SHAFT_LENGTH = 50 # shaft length in mm (per shaft)
TIRE_DIAMETER = 58 # tire diameter in mm
# Control parameters (to be tuned)
COEF_K_P = 3.0 # proportional control coefficient
## Physical model parameters (to be tuned)
PARAMS_MU_CLIN = 1e-3 # kg/s viscous friction coefficient for linear motion of the body (for convenience)
PARAMS_NU_CROT = 1e-3 # kg·m^2/s viscous friction coefficient for rotational motion of the body (for convenience)
# Number of photoreflectors
NUM_PHOTOREFS = 4
# Color definitions
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = ( 255, 128, 0)
def rotate_pos(pos,center,angle):
""" 座標位置の回転 """
rotmtx = np.asarray([
[ np.cos(angle), -np.sin(angle) ],
[ np.sin(angle), np.cos(angle) ]
])
return rotmtx.dot(pos-center) + center
class LFPhysicalModel:
""" ライントレーサ物理モデルクラス
ライントレーサの物理モデルを実装しています。
モータ制御信号が力に比例するという非常に単純なモデルです。
左右の和を前後運動、左右の差を回転運動に換算しています。
入力 モータ制御信号 [-1,1]x2
出力 フォトリフレクタの値 [0,1]x4
"""
def __init__(self, course, \
weight = LF_WEIGHT, \
mntposprs = LF_MOUNT_POS_PRF):
# Set properties
self._course = course
self._weight = weight # g
self._mntposprs = mntposprs
self._x_mm = SHAFT_LENGTH + 10 # mm
self._y_mm = SHAFT_LENGTH + 10 # mm
self._angle_rad = 0.0 # rad
# Initialization
self.reset()
# Set up the controller and photoreflectors
self._controller = LFController()
self._prs = [ LFPhotoReflector(self._course) \
for idx in range(NUM_PHOTOREFS)]
for idx in range(NUM_PHOTOREFS):
self._prs[idx].value = 0.0
self._controller.photorefs = self._prs
def mtrs2twist(self,mtrs,v0,w0,fps):
""" モータ制御信号→速度変換
h = 1/fps 間隔で制御信号をゼロ次ホールドすると仮定
"""
# 車体重量の換算
mc_kg = 1e-3*self._weight # g -> kg
# モーター電圧から速度・角速度の計算
u_r = mtrs[1]
u_l = mtrs[0]
ulin = COEF_K_P*(u_r + u_l)/2.0 # 直線運動
urot = COEF_K_P*(u_r - u_l)/2.0 # 回転運動
# サンプリング間隔
h = 1/fps
# 直線速度の計算
mu_clin = PARAMS_MU_CLIN # 直線運動の粘性摩擦係数
Tlin = (mc_kg+0.5)/(mu_clin+20) # 時定数
clin = 1/(0.4*mu_clin+8)
#v1 = v0*np.exp(-h/Tlin) + clin*(1.0-np.exp(-h/Tlin))*ulin
v1 = np.exp(-h/Tlin)*( v0 - clin*ulin ) + clin*ulin
# 回転速度の計算
nu_crot = PARAMS_NU_CROT # 回転運動の粘性摩擦係数
Trot = (mc_kg+0.5)/(40*nu_crot+20) # 時定数
crot = 1/(8*nu_crot+0.4)
#w1 = w0*np.exp(-h/Trot) + crot*(1.0-np.exp(-h/Trot))*urot
w1 = np.exp(-h/Trot)*( w0 - crot*urot ) + crot*urot
# 出力
twist = { "linear":{"x":v1, "y":0., "z":0.}, "angular":{"x":0., "y":0., "z":w1} }
return twist
def drive(self,fps):
""" 車体駆動メソッド"""
# センサ値更新
self._sense()
# モーター制御信号取得
mtrs = np.asarray(self._controller.prs2mtrs())
# 車体状態更新
self.updatestate(mtrs,fps)
def updatestate(self,mtrs,fps):
""" 車体駆動メソッド (2020)"""
# モータ―制御信号→Twist型
v0_m_s = 1e-3*self._v_mm_s # 前時刻直線速度 m/s
w0_rad_s = self._w_rad_s # 前時刻角速度 rad/s
twist = self.mtrs2twist(mtrs,v0_m_s,w0_rad_s,fps)
v1_m_s = twist["linear"]["x"] # 現時刻直線速度 m/s
w1_rad_s = twist["angular"]["z"] # 現時刻角速度 rad/s
# 位置・角度情報更新
t = np.linspace(0,1/fps,2)
pos = [ 0.0 for idx in range(3) ]
pos[0] = 1e-3*self._x_mm # m
pos[1] = 1e-3*self._y_mm # m
pos[2] = self._angle_rad # rad
p = odeint(self._odefun,pos,t,args=(v1_m_s,w1_rad_s))
pos = p[-1]
# Update the state
self._v_mm_s = 1e3*v1_m_s # m/s -> mm/s
self._w_rad_s = w1_rad_s
self._x_mm = 1e3*pos[0] # m -> mm
self._y_mm = 1e3*pos[1] # m -> mm
self._angle_rad = pos[2]
def _odefun(self,pos,t,v,w):
""" 状態方程式 """
# d_ ( x ) = ( cosθ )v + ( 0 )ω
# dt ( y ) ( sinθ ) ( 0 )
# ( θ ) ( 0 ) ( 1 )
phi = pos[2]
return [ np.cos(phi)*v, np.sin(phi)*v, w ]
@property
def course(self):
return self._course
@property
def angle(self):
return self._angle_rad
def reset(self):
self._v_mm_s = 0.0 # mm/s
self._w_rad_s = 0.0 # rad/s
def set_position_mm(self,x,y):
self._x_mm = x # mm
self._y_mm = y # mm
def move_px(self,dx_px,dy_px):
res = self._course.resolution # mm/pixel
self._x_mm = self._x_mm + dx_px*res # mm
self._y_mm = self._y_mm + dy_px*res # mm
def rotate(self,angle):
self._angle_rad = angle # rad
def set_interval(self,interval):
self._interval = interval
def draw_body(self,screen):
rect = np.asarray(self.get_rect_px())
center = np.asarray(self.get_center_px())
# Draw the vehicle body
apos00 = np.dot([[1,0,0,0],[0,1,0,0]],rect)
apos10 = np.dot([[1,0,0,0],[0,1,0,1]],rect)
apos01 = np.dot([[1,0,1,0],[0,1,0,0]],rect)
apos11 = np.dot([[1,0,1,0],[0,1,0,1]],rect)
#
angle = self._angle_rad
apos00 = rotate_pos(apos00,center,angle)
apos10 = rotate_pos(apos10,center,angle)
apos01 = rotate_pos(apos01,center,angle)
apos11 = rotate_pos(apos11,center,angle)
#
pos00 = apos00.tolist()
pos10 = apos10.tolist()
pos01 = apos01.tolist()
pos11 = apos11.tolist()
#
pygame.draw.polygon(screen, YELLOW, [pos00,pos01,pos11,pos10],0)
# Read out the resolution
res = self._course.resolution # mm/pixel
# Draw the left tire
pos_ltf = center + np.asarray([TIRE_DIAMETER/2,-SHAFT_LENGTH])/res
pos_ltr = center +
|
np.asarray([-TIRE_DIAMETER/2,-SHAFT_LENGTH])
|
numpy.asarray
|
# -*- coding: utf-8 -*-
"""
Serial python calculation if there's no GPU
v1.0 taken out of Howie-Whelan_v1 Aug 2020
v1.1 Modified along with Howie-Whelan_v2.3 Dec 2020
@author: <NAME>, <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
eps = 0.000000000001
def howieWhelan(F_in,Xg,X0i,s,alpha,t):
#for integration over n slices
# All dimensions in nm
Xgr = Xg.real
Xgi = Xg.imag
s = s + eps
gamma = np.array([(s-(s**2+(1/Xgr)**2)**0.5)/2, (s+(s**2+(1/Xgr)**2)**0.5)/2])
q = np.array([(0.5/X0i)-0.5/(Xgi*((1+(s*Xgr)**2)**0.5)), (0.5/X0i)+0.5/(Xgi*((1+(s*Xgr)**2)**0.5))])
beta = np.arccos((s*Xgr)/((1+s**2*Xgr**2)**0.5))
#scattering matrix
C=np.array([[np.cos(beta/2), np.sin(beta/2)],
[-np.sin(beta/2)*np.exp(complex(0,alpha)),
np.cos(beta/2)*np.exp(complex(0,alpha))]])
#inverse of C is just its transpose
Ci=np.transpose(C)
G=np.array([[np.exp(2*np.pi*1j*(gamma[0]+1j*q[0])*t), 0],
[0, np.exp(2*np.pi*1j*(gamma[1]+1j*q[1])*t)]])
F_out = C @ G @ Ci @ F_in
return F_out
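# A minimal usage sketch (illustrative values only, not taken from the original
# script): propagate an incident amplitude vector (transmitted, diffracted)
# through a single 1 nm slice with complex extinction distance Xg and
# absorption length X0i:
#   F0 = np.array([1.0 + 0j, 0.0 + 0j])
#   F1 = howieWhelan(F0, Xg=20.0 + 200.0j, X0i=500.0, s=0.01, alpha=0.0, t=1.0)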
def gdotR(rD, bscrew, bedge, beUnit, bxu, d2c, nu, gD):
# returns displacement vector R at coordinate xyz
r2 = np.dot(rD,rD)
rmag = r2**0.5
#cos(theta) & sin(theta) relative to Burgers vector
ct = np.dot(rD,beUnit)/rmag
sbt = np.cross(beUnit,rD)/rmag
st = sbt[2]
# From Head et al. Eq. 2.31, p31
# infinite screw dislocation displacement is b.phi/(2pi):
# using x=r.sin(phi) & y=r.cos(phi)
# have to take away pi to avoid double-valued tan
Rscrew = np.array((0,0,bscrew*(np.arctan(rD[1]/rD[0])-np.pi*(rD[0]<0))/(2*np.pi)))
# infinite edge dislocation displacement field part 1: b.sin2theta/4pi(1-nu)
# using sin(2theta)=2sin(theta)cos(theta)
Redge0 = bedge*ct*st/(2*np.pi*(1-nu))
# part 2: (b x u)*( (1-2v)ln(r)/2(1-v) + cos(2theta)/4(1-v) )/2pi
# using cos(2theta)= cos^2(theta) - sin^2(theta)
Redge1 = bxu*( ( (2-4*nu)*np.log(rmag)+(ct**2-st**2) )/(8*np.pi*(1-nu)))
# total displacement
R = (Rscrew + Redge0 + Redge1)
# dot product with g-vector
gR = np.dot(gD,R)
# gR = np.dot(gD,Redge1)
return gR
def calculate_deviations(xsiz, zsiz, pix2nm, t, dt, u, g, b, c2d, nu, phi, psi, theta):
# calculates the local change in deviation parameter s as the z-gradient of the displacement field
#dislocation components & g: in the dislocation reference frame
bscrew =
|
np.dot(b,u)
|
numpy.dot
|
import os
import numpy as np
from bmtk.builder import NetworkBuilder
from bmtk.builder.auxi.node_params import positions_columinar, xiter_random
# List of non-virtual cell models
bio_models = [
# {
# 'pop_name': 'Scnn1a', 'ei': 'e',
# 'morphology': 'Scnn1a_473845048_m.swc',
# 'model_template': 'ctdb:Biophys1.hoc',
# 'dynamics_params': '472363762_fit.json'
# },
{
'pop_name': 'Rorb', 'ei': 'e',
'morphology': 'Rorb_325404214_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '473863510_fit.json'
},
{
'pop_name': 'Nr5a1', 'ei': 'e',
'morphology': 'Nr5a1_471087815_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '473863035_fit.json'
},
{
'pop_name': 'PV', 'ei': 'i',
'morphology': 'Pvalb_469628681_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '473862421_fit.json'
}
]
# Build a network of 300 biophysical cells to simulate
print('Build internal "V1" network')
v1 = NetworkBuilder("V1")
# for i, model_props in enumerate(bio_models):
# n_cells = 80 if model_props['ei'] == 'e' else 60 # 80% excitatory, 20% inhib
#
# # Randomly get positions uniformly distributed in a column
# positions = positions_columinar(N=n_cells, center=[0, 10.0, 0], max_radius=50.0, height=200.0)
#
# v1.add_nodes(N=n_cells,
# x=positions[:, 0], y=positions[:, 1], z=positions[:, 2],
# rotation_angle_yaxis=xiter_random(N=n_cells, min_x=0.0, max_x=2 * np.pi), # randomly rotate y axis
# rotation_angle_zaxis=xiter_random(N=n_cells, min_x=0.0, max_x=2 * np.pi), #
# model_type='biophysical',
# model_processing='aibs_perisomatic',
# **model_props)
v1.add_nodes(
N=80,
# Reserved SONATA keywords used during simulation
model_type='biophysical',
model_template='ctdb:Biophys1.hoc',
dynamics_params='472363762_fit.json',
morphology='Scnn1a_473845048_m.swc',
model_processing='aibs_perisomatic',
# The x, y, z locations of each cell in a column
x=np.random.normal(0.0, 20.0, size=80),
y=np.random.uniform(400.0, 500.0, size=80),
z=np.random.normal(0.0, 20.0, size=80),
# Euler rotations of the cells
rotation_angle_xaxis=np.random.uniform(0.0, 2 * np.pi, size=80),
rotation_angle_yaxis=np.random.uniform(0.0, 2 * np.pi, size=80),
rotation_angle_zaxis=-3.646878266,
# Optional parameters
tuning_angle=np.linspace(start=0.0, stop=360.0, num=80, endpoint=False),
pop_name='Scnn1a',
location='L4',
ei='e',
)
v1.add_nodes(
# Rorb excitatory cells
N=80, pop_name='Rorb', location='L4', ei='e',
model_type='biophysical',
model_template='ctdb:Biophys1.hoc',
dynamics_params='473863510_fit.json',
morphology='Rorb_325404214_m.swc',
model_processing='aibs_perisomatic',
x=np.random.normal(0.0, 20.0, size=80),
y=
|
np.random.uniform(400.0, 500.0, size=80)
|
numpy.random.uniform
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 13:30:37 2017
@author: <NAME> (<EMAIL>)
Description: Implements Bayesian Linear Autoregression with the NIG model
(i.e., spatial locations have iid errors with common variance)
"""
import numpy as np
from scipy import special
from scipy import linalg
from scipy import stats
import scipy
from probability_model import ProbabilityModel
from nearestPD import NPD
class BVARNIG(ProbabilityModel):
"""The Bayesian Vector Autoregression model using past observations as
regressors in a specified neighbourhood. E.g., if the 4-neighbourhood is
selected with lag length 1, then the mean of y_{t,i} is modelled as linear
combination of observations y_{t-1, j} \in nb(i). Around the boundary,
the neighbourhoods are 0-padded.
###****************************************************************###
### MODEL PRIORS ###
###****************************************************************###
Inputs always needed. They correspond to priors in the following model:
Y ~ N(X*beta, sigma2 * I),
beta ~ N(beta_0, sigma2 * V_0),
sigma2 ~ IG(a,b)
prior_a: float >0:
a parameter of the Inverse Gamma, a>0
prior_b: float >0:
b parameter of the Inverse Gamma, b>0
prior_mean_beta: 1D-numpy array of size k, k = num regressors:
corresponds to beta_0, i.e. the mean prior of coefficients.
Takes precedence over prior_mean_scale if both specified.
prior_var_beta: 2D-numpy array of size kxk, k = num regressors:
corresponds to V_0, i.e. the covariance prior of coefs
Takes precedence over prior_var_scale if both specified.
prior_mean_scale: float:
If prior_mean_beta is None, prior_mean_scale supplied, the number
of regressors k is calculated automatically and
beta_0 = prior_mean_scale * np.ones(k)
prior_var_scale: float >0:
If prior_var_beta is None, prior_var_scale supplied, the number
of regressors k is calculated automatically and
V_0 = prior_var_scale * np.identity(k)
###****************************************************************###
### REGULAR GRID + STRONG PARAM BINDING ###
###****************************************************************###
Inputs needed when assuming regular grid with strong parameter binding:
nbh_sequence, restriction_sequence, padding
nbh_sequence: array with integer entries, only needed if data on
regular grid, with strong coupling between effects:
0, 4, 8 -> specify the sequence of VAR-nbhs.
corresponds to strong parameter coupling on regular
grid with no neighbourhood (0), 4-neighbourhood (4),
and 8-neighbourhood (8). I.e. all locations are on
a grid defining their nbhs, and share params.
(See restriction_sequence for param sharing)
restriction_sequence: array with integer entries, only needed if data
on regular grid, with strong coupling between effects:
0, 4, 8 -> specify the restriction of nbh_sequence on regular
spatial grid with parameter coupling.
Regardless of 0,4,8, we always couple across all
LOCATIONS! I.e., params the same across the grid.
However, we can vary how much we couple params within
each location's nbh: Not at all, i.e. one parameter
for each nbh location relative to each location (0),
the 4 inner and the 4 outer (4), and in the case of
a 8-nbh, all 8 together (8). See Fig. 2 in the paper
for illustration of 4-nbh (red), 8 nbh (red + blue),
0 nbh (orange).
NOTE: The parameter bindings for the intercepts are
again specified via intercept_grouping (see below).
They are NOT strongly coupled unless the argument
is not specified or is supplied as None.
padding: string:
ONLY needed if we specify nbh_sequence and restriction_sequence,
implying that we are on a regular grid. Then, we need to pad the
outside of the grid using one of the below options:
'overall_mean' -> compute mean across space and fill in
'row_col_mean' -> compute row and col means and fill in
'zero' -> insert zeros (bias estimation towards 0)
'leave-out' -> don't pad at all, and estimate only using
locations with full neighbourhood
###****************************************************************###
### GENERAL NBHS + ANY PARAM BINDING ###
###****************************************************************###
Inputs needed when assuming general nbh structures with arbitrary
parameter bindings:
intercept_grouping, general_nbh_sequence,
general_nbh_restriction_sequence , general_nbh_coupling
intercept_grouping: GxS1xS2 numpy array of ones or zeros grouping the
locations into G groups so that each group shares the intercept.
Notice that summing over the G-dimension, we would get an S1xS2
array of only ones. I.e., each location has to be in one of the G
groups. Extreme cases: G=1 with a single slice of ones => all
locations have one shared intercept. G=S1*S2 with each of the G
slices containing exactly a single 1-entry and only zeros otherwise
=> each location has individual intercept.
general_nbh_sequence: list of list of lists:
Gives an nparray of nparrays of nparrays of
coordinates/identifiers, i.e. an object like
[[[2,3,4],[5,6],[7]], [[5,6],[8],[9,10]], ...].
Here, [2,3,4] would be the 'closest' nbh to the
point with spatial coordinate 0, [5,6] the second-
closest, [7] the third-closest. how far away from
the closest nbh you consider the data is implied
by the general_nbh_restriction_sequence that
will give you the indices of the nbhs to be
considered for each lag length.
In the notation of the PAPER, this gives you the nbh. system as
[[N_1(1), N_2(1), N_3(1)], [N_1(2), N_2(2), N_3(2)], ...], i.e.
list entry s belongs to location with index s and contains n neigh-
bourhoods N_1(s), ... N_n(s) s.t. the indices describe spatial
closeness, with smaller indices indicating that we are closer to s.
Note that we assume n to be the same for all locations. If there
is a case where you assume that some locations s have less nbhs
than others, simply add some empty nbhs, i.e. N_i(s) = [].
general_nbh_restriction_sequence: list of list:
Gives you a list of lists of indices, i.e.
[[0,1,2,3], [0,1,2], [0],[]], where it must hold that
later entries are strict subsets of previous ones
s.t. the largest value at position l is at most as
big as the largest value at position l-1. Also, if
k is the largest value at position l, it must hold
that all k' s.t. 0<=k'<=k must be in that list entry
NOTE: If you want to have only auto-regressive
terms at some nbh (I.e., only its own past influen-
ces the present), then simply add an empty list [].
In the notation of the PAPER, this is the function p(.) assigning
temporal meaning to the neighbourhood structure. p is given in
list form so that the l-th entry of p gives all indices that
are going to be used at lag length l. I.e., assuming p(l) = 3
(using p's definition from the paper), it will hold that the
respective entry in general_nbh_restriction_sequence is going to
be [0,1,2]. More generally, for p(l) = k, [0,1,...,k-1].
general_nbh_coupling: string:
["no coupling", "weak coupling",
"strong coupling"], tells you how neighbourhoods
are tied together. "no coupling" means that each
linear effect of s' \in N_i(s) is modelled sepa-
rately. "weak coupling" means that the linear
effect for all s' \in N_i(s) are modelled together,
and "strong coupling" means that the linear effects
are also modelled together across space, i.e.
s' \in N_i(s) and g \in N_i(k) have the same effect
(but s' in N_j(s) and g in N_i(k) do not)
NOTE: no coupling is not implemented, because you
can obtain the same effect by weak coupling and
treating each station as its own nbh.
In the PAPER, notes on this are right after SSBVAR definition.
"weak coupling" is the standard modelling framework that assumes
that for all locations in a given nbh, we have a single linear
effect. "strong coupling" means that in addition, we have the same
linear neighbourhood effect for each location.
###****************************************************************###
### HYPERPARAMETER LEARNING ###
###****************************************************************###
Inputs needed when doing hyperparameter learning:
hyperparameter_optimization [don't use auto_prior_update!]
hyperparameter_optimization (ProbabilityModel level): string or None:
-> [True, False, None, "caron", "online", "turner"]
by default, this is True, which amounts to updating
the gradients but not performing on-line/caron's
hyperpar. learning. If False or None, the gradients
are not updated. "caron" and "online" both employ
the on-line hyperparameter learning proposed by
<NAME> (2012). If you don't want
this, but only want to do Turner's routine, you
have to do so via an enclosing HyperparameterOptimization
object. For this, put hyperparameter_optimization
to True (default) or "turner".
I.e., "turner", "True" mean that gradients are updated recursively,
but not used (unless an enclosing HyperparameterOptimization
object uses them), "caron" and "online" mean that we perform
gradient descent updates as in the PAPER. "False" and None mean
that we don't update the gradients. (barely any computational
benefit in so doing)
auto_prior_update: boolean.
Basically, DON'T set to True. It updates the priors by setting them
to the posterior expectations at time t. For instance, the beta_0
prior at time t will be set to
sum_r{ beta_rt[r,:] * P(r|y_1:t) }.
###****************************************************************###
### EXOGENEOUS/ADDITIONAL PREDICTORS ###
###****************************************************************###
NOT IMPLEMENTED!!!!
Inputs needed when using additional variables:
exo_selection, nbh_sequence_exo
NOTE: Intercepts, EXO, and ENDO vars can always be grouped by the
following simple procedure: Suppose you have two groups G1, G2.
Let's assume you assume the same model in G1, G2 but with diff-
erent parameterizations. Let's say the params you want are
a(G1), a(G2), b(G1), b(G2). Then you can just estimate all four
coefs jointly by having G1 have a column of zeros for the var
corresponding to a(G2), b(G2) and vice versa.
NOTE: At some point, it may be good to replace the strings indicating
our neighbourhood structures using integers instead, since
string-computations are more expensive than integer-computations
exo_selection:
0,1,2,.. -> gives you a selection vector of length
num_exo_vars allowing you to select which exos
you want to regress on Y. The integers are
the row index in vector [exo1, exo2, ...] of
regressors available at each location.
nbh_sequence_exo: #not in the input
0,4,8 -> gives you the nbh of the lagged exos that are
regressors for your problem. Starts at time t
(rather than t-1, as for endo sequence)
###****************************************************************###
### OTHER INPUTS ###
###****************************************************************###
None of these inputs are needed, they provide additional functionality
non_spd_alerts: boolean:
Gives an alert whenever the covariance matrix was not semi-positive
definite and needed to be converted into an spd-matrix by forcing
it via 'nearestPD' or adding a disturbance.
NOTE: If you experience this a lot, try to rescale your data, i.e.
normalize it on-line or do something along the same lines.
"""
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" OBJECT INITIALIZATION FUNCTIONS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
def __init__(self,
prior_a,
prior_b,
S1,
S2,
prior_mean_beta=None,
prior_var_beta=None,
prior_mean_scale=0,
prior_var_scale=100,
nbh_sequence=None,
restriction_sequence = None,
intercept_grouping = None,
general_nbh_sequence = None,
general_nbh_restriction_sequence = None,
exo_selection = None,
padding = 'overall_mean',
#deprecated argument, should go
auto_prior_update=False,
hyperparameter_optimization = "online",
general_nbh_coupling = "strong coupling",
non_spd_alerts =False
):
"""STEP 1: Store priors"""
self.a, self.b = prior_a, prior_b
"""if beta_0 or beta's covariance matrix are specified, that takes
precedence over a supplied scaling of a vector/matrix of ones"""
if not prior_mean_beta is None:
self.prior_mean_beta = prior_mean_beta.flatten()
else:
self.prior_mean_beta= prior_mean_beta
self.prior_var_beta= prior_var_beta
"""STEP 2: Store execution parameters"""
self.auto_prior_update = auto_prior_update #Don't use
if (hyperparameter_optimization is not None or
hyperparameter_optimization is not False):
self.a_old = prior_a + 0.0000001 #Used for gradient computation
self.b_old = prior_b+ 0.0000001 #Used for gradient computation
self.gradient_old = 0.0 #Used for gradient computation
self.a_list, self.b_list = [],[]
self.hyperparameter_optimization = hyperparameter_optimization
self.non_spd_alerts = non_spd_alerts #if cov mat not spd and forced
#to be, this alerts you.
"""STEP 3: Get informations about the model we set up"""
self.has_lags = True #needed inside detector
self.generalized_bayes_rld = "kullback_leibler" #changed from inside detector init
self.alpha_rld_learning = False
self.alpha_rld = None #changed from inside detector init
self.S1, self.S2 = S1, S2
"""STEP 3.1: If we are on a regular grid with strong param binding"""
self.restriction_sequence = restriction_sequence
self.nbh_sequence = nbh_sequence
self.padding = padding
"""STEP 3.2: If we are on general neighbourhood structures"""
self.general_nbh_sequence = general_nbh_sequence
self.general_nbh_restriction_sequence = general_nbh_restriction_sequence
self.general_nbh_coupling = general_nbh_coupling
self.intercept_grouping = intercept_grouping
"""STEP 3.3: Check if we use regular grid + strong param binding or
the more general framework"""
if ((not self.restriction_sequence is None) and
(not self.nbh_sequence is None) and
(not self.padding is None)):
self.regular_grid = True
elif ((not self.general_nbh_sequence is None) and
(not self.general_nbh_restriction_sequence is None) and
(not self.general_nbh_coupling is None)):
self.regular_grid = False
elif (( self.restriction_sequence is None) and
( self.nbh_sequence is None) and
( self.general_nbh_sequence is None) and
( self.general_nbh_restriction_sequence is None)):
#In this case, we have only constant terms
self.regular_grid = False
self.has_lags = False
self.lag_length = 0 #unclear if it is arrived at automatically
self.general_nbh_coupling = None
else:
"""Neither specification is complete, so end the execution here"""
raise SystemExit("Your neighbourhood specifications " +
"are incomplete: At least one of " +
"restriction_sequence, nbh_sequence, padding is None; " +
"or at least one of " +
"general_nbh_sequence, general_nbh_restriction_sequence ,"+
" general_nbh_coupling is None")
"""STEP 3.4: If we have any exogeneous/additional variables"""
if exo_selection is None or exo_selection == []:
self.exo_bool = False
exo_selection = []
self.exo_selection = []
else:
self.exo_bool = True
self.exo_selection = exo_selection
"""STEP 4: Convert the neighbourhood into a sequence of strings
for the endogeneous variables"""
"""STEP 4.1: Get the codes for the intercept design"""
self.get_intercept_codes()
"""STEP 4.2: Get endogeneous regressor codes (self.endo_vars), lag
length (self.lag_length), and information about empty nbhs
(self.empty_nbhs, self.sum_empty_nbhs_per_lag)"""
#DEBUG: Not needed under constant fct. Simply set self.endo_var=[].
# do this inside fct.
self.get_endo_vars()
"""STEP 4.3: Get exogeneous regressor codes (self.exo_vars)"""
self.exo_vars = [self.intercept_codes + exo_selection]
"""STEP 4.4: Get all regressor codes"""
self.all_vars = list(self.exo_vars) + list(self.endo_vars)
self.all_vars = sum(self.all_vars, [])
"""STEP 5: Define quantities relating to the regressors:
the sequences of variables, the counts of variables,
the lag-structure, extraction list for updating"""
"""STEP 5.1: Get the number of each type of variable"""
self.num_exo_regressors = len(sum(self.exo_vars, []))
self.num_endo_regressors = len(sum(self.endo_vars, []))
self.num_regressors = (self.num_endo_regressors +
self.num_exo_regressors)
"""STEP 5.2: Get the lag structure such that lag_counts stores the
#exo_vars at position 0, and stores at position l the count
{#exo_vars + sum(#endo_vars: lag <= l) inside
self.lag_counts"""
#DEBUG: For constant function, this should be only the first line of
# the function
self.get_lag_counts()
"""STEP 6: Get the extraction vector and the insertion position. Note
that the result will be a list of form [1,1,1,0,0,1,1,1,1,0,0,0], which
means that the first 3 endogeneous variables will be kept, the next
two will be discarded, the next 4 will be kept, and the next 3 disc."""
"""STEP 6.1: Store in each entry l the number of endogeneous regressors
for lag l"""
#For constant fct, this should just return an empty list (if se set lag_length = 0)
endo_regressors_per_lag = self.get_endo_regressors_per_lag()
"""STEP 6.2: You can now get a list that tells you for given X_t-1
which columns need copying to X_t. You never copy exogeneous variables.
Also, the first lag for X_t will be new, so one can copy at most
lag_length -1 neighbourhoods from X_t-1 to X_t. Store this list as
self.extraction_list, and the position where you start extracting
as self.insertion_position with the function below"""
#DEBUG: This should still work and return an empty extraction list as
# well as an insertion position = p
self.get_extraction_list(endo_regressors_per_lag)
"""STEP 7: create the objects we need to trace through time"""
self.XX, self.YX, self.model_log_evidence = None, None, -np.inf
"""NOTE: The quantities below will be re-initialized in the
initialization function, but have to be instantiated here due to how
the enclosing Detector object calls model_and_run_length_distr"""
self.retained_run_lengths = np.array([0,0])
self.joint_log_probabilities = 1
#DEBUG: Should not really be here (but insted in initialization)
self.log_alpha_derivatives_joint_probabilities = None #np.ones(3)
self.log_alpha_derivatives_joint_probabilities_sign = None #np.ones(3)
"""STEP 8: Rectify prior_beta_mean and prior_beta_var if needed.
Give a warning about this, too!"""
"""STEP 8.1: prior mean beta is not supplied or does not correspond
to the right dimensions: Check if a scale is
supplied. If not, automatically set the scale to 0.0, ensuring
that beta_0 = 0."""
if (self.prior_mean_beta is None or
self.num_regressors != np.size(self.prior_mean_beta)):
if prior_mean_scale is None:
prior_mean_scale = 0.0
self.prior_mean_beta = (prior_mean_scale*
np.ones(self.num_regressors))
"""STEP 8.2: prior var beta is not supplied or does not correspond
to the right dimensions: Check if a scale is
supplied. If not, automatically set the scale to 100.0, ensuring
that V_0 = 100*I."""
if (self.prior_var_beta is None or
self.num_regressors != prior_var_beta.shape[0] or
self.num_regressors != prior_var_beta.shape[1]):
if prior_var_scale is None:
prior_var_scale = 100.0
self.prior_var_beta = (prior_var_scale*
np.identity(self.num_regressors))
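# A minimal, purely illustrative instantiation sketch (hypothetical values; a
# real analysis would choose priors and neighbourhoods to match the data):
#   model = BVARNIG(prior_a=1.0, prior_b=1.0, S1=2, S2=2,
#                   general_nbh_sequence=[[[1], [2]], [[0], [3]],
#                                         [[3], [0]], [[2], [1]]],
#                   general_nbh_restriction_sequence=[[0, 1], [0]],
#                   general_nbh_coupling="weak coupling")
# builds a model on a 2x2 lattice with lag_length = 2 and the default scaled
# priors beta_0 = 0 and V_0 = 100 * I.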
def get_intercept_codes(self):
"""Only called in __init__: Gets the intercept regressor codes"""
if (self.intercept_grouping is None or
self.intercept_grouping == np.array([])):
self.intercept_codes = ["intercept"]
else:
self.num_intercept_groups = self.intercept_grouping.shape[0]
self.intercept_codes = []
for g in range(0, self.num_intercept_groups):
self.intercept_codes.append(("intercept_group_" + str(g)))
def get_endo_vars(self):
"""Only called in __init__: Gets self.endo_vars, self.lag_length,
self.empty_nbhs, self.sum_empty_nbhs_per_lag in different ways,
depending on how your nbh structure is set up."""
endo_vars = []
""""STEP A: If you are on regular grid with strong parameter binding"""
if self.regular_grid:
self.lag_length = np.size(self.nbh_sequence)
for lag in range(0,int(self.lag_length)):
restriction = self.restriction_sequence[lag]
nbh = self.nbh_sequence[lag]
if restriction == 0:
if nbh == 0:
endo_vars.append(["center"])
elif nbh == 4:
endo_vars.append([ "center","top", "left", "right",
"bottom"])
elif nbh == 8:
endo_vars.append(["center",
"top", "left", "right", "bottom",
"topleft", "topright","bottomleft", "bottomright"])
elif restriction == 4:
if nbh == 0:
endo_vars.append(["center"])
print("Warning: Restriction sequence")
print("contained 4, nbh sequence a 1-nbh")
print("at the same position.\n")
elif nbh == 4:
endo_vars.append(["center", "4_inner_nbh_res"])
elif nbh == 8:
endo_vars.append(["center", "4_outer_nbh_res",
"4_inner_nbh_res"])
elif restriction == 8:
if nbh == 0:
endo_vars.append(["center"])
print("Warning: Restriction sequence")
print("contained 8, nbh sequence a 1-nbh")
print("at the same position.\n")
elif nbh == 4:
endo_vars.append(["center", "4_inner_nbh_res"])
print("Warning: Restriction sequence")
print("contained 8, nbh sequence a 4-nbh")
print("at the same position.\n")
elif nbh == 8:
endo_vars.append(["center", "8_nbh_res"])
print("Warning: Restriction = 8, which is not fully implemented")
elif self.general_nbh_coupling == "weak coupling":
"""STEP B: If we use the general nbh sequence formulation with
weak coupling (i.e. nbh-specific, but not across space).
Recall that the structure is as follows:
general_nbh_sequence = [[[4,5,6],[7,8],[9]], [[2,3,4],[5],[7]],...]
general_nbh_restriction_sequence = [[0,1,2],[0,1,2],[0,1],[2]].
Here, lag_length = 4, general_nbh_restriction_sequence[lag] = g(l),
where g(l) gives you the index of the nbh generating the regressors
at lag length l for s, i.e. N_p(l)(s)
We want to get strings of form
general_nbh_<lag>_<nbh_index>_<loc>,
where <lag> gives you the index in general_nbh_restriction_seq that
you need, say <lag> = 0, i.e. we care about [0,1,2]. Given this
index list, <nbh_index> then tells us which of the indices (and
thus neighbourhoods) we care about, i.e. nbh_index = 0 would mean
we care about [0,1,2][0] = [0]. Lastly, the <loc> tells us which
index on the lattice we care about, allowing us to retrieve
general_nbh_sequence[<loc>][general_nbh_restriction_seq[<lag>][<nbh_index>]]
as the indices of the nbh with <nbh_index> corresponding to
<loc>'s neighbourhood at lag <lag>+1
"""
self.lag_length = int(len(self.general_nbh_restriction_sequence))
self.empty_nbhs = [] #helps us to sort out the extraction list later
self.sum_empty_nbhs_per_lag = np.zeros(self.lag_length)
"""loop I: Go over all lag lengths, since the nbhs and their
restrictions will differ between lag lengths"""
for lag in range(0, int(self.lag_length)):
new_endo_vars_entry = []
"""Loop II: over all locations to fill self.endo_vars with the
correct endogeneous variables for each location and lag"""
for location in range(0, self.S1*self.S2):
#DEBUG: This marks the center for each location separately
# make sure that this does not cause problems for how
# we find the lag (e.g., by counting # of "center"s)
new_endo_vars_entry.append("general_nbh_" +
str(lag) + "_" + "center" + "_" +
str(location))
self.empty_nbhs.append(False)
relevant_nbh_indices = self.general_nbh_restriction_sequence[lag]
"""Loop III: Over all relevant nbh indices for this
location at the current lag. This makes sure that our
endo var codes are specific to lag, location, and the
neighbour whose values are used."""
for nbh_index in relevant_nbh_indices:
"""Only add the nbh if it is non-empty. If it is
empty, nbh_index will have boolean value False."""
if nbh_index:
"""Again, we only want to create the regressor code
if the list is non-empty. If it is empty, we
instead note so inside self.empty_nbhs and
self.sum_empty_nbhs_per_lag in the 'else' cond."""
if self.general_nbh_sequence[location][nbh_index]:
new_endo_vars_entry.append("general_nbh_" +
str(lag) + "_" + str(nbh_index) + "_" +
str(location))
self.empty_nbhs.append(False)
else:
"""Mark which neighbourhoods were left out because
they were empty. Needed for extraction_list and
lag_counts"""
self.empty_nbhs.append(True)
self.sum_empty_nbhs_per_lag[lag] += 1
"""Inside Loop II: For this location and lag, add the
required endogeneous variables into the collection of all
of them"""
endo_vars.append(new_endo_vars_entry)
new_endo_vars_entry = []
elif self.general_nbh_coupling == "strong coupling":
"""STEP C: In this case, we have the same input as for weak
coupling, but a different interpretation. In particular, we couple
the effects over different spatial locations. Accordingly, we save
general_nbh_<lag>_<nbh_index> only.
Then, in the extractors, we loop over <loc> to retrieve the
regressors in a single column as
regressor(<lag>, <nbh_index>)[<loc>] = sum over all measurements
at time t - <lag> for nbh given by
gen_nbh_seq[<loc>][gen_nbh_res_seq[<lag>][<nbh]].
"""
self.lag_length = int(len(self.general_nbh_restriction_sequence))
"""Loop I: Over the lags"""
for lag in range(0, int(self.lag_length)):
new_endo_vars_entry = ["general_nbh_" + str(lag) + "_center"]
relevant_nbh_indices = self.general_nbh_restriction_sequence[lag]
"""Loop II: Over the relevant nbhs. Notice that unlike for the
weak coupling, we only have 2 (rather than 3) loops, as the
locations do not require a separate loop for strong coupling"""
for nbh_index in relevant_nbh_indices:
new_endo_vars_entry.append("general_nbh_" +
str(lag) + "_" + str(nbh_index))
endo_vars.append(new_endo_vars_entry)
elif (self.general_nbh_coupling is None) and (not self.regular_grid):
"""In this case, we only fit constants!|"""
endo_vars = []
self.lag_length = 0
"""Last step: Declare endo_vars as the new attribute of the object"""
self.endo_vars = endo_vars
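# Example of a resulting regressor code under weak coupling (for illustration):
# "general_nbh_0_1_5" refers to lag index 0 (i.e. lag length 1), neighbourhood
# index 1 as listed in general_nbh_restriction_sequence[0], for the location
# with flattened index 5 on the S1 x S2 lattice.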
def get_lag_counts(self):
"""Only called in __init__: Gets self.lag_counts"""
self.lag_counts = [self.num_exo_regressors]
last_count = self.num_exo_regressors
if self.regular_grid:
"""STEP 1.A: If 0/4/8 nbhs used: Can be done via endo vars"""
for entry in self.endo_vars:
self.lag_counts.append(last_count + len(entry) + 1)
last_count = last_count + len(entry) + 1 #update
elif self.general_nbh_coupling == "strong coupling":
"""STEP 1.B: Similar to weak coupling, except you don't need to
multiply by the numbers of locations"""
for lag in range(0, self.lag_length):
self.lag_counts.append(last_count + (
(len(self.general_nbh_restriction_sequence[lag]) + 1)))
last_count = last_count + (
(len(self.general_nbh_restriction_sequence[lag]) + 1))
elif self.general_nbh_coupling == "weak coupling":
"""STEP 1.C: If general nbhs, we need more care"""
"""each gen_res corresponds to a lag and gives you a set at
position l, e.g. [0,1,2] at position 0, telling you that at the
first lag, the neighbourhoods used are 0,1,2. Thus, at the first
lag, each location has 3 regressors corresponding to the first
three nbhs for that location in general_nbh_sequence PLUS the
autoregressive term, which is always incorporated but not repre-
sented in any regressor code.
I.e., we need [len([0,1,2]) + 1]*S1*S2 to get the #endogeneous
variables at lag 1. Generally, we thus need
[len(gen_nbh_res_seq[l]) + 1]*S1*S2"""
for lag in range(0, self.lag_length):
self.lag_counts.append(last_count + (
(len(self.general_nbh_restriction_sequence[lag]) + 1)
*self.S1*self.S2) - self.sum_empty_nbhs_per_lag[lag])
last_count = last_count + ( - self.sum_empty_nbhs_per_lag[lag] +
(len(self.general_nbh_restriction_sequence[lag]) + 1)
*self.S1*self.S2)
elif (not self.regular_grid) and self.general_nbh_coupling is None:
"""STEP 1.D: We only fit a constant, so self.lag_counts remains
unchanged. self.lag_counts will be None"""
def get_endo_regressors_per_lag(self):
"""Returns as output the endogeneous regressors per lag"""
if self.regular_grid:
"""STEP 1A: If we have the 4-nbh structure"""
endo_regressors_per_lag = []
for l in range(0, self.lag_length):
res = self.restriction_sequence[l]
nbh = self.nbh_sequence[l]
if res == 0:
endo_regressors_per_lag.append(int(nbh) + 1)
elif res == 4:
endo_regressors_per_lag.append(int(nbh*0.25) + 1)
elif self.general_nbh_coupling is not None:
"""STEP 1B: If we have a general nbh structure, we get
endo_regressors_per_lag differently. In particular, just look at
the self.endo_vars object."""
endo_regressors_per_lag = []
for l in range(0, self.lag_length):
endo_regressors_per_lag.append(int(len(self.endo_vars[l])))
else:
"""STEP 1C: If we only fit a constant"""
endo_regressors_per_lag = []
"""STEP 2: Return the result"""
return endo_regressors_per_lag
def get_extraction_list(self, endo_regressors_per_lag):
"""Gets self.extraction_list and self.insertion position"""
""""STEP 1: Make sure we don't want to copy exogeneous regressors"""
self.extraction_list = [False]*(self.num_exo_regressors)
if self.regular_grid:
"""STEP 1A: IF we have 0/4/8 nbhs """
for i in range(0,self.lag_length-1):
self.extraction_list = (self.extraction_list
+ [True]*endo_regressors_per_lag[i+1]
+ [False]*int(endo_regressors_per_lag[i] -
endo_regressors_per_lag[i+1]))
"""STEP 2A: The last lag of X_t-1 will 'slide out' of sight, so it
definitely is not needed for X_t anymore."""
self.extraction_list += ([False]*
endo_regressors_per_lag[self.lag_length-1])
elif self.general_nbh_coupling == "weak coupling":
"""STEP 1B: IF we have general nbhs"""
per_location = []
for lag in range(0, self.lag_length-1):
num_retained = (1 + len(np.intersect1d(
self.general_nbh_restriction_sequence[lag],
self.general_nbh_restriction_sequence[lag+1])))
num_discarded = ( -num_retained + 1 +
len(self.general_nbh_restriction_sequence[lag]))
per_location += ([True]* num_retained +
[False] * num_discarded)
"""STEP 2B: The last lag of X_t-1 will 'slide out' of sight, so it
definitely is not needed for X_t anymore."""
total_num_last_lag = 1+ len(
self.general_nbh_restriction_sequence[self.lag_length-1])
per_location += ([False]* total_num_last_lag)
"""STEP 3B: Use that we have the same structure all across the
lattice, and simply multiply each entry of 'per_location' by the
number of lattice elements"""
self.extraction_list += sum(
[self.S1*self.S2*[e] for e in per_location],[])
self.extraction_list[self.num_exo_regressors:] = np.array(
self.extraction_list)[np.where(np.array(
self.empty_nbhs) == False)].tolist()
elif self.general_nbh_coupling == "strong coupling":
"""STEP 1C: IF we have general nbhs"""
per_location = []
for lag in range(0, self.lag_length-1):
num_retained = (1 + len(np.intersect1d(
self.general_nbh_restriction_sequence[lag],
self.general_nbh_restriction_sequence[lag+1])))
num_discarded = ( -num_retained + 1 +
len(self.general_nbh_restriction_sequence[lag]))
per_location += ([True]* num_retained +
[False] * num_discarded)
"""STEP 2C: The last lag of X_t-1 will 'slide out' of sight, so it
definitely is not needed for X_t anymore."""
total_num_last_lag = 1+ len(
self.general_nbh_restriction_sequence[self.lag_length-1])
per_location += ([False]* total_num_last_lag)
"""STEP 3C: Use that we have the same structure all across the
lattice, and simply multiply each entry of 'per_location' by the
number of lattice elements"""
self.extraction_list += per_location
elif self.general_nbh_coupling is None and not self.regular_grid:
"""We have constant function and don't need to change anything"""
"""STEP 4: In order to copy entries of X_t-1 to X_t, you need to know
the position of X_t at which you should insert. (This does
only affect the endogeneous part of the regressors)"""
self.insertion_position = - sum(self.extraction_list)
def reinstantiate(self, a = None, b = None):
"""Return a new BVARNIG-model that contains all the same attributes as
this BVARNIG model. In some sense, it is an 'emptied' version of the
same model. Used inside HyperparameterOptimization, if BVARNIGs
Detector is run for hyperparameter optimization"""
"""STEP 1: Get all the characteristics of this model"""
prior_a, prior_b, S1, S2 = self.a, self.b, self.S1, self.S2
prior_mean_beta,prior_var_beta=self.prior_mean_beta,self.prior_var_beta
nbh_sequence = self.nbh_sequence
restriction_sequence = self.restriction_sequence
intercept_grouping = self.intercept_grouping
general_nbh_sequence = self.general_nbh_sequence
general_nbh_restriction_sequence = self.general_nbh_restriction_sequence
exo_selection = self.exo_selection
padding = self.padding
auto_prior_update = self.auto_prior_update
hyperparameter_optimization = self.hyperparameter_optimization
general_nbh_coupling = self.general_nbh_coupling
non_spd_alerts = self.non_spd_alerts
"""STEP 2: Check whether you have a new prior already"""
if a is None:
a = prior_a
if b is None:
b = prior_b
"""STEP 2: Use the characteristics to clone the model"""
clone_model = BVARNIG(prior_a = a, prior_b = b, S1=S1, S2=S2,
prior_mean_beta=prior_mean_beta,
prior_var_beta =prior_var_beta,
prior_mean_scale=None, prior_var_scale=None,
nbh_sequence=nbh_sequence,
restriction_sequence=restriction_sequence,
intercept_grouping=intercept_grouping,
general_nbh_sequence=general_nbh_sequence,
general_nbh_restriction_sequence=general_nbh_restriction_sequence,
exo_selection=exo_selection,
padding=padding, auto_prior_update=auto_prior_update,
hyperparameter_optimization=hyperparameter_optimization,
general_nbh_coupling=general_nbh_coupling,
non_spd_alerts=non_spd_alerts)
"""STEP 3: Return the cloned model"""
return clone_model
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" FIRST OBSERVATION INITIALIZATION FUNCTIONS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
#NOTE: We need to pass X_endo with one more entry into this function,
# namely for y_2!
def initialization(self, X_endo, X_exo, Y_2, X_exo_2, cp_model, model_prior,
padding_columns_computeXX = None, padding_column_get_x_new = None):
"""Initialize the model (i.e. t=1) with some inputs from the
containing Detector object. The padding_column arguments are only
needed for the demo Csurf object. This is different from object
instantiation/creation, as it processes the very first (collection of)
observation(s), thus creating the objects and quantities we will trace
through time.
NOTE I: The exo_selection list is applied inside detector, so X_exo
will already contain everything relevant
NOTE II: The tag #QR ADAPTION means that the lines following the tag
could/can be adapted to QR-updating (rather than Woodbury)
X_endo: S1xS2x(L+1) numpy array, float:
is the S1xS2x(L+1) array of the last L observations before t
as well as the observation at t at position L.
Y_2: S1xS2 np array, float:
will be endogeneous regressors at time t+1, which means Y_t.
X_exo: S1xS2xnum_exo np array, float:
will contain exogeneous variables at time t (NOT IMPLEMENTED)
X_exo_2: S1xS2xnum_exo np array, float:
will contain exogeneous variables at time t+1 (NOT IMPLEMENTED)
cp_model: CpModel object:
gives the hazard function inside an object
model_prior: float:
Passes the prior of the Detector object into the model object
padding_columns_computeXX, padding_column_get_x_new:
deprecated, leave None.
"""
print("Initializing BVAR object")
"""STEP 1: Take the data-stream that was partitioned appropriately
inside the Detector object and reshape/rename it for further processing:
Y1 = Y_t, Y2 = Y_{t+1}, X1_endo = Y_1:t-1, with t = L-1."""
Y1 = X_endo[-1,:].flatten()
Y2 = Y_2.flatten()
if self.has_lags:
X1_endo = X_endo[:self.lag_length,:].reshape(self.lag_length,
self.S1, self.S2)
else:
X1_endo = None
"""In case there are no exogeneous variables in this model, take
the relevant precautions."""
if self.exo_bool:
#RESHAPE will not corr. to real dims of exo vars
X1_exo = (X_exo[-1,:,:].reshape(
self.num_exo_regressors, self.S1, self.S2))
else:
X1_exo = None
"""STEP 2: Format the quantities we wish to trace through time (i.e.
typically sufficient statistics), and correctly compute them using
neighbourhood structure"""
"""STEP 2.1: Quantities for time point t, i.e. dimension does not
depend on how many run-lengths we retain.
Quantities will hold:
XX
Y_1:t-1'Y_1:t-1, i.e. the cross-product of regressors at time t.
XY
Y_1:t-1'Y_t, i.e. the cross-product of regressors and obs. at t
X_t
Y_1:t-1, i.e. regressors at time t
X_tp1
Y_2:t, i.e. regressors at time t+1 ('t plus (p) 1')
YY
Y_t'Y_t, i.e. observation cross-product
"""
self.XX = np.zeros(shape=(self.num_regressors,self.num_regressors))
self.XY = np.zeros(self.num_regressors)
self.X_t = np.zeros(shape=(self.S1*self.S2, self.num_regressors))
self.X_tp1 = np.zeros(shape=(self.S1*self.S2, self.num_regressors))
self.YY = np.inner(Y1, Y1)
"""STEP 2.2: Cross-product quantities for time point t and run-length
r, i.e. dimension does depend on how many run-lengths we retain. Unlike
quantities only stored for the current time, the quantities below
incorporate the prior beliefs.
Quantities will hold:
XX_rt
At time t, r-th entry holds the cross-product of all regressors
corresponding to run-length r_t, i.e. you sum over the last r_t
cross-products XX. Additionally, XX_rt also holds the prior
belief inside, so
XX_rt[r,:,:] = prior_var_beta^-1 + sum_{i = t-r}^t XX(i)
XY_rt
At time t, r-th entry holds the cross-product of all regressors
and observations corresponding to run-length r_t, i.e. you sum
over the last r_t cross-products XY. Additionally, XY_rt also holds
the prior belief inside, so
XY_rt[r,:] = prior_var_beta^-1 * prior_beta + sum_{i = t-r}^t XY(i)
YY_rt
As the other two, but with YY, and no prior belief occurs, so
YY_rt[r] = sum_{i = t-r}^t YY(i)
Q_rt, R_rt
Unuseable in current version, would hold the QR-decomposition of
inverse of XX_rt
"""
self.XX_rt = np.zeros(shape=(2,self.num_regressors, self.num_regressors)) #2 for r=-1 and r=0
self.XY_rt = np.zeros(shape=(2,self.num_regressors)) #2 for r=-1 and r=0
self.YY_rt = np.zeros(2)
#QR ADAPTION
self.Q_rt = np.zeros(shape=(2,self.num_regressors, self.num_regressors))
self.R_rt = np.zeros(shape=(2,self.num_regressors, self.num_regressors))
"""STEP 2.3: Inverse-related quantities for time point t and run-length
r, i.e. dimension again depends on how many run-lengths on retains.
These are direct functionals of the cross-produts stored above, but
computed/updated in an efficient rather than brute-force way
Quantities will hold:
M_inv_1_rt
Inverse of XX_rt, updated via Woodbury formula at each time point,
but at a later time point than M_inv_2_rt. This means within a
certain time window inside an iteration, we have access to both,
XX_rt^-1 at t and XX_rt^-1 at time t-1, which is needed for
efficient updates.
M_inv_2_rt
Inverse of XX_rt, updated via Woodbury formula at each time point.
See above for the relative timing.
log_det_1_rt
log determinants of all entries in M_inv_1_rt, computed efficiently
log_det_2_rt
log dets of all entries in M_inv_2_rt, computed efficiently
"""
self.M_inv_1_rt = np.zeros(shape=(2,self.num_regressors,
self.num_regressors))
self.M_inv_2_rt = np.zeros(shape=(2,self.num_regressors,
self.num_regressors))
self.log_det_1_rt = np.zeros(2)
self.log_det_2_rt = np.zeros(2)
"""STEP 2.4: beta-coef related quantities for time point t and run-
length r, i.e. dimension depends on how many run-lengths one retains
Quantities will hold:
beta_rt
beta_rt[r,:] stores the coefficients beta corresponding to the
MAP-estimate at time t if one assumes run-length r
beta_XX_beta_rt
what it says: beta_rt[r,:] * XX_rt[r,:,:] * beta_rt[r,:] at pos r
each time point t.
"""
self.beta_XX_beta_rt = np.zeros(2)
self.beta_rt = np.zeros(shape=(2,self.num_regressors))
"""STEP 2.5: Retained run lengths, storing which run-lengths you retain
at time t. Careful with this, as retained_run_lengths[i] = j means that
the i-th longest run-length you retain is j"""
self.retained_run_lengths = np.array([0,0])
"""STEP 3: Compute prior- and data-dependent quantities:
Computation of X_t, X_tp1, X'X, X'Y, and Y'Y from scratch."""
"""STEP 3.1: Gives X_t, X'X, X'Y, Y'Y"""
#DEBUG: Unclear if this does what I want for constant case!
self.compute_X_XX_XY_YY( Y1, X1_endo, X1_exo,
padding_columns_computeXX,
compute_XY = True)
"""STEP 3.2: Gives X_{t+1}"""
#DEBUG: Unclear if this does what I want for constant case!
self.X_tp1 = self.get_x_new(Y2, X_exo_2 ,1,padding_column_get_x_new)
"""STEP 4: Using the results of STEP 3, compute some computationally
burdensome results, like XX_rt's inverses and prior inv + det"""
"""STEP 4.1: Computation of the prior inverse, which will be needed
at each iteration to inform the changepoint probabilities"""
self.D_inv = np.linalg.inv(self.prior_var_beta) #not efficient if D diagonal
_, self.D_inv_log_det = np.linalg.slogdet(self.D_inv)
#QR ADAPTION
self.D_inv_Q, self.D_inv_R = np.linalg.qr(self.D_inv)
self.D_inv_log_det = np.sum(np.log(np.abs(np.diagonal(self.D_inv_R))))
"""STEP 4.2: Use the prior inverse from STEP 4.1 to get the first
inverse computation of XX_rt underway"""
M_inv_1 = np.linalg.inv(self.D_inv + self.XX)
self.M_inv_1_rt[0,:,:] = self.M_inv_1_rt[1,:,:] = M_inv_1
#QR ADAPTION
Q0, R0 = self.QR_loop(self.D_inv_Q, self.D_inv_R, self.X_t)
self.Q_rt[0,:,:] = self.Q_rt[1,:,:] = Q0
self.R_rt[0,:,:] = self.R_rt[1,:,:] = R0
"""STEP 5: Compute the prior contributions/quantities and use them to
get XX_rt, YY_rt, XY_rt with prior influences for r_t = 0"""
"""STEP 5.1: Get D^-1*beta_prior and beta_prior * D^-1 * beta_prior
which are needed later in the estimation as the prior contributions"""
self.D_inv_b0 = np.matmul(self.D_inv, self.prior_mean_beta)
self.b0_D_inv_b0 = np.inner(self.prior_mean_beta, self.D_inv_b0)
"""STEP 5.2: Get the first two values of X'X_rt and X'Y_rt using
the result of STEP 5.1.
NOTE: Since we will only need X'Y for computing beta(r,t),
we need to work with (D^-1 * beta_0 + X'Y), which is why
we add D^-1 * beta_0 to X'Y whenever we are at r=0."""
self.XX_rt[0,:,:] = self.XX_rt[1,:,:] = self.XX + self.D_inv
self.XY_rt[0,:] = self.XY_rt[1,:] = (self.XY + self.D_inv_b0)
self.YY_rt[0] = self.YY_rt[1] = self.YY
"""STEP 6: Get the log-determinants by brute force or QR
NOTE: If using QR, use trace for determinants of Q(r,t)R(r,t)
for all run-lengths. These are needed in posterior of Y
They can be obtained as trace of R[r,:,:] because Q is an
orthogonal matrix, so det(Q) = 1 and as
det(QR) = det(Q)det(R), it follows det(QR) = det(R)"""
sign, value = np.linalg.slogdet(self.M_inv_1_rt[0,:,:])
self.log_det_1_rt[0] = self.log_det_1_rt[1] = (value) #s.p.d. matrices have pos dets
#QR ADAPTION
#diag = np.abs(np.diagonal(self.R_rt, axis1=1, axis2=2))
#self.log_det_1_rt = np.sum(np.log(diag), axis=1)
"""STEP 7: Compute the MAP of beta = MX'Y from scratch, using triangular
solvers for speedy computation! Also compute beta^T X'X(r,t) beta.
If QR is used, you also calculate the inverses here."""
beta = np.matmul(self.M_inv_1_rt[0,:,:],self.XY_rt[0,:])
self.beta_rt[0,:] = self.beta_rt[1,:] = beta
#QR ADAPTION
#beta = linalg.solve_triangular(a = self.R_rt[0,:,:],
# b = np.matmul(np.transpose(self.Q_rt[0,:,:]),self.XY_rt[0,:]),
# check_finite=False)
#self.M_inv_1_rt[0,:,:] = self.M_inv_1_rt[1,:,:] = (
# linalg.solve_triangular(a=R0, b = np.transpose(Q0),
# check_finite=False))
self.beta_XX_beta_rt[0] = self.beta_XX_beta_rt[1] = (np.inner(np.matmul(
self.beta_rt[0,:], self.XX_rt[0,:]), self.beta_rt[0,:]))
"""STEP 8: Lastly, update the inverses for one-step-ahead of time, i.e.
get M_inv_2_rt as well as its log determinant."""
"""STEP 8.1: If we do Woodbury, this is a brute force step involving
inversion of the small matrix that re-appears later on
inside 'mvt_log_density' as C_t_inv.
If we do QR-updates, perform QR update w.r.t. X_tp1 and
get M_inv + log_det_2. Do NOT update X'X, X'Y, X_t, X_tp1, Y'Y since
they will be already updated"""
small_matrix_inv = (
np.linalg.inv(
np.identity(self.S1*self.S2) +
np.matmul((self.X_tp1), np.matmul(
self.M_inv_1_rt[0,:,:], np.transpose(self.X_tp1)))) )
"""Brute force determinant calc for small matrix + recursive update for
determinant of M(r,t). We take -value because log(det(M^-1)) =
-log(det(M))"""
sign2, value2 = np.linalg.slogdet(small_matrix_inv)
self.log_det_2_rt[0] = self.log_det_2_rt[1] = (
value2 + self.log_det_1_rt[0])
"""Woodbury Update-Inversion formula for M_inv_2, see handwritten notes
for derivation"""
M_inv_1_x_X_tp1 = np.matmul(self.M_inv_1_rt[0,:,:],
np.transpose(self.X_tp1))
self.M_inv_2_rt[0,:,:] = self.M_inv_2_rt[1,:,:] = (
self.M_inv_1_rt[0,:,:] - np.matmul((M_inv_1_x_X_tp1),
np.matmul( small_matrix_inv,
np.transpose(M_inv_1_x_X_tp1))))
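# The two computations above use standard identities, stated here for reference:
# Woodbury, with M = M_inv_1 and X = X_tp1,
#   (M^-1 + X'X)^-1 = M - M X' (I + X M X')^-1 X M,
# and the matrix determinant lemma,
#   log det((M^-1 + X'X)^-1) = log det(M) - log det(I + X M X'),
# which is why log_det_2_rt is obtained as slogdet(small_matrix_inv) + log_det_1_rt.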
#QR ADAPTION
#Q1, R1 = self.QR_loop(self.Q_rt[0,:,:], self.R_rt[0,:,:], self.X_tp1)
#self.Q_rt[0,:,:] = self.Q_rt[1,:,:] = Q1
#self.R_rt[0,:,:] = self.R_rt[1,:,:] = R1
#self.M_inv_2_rt[0,:,:] = self.M_inv_2_rt[1,:,:] = linalg.solve_triangular(
# a=R1, b = np.transpose(Q1), check_finite=False)
#diag = np.abs(np.diagonal(self.R_rt, axis1=1, axis2=2))
#self.log_det_2_rt = np.sum(np.log(diag), axis=1)
"""STEP 9: Compute the joint log probabilities under your prior by
computing the predictive and multiplying it with the model prior as
well as the probability that we have a CP at time 1 vs at a time
before the first observation was made. Also compute their gradients
for efficient updating."""
"""STEP 9.1: Get the posterior parameter estimates from your model,
use them to get the value of your predictive distribution."""
a_ = self.a + 0.5
b_ = self.b + 0.5*(self.b0_D_inv_b0 + self.YY - self.beta_XX_beta_rt[0])
C_0_inv = (a_/b_)*(np.identity(self.S1*self.S2) -
np.matmul(self.X_t, np.matmul(self.M_inv_1_rt[0,:,:],
np.transpose(self.X_t))))
if b_<0:
log_det = np.nan
else:
log_det = ((self.S1*self.S2) * (np.log(b_) - np.log(a_)) +
self.D_inv_log_det - self.log_det_1_rt[0])
"""This step ensures that we center the MVT at zero, which makes
the computations inside mvt_log_density easier"""
resid = Y1 - np.matmul(self.X_t, self.beta_rt[0,:])
"""For the first observation, the predictive probability and the
model evidence are equivalent, as the model evidence is computed under
prior beliefs (captured by a_, b_, C_0_inv) only."""
self.model_log_evidence = ( np.log(model_prior) +
BVARNIG.mvt_log_density(resid, C_0_inv, log_det, 2*a_,
self.non_spd_alerts))
"""STEP 9.2: Multiply the model evidence by the hazard rate/cp prior
as well as the model prior to get the joint log probs for run-length
equalling 0 or being >0 (i.e., first CP occurred before first obs)"""
"""Numerical stability: Ensure that we do not get np.log(0)=np.inf
by perturbation"""
if cp_model.pmf_0(1) == 0:
epsilon = 0.000000000001
else:
epsilon = 0
"""get log-probs for r_1=0 or r_1>0. Typically, we assume that the
first observation corresponds to a CP (i.e. P(r_1 = 0) = 1),
but this need not be the case in general."""
r_equal_0 = (self.model_log_evidence +
np.log(cp_model.pmf_0(0) + epsilon))
r_larger_0 = (self.model_log_evidence +
np.log(cp_model.pmf_0(1)+ epsilon))
self.joint_log_probabilities = np.array([r_equal_0, r_larger_0])
"""STEP 8.3: Get the derivative of the log probs, too, just
initialize to 1 (since log(1) = 0), initialize with 2 columns
(for 2 hyperparams: a,b). We may wish to extend this to more params"""
self.model_specific_joint_log_probabilities_derivative = np.ones((2,2))
self.model_specific_joint_log_probabilities_derivative_sign = np.ones(
(2,2))
"""STEP 8.4: Similar to 8.3, but for alpha-optimization. Hence we only
do this if we have to"""
if self.alpha_rld_learning:
self.log_alpha_derivatives_joint_probabilities = None #np.ones(3)
self.log_alpha_derivatives_joint_probabilities_sign = None #np.ones(3)
def compute_X_XX_XY_YY(self, Y0, X0_endo, X0_exo, padding_columns = None,
compute_XY = True):
"""Compute X'X, X'Y, Y'Y, X_t from scratch. Called at initialization.
Uses the nbh-strings to concatenate the raw data of X0_endo, Y0 (and
potentially at a later stage X0_exo) into the regressors that we want
for our model.
NOTE: compute_XY = False only for BVARNIGDPD models, where there is
no need to know XY
"""
"""Computation: Loop over both exogeneous and endogeneous variables,
retrieve their cross-products element-wise. If a product has already been
computed before, just copy the relevant entry in X'X and paste it."""
#DEBUG: Reshape X0_endo into (lag_length,S1, S2)
if self.has_lags:
X0_endo = X0_endo.reshape(self.lag_length, self.S1, self.S2)
else:
X0_endo = None
lag_count1, lag_count2 = 0,0
"""OUTER LOOP: Over all regressors"""
for i in range(0, self.num_regressors):
"""Since exo vars are stored first in all_vars, this condition
allows us to see if we need to access exo or endo vars"""
if (i <= (self.num_exo_regressors - 1)):
"""EXOGENEOUS"""
#DEBUG: Do I get the intercept from here? I should, since
# self.all_vars will still be containing the intercept_codes
data_vector1 = self.get_exo_regressors(self.all_vars[i], i,
X0_exo)
elif self.has_lags:
"""If we need endo vars, make sure that we advance the lag
length appropriately afterwards"""
if (i >= self.lag_counts[lag_count1]):
lag_count1 = lag_count1 + 1
"""ENDOGENEOUS"""
"""I.e., if we do not pass padding columns, we cannot access
the None-type object and thus skip the argument"""
if padding_columns is None:
data_vector1 = self.get_endo_regressors(self.all_vars[i],
lag_count1, X0_endo)
else:
data_vector1 = self.get_endo_regressors(self.all_vars[i],
lag_count1, X0_endo, padding_columns[i,:])
lag_count2 = 0 #reset lag count
"""INNER LOOP: Over all regressors"""
for j in range(0, self.num_regressors):
"""This condition ensures that we do not re-compute cross-
products after having done so before"""
if (i <= j):
if (j <= (self.num_exo_regressors - 1)):
"""EXOGENEOUS"""
data_vector2 = self.get_exo_regressors(
self.all_vars[j], j, X0_exo)
elif self.has_lags:
"""If we need endo vars, make sure that we advance the lag
length appropriately afterwards"""
if (j >= self.lag_counts[lag_count2]):
lag_count2 = lag_count2 + 1
"""ENDOGENEOUS"""
if padding_columns is None:
data_vector2 = self.get_endo_regressors(
self.all_vars[j], lag_count2, X0_endo)
else:
data_vector2 = self.get_endo_regressors(
self.all_vars[j], lag_count2, X0_endo,
padding_columns[i,:])
"""if i == 0, we loop over all j. Use this to compute X'Y
as well as X"""
if(i == 0):
self.X_t[:,j] = data_vector2
if compute_XY:
self.XY[j] = np.inner(data_vector2, Y0)
"""Computation: Fill in X'X with dot products!"""
prod = np.inner(data_vector1, data_vector2)
self.XX[i,j] = prod
self.XX[j,i] = prod
"""Lastly, compute Y'Y"""
self.YY = np.inner(Y0, Y0)
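# In matrix notation, the loop above assembles X_t (one column per regressor
# code), XX = X_t'X_t (filling the upper triangle and mirroring by symmetry),
# XY = X_t'Y0 (when compute_XY is True), and YY = Y0'Y0.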
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" EVALUATE PROBABILITIES/BELIEFS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
def evaluate_predictive_log_distribution(self, y, t):
"""Returns the log densities of *y* using the predictive posteriors
for all possible run-lengths r=0,1,...,t-1,>t-1 as currently stored by
virtue of the sufficient statistics.
The corresponding density is computed for all run-lengths and
returned in a np array
Note: This is called BEFORE update_log_distr, so at time t, the
quantities one tracks through time (like Q_rt, R_rt, ...) will
still only hold Q(r,t), R(r, t), ... and so on (rather than
Q(r+1,t+1), R(r+1,t+1) ... ). Similarly, the regressors
X_t will actually correspond to time point t-1, so we instead
use the regressors stored inside X_tp1 = X_t+1 for evaluating
the pred. density of y.
"""
"""STEP 1: Preliminaries.
- Get y into vector format,
- get log_densities as container of log predictive densities
- get C_t_inv[r,:,:] as the posterior precision for run-length r+1
"""
y = y.flatten()
run_length_num = self.retained_run_lengths.shape[0]
log_densities = -np.inf * np.ones(shape=run_length_num)
"""Note that we store the r0-C_t_inv too, so this quantity is one
entry longer than all other quantities"""
self.C_t_inv = np.zeros((run_length_num+1, self.S1*self.S2,
self.S1*self.S2))
self.predictive_variance_log_det = np.zeros(run_length_num+1)
self.C_t_inv[0,:,:] = self.C_t_inv_r0
self.predictive_variance_log_det[0] = (
self.predictive_variance_r0_log_det)
"""STEP 2: Loop over all retained run-lengths and fill log_densities[r]
with the predictive log density for run-length r.
NOTE: We cannot use retained_run_lengths to loop directly, since
r=t-1 and r>t-1 both have a 0 in there."""
for r in range(0,run_length_num):
"""STEP 2.1: Get inverse of posterior variance ( = posterior
precision) using stored quantities & woodbury (see notes)"""
a_ = self.a + (self.retained_run_lengths[r]+1.0)*0.5
b_ = (self.b + 0.5*(self.b0_D_inv_b0 + self.YY_rt[r] -
self.beta_XX_beta_rt[r]))
self.C_t_inv[r+1,:,:] = (np.identity(self.S1*self.S2) -
np.matmul(self.X_tp1, np.matmul(self.M_inv_2_rt[r,:,:],
np.transpose(self.X_tp1))))
"""STEP 2.2: Get the log determinant using the Woodbury Formula and
applying the determinant lemma afterwards (see notes)
NOTE: We take the minus in front because we compute the
log det of the INVERSE matrix C(r,t)^-1 here, but
need that of C(r,t) for call of 'mvt_log_density'"""
if b_ < 0:
log_det = np.nan
else:
log_det = ((self.S1 * self.S2)*(np.log(b_) - np.log(a_)) +
self.log_det_1_rt[r] - self.log_det_2_rt[r])
self.predictive_variance_log_det[r+1] = log_det
"""STEP 2.3: Evaluate the predictive probability for r_t = r"""
resid = y - np.matmul(self.X_tp1, self.beta_rt[r,:])
log_densities[r] = (
BVARNIG.mvt_log_density(resid,
(a_/b_)*self.C_t_inv[r+1,:,:],
log_det, 2*a_, self.non_spd_alerts))
"""STEP 3: return the full log density vector"""
return log_densities
def get_log_integrals_power_divergence(self):
"""get integrals for power div in log-form"""
p = self.S1*self.S2
run_length_with_0 = np.insert(self.retained_run_lengths.copy() + 1, 0, 0)
nu_1 = 2* (self.a + (run_length_with_0+1.0)*0.5)
nu_2 = nu_1 * self.alpha_rld + p* self.alpha_rld + nu_1
C1 = (1.0 + self.alpha_rld) * (special.gammaln(0.5*(nu_1+p)) -
special.gammaln(0.5*nu_1))
C2 = (special.gammaln(0.5*(nu_2+p)) - special.gammaln(0.5*nu_2))
#DEBUG: Inefficient as brute force, will only be here for preliminary
# test version
#DEBUG: Incorrect! Posterior variance needs a/b factor!
#_, dets = np.linalg.slogdet(self.C_t_inv[:,:,:])
#dets = dets * self.alpha
return (C1 - C2 - nu_1*0.5*p*self.alpha_rld
- np.pi*0.5*p*self.alpha_rld #dets)
- self.alpha_rld * self.predictive_variance_log_det )
# def get_prior_integral_power_divergence(self):
# """get integral for r = 0"""
# p = self.S1*self.S2
# nu_1 = 2*(self.a)
# nu_2 = nu_1 + self.alpha + p*self.alpha + nu_1
# C1 = (1.0 + self.alpha) * (special.gammaln(0.5*(nu_1+p)) -
# special.gammaln(0.5*nu_1))
# C2 = (special.gammaln(0.5*(nu_2+p)) - special.gammaln(0.5*nu_2))
#
# #DEBUG: Inefficient as brute force, will only be here for preliminary
# # test version
# _, det = np.linalg.slogdet(self.C_t_inv[0,:,:])
# det = det * self.alpha
#
# return (C1 - C2 - nu_1*0.5*p*self.alpha - np.pi*0.5*p*self.alpha - det)
def evaluate_log_prior_predictive(self, y, t):
"""Basically, this does is as 'evaluate_predictive_log_distribution',
but using only the prior specs of BVARNIG object to get the
predictive prob. """
resid = y - np.matmul(self.X_tp1, self.prior_mean_beta)
self.C_t_inv_r0 = (
np.identity(self.S1*self.S2) -
np.matmul(self.X_tp1, np.matmul(self.prior_var_beta,
np.transpose(self.X_tp1))))
_, log_det = np.linalg.slogdet((self.a/self.b)*self.C_t_inv_r0)
self.predictive_variance_r0_log_det = log_det
"""Ensure that our log density is upper bounded to avoid ugly numerical
issues. Usually, this minimum has no effect because the density is way
below 1.0 (i.e., the log density is way below 0.0), so this prevents
numerical issues when inverting C_t_inv_r0 to dominate the probability.
The minimum can be removed and only causes issues in extremely
artificial data exhibiting near-collinearity in the regressors"""
return min(0.0, BVARNIG.mvt_log_density(resid,
(self.a/self.b)*self.C_t_inv_r0, log_det, 2*self.a, True))
#DEBUG: Deprecated/not needed
def save_NLL_fixed_pars(self, y,t):
"""DEPRECATED. Similar to eval_pred_log_distr, but evaluates normal
instead to avoid incorporating the parameter-uncertainty into the NLL
computations. Not used for anything."""
"""STEP 0: Ensure that y has desired format"""
y = y.flatten()
run_length_num = self.retained_run_lengths.shape[0]
log_densities = -np.inf * np.ones(shape=run_length_num)
"""Note: We cannot use retained_run_lengths to loop directly, since
r=t-1 and r>t-1 both have a 0 in there."""
for r in range(0,run_length_num):
"""STEP 1A: Get inverse using stored quantities & woodbury"""
a_ = self.a + (self.retained_run_lengths[r]+1.0)*0.5
b_ = (self.b + 0.5*(self.b0_D_inv_b0 + self.YY_rt[r] -
self.beta_XX_beta_rt[r]))
sigma2 = max((b_/(a_+1)), 0.0000001)
cov_mat = sigma2*self.C_t_inv[r+1,:,:]
"""STEP 1C: Evaluate the predictive probability"""
resid = y - np.matmul(self.X_tp1, self.beta_rt[r,:])
"""normal evaluation"""
log_densities[r] = (
stats.multivariate_normal.logpdf(resid, cov=cov_mat))
"""STEP 2: return the full log density vector"""
self.one_step_ahead_predictive_log_probs_fixed_pars = log_densities
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" UPDATE PROBABILITIES/SUFFICIENT STATISTICS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
#DEBUG: For now assume that one KNOWS the exos, might cause trouble
def update_predictive_distributions(self, y_t, y_tm1, x_exo_t, x_exo_tp1, t,
padding_column_tm1 = None,
padding_column_t = None,
r_evaluations = None):
"""Takes the next observation, *y*, at time *t* and updates the
sufficient statistics, means & vars corresponding to all potential
run-lengths r=0,1,...,t-1,>t-1.
Inputs are:
y_t
Observations at time t
y_tm1
Observations at time t-1 ('t minus 1', i.e. tm1)
x_exo_t, x_exo_tp1
Exogeneous variables at t, t+1
padding_column_tm1, padding_column_t, r_evaluations
deprecated/leave None
Quantities affected by the update are:
always: XX_old, XX, X_t, X_tp1, XX_rt, XY, XY_rt, YY, YY_rt,
beta_rt, M_inv_1_rt,M_inv_2_rt, log_det_1_rt, log_det_2_rt,
retained_run_lengths, beta_XX_beta_rt
never: C_t_inv_rt (instead updated in evaluate_predictive...)
QR: Q_rt, R_rt
"""
"""STEP 1: Get observations as vectors"""
y_t, y_tm1 =y_t.flatten(), y_tm1.flatten()
"""STEP 2: Extract the NEW regressor vectors, and do two things:
(A) Store them in X for the S1*S2 rank-1-updates later
(B) Immediately compute the new dot products for X'X, and
copy the old dot products that can be re-used
(C) Immediately compute the new dot products for X'Y
(D) Update X'X(r-1,t-1) => X'X(r,t),
X'Y(r-1,t-1) => X'Y(r,t),
Y'Y(r-1,t-1) => Y'Y(r,t) using addition
NOTE: If we want to use exogeneous variables, adapt this function"""
"""STEP 2.1: Store the value of previous iteration"""
#DEBUG: Not used anywhere anymore it seems!?
self.XX_old, self.XY_old, self.X_t_old = (self.XX.copy(),
self.XY.copy(), self.X_t.copy())
self.Y_new, self.Y_old = y_t, y_tm1
"""STEP 2.2: Updates X'X, X'Y, Y'Y, XX_rt, XY_rt, YY_rt"""
self.regressor_cross_product_updates(y_t, y_tm1, x_exo_t,
t, padding_column_tm1)
"""STEP 2.3: Retrieves the new regressors in X_tp1 and re-assigns the
old value of X_tp1 to X_t"""
self.X_tp1 = self.get_x_new(y_t, x_exo_tp1, t, padding_column_t)
"""STEP 3: Update quantities that are direct functions of the data"""
"""STEP 3.1: Extend the run-lengths and add run-length 0, add QR for
the new run-length r=0, and update beta and beta^T M beta. Also copy
the entries of log_det_2_rt into log_det_1_rt to update log_det_1_rt.
PRE-UPDATES, since M_1_inv_rt, log_det_1_rt are only overwritten by
the old M_2_inv_rt, log_det_2_rt before they in turn are updated.
UPDATES WB: run-lengths
M_1_inv_rt[0,:,:] added, rest of M_1_inv copied
beta_rt
beta_XX_beta_rt
log_det_1_rt[0,:] added, rest of log_det_2_rt copied
UPDATES QR: not implemented
"""
#QR ADAPTION
#self.pre_QR_updates(t)
self.pre_updates(t)
"""STEP 3.2: Bottleneck: Update your QR decomposition with the
regressors in X_tp1 from QR(r,t) to QR(r+1,t+1)
UPDATES, since this is where the heavy computational lifting occurs
via updating M_2_inv_rt, log_det_2_rt. If in QR, this is where QR
decomposition is updated (as this corresponds to the heavy lifting)
UPDATES WB: M_2_inv(r+1,t+1) for all r
log_det_2(r+1,t+1) for all r
UPDATES QR: not implemented
"""
self.updates(t)
#QR ADAPTION
#self.QR_updates(t)
#self.post_QR_updates(t)
def regressor_cross_product_updates(self, y_t, y_tm1, x_exo, t,
padding_column=None,
rt_updates = True):
"""Get the new regressors, i.e. transform the shape of X and X'X in
accordance with your new observations at time t. Also update X'Y
and Y'Y, since it is not much more work once the other stuff is
in place. Also immediately updates XX_rt, XY_rt, YY_rt.
The argument *padding_column* is only needed for the demo
object BVAR_NIG_CSurf for the column that is next to the C Surface."""
"""STEP 1: Copy all old regressors from X and X'X_t into new X and
X'X and shift them within the same data structure"""
"""STEP 1.1: Shift the ENDO regressors that are already in X, X'X,
provided that there is something to shift (i.e. provided
that the lag length is at least 2)"""
if self.has_lags and self.lag_length > 1:
self.X_t[:,self.insertion_position:] = (
self.X_t[:,self.extraction_list])
self.XX[self.insertion_position:,self.insertion_position:] = (
self.XX[self.extraction_list,:][:,self.extraction_list])
"""STEP 2: For each variable x_i that newly arrives per time step,
put it into X and compute relevant cross-prods X'X & X'Y"""
i = 0
#DEBUG: Not correct anymore for weak coupling!
if (not (self.restriction_sequence is None) or
self.general_nbh_coupling == "strong coupling"):
num_new_vars = len(self.endo_vars[0]) + self.num_exo_regressors
new_vars = sum(self.exo_vars,[]) + self.endo_vars[0]
elif self.general_nbh_coupling == "weak coupling":
new_vars = (sum(self.exo_vars,[]) +
sum(self.endo_vars[:self.S1*self.S2], []))
num_new_vars = int(len(new_vars))
elif self.general_nbh_coupling is None and not self.regular_grid:
"""only constants"""
new_vars = sum(self.exo_vars,[])
num_new_vars = int(len(new_vars))
"""NEW VARIABLE LOOP I"""
for regressor_code in new_vars: #sum(self.exo_vars,[]) + self.endo_vars[0]:
"""STEP 2.1: Retrieve the values of x_i"""
if i <= self.num_exo_regressors - 1:
x_i = self.get_exo_regressors(regressor_code, i, x_exo)
elif self.has_lags:
x_i = self.get_endo_regressors(regressor_code, 1,
y_tm1.reshape(1, self.S1, self.S2),
padding_column)
"""STEP 2.2: Store x_i inside X"""
self.X_t[:,i] = x_i
"""STEP 2.3: Compute the cross-products x_i^Tx_j for all other
new variables x_j s.t. j>=i as well as for all old
x_j that we retain, and store in X'X_t"""
"""NEW VARIABLE LOOP II"""
for j in range(0, num_new_vars):
if (i <= j):
if (j <= self.num_exo_regressors-1):
x_j = self.get_exo_regressors(self.all_vars[j],
j, x_exo)
elif self.has_lags:
x_j = self.get_endo_regressors(self.all_vars[j],
1, y_tm1.reshape(1,self.S1, self.S2),
padding_column)
self.XX[i,j] = self.XX[j,i] = np.inner(x_i, x_j)
"""STEP 2.4: Since for i=0, we will retrieve all new regressors
into x_j, use this to directly fill in the new
cross-products between old regressors in X and
the new regressors x_j"""
"""OLD VARIABLE LOOP"""
if i == 0 and self.has_lags:
for k in range(num_new_vars, self.num_regressors):
x_k = self.X_t[:,k]
self.XX[k,j] = self.XX[j,k] = np.inner(x_j, x_k)
"""STEP 2.5: Advance the counter"""
i = i+1
"""Get XX for only constants (like in OLD VARIABLE LOOP)"""
#DEBUG: Unclear if this works
if not self.has_lags:
self.XX = np.identity(self.num_regressors)
"""STEP 3: Add X'X [X'Y, Y'Y] to X'X(r-1,t-1) [X'Y(r-1,t-1),
Y'Y(r-1,t-1)]to update to X'X(r,t) [X'Y(r,t), Y'Y(r,t)]"""
"""STEP 3.1: Add X'X [X'Y, Y'Y] to all the t+1 possible run-lenghts"""
self.YY = np.inner(y_t, y_t)
self.XY = np.matmul(np.transpose(self.X_t), y_t)
"""Note: Also update the sums of previous X'X quantities, provided that
you are in a BVARNIG model. In a BVARNIGDPD model, this will not
happen as XX_rt, XY_rt, YY_rt are not traced through time"""
if rt_updates:
self.XX_rt = self.XX_rt + self.XX
self.XY_rt = self.XY_rt + self.XY
self.YY_rt = self.YY_rt + self.YY
"""STEP 3.2: Insert X'X [X'Y, Y'Y] at position r=0 of XX_rt
[XY_rt, YY_rt]. Note: For X'Y, we need to add the prior
influence encoded in D^-1 * beta_0. Since we always add
this at r=0, we can ignore it when we add X'Y to the
longer run-lengths in STEP 3.1"""
self.XX_rt = np.insert(self.XX_rt, 0, (self.XX + self.D_inv), axis = 0)
self.XY_rt = np.insert(self.XY_rt, 0, (self.XY + self.D_inv_b0), axis = 0)
self.YY_rt = np.insert(self.YY_rt, 0, self.YY, axis = 0)
def get_x_new(self,y_t, x_exo_tp1, t, padding_column=None):
"""STEP 1: Shift the ENDO regressors that are already in X, X'X,
provided that there is something to shift (i.e. provided
that the lag length is at least 2)"""
if self.has_lags and self.lag_length > 1:
x_new = np.zeros((self.S1*self.S2, self.num_regressors))
x_new[:,self.insertion_position:] = (
self.X_t[:,self.extraction_list].copy())
else:
x_new = np.zeros((self.S1*self.S2, self.num_regressors))
"""STEP 2: For each variable x_i that newly arrives per time step,
put it into X and compute relevant cross-prods X'X & X'Y"""
i = 0
"""NEW VARIABLE LOOP I: Only over exogeneous variables and first lag"""
#DEBUG: This is a list of lists! We want a list of codes!
if self.has_lags:
all_codes = sum(self.exo_vars,[]) + self.endo_vars[0]
else:
all_codes = sum(self.exo_vars,[]) #endo_vars will be empty, so its
#first entry doesn't exist!
for regressor_code in all_codes:
"""STEP 2.1: Retrieve the values of x_i"""
if i <= self.num_exo_regressors - 1:
x_i = self.get_exo_regressors(regressor_code, i, x_exo_tp1)
elif self.has_lags:
"""Note: y is treated as 3-dim array in get_endo_regressors"""
x_i = self.get_endo_regressors(regressor_code, 1,
y_t.reshape(1,self.S1, self.S2),
padding_column)
"""STEP 2.2: Store x_i inside X"""
x_new[:,i] = x_i
i = i+1
return x_new
def pre_updates(self, t):
"""Updates retained_run_lengths,
M_1_inv_rt, log_det_1_rt,
beta_rt, beta_XX_beta_rt"""
"""STEP 1: extend the retained run lengths"""
self.retained_run_lengths = self.retained_run_lengths + 1
self.retained_run_lengths = np.insert(self.retained_run_lengths, 0, 0)
"""STEP 2: Add the new M inverse"""
new_M_inv = np.linalg.inv(self.D_inv + self.XX)
self.M_inv_1_rt = np.insert(self.M_inv_2_rt.copy(), 0, new_M_inv, axis=0)
"""STEP 3: update the beta estimates and beta * XX * beta"""
self.compute_betas(t)
"""STEP 4: update the log determinant 1 and M_inv_1_rt. Again, take
-new_log_det because we take the log-det of the inverse of the matrix
whose log det we wanna store."""
sign, new_log_det = np.linalg.slogdet(new_M_inv)
self.log_det_1_rt = np.insert(self.log_det_2_rt.copy(), 0, new_log_det)
#QR ADAPTION
#"""add the QR for r=0"""
#newQ, newR = self.QR_loop( self.D_inv_Q, self.D_inv_R, self.X_t)
#self.Q_rt = np.insert(self.Q_rt, 0, newQ, axis=0)
#self.R_rt = np.insert(self.R_rt, 0, newR, axis=0)
#"""get new log det and new M_inv"""
#new_log_det = np.sum(np.log(np.abs(np.diagonal(newR))))
#self.log_det_1_rt = np.insert(self.log_det_2_rt.copy(), 0, -new_log_det)
#
#new_M_inv = linalg.solve_triangular(a=newR, b = np.transpose(newQ),
# check_finite=False)
#self.M_inv_1_rt = np.insert(self.M_inv_2_rt.copy(), 0, new_M_inv,
# axis=0)
#DEBUG: Should be the same as computing
# np.sum(np.log(np.abs(np.diagonal(self.R_rt, axis1=1, axis2=2))), axis=1)
def updates(self, t):
"""Updates M_inv_2_rt, log_det_2_rt
        NOTE: It updates the inverses of X'X(r, t) to that of X'X(r+1,t+1), so
we have the inverse for t+1 at time t
"""
run_length_num = self.retained_run_lengths.shape[0]
self.M_inv_2_rt = np.zeros((run_length_num, self.num_regressors,
self.num_regressors))
self.log_det_2_rt = np.zeros(run_length_num)
for r in range(0,run_length_num):
"""Compute small_matrix for r, and get inverse + det"""
M_inv_x_X_tp1 = np.matmul(self.M_inv_1_rt[r,:,:], np.transpose(self.X_tp1))
small_matrix_inv = np.linalg.inv(np.identity(self.S1*self.S2) +
np.matmul( np.transpose(M_inv_x_X_tp1),
np.transpose(self.X_tp1)))
"""Update M_2_inv"""
self.M_inv_2_rt[r,:,:] = self.M_inv_1_rt[r,:,:] - np.matmul(
(M_inv_x_X_tp1), np.matmul(small_matrix_inv,
np.transpose(M_inv_x_X_tp1) ))
"""Update log_det_2"""
sign, value = np.linalg.slogdet(small_matrix_inv)
self.log_det_2_rt[r] = value + self.log_det_1_rt[r]
#QR ADAPTION
#self.Q_rt[r,:,:], self.R_rt[r,:,:] = self.QR_loop(
# self.Q_rt[r,:,:], self.R_rt[r,:,:], self.X_tp1)
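        # --- Editor's note (illustrative sketch, not part of the original class):
        # the update above is the Woodbury identity
        #   (M + X'X)^-1 = M^-1 - M^-1 X' (I + X M^-1 X')^-1 X M^-1
        # applied with X = X_tp1, so the posterior precision for t+1 is obtained
        # without re-inverting a num_regressors x num_regressors matrix from
        # scratch. A tiny numerical check with hypothetical names:
        #
        #   import numpy as np
        #   rng = np.random.default_rng(0)
        #   M = np.eye(4) + 0.1 * rng.standard_normal((4, 4)); M = M @ M.T
        #   X = rng.standard_normal((5, 4))
        #   M_inv = np.linalg.inv(M)
        #   woodbury = M_inv - M_inv @ X.T @ np.linalg.inv(
        #       np.eye(5) + X @ M_inv @ X.T) @ X @ M_inv
        #   assert np.allclose(woodbury, np.linalg.inv(M + X.T @ X))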
def post_QR_updates(self, t):
#QR ADAPTION
"""After the QR updates have been updated to contain the X_tp1 r
regressors, obtain the M_inv_rt matrices and update log_det_2"""
self.M_inv_2_rt = np.insert(self.M_inv_2_rt, 0,
np.zeros((self.num_regressors,self.num_regressors) ),
axis=0)
run_length_num = self.retained_run_lengths.shape[0]
for r in range(0,run_length_num):
self.M_inv_2_rt[r,:,:] = linalg.solve_triangular(a=self.R_rt[r,:,:],
b = np.transpose(self.Q_rt[r,:,:]), check_finite=False)
self.log_det_2_rt = np.sum(np.log(np.abs(np.diagonal(self.R_rt,
axis1=1, axis2=2))), axis=1)
def compute_betas(self,t):
"""compute beta = MX'Y for all run-lengths using triangular solver:
Since M^-1*beta = X'Y, and since M^-1 = QR with R a triangular
matrix and Q^TQ = I, it holds that QR*beta = X'Y, and so
R*beta = Q^T*X'Y can be solved for beta using a triangular solver
once Q^T*X'Y has been computed (which is O(n^2)). Thus, since the
triangular solver itself is O(n^2), we obtain beta in O(n^2)!"""
run_length_num = self.retained_run_lengths.shape[0]
self.beta_rt = (
np.insert(self.beta_rt , 0, np.zeros(self.num_regressors), axis=0))
self.beta_XX_beta_rt = np.insert(self.beta_XX_beta_rt , 0, 0, axis=0)
for r in range(0,run_length_num):
#QR ADAPTION
#self.beta_rt[r,:] = linalg.solve_triangular(a = self.R_rt[r,:,:],
# b = np.matmul(np.transpose(self.Q_rt[r,:,:]), self.XY_rt[r,:]),
# check_finite=False)
self.beta_rt[r,:] = np.matmul(self.M_inv_1_rt[r,:,:], self.XY_rt[r,:])
self.beta_XX_beta_rt[r] = np.inner(self.beta_rt[r,:],
np.matmul(self.XX_rt[r,:,:],self.beta_rt[r,:]))
def QR_loop(self,Q0, R0, X):
#QR ADAPTION, but also used in initialization (though it need not be)
"""Taking Q0, R0 as starting decomposition, this function loops over
the elements in X_t until all row vectors have been used for rank-1
updates. Overwrites Q0,R0 in the process."""
current_count = end_point = 0
while (end_point != self.S1*self.S2):
start_point = current_count*self.num_regressors
end_point = min((current_count+1)*self.num_regressors,
self.S1*self.S2)
current_range = range(start_point, end_point)
Q0, R0 = linalg.qr_update( Q0, R0,
np.transpose( X[current_range,:]),
np.transpose( X[current_range,:]),
check_finite=False)
current_count = current_count + 1
return Q0, R0
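        # --- Editor's note (sketch, not part of the original class): QR_loop folds
        # batches of rows of X into an existing QR factorisation via rank-k updates.
        # A minimal standalone check of that call, assuming a SciPy version that
        # ships scipy.linalg.qr_update:
        #
        #   import numpy as np
        #   from scipy import linalg
        #   A = np.random.randn(6, 4)
        #   u = np.random.randn(6, 2)           # rank-2 update A + u @ v.T
        #   v = np.random.randn(4, 2)
        #   Q, R = np.linalg.qr(A, mode="complete")
        #   Q1, R1 = linalg.qr_update(Q, R, u, v, check_finite=False)
        #   assert np.allclose(Q1 @ R1, A + u @ v.T)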
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" GET POSTERIOR PREDICTIVE QUANTITIES """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
    #DEBUG: Assumes access to x_exo(t+1) at time t.
def get_posterior_expectation(self, t, r_list=None):
"""get the predicted value/expectation from the current posteriors
at time point t, for all possible run-lengths."""
post_mean = np.matmul((self.X_tp1),
self.beta_rt[:,:,np.newaxis])
return post_mean
def get_prior_expectation(self, t):
"""Get the prior value/expectation at time t, for r_t = 0"""
return ( np.matmul(self.X_tp1, self.prior_mean_beta))
    #DEBUG: Assumes access to x_exo(t+1) at time t.
def get_posterior_variance(self, t, r_list=None):
"""get the predicted variance from the current posteriors at
time point t, for all possible run-lengths."""
post_var = np.zeros((np.size(self.retained_run_lengths),
self.S1*self.S2, self.S1*self.S2))
"""NOTE: See the derivations in my notes"""
run_length_num = self.retained_run_lengths.shape[0]
for r in range(0,run_length_num):
"""Get inverse using stored quantities & woodbury"""
a_ = self.a + (r+1.0)*0.5
b_ = (self.b + 0.5*(self.b0_D_inv_b0 + self.YY_rt[r] -
self.beta_XX_beta_rt[r]))
"""NOTE: Overflow encountered here when your floats are too big!
Though this can be avoided by normalizing the data, usually"""
post_var[r,:,:] = (b_/a_)*(np.identity(self.S1*self.S2) +
np.matmul(self.X_tp1, np.matmul(self.M_inv_1_rt[r,:,:],
np.transpose(self.X_tp1))))
return post_var
@staticmethod
def mvt_log_density(y_flat, prec, log_det, df, prior = False, alerts = False):
"""Returns the density of a multivariate t-distributed RV.
Assumes that the mean is 0. Amounts to the predictive probability for
a given run-length (and model).
Here, we have y_flat being the point at which we want to evaluate the
density, mu its mean, prec the precision matrix, and det the cov matrix'
determinant. A very helpful reference is the formulation in
https://www.statlect.com/probability-distributions/
multivariate-student-t-distribution
"""
p, nu = y_flat.shape[0], df
"""NOTE: Because it typically is the case that num_regressors > S1*S2,
1+(1.0/nu)*np.matmul(np.matmul(y_flat, prec),y_flat)) is
negative sometimes because prec is not s.p.d.
This happens for r s.t. (r+1)*S1*S2 < num_regressors, and is
addressed by forcing prec to be s.p.d."""
log_term = (1+(1.0/nu)*np.matmul(np.matmul(y_flat, prec),y_flat))
if( log_term<0 or np.isnan(log_det) ):
"""If there is trouble with your log determinant (i.e. it is nan
or negative), you will try to fix it. Usually does not happen, but
needs care if it does. If it does occur, it will typically affect
the prior (i.e. the predictive for r=0 without observations)"""
if not prior and p>1:
"""If we don't evaluate the prior predictive (i.e. for r=0),
immediately try more expensive (but more principled) routine"""
if alerts:
print("covariance estimate not s.p.d. or log_det nan")
print("degrees of freedom: ", df)
"""NOTE: Use the (expensive) nearest pd matrix function if not
prior, otherwise just add an identity matrix that
is large enough"""
#print(prec)
try:
prec = (NPD.nearestPD(prec) +
np.identity(prec.shape[0])*max(df*nu, max(25, p)))
except (ValueError, np.linalg.LinAlgError) as e: #np.linalg.LinAlgError
prec = prec + np.identity(p)*pow(10,5)
log_term = (1+(1.0/nu)*
np.matmul(np.matmul(y_flat, prec),y_flat))
elif prior and p>1:
"""If we do evaluate the prior predictive (i.e. for r=0),
try computationally cheap methods first, and then the more
expensive ones. (Influence of r=0 probability typically
negligible)"""
"""First try the easy fix (=adding diagonal)"""
if log_term<0 and p>1:
prec = prec + np.identity(prec.shape[0])*nu*df
log_term = (1+(1.0/nu)*np.matmul(
np.matmul(y_flat, prec),y_flat))
"""If this did not rectify the issue, try the computationally
more expensive way of fixing it (=PD function)"""
if log_term<0 and p>1:
prec = (NPD.nearestPD(prec) +
np.identity(prec.shape[0])*max(df*nu, 25))
log_term = (1+(1.0/nu)*
np.matmul(np.matmul(y_flat, prec),y_flat))
"""Last safeguard: add diagonal terms until you are spd. I note
that I have never needed/entered this. nearestPD has worked"""
count = 0
while log_term<0:
if count == 0:
print("Covariance matrix injected with sphericity")
prec = prec + np.identity(prec.shape[0])*nu*df*10
log_term = (1+(1.0/nu)*np.matmul(np.matmul(y_flat, prec),y_flat))
count = count+1
elif p == 1:
"""If we only fit a single constant!"""
return -pow(10,4)
"""If you have tried to make the matrix spd but it simply has not
worked, take drastic action and simply set it to some extremely
small value (this corresponds to saying that whatever has happened
is extremely unlikely. That being said, I don't recall the algo
ever entering this contition."""
if( log_term<0):
print("non-s.p.d. covariance estimate:",
"problem persists! Set it to log(pow(10,-100))")
print("log term is", log_term)
print("det term is", log_det)
#log_term = np.log(pow(10,-50))
#log_det = np.log(pow(100, p)) # = slogdet(np.identity*100)
return -pow(10,5)
else:
log_term = np.log(log_term)
_, log_det = np.linalg.slogdet(prec)
if np.isnan(log_det):
print("log_det nan: problem persists!")
else:
log_term = np.log(log_term)
"""Note: Should not happen after we have corrected b_ to be positive.
I have never entered this condition in any simulation."""
if np.isnan(log_det):
print("nan log det")
_, log_det = np.linalg.slogdet(prec)
log_det = 1.0/log_det #since we want the log det of cov mat
if np.isnan(log_det):
print("problem persists!")
calc = (special.gammaln(0.5*(nu+p)) - special.gammaln(0.5*nu) -
0.5*p*( np.log(nu) + np.log(np.pi) ) - 0.5*log_det -
0.5*(nu+p)*log_term)
"""Note: Should not happen after we have corrected b_ to be positive.
I have never entered this condition in any simulation."""
if np.isnan(calc):
print("Alert! Calc is nan")
calc = -pow(10,5)
return calc
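    # --- Editor's note (illustrative, not part of the original class): the value
    # returned above is the standard multivariate Student-t log density with mean
    # 0, degrees of freedom nu = df and scale matrix Sigma = prec^-1 (with
    # log_det = log|Sigma|):
    #   log f(y) = gammaln((nu+p)/2) - gammaln(nu/2) - (p/2)(log(nu) + log(pi))
    #              - 0.5*log|Sigma| - ((nu+p)/2)*log(1 + y' Sigma^-1 y / nu)
    # In recent SciPy versions this can be cross-checked (hypothetical snippet):
    #
    #   import numpy as np
    #   from scipy import stats
    #   y = np.array([0.3, -1.2]); Sigma = np.array([[2.0, 0.3], [0.3, 1.0]])
    #   prec = np.linalg.inv(Sigma); _, log_det = np.linalg.slogdet(Sigma)
    #   ours = BVARNIG.mvt_log_density(y, prec, log_det, df=5)
    #   ref = stats.multivariate_t(loc=np.zeros(2), shape=Sigma, df=5).logpdf(y)
    #   assert np.isclose(ours, ref)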
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" TRIMMING THE RUN-LENGTH-DISTRIBUTION """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
def trimmer(self, kept_run_lengths, BAR_submodel = False):
"""Trim the relevant quantities for the BVAR NIG model"""
"""If this BVAR model is a submodel of a BAR model, its joint log
probs will never be updated/grown. In that case, all it is needed for
is the evaluation of predictive probabilities"""
if not BAR_submodel:
self.joint_log_probabilities = (
self.joint_log_probabilities[kept_run_lengths])
"""If we optimize hyperparameters, we also want to discard the deriva-
tives that are not warranted."""
if self.hyperparameter_optimization:
self.model_specific_joint_log_probabilities_derivative_sign = (
self.model_specific_joint_log_probabilities_derivative_sign[:,
kept_run_lengths])
self.model_specific_joint_log_probabilities_derivative = (
self.model_specific_joint_log_probabilities_derivative[:,
kept_run_lengths])
"""None condition needed to ensure that they are initialized"""
if (self.generalized_bayes_rld == "power_divergence" and
self.alpha_rld_learning and
self.log_alpha_derivatives_joint_probabilities is not None):
self.log_alpha_derivatives_joint_probabilities = (
self.log_alpha_derivatives_joint_probabilities[
kept_run_lengths])
self.log_alpha_derivatives_joint_probabilities_sign = (
self.log_alpha_derivatives_joint_probabilities_sign[
kept_run_lengths])
"""Discard all quantities of data that have been computed"""
self.beta_rt = self.beta_rt[kept_run_lengths,:]
self.beta_XX_beta_rt = self.beta_XX_beta_rt[kept_run_lengths]
self.XX_rt = self.XX_rt[kept_run_lengths,:,:]
self.XY_rt = self.XY_rt[kept_run_lengths,:]
self.YY_rt = self.YY_rt[kept_run_lengths]
#QR ADAPTION
#self.log_det_rt = self.log_det_rt[kept_run_lengths]
#self.R_rt = self.R_rt[kept_run_lengths,:,:]
#self.Q_rt = self.Q_rt[kept_run_lengths,:,:]
self.M_inv_1_rt = self.M_inv_1_rt[kept_run_lengths,:,:]
self.M_inv_2_rt = self.M_inv_2_rt[kept_run_lengths,:,:]
self.log_det_1_rt = self.log_det_1_rt[kept_run_lengths]
self.log_det_2_rt = self.log_det_2_rt[kept_run_lengths]
self.retained_run_lengths = (
self.retained_run_lengths[kept_run_lengths])
self.model_log_evidence = scipy.misc.logsumexp(
self.joint_log_probabilities )
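        # --- Editor's note: scipy.misc.logsumexp has been removed from newer SciPy
        # releases; the same normalisation of the retained joint log probabilities
        # is available as scipy.special.logsumexp, e.g. (hypothetical sketch):
        #
        #   import numpy as np
        #   from scipy.special import logsumexp
        #   joint_log_probs = np.array([-10.2, -11.7, -9.8])
        #   model_log_evidence = logsumexp(joint_log_probs)
        #   run_length_posterior = np.exp(joint_log_probs - model_log_evidence)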
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" REGRESSOR EXTRACTOR FUNCTIONS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
def get_exo_regressors(self, regressor_code, i, data):
"""Extract all the exogeneous regressors as you should"""
"""If the regressor_code is intercept, you just create intercepts.
Otherwise, you just extract the relevant row in data"""
#DEBUG: Check that data has format (num_exos, S1,S2)!
if regressor_code == "intercept":
data_vector = np.ones((self.S1, self.S2)) #assumes ONE intercept
elif self.intercept_codes != ["intercept"]:
"""retrieve the number of the group in question"""
group_number = int(regressor_code.split("_")[-1])
"""return the intercept only at the desired locations"""
data_vector = self.intercept_grouping[group_number,:,:].flatten()
else:
data_vector = data[i,:,:].flatten()
return data_vector.flatten()
def get_endo_regressors(self, regressor_code, lag, data,
padding_column=None):
"""Get the predictors in order, where we give back the *data* as it
should be. In particular, give the *lag*-th lattice slide with the
neighbourhood as specified by *position*.
Around the edges, the *padding* is applied. The argument
*padding_column* is only for the demo object BVAR_NIG_CSurf: It is the
rhs or lhs column of the lattice.
NOTE: The lag passed to this function is the actual lag - 1.
The data passed to this function at time t has times t-l:t-1
"""
padding = self.padding
lag = -(lag - 1) #need this conversion since we have 1,2,3...,T order,
#but want to access the l-th lag, i.e. T-l. Also, the
#FIRST lag corresponds to 0, i.e. T-1 i.e. the last
#entry contained in the endogeneous regressors
"""STEP 1: Compute padding for rows, columns, and corners"""
if padding == 0 or padding == "zero":
padding_row = np.zeros(self.S2)
padding_col = np.zeros(self.S1)
padding_corners = 0.0
elif padding == "overall_mean":
mean = np.mean(data[lag,:,:])
padding_row = mean * np.ones(self.S2) #np.mean(data[lag,:,:], axis=0)
padding_col = mean * np.ones(self.S1)
padding_corners = mean
elif padding == "row_col_mean":
padding_row = np.mean(data[lag,:,:], axis=0)
padding_col = np.mean(data[lag,:,:], axis=1)
weight = (np.size(padding_row)/
(np.size(padding_row) + np.size(padding_col)))
padding_corners = (weight*np.sum(padding_row) +
(1-weight)*np.sum(padding_col))
elif padding.split("_")[-1] == "rhs" or padding.split("_")[-1] == "lhs":
"""I.e., if we have a CSurf object, we need some extra care at the
boundaries of the change surface"""
padding_row = np.mean(data[lag,:,:], axis=0)
if padding.split("_")[-1] == "rhs":
"""get padding for cols as usual + specific one for rhs, lhs"""
padding_rhs = padding_column
padding_lhs = padding_col = np.mean(data[lag,:,:], axis=1)
weight = (np.size(padding_row)/
(np.size(padding_row) + np.size(padding_col)))
padding_corner_rhs = (weight*np.sum(padding_row) +
(1-weight)*np.sum(padding_rhs))
padding_corner_lhs = padding_corners = (
weight*np.sum(padding_row) +
(1-weight)*np.sum(padding_lhs))
else:
"""get padding for cols as usual + specific one for rhs, lhs"""
padding_rhs = padding_col = np.mean(data[lag,:,:], axis=1)
padding_lhs = padding_column
weight = (np.size(padding_row)/
(np.size(padding_row) + np.size(padding_col)))
padding_corner_rhs = padding_corners = (weight*
np.sum(padding_row) + (1-weight)*np.sum(padding_rhs))
padding_corner_lhs = (weight*np.sum(padding_row) +
(1-weight)*np.sum(padding_lhs))
#data_vector = np.ones((self.S1, self.S2))
"""STEP 2A: Get the data_vector for the 4-nbh case or intercept/center"""
#DEBUG: intercept will be called in exo regr, this is redundant
if regressor_code == "intercept":
data_vector = np.ones((self.S1, self.S2))
elif regressor_code == "center":
data_vector = data[lag,:,:]
elif regressor_code == "left":
if padding.split("_")[-1] == "rhs":
"""Insert the padding column passed to this function"""
data_vector = np.insert(data[lag,:,:-1], 0, padding_rhs, axis=1)
else:
"""Take the row averages as padding"""
data_vector = np.insert(data[lag,:,:-1], 0, padding_col, axis=1)
elif regressor_code == "right":
if padding.split("_")[-1] == "lhs":
"""Insert the padding column passed to this function"""
data_vector = np.insert(data[lag,:,1:], self.S2-1, padding_lhs, axis=1)
else:
"""Take the row averages as padding"""
data_vector = np.insert(data[lag,:,1:], self.S2-1, padding_col, axis=1)
elif regressor_code == "top":
data_vector = np.insert(data[lag,:-1,:], 0, padding_row, axis=0)
elif regressor_code == "bottom":
data_vector = np.insert(data[lag,1:,:], self.S1-1, padding_row, axis=0)
elif regressor_code == "topleft":
data_vector = np.zeros((self.S1, self.S2))
data_vector[1:, 1:] = data[lag,:-1,:-1]
if padding.split("_")[-1] == "rhs":
"""Insert the padding column passed to this function"""
data_vector[0,:] = np.append(padding_corner_rhs, padding_row[:-1])
data_vector[:,0] = np.append(padding_corner_rhs, padding_rhs[:-1])
else:
"""Take the row averages as padding"""
data_vector[0,:] = np.append(padding_corners, padding_row[:-1])
data_vector[:,0] = np.append(padding_corners, padding_col[:-1])
elif regressor_code == "topright":
data_vector = np.zeros((self.S1, self.S2))
data_vector[1:, :-1] = data[lag,:-1,1:]
if padding.split("_")[-1] == "lhs":
"""Insert the padding column passed to this function"""
data_vector[0,:] = np.append( padding_row[1:], padding_corner_lhs)
data_vector[:,-1] = np.append(padding_corner_lhs, padding_lhs[:-1])
else:
"""Take the row averages as padding"""
data_vector[0,:] = np.append(padding_row[1:], padding_corners)
                data_vector[:,-1] = np.append(padding_corners, padding_col[:-1])
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
__all__ = ["echelle", "plot_echelle", "interact_echelle"]
def echelle(freq, power, dnu, fmin, fmax, offset=0.0):
if len(freq) != len(power):
raise ValueError("x and y must have equal size.")
fmin = fmin - offset
fmax = fmax - offset
freq = freq - offset
if fmin <= 0.0:
fmin = 0.0
else:
fmin = fmin - (fmin % dnu)
# trim data
index = np.intersect1d(np.where(freq>=fmin)[0],np.where(freq<=fmax)[0])
trimx = freq[index]
    trimy = power[index]
samplinginterval = np.median(trimx[1:-1] - trimx[0:-2]) * 0.1
xp = np.arange(fmin,fmax+dnu,samplinginterval)
yp = np.interp(xp, freq, power)
n_stack = int((fmax-fmin)/dnu)
n_element = int(dnu/samplinginterval)
morerow = 2
    arr = np.arange(1,n_stack)
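# --- Editor's note: standalone sketch (added by the editor, not part of the
# module above) of the echelle folding that echelle() sets up: interpolate the
# power spectrum onto a uniform grid and stack it into rows of width dnu. All
# names here are illustrative only.
import numpy as np

def fold_echelle_sketch(freq, power, dnu, fmin, fmax):
    """Return power stacked modulo the large separation dnu, one row per order."""
    samplinginterval = np.median(np.diff(freq)) * 0.1
    xp = np.arange(fmin, fmax + dnu, samplinginterval)
    yp = np.interp(xp, freq, power)
    n_stack = int((fmax - fmin) / dnu)
    n_element = int(dnu / samplinginterval)
    # keep only complete rows and reshape into an (n_stack, n_element) image
    return yp[:n_stack * n_element].reshape(n_stack, n_element)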
"""
bunch.py is a module for computing bunch distributions for use with different tracking codes. The StandardBunch() class
defines beam phase space coordinates and permits fitting of different distributions for a traditional linear system
with given Twiss parameters. Currently, the class supports unit conventions for the Synergia tracking code, as
discussed in the Synergia 2.1 documentation: http://compacc.fnal.gov/~amundson/html/units.html
Unit Conventions:
-x,y : m
-xp,yp : unitless (px/ptotal)
-z : m (c*dt)
-zp : unitless (dpz/ptotal)
A derived class will soon be added for matching to a nonlinear integrable insert for use in IOTA and other integrable
systems using the elliptic scheme defined by: https://journals.aps.org/prab/abstract/10.1103/PhysRevSTAB.13.084002
<NAME> and <NAME>. Phys. Rev. ST Accel. Beams 13, 084002 (2010).
Nonlinear waterbag and truncated Gaussian functions modified from C. Mitchell of LBNL.
Author: <NAME>
Date Created: 6/14/2018
Last Updated: 6/25/2018
"""
import numpy as np
import random
from scipy.optimize import newton
class StandardBunch(object):
"""
Generic class for generating traditional bunch distributions.
Generates a numpy array for easy output/input into other codes.
Attributes:
npart (int): the number of particles in the bunch
dist (string): the distribution type being used, defaults to Gaussian
emitx (float): RMS uncorrelated emittance in the x-px plane, defaults to 1 mm-mrad
emity (float): RMS uncorrelated emittance in the y-py plane, defaults to 1 mm-mrad
betax (float): the beta function where the bunch is being matched, defaults to 1
alphax (float): one-half the derivative of the beta function, defaults to 0
betay (float): the beta function where the bunch is being matched, defaults to 1
alphay (float): one-half the derivative of the beta function, defaults to 0
        stdz (float): standard deviation in z-coordinate, defaults to 1
        dpop (float): standard deviation in delta-p/p0 coordinate, defaults to 0
        seed (float): seed for pseudorandom generator. Defaults to None, and is generated during initialization.
quiet (Boolean): boolean describing whether to use exact centroid injection, defaults to false.
"""
def __init__(self, npart, dist = 'Gaussian', emitx = 1e-6, emity = 1e-6, betax=1., alphax = 0.,
betay =1., alphay=0., stdz=1., dpop=0, seed = None, quiet=False):
"""
Args:
npart (int): the number of particles in the bunch
dist (string): the distribution type being used, defaults to Gaussian
emitx (float): RMS uncorrelated emittance in the x-px plane, defaults to 1 mm-mrad
emity (float): RMS uncorrelated emittance in the y-py plane, defaults to 1 mm-mrad
betax (float): the beta function where the bunch is being matched, defaults to 1
alphax (float): one-half the derivative of the beta function, defaults to 0
betay (float): the beta function where the bunch is being matched, defaults to 1
alphay (float): one-half the derivative of the beta function, defaults to 0
            stdz (float): standard deviation in z-coordinate, defaults to 1
            dpop (float): standard deviation in delta-p/p0 coordinate, defaults to 0
            seed (float): seed for pseudorandom generator. Defaults to None, and is generated during initialization.
quiet (Boolean): boolean describing whether to use exact centroid injection, defaults to false.
"""
self.npart = npart
self.dist = dist
#set emittance parameters in each plane
self.emitx = emitx
self.emity = emity
#total emittance - this varies with distribution but is useful to set a default
#it will be adjusted for specific distributions
self.emit = emitx + emity
#set Twiss parameters as "private" attributes because setters will be redefined to avoid conflicts
self._betax = betax
self._alphax = alphax
self._betay = betay
self._alphay = alphay
#use beta/alpha values to compute initial gamma
self._gammax = (1 + alphax**2) / betax
self._gammay = (1 + alphay**2) / betay
self.stdz = stdz
self.dpop = dpop
#define seed
if seed is not None:
self.seed = seed
else:
self.seed = random.seed()
#create particles array
self.particles = np.zeros((npart,7))
#define particle IDs
self.particles[:,6] = np.arange(npart)
#define quiet injection attribute
self.quiet = quiet
#Define beta and alpha properties which automatically update gamma
@property
def betax(self):
return self._betax
@betax.setter
def betax(self, bet):
if bet < 0:
raise ValueError("Beta must be larger than 0.")
self._betax = bet
self._gammax = (1 + self._alphax**2) / self._betax
@property
def betay(self):
return self._betay
    @betay.setter
def betay(self, bet):
if bet < 0:
raise ValueError("Beta must be larger than 0.")
self._betay = bet
self._gammay = (1 + self._alphay**2) / self._betay
@property
def alphax(self):
return self._alphax
@alphax.setter
def alphax(self, alph):
self._alphax = alph
self._gammax = (1 + self._alphax**2) / self._betax
@property
def alphay(self):
return self._alphay
@alphay.setter
def alphay(self, alph):
self._alphay = alph
self._gammay = (1 + self._alphay**2) / self._betay
def set_longitudinal_coordinates(self, stdz = None, dpop = None):
"""Define the arrays describing the longitudinal coordinates z, dpop"""
if stdz is not None:
self.stdz = stdz
if dpop is not None:
self.dpop = dpop
self.particles[:,4] = np.random.randn(self.npart)*self.stdz #set z coordinate
self.particles[:,5] = np.random.randn(self.npart)*self.dpop #set dpop coordinate
def set_transverse_coordinates(self, emitx = None, emity = None, betax = None, alphax = None, betay = None, alphay = None):
"""Define the arrays describing the longitudinal coordinates z, dpop"""
if betax is not None:
self.betax = betax
if alphax is not None:
self.alphax = alphax
if betay is not None:
            self.betay = betay
if alphay is not None:
self.alphay = alphay
if emitx is not None:
self.emitx = emitx
if emity is not None:
self.emity = emity
if self.dist == 'Gaussian':
self.distribute_Gaussian()
elif self.dist == 'KV':
self.distribute_KV()
def distribute_Gaussian(self):
""" Generates an uncorrelated Gaussian distribution in 4D phase space using known bunch attributes"""
sigma_x = np.sqrt(self.emitx*self._betax)
sigma_xp = np.sqrt(self.emitx*self._gammax)
sigma_y = np.sqrt(self.emity*self._betay)
sigma_yp = np.sqrt(self.emity*self._gammay)
self.particles[:,0] = np.random.randn(self.npart)*sigma_x #set x-coordinates
self.particles[:,1] = np.random.randn(self.npart)*sigma_xp #set xp-coordinates
self.particles[:,2] = np.random.randn(self.npart)*sigma_y #set y-coordinates
self.particles[:,3] = np.random.randn(self.npart)*sigma_yp #set yp-coordinates
def distribute_KV(self):
"""
Generate a KV distribution in 4D phase space using known bunch attributes. Note that the KV distribution
        uniquely characterizes the bunch given a single emittance and appropriate normalizing (Twiss) parameters.
"""
assert (self.emitx == self.emity), "For a KV distribution, the planar emittances must be equal"
#total emittance of the K-V distribution is 4 times the planar emittance
emit = 4.*self.emitx
self.emit = emit
# Generate some bounds on the transverse size to reduce waste in generating the bunch
# Use the lemming method to find the maximum y
y0 = np.sqrt(self.emit)
yMax = newton(self.whatsleft, y0)
xMax = yMax
# Generate particles by creating trials and finding particles with potential less than emittance,
# then assign the rest to momentum
ptclsMade = 0
phaseSpaceList = []
while ptclsMade < self.npart:
#Note that the particle coordinates here are distributed in normal coordinates
xTrial = 2.*(0.5 - random.random())*xMax
yTrial = 2.*(0.5 - random.random())*yMax
trialValue = self.compute_potential(xTrial, yTrial)
if trialValue < self.emit:
pMag = np.sqrt(2.*(self.emit - trialValue))
pDir = 2.*np.pi * random.random()
pxHat = pMag * np.cos(pDir)
pyHat = pMag * np.sin(pDir)
xReal = xTrial * np.sqrt(self.betax)
yReal = yTrial * np.sqrt(self.betay)
#We want to provide the user with standard (non-normal) coordinates
pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)
pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)
ptclCoords = np.array([xReal, pxReal, yReal, pyReal])
phaseSpaceList.append(ptclCoords)
ptclsMade += 1
#Add 3 more particles if creating a quiet start
if self.quiet:
self.exact_centroids(ptclCoords, phaseSpaceList)
ptclsMade += 3
self.particles[:,:4] = np.asarray(phaseSpaceList)
def print_Twiss(self):
"""Print the Twiss parameters for the lattice being used to compute the bunch coordinates"""
print("Twiss parameters in use:")
print("betax : {}".format(self._betax))
print("betay : {}".format(self._betay))
print("alphax : {}".format(self._alphax))
print("alphay : {}".format(self._alphay))
print("gammax : {}".format(self._gammax))
print("gammay : {}".format(self._gammay))
def compute_Hamiltonian(self, xHat, pxHat, yHat, pyHat):
"""Compute the Hamiltonian (C-S invariant) for the potential"""
hamiltonian = 0.5*(pxHat**2 + pyHat**2) + 0.5 *(xHat**2 + yHat**2)
return hamiltonian
def compute_potential(self, xHat, yHat):
"""Compute the general potential"""
potential = 0.5*(xHat**2 + yHat**2)
return potential
def whatsleft(self, yHat):
"""Return the difference btween the emittance and potential"""
return self.emit - self.compute_potential(0, yHat)
def exact_centroids(self, ptclCoords, phaseSpaceList):
"""Sets the centroid of the distribution in phase space to be zero - providing a quieter injection."""
translation1 = np.array([-1,1,-1,1])
translation2 = np.array([-1,-1,-1,-1])
translation3 = np.array([1,-1,1,-1])
ptcl1 = ptclCoords * translation1
ptcl2 = ptclCoords * translation2
ptcl3 = ptclCoords * translation3
phaseSpaceList.append(ptcl1)
phaseSpaceList.append(ptcl2)
phaseSpaceList.append(ptcl3)
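        # --- Editor's note: together with the original particle, the three mirrored
        # copies above have sign patterns (+,+,+,+), (-,+,-,+), (-,-,-,-) and
        # (+,-,+,-) in (x, xp, y, yp), so every coordinate sums to zero and each
        # quadruplet is exactly centred. Quick standalone check (hypothetical):
        #
        #   import numpy as np
        #   p = np.array([0.3, -1.2, 0.7, 0.4])
        #   quad = np.array([p, p*[-1, 1, -1, 1], p*[-1, -1, -1, -1], p*[1, -1, 1, -1]])
        #   assert np.allclose(quad.sum(axis=0), 0.0)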
class NonlinearBunch(StandardBunch):
"""
Derived class for generating matched distributions to a Danilov-Nagaitsev style nonlinear insert.
Attributes:
npart (int): the number of particles in the bunch
dist (string): distribution type ['KV, 'waterbag', 'Gaussian'], defaults to KV
emitx (float): RMS uncorrelated emittance in the x-px plane, defaults to 1 mm-mrad
emity (float): RMS uncorrelated emittance in the y-py plane, defaults to 1 mm-mrad
betax (float): the beta function where the bunch is being matched, defaults to 1
alphax (float): one-half the derivative of the beta function, defaults to 0
betay (float): the beta function where the bunch is being matched, defaults to 1
alphay (float): one-half the derivative of the beta function, defaults to 0
        stdz (float): standard deviation in z-coordinate, defaults to 1
        dpop (float): standard deviation in delta-p/p0 coordinate, defaults to 0
        seed (float): seed for pseudorandom generator. Defaults to None, and is generated during initialization.
quiet (Boolean): boolean describing whether to use exact centroid injection, defaults to false.
t (float): nonlinear strength parameter for the insert. Defaults to 0.1 (unitless).
c (float): the nonlinear aperture parameter (m^-1/2), defining poles in the x-axis. Defaults to 0.01.
        cutoff (float): cutoff parameter for the nonlinear Gaussian distribution, defaults to 4.
"""
def __init__(self, npart, dist = 'KV', emitx = 1e-6, emity = 1e-6, betax=1., alphax = 0.,
                 betay =1., alphay=0., stdz=1., dpop=0, seed = None, quiet=False, t = 0.1, c = 0.01, cutoff = 4):
"""
Args:
npart (int): the number of particles in the bunch
dist (string): distribution type ['KV, 'waterbag', 'Gaussian'], defaults to KV
emitx (float): RMS uncorrelated emittance in the x-px plane, defaults to 1 mm-mrad
emity (float): RMS uncorrelated emittance in the y-py plane, defaults to 1 mm-mrad
betax (float): the beta function where the bunch is being matched, defaults to 1
alphax (float): one-half the derivative of the beta function, defaults to 0
betay (float): the beta function where the bunch is being matched, defaults to 1
alphay (float): one-half the derivative of the beta function, defaults to 0
            stdz (float): standard deviation in z-coordinate, defaults to 1
            dpop (float): standard deviation in delta-p/p0 coordinate, defaults to 0
            seed (float): seed for pseudorandom generator. Defaults to None, and is generated during initialization.
quiet (Boolean): boolean describing whether to use exact centroid injection, defaults to false.
t (float): nonlinear strength parameter for the insert. Defaults to 0.1 (unitless).
c (float): the nonlinear aperture parameter (m^-1/2), defining poles in the x-axis. Defaults to 0.01.
            cutoff (float): cutoff parameter for the nonlinear Gaussian distribution, defaults to 4.
"""
        super(NonlinearBunch,self).__init__(npart, dist, emitx, emity, betax, alphax, betay, alphay, stdz, dpop, seed, quiet)
self._t = t
self._c = c
self._cutoff = cutoff
#Define c property which maintains a positive definite value
@property
def c(self):
return self._c
@c.setter
def c(self, cval):
if cval < 0:
raise ValueError("Aperture parameter c must be larger than 0.")
self._c = cval
#Define t property which maintains a positive definite value
@property
def t(self):
return self._t
@t.setter
def t(self, tval):
if tval < 0:
raise ValueError("Nonlinear strength parameter t must be larger than 0.")
self._t = tval
#Define cutoff property which maintains a positive definite value
@property
def cutoff(self):
return self._cutoff
@cutoff.setter
def cutoff(self, cutoff):
        if cutoff < 0:
raise ValueError("Cutoff parameter for distributions must be larger than 0.")
self._cutoff = cutoff
#Define beta and alpha properties which automatically update gamma
@property
def betax(self):
return self._betax
@betax.setter
def betax(self, bet):
if bet < 0:
raise ValueError("Beta must be larger than 0.")
self._betax = bet
self._gammax = (1 + self._alphax**2) / self._betax
#betax should be the same as betay
self._betay = bet
        self._gammay = (1 + self._alphay**2) / self._betay
@property
def betay(self):
return self._betay
    @betay.setter
def betay(self, bet):
if bet < 0:
raise ValueError("Beta must be larger than 0.")
self._betay = bet
self._gammay = (1 + self._alphay**2) / self._betay
#betax and betay should be the same
self._betax = bet
self._gammax = (1 + self._alphax**2) / self._betax
@property
def alphax(self):
return self._alphax
@alphax.setter
def alphax(self, alph):
self._alphax = alph
self._gammax = (1 + self._alphax**2) / self._betax
@property
def alphay(self):
return self._alphay
@alphay.setter
def alphay(self, alph):
self._alphay = alph
self._gammay = (1 + self._alphay**2) / self._betay
def set_transverse_coordinates(self, emitx = None, emity = None, betax = None, alphax = None, cutoff = None):
"""Define the arrays describing the longitudinal coordinates z, dpop"""
#The nonlinear insert requires equal beta functions, therefore we only permit setting betax
if betax is not None:
self.betax = betax
if alphax is not None:
self.alphax = alphax
if emitx is not None:
self.emitx = emitx
if emity is not None:
self.emity = emity
if cutoff is not None:
self.cutoff = cutoff
if self.dist == 'KV':
self.distribute_KV()
elif self.dist == 'waterbag':
self.distribute_waterbag()
elif self.dist == 'Gaussian':
self.distribute_Gaussian()
def distribute_KV(self):
"""
Generates a generalized KV distribution in 4D phase space using known bunch attributes.
        Note that the KV distribution uniquely characterizes the bunch given a single emittance
and appropriate normalizing (Twiss) parameters. In the nonlinear case, there is a different
relationship between the total emittance and the planar emittance.
"""
assert (self.emitx == self.emity), "For a KV distribution, the planar emittances must be equal"
#total emittance of the K-V distribution is equal to the planar emittance
#this differs from the linear K-V distribution
emit = self.emitx
self.emit = emit
# Generate some bounds on the transverse size to reduce waste in generating the bunch
# Use the lemming method to find the maximum y
y0 = np.sqrt(self.emit)
yMax = newton(self.whatsleft, y0)
#bounding the horizontal coordinate is difficult, but it should not exceed the pole
xMax = self.c
# Generate particles by creating trials and finding particles with potential less than emittance,
# then assign the rest to momentum
ptclsMade = 0
phaseSpaceList = []
while ptclsMade < self.npart:
#Note that the particle coordinates here are distributed in normal coordinates
xTrial = 2.*(0.5 - random.random())*xMax
yTrial = 2.*(0.5 - random.random())*yMax
trialValue = self.compute_potential(xTrial, yTrial)
if trialValue < self.emit:
pMag = np.sqrt(2.*(self.emit - trialValue))
pDir = 2.*np.pi * random.random()
pxHat = pMag * np.cos(pDir)
pyHat = pMag * np.sin(pDir)
xReal = xTrial * np.sqrt(self.betax)
yReal = yTrial * np.sqrt(self.betay)
#We want to provide the user with standard (non-normal) coordinates
pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)
pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)
ptclCoords = np.array([xReal, pxReal, yReal, pyReal])
phaseSpaceList.append(ptclCoords)
ptclsMade += 1
#Add 3 more particles if creating a quiet start
if self.quiet:
self.exact_centroids(ptclCoords, phaseSpaceList)
ptclsMade += 3
self.particles[:,:4] = np.asarray(phaseSpaceList)
def distribute_waterbag(self):
"""
Generates a Waterbag distribution tailored to the elliptic potential. This version uses
        a modified Jacobian factor derived by <NAME> to produce a smoothly varying distribution
        at the origin. A traditional Waterbag scheme produces an unphysical peak at the origin.
        The method of generating particle coordinates remains similar to that used for the K-V distribution.
"""
# Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum
ptclsMade = 0
phaseSpaceList = []
while ptclsMade < self.npart:
ranU = 0.0
while ranU <= 0:
ranU = random.random()
# Generate some bounds on the transverse size to reduce waste in generating the bunch
# Use the lemming method to find the maximum y
trialH = np.sqrt(ranU)
newH = self.emit*trialH
y0 = np.sqrt(newH)
#self.emittance = newH
yMax = newton(self.whatsleft, y0)
#bounding the horizontal coordinate is difficult, but it should not exceed the pole
xMax = self.c
#xMax = yMax
trialValue = 1e10
while trialValue >= newH:
xTrial = 2.*(0.5 - random.random())*xMax
yTrial = 2.*(0.5 - random.random())*yMax
trialValue = self.compute_potential(xTrial, yTrial)
initialValue = trialValue
if initialValue < newH:
pMag = np.sqrt(2*(newH - initialValue))
pDir = 2*np.pi* random.random()
pxHat = pMag * np.cos(pDir)
pyHat = pMag * np.sin(pDir)
xReal = xTrial * np.sqrt(self.betax)
yReal = yTrial * np.sqrt(self.betay)
pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)
pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)
ptclCoords = np.array([xReal, pxReal, yReal, pyReal])
phaseSpaceList.append(ptclCoords)
ptclsMade += 1
#Add 3 more particles if creating a quiet start
if self.quiet:
self.exact_centroids(ptclCoords, phaseSpaceList)
ptclsMade += 3
else:
print("Initial value generated exceeds limiting H. Sampling new value.")
self.particles[:,:4] = np.asarray(phaseSpaceList)
def distribute_Gaussian(self):
"""
Generates a truncated Gaussian distribution in H-space for the elliptic potential according to:
- P(H) = exp(-H/eps) for (0 < H/eps < L],
where H is the nonlinear Hamiltonian, eps is the emittance, and L is the cutoff parameter.
        The method of generating particle coordinates remains similar to that used for the K-V distribution.
"""
# Copy the emittance value temporarily. It will be needed to reset the bunch attribute after fitting.
bunch_emittance = self.emit
# Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum
ptclsMade = 0
phaseSpaceList = []
while ptclsMade < self.npart:
trialH = 1e10 #1.0e10
while trialH > self.cutoff: #Test against cutoff value
ranU1 = 0.0
ranU2 = 0.0
while ranU1*ranU2 <= 0:
ranU1 = random.random()
ranU2 = random.random()
trialH = -1.0*np.log(ranU1*ranU2) #Generate an Erlang distribution in h
# Generate some bounds on the transverse size to reduce waste in generating the bunch
# Use the lemming method to find the maximum y
            newH = bunch_emittance*trialH #must modify the original bunch emittance here
y0 = np.sqrt(newH)
self.emit = newH #temporarily reset emittance for computing ymax on a particle by particle basis
yMax = newton(self.whatsleft, y0)
#bounding the horizontal coordinate is difficult, but it should not exceed the pole
xMax = self.c
#xMax = yMax
trialValue = 1e10
while trialValue >= newH:
xTrial = 2.*(0.5 - random.random())*xMax
yTrial = 2.*(0.5 - random.random())*yMax
trialValue = self.compute_potential(xTrial, yTrial)
initialValue = trialValue
if initialValue < newH:
pMag = np.sqrt(2*(newH - initialValue))
pDir = 2*np.pi* random.random()
pxHat = pMag * np.cos(pDir)
pyHat = pMag * np.sin(pDir)
xReal = xTrial * np.sqrt(self.betax)
yReal = yTrial * np.sqrt(self.betay)
pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)
pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)
ptclCoords = np.array([xReal, pxReal, yReal, pyReal])
phaseSpaceList.append(ptclCoords)
ptclsMade += 1
#Add 3 more particles if creating a quiet start
if self.quiet:
self.exact_centroids(ptclCoords, phaseSpaceList)
ptclsMade += 3
else:
print("Initial value generated exceeds limiting H. Sampling new value.")
#Completed distribution, so reset emittance
self.emit = bunch_emittance
self.particles[:,:4] = np.asarray(phaseSpaceList)
def compute_Hamiltonian(self, xHat, pxHat, yHat, pyHat):
"""Compute the Hamiltonian (1st invariant) for the integrable elliptic potential"""
quadratic = 0.5 * (pxHat**2 + pyHat**2)
hamiltonian = quadratic + self.compute_potential(xHat, yHat)
return hamiltonian
def compute_potential(self, xHat, yHat):
"""Compute the general potential for elliptic element with strength t"""
quadratic = 0.5 * (xHat**2 + yHat**2)
#compute default prefactors
elliptic = 0.
kfac = 1.
#only recompute if t > 0
if self._t != 0.:
xN = xHat / self._c
yN = yHat / self._c
# Elliptic coordinates
u = ( np.sqrt((xN + 1.)**2 + yN**2) +
np.sqrt((xN - 1.)**2 + yN**2) )/2.
v = ( np.sqrt((xN + 1.)**2 + yN**2) -
np.sqrt((xN - 1.)**2 + yN**2) )/2.
            f2u = u * np.sqrt(u**2 - 1.) * np.arccosh(u)
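# --- Editor's note: minimal usage sketch for the classes defined above (added by
# the editor, not part of the original module). A Gaussian bunch generated this
# way should roughly reproduce the requested RMS emittance, up to sampling noise.
if __name__ == "__main__":
    demo_bunch = StandardBunch(npart=20000, dist='Gaussian', emitx=1e-6, emity=1e-6,
                               betax=2.0, alphax=0., betay=2.0, alphay=0.)
    demo_bunch.set_longitudinal_coordinates(stdz=0.5, dpop=1e-3)
    demo_bunch.set_transverse_coordinates()
    x, xp = demo_bunch.particles[:, 0], demo_bunch.particles[:, 1]
    # statistical RMS emittance: sqrt of the determinant of the 2x2 covariance matrix
    emit_estimate = np.sqrt(np.linalg.det(np.cov(x, xp)))
    print("requested emitx = 1e-06, estimated emitx = {:.3g}".format(emit_estimate))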
#!/usr/bin/env python
#
# statesp_test.py - test state space class
# RMM, 30 Mar 2011 (based on TestStateSp from v0.4a)
import unittest
import numpy as np
from numpy.linalg import solve
from scipy.linalg import eigvals, block_diag
from control import matlab
from control.statesp import StateSpace, _convertToStateSpace
from control.xferfcn import TransferFunction
from control.lti import evalfr
from control.exception import slycot_check
class TestStateSpace(unittest.TestCase):
"""Tests for the StateSpace class."""
def setUp(self):
"""Set up a MIMO system to test operations on."""
A = [[-3., 4., 2.], [-1., -3., 0.], [2., 5., 3.]]
B = [[1., 4.], [-3., -3.], [-2., 1.]]
C = [[4., 2., -3.], [1., 4., 3.]]
D = [[-2., 4.], [0., 1.]]
a = [[4., 1.], [2., -3]]
b = [[5., 2.], [-3., -3.]]
c = [[2., -4], [0., 1.]]
d = [[3., 2.], [1., -1.]]
self.sys1 = StateSpace(A, B, C, D)
self.sys2 = StateSpace(a, b, c, d)
def testPole(self):
"""Evaluate the poles of a MIMO system."""
p = np.sort(self.sys1.pole())
true_p = np.sort([3.34747678408874,
-3.17373839204437 + 1.47492908003839j,
-3.17373839204437 - 1.47492908003839j])
np.testing.assert_array_almost_equal(p, true_p)
def testEmptyZero(self):
"""Test to make sure zero() works with no zeros in system"""
sys = _convertToStateSpace(TransferFunction([1], [1,2,1]))
np.testing.assert_array_equal(sys.zero(), np.array([]))
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testMIMOZero_nonsquare(self):
"""Evaluate the zeros of a MIMO system."""
z = np.sort(self.sys1.zero())
true_z = np.sort([44.41465, -0.490252, -5.924398])
np.testing.assert_array_almost_equal(z, true_z)
A = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0],
[0, 0, 0,-4, 0, 0],
[0, 0, 0, 0,-1, 0],
[0, 0, 0, 0, 0, 3]])
B = np.array([[0,-1],
[-1,0],
[1,-1],
[0, 0],
[0, 1],
[-1,-1]])
C = np.array([[1, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 1]])
D = np.zeros((3,2))
sys = StateSpace(A, B, C, D)
z = np.sort(sys.zero())
true_z = np.sort([2., -1.])
np.testing.assert_array_almost_equal(z, true_z)
def testAdd(self):
"""Add two MIMO systems."""
A = [[-3., 4., 2., 0., 0.], [-1., -3., 0., 0., 0.],
[2., 5., 3., 0., 0.], [0., 0., 0., 4., 1.], [0., 0., 0., 2., -3.]]
B = [[1., 4.], [-3., -3.], [-2., 1.], [5., 2.], [-3., -3.]]
C = [[4., 2., -3., 2., -4.], [1., 4., 3., 0., 1.]]
D = [[1., 6.], [1., 0.]]
sys = self.sys1 + self.sys2
np.testing.assert_array_almost_equal(sys.A, A)
np.testing.assert_array_almost_equal(sys.B, B)
np.testing.assert_array_almost_equal(sys.C, C)
np.testing.assert_array_almost_equal(sys.D, D)
def testSub(self):
"""Subtract two MIMO systems."""
A = [[-3., 4., 2., 0., 0.], [-1., -3., 0., 0., 0.],
[2., 5., 3., 0., 0.], [0., 0., 0., 4., 1.], [0., 0., 0., 2., -3.]]
B = [[1., 4.], [-3., -3.], [-2., 1.], [5., 2.], [-3., -3.]]
C = [[4., 2., -3., -2., 4.], [1., 4., 3., 0., -1.]]
D = [[-5., 2.], [-1., 2.]]
sys = self.sys1 - self.sys2
np.testing.assert_array_almost_equal(sys.A, A)
np.testing.assert_array_almost_equal(sys.B, B)
np.testing.assert_array_almost_equal(sys.C, C)
np.testing.assert_array_almost_equal(sys.D, D)
def testMul(self):
"""Multiply two MIMO systems."""
A = [[4., 1., 0., 0., 0.], [2., -3., 0., 0., 0.], [2., 0., -3., 4., 2.],
[-6., 9., -1., -3., 0.], [-4., 9., 2., 5., 3.]]
B = [[5., 2.], [-3., -3.], [7., -2.], [-12., -3.], [-5., -5.]]
C = [[-4., 12., 4., 2., -3.], [0., 1., 1., 4., 3.]]
D = [[-2., -8.], [1., -1.]]
sys = self.sys1 * self.sys2
np.testing.assert_array_almost_equal(sys.A, A)
np.testing.assert_array_almost_equal(sys.B, B)
np.testing.assert_array_almost_equal(sys.C, C)
np.testing.assert_array_almost_equal(sys.D, D)
def testEvalFr(self):
"""Evaluate the frequency response at one frequency."""
A = [[-2, 0.5], [0.5, -0.3]]
B = [[0.3, -1.3], [0.1, 0.]]
C = [[0., 0.1], [-0.3, -0.2]]
D = [[0., -0.8], [-0.3, 0.]]
sys = StateSpace(A, B, C, D)
resp = [[4.37636761487965e-05 - 0.0152297592997812j,
-0.792603938730853 + 0.0261706783369803j],
[-0.331544857768052 + 0.0576105032822757j,
0.128919037199125 - 0.143824945295405j]]
# Correct versions of the call
np.testing.assert_almost_equal(evalfr(sys, 1j), resp)
np.testing.assert_almost_equal(sys._evalfr(1.), resp)
# Deprecated version of the call (should generate warning)
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sys.evalfr(1.)
assert len(w) == 1
assert issubclass(w[-1].category, PendingDeprecationWarning)
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testFreqResp(self):
"""Evaluate the frequency response at multiple frequencies."""
A = [[-2, 0.5], [0.5, -0.3]]
B = [[0.3, -1.3], [0.1, 0.]]
C = [[0., 0.1], [-0.3, -0.2]]
D = [[0., -0.8], [-0.3, 0.]]
sys = StateSpace(A, B, C, D)
truemag = [[[0.0852992637230322, 0.00103596611395218],
[0.935374692849736, 0.799380720864549]],
[[0.55656854563842, 0.301542699860857],
[0.609178071542849, 0.0382108097985257]]]
truephase = [[[-0.566195599644593, -1.68063565332582],
[3.0465958317514, 3.14141384339534]],
[[2.90457947657161, 3.10601268291914],
[-0.438157380501337, -1.40720969147217]]]
trueomega = [0.1, 10.]
mag, phase, omega = sys.freqresp(trueomega)
np.testing.assert_almost_equal(mag, truemag)
np.testing.assert_almost_equal(phase, truephase)
np.testing.assert_equal(omega, trueomega)
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testMinreal(self):
"""Test a minreal model reduction"""
#A = [-2, 0.5, 0; 0.5, -0.3, 0; 0, 0, -0.1]
A = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]
#B = [0.3, -1.3; 0.1, 0; 1, 0]
B = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]
#C = [0, 0.1, 0; -0.3, -0.2, 0]
C = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]
#D = [0 -0.8; -0.3 0]
D = [[0., -0.8], [-0.3, 0.]]
# sys = ss(A, B, C, D)
sys = StateSpace(A, B, C, D)
sysr = sys.minreal()
self.assertEqual(sysr.states, 2)
self.assertEqual(sysr.inputs, sys.inputs)
self.assertEqual(sysr.outputs, sys.outputs)
np.testing.assert_array_almost_equal(
eigvals(sysr.A), [-2.136154, -0.1638459])
def testAppendSS(self):
"""Test appending two state-space systems"""
A1 = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]
B1 = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]
C1 = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]
D1 = [[0., -0.8], [-0.3, 0.]]
A2 = [[-1.]]
B2 = [[1.2]]
C2 = [[0.5]]
D2 = [[0.4]]
A3 = [[-2, 0.5, 0, 0], [0.5, -0.3, 0, 0], [0, 0, -0.1, 0],
[0, 0, 0., -1.]]
B3 = [[0.3, -1.3, 0], [0.1, 0., 0], [1.0, 0.0, 0], [0., 0, 1.2]]
C3 = [[0., 0.1, 0.0, 0.0], [-0.3, -0.2, 0.0, 0.0], [0., 0., 0., 0.5]]
D3 = [[0., -0.8, 0.], [-0.3, 0., 0.], [0., 0., 0.4]]
sys1 = StateSpace(A1, B1, C1, D1)
sys2 = StateSpace(A2, B2, C2, D2)
sys3 = StateSpace(A3, B3, C3, D3)
sys3c = sys1.append(sys2)
np.testing.assert_array_almost_equal(sys3.A, sys3c.A)
np.testing.assert_array_almost_equal(sys3.B, sys3c.B)
| np.testing.assert_array_almost_equal(sys3.C, sys3c.C)
| numpy.testing.assert_array_almost_equal
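A brief sketch of the structure testAdd above expects from sys1 + sys2: the parallel interconnection of two state-space systems has a block-diagonal A, stacked B, concatenated C, and summed D. Plain NumPy/SciPy reproduces the reference matrices without slycot (the matrix names below mirror setUp; this is an illustration, not the control library's implementation):

import numpy as np
from scipy.linalg import block_diag

A1 = np.array([[-3., 4., 2.], [-1., -3., 0.], [2., 5., 3.]])
B1 = np.array([[1., 4.], [-3., -3.], [-2., 1.]])
C1 = np.array([[4., 2., -3.], [1., 4., 3.]])
D1 = np.array([[-2., 4.], [0., 1.]])
A2 = np.array([[4., 1.], [2., -3.]])
B2 = np.array([[5., 2.], [-3., -3.]])
C2 = np.array([[2., -4.], [0., 1.]])
D2 = np.array([[3., 2.], [1., -1.]])

A = block_diag(A1, A2)    # the two state vectors do not couple
B = np.vstack([B1, B2])   # both subsystems are driven by the same input
C = np.hstack([C1, C2])   # outputs of the two subsystems add
D = D1 + D2               # feedthrough terms add as well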
# should work even without -*-
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# ONNX_management.py
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Copyright (C) 2019-2020 University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import onnx
from onnx import numpy_helper
from onnx import helper, shape_inference
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import sys
import pandas as pd
from collections import OrderedDict
import logging
import PULP_node as pulp
from ONNX_management import ONNX_management
class NEMO_onnx(ONNX_management):
# Used to manage the ONNX files. Currently supports convolutions (pointwise and depthwise), pooling, fully connected layers, and ReLU.
def __init__(self, onnx, platform):
layers_accepted = ['Conv', 'Pad', 'Mul', 'Add', 'Div', 'Constant', 'AveragePool', 'GlobalAveragePool', 'MaxPool', 'Cast', 'Clip', 'Floor', 'Flatten', 'Gemm', 'MatMul', 'Shape', 'Gather', 'Unsqueeze', 'Concat', 'Reshape', 'Sigmoid', 'LogSoftmax']
layers_neglected = ['Cast', 'Flatten', 'Shape', 'Gather', 'Unsqueeze', 'Concat', 'Reshape', 'Sigmoid', 'LogSoftmax', 'Clip']
layers_to_node = ['AveragePool', 'MaxPool', 'Conv', 'Gemm', 'MatMul', 'GlobalAveragePool']
backend = ['ConvBNRelu', 'ConvRelu', 'ConvDWBNRelu', 'ConvDWRelu', 'AveragePool', 'GlobalAveragePool', 'MaxPool', 'MatMulBNRelu', 'GemmRelu', 'Gemm', 'MatMul', 'Add','QAddRelu',\
'PadConvBNRelu', 'PadConvDWBNRelu', 'PadAveragePool', 'PadGlobalAveragePool', 'PadMaxPool', 'PadMatMulBNRelu', 'PadGemmRelu', 'PadGemm', 'PadQAddRelu',\
'PadPadConvBNRelu', 'PadPadConvDWBNRelu', 'PadPadAveragePool', 'PadPadGlobalAveragePool', 'PadPadMaxPool', 'PadPadMatMulBNRelu', 'PadPadGemmRelu', 'PadPadGemm', 'PadPadQAddRelu']
rules = {}
rules['Relu'] = 'To define'#'Mul-Div-Floor-Clip'
rules['BNRelu_1'] = 'Mul-Add-Mul-Div-Floor-Clip'
rules['BNRelu_2'] = 'Mul-Add-Cast-Mul-Div-Cast-Clip'
rules['BNRelu_3'] = 'Mul-Add-Cast-Mul-Div-Cast-Cast-Cast-Clip'
rules['QAdd'] = 'Mul-Add-Mul-Div-Floor'
rules['Pad'] = 'Pad'
ONNX_management.__init__(self, onnx, platform, backend, rules, layers_accepted, layers_neglected, layers_to_node)
def apply_rule(self, node, rule):
pulp_node = pulp.node_element()
out = node.output[0]
nodes_to_search = rule.split('-')
blocks_to_search = len(nodes_to_search)
i = 0
for key, value in self.rules.items():
if value == rule:
break
pulp_node.add_parameter('name', key.split('_')[0])
if rule in [self.rules['Relu'], self.rules['BNRelu_1'], self.rules['BNRelu_2'], self.rules['BNRelu_3']]:
for node_iterating in (self.model.graph.node):
if (out == node_iterating.output[0] or i > 0) and node_iterating.op_type == nodes_to_search[i] and i < blocks_to_search:
if i == 0:
inp = []
for input_i in node_iterating.input:
if 'weight' not in input_i and 'bn' not in input_i and 'BN' not in input_i and 'kappa' not in input_i and 'lambda' not in input_i:
if input_i not in [node.output[0] for node in self.model.graph.node if node.op_type in 'Constant']:
inp.append(input_i)
pulp_node.add_parameter('input_index', inp[0])
elif i == (blocks_to_search-1):
pulp_node.add_parameter('output_index',node_iterating.output[0])
if node_iterating.op_type in ['Mul', 'Add', 'Div']:
const = self.search_constant(node_iterating.input[1], self.model)
if isinstance(const, str):
const = self.search_constant(node_iterating.input[0], self.model)
assert (not(isinstance(const, str))), f"Error in searching BNRelu parameters"
if node_iterating.op_type == 'Mul' and rule in [self.rules['BNRelu_1'], self.rules['BNRelu_2'], self.rules['BNRelu_3']] and i == 0:
k = const
elif node_iterating.op_type == 'Mul' and rule in [self.rules['BNRelu_1'], self.rules['BNRelu_2'], self.rules['BNRelu_3']]:
outmul = const
pulp_node.add_parameter('k', k*outmul)
pulp_node.add_parameter('outmul', 1)
pulp_node.add_parameter('lambda', l*outmul)
elif node_iterating.op_type == 'Mul' and rule == self.rules['Relu']:
pulp_node.add_parameter('outmul', const)
elif node_iterating.op_type == 'Add':
l = const
elif node_iterating.op_type == 'Div':
try:
const[0]
pulp_node.add_parameter('outshift',round(np.log2(const[0])))
except:
pulp_node.add_parameter('outshift',round(np.log2(const)))
elif node_iterating.op_type in ['Clip']:
attributes_names = [attribute.name for attribute in node_iterating.attribute]
for attribute in node_iterating.attribute:
if attribute.name == 'out_bits':
pulp_node.add_parameter('out_activation_bits', attribute.i)
if node_iterating.op_type in '.'.join([*self.rules.values()]):
i+=1
if i >= blocks_to_search:
break
elif rule == self.rules['QAdd']:
first_node_found = 0
for node_iterating in (self.model.graph.node):
if out == node_iterating.output[0] and node_iterating.op_type == nodes_to_search[i] and i < blocks_to_search:
inp = []
for input_i in node_iterating.input:
if 'weight' not in input_i and 'bn' not in input_i and 'BN' not in input_i and 'kappa' not in input_i and 'lambda' not in input_i:
if input_i not in [node.output[0] for node in self.model.graph.node if node.op_type in 'Constant']:
inp.append(input_i)
input_index = inp[0]
i+=1
first_node_found = 1
const = self.search_constant(node_iterating.input[1], self.model)
if isinstance(const, str):
const = self.search_constant(node_iterating.input[0], self.model)
assert (not(isinstance(const, str))), f"Error in searching Inmul1"
try:
const = const[0]
except:
pass
inmul1 = const
elif node_iterating.op_type == 'Add' and i < blocks_to_search and first_node_found == 1:
pulp_node = self.create_node(pulp.node_element(), 0, node_iterating, self.model)
pulp_node.add_parameter('input_index', inp[0])
pulp_node.add_parameter('inmul1', const)
i+=2
elif node_iterating.op_type == nodes_to_search[i] and i < blocks_to_search and first_node_found == 1:
i+=1
if node_iterating.op_type == 'Div':
const = self.search_constant(node_iterating.input[1], self.model)
if isinstance(const, str):
const = self.search_constant(node_iterating.input[0], self.model)
assert (not(isinstance(const, str))), f"Error in searching BNRelu parameters"
try:
const[0]
pulp_node.add_parameter('outshift',round(np.log2(const[0])))
except:
pulp_node.add_parameter('outshift',round(
| np.log2(const)
| numpy.log2
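The BNRelu/QAdd rules above are dash-separated chains of ONNX op types, and apply_rule walks the graph while advancing through that chain. A minimal, hypothetical sketch of the matching idea (match_rule is not part of ONNX_management; nodes are assumed to expose op_type and output like onnx NodeProto):

def match_rule(nodes, start_output, rule):
    """Return the list of nodes matching the dash-separated rule, or None."""
    expected = rule.split('-')        # e.g. 'Mul-Add-Mul-Div-Floor-Clip'
    matched, i, started = [], 0, False
    for node in nodes:
        if not started and node.output[0] != start_output:
            continue                  # skip until the chain's first output is reached
        started = True
        if i < len(expected) and node.op_type == expected[i]:
            matched.append(node)
            i += 1
        if i == len(expected):
            return matched            # full chain found
    return None                       # chain broken or never started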
import numpy as np
from scipy.stats import gaussian_kde
import matplotlib
import matplotlib.pyplot as plt
def violin_plot(ax, data, pos, bp=False, fc='b'):
'''
create violin plots on an axis
'''
dist = max(pos)-min(pos)
w = min(0.15*max(dist,1.0),0.5)
i = -1
for d,p in zip(data,pos):
i += 1
d_ = d[~np.isnan(d)]
if d_.shape[0] < 2:
continue
try:
k = gaussian_kde(d_) #calculates the kernel density
except:
continue
m = k.dataset.min() #lower bound of violin
M = k.dataset.max() #upper bound of violin
x = np.arange(m,M,(M-m)/100.) # support for violin
v = k.evaluate(x) #violin profile (density curve)
v = v/v.max()*w #scaling the violin to the available space
_fc = fc[i] if isinstance(fc, list) else fc
ax.fill_betweenx(x,p,v+p,facecolor=_fc,alpha=0.3)
ax.fill_betweenx(x,p,-v+p,facecolor=_fc,alpha=0.3)
if bp:
ax.boxplot(data,positions = pos)#,notch=1,positions=pos,vert=1)
def box_plot(ax, data, pos, fc='b', notch=False):
'''
create box plot using standardized framework
'''
dist = max(pos)-min(pos)
w = min(0.15*max(dist,1.0),0.75)
i = -1
for d,p in zip(data,pos):
i += 1
d_ = d[~np.isnan(d)]
if d_.shape[0] < 2:
continue
_fc = fc[i] if isinstance(fc, list) else fc
medianprops=dict(linestyle='-', linewidth=3, color='k')
capprops=dict(linestyle='-', color=_fc)
boxprops=dict(linestyle='-', color=_fc)
whiskerprops=dict(linestyle='--', color=_fc)
ax.boxplot(d_, positions=[p], widths=[w], notch=notch, medianprops=medianprops, boxprops=boxprops, capprops=capprops, whiskerprops=whiskerprops)
def qq_plot(pvals, ax=None, title=None, frm='png', dpi=150, fname=None, logscale=False):
'''
create a quantile-quantile (Q-Q) plot for the given p-values
'''
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if title is not None:
ax.set_title(title)
ax.set_ylabel("Oberserved")
ax.set_xlabel("Expected")
exp = np.linspace(0, 1, num=pvals.shape[0])
if logscale and pvals.min() == 0:
idx0 = (pvals == 0)
pvals[idx0] = pvals[~idx0].min() / 2
if logscale:
ax.plot(-np.log10(exp), -np.log10(np.sort(pvals)), 'bo')
else:
ax.plot(exp, np.sort(pvals), 'bo')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ml = max(xlim[1], ylim[1])
ax.plot([0, ml], [0, ml], 'r--')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if fname is not None:
plt.savefig(fname, dpi=dpi, format=frm)
def dist_overview(data, ax=None, fname=None, format='pdf', log=False,
axis=0, sort=False):
"""
Create a distribution overview plot
data: measurements x samples
ax: axis object (None)
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if log:
llq = np.percentile(
| np.log10(data + 1)
| numpy.log10
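A minimal usage sketch for the violin_plot defined above, assuming that function is in scope; the data are synthetic normal samples and the colour list is arbitrary:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
data = [rng.normal(loc=m, scale=1.0, size=200) for m in (0.0, 1.0, 2.5)]
pos = [1, 2, 3]

fig, ax = plt.subplots()
violin_plot(ax, data, pos, bp=True, fc=['b', 'g', 'r'])  # violins with overlaid box plots
ax.set_xlim(0, 4)
plt.show()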
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
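# Illustrative helper, not part of the original test module: check that _aligned_zeros
# honours the requested alignment. (Avoiding alignment to 2*align is what the docstring
# above aims for; it is usually, though not provably always, satisfied.)
def _demo_aligned_zeros(align=64):
    a = _aligned_zeros(16, dtype=np.float64, align=align)
    ptr = a.__array_interface__['data'][0]
    assert ptr % align == 0            # the requested alignment holds
    return ptr % (2 * align)           # typically non-zero, per the docstring's intent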
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
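# Illustrative helper, not one of the original tests: the writeable flag mirrors the
# mutability of the underlying buffer, as exercised in TestFlags above.
def _demo_buffer_writeable():
    ro = np.frombuffer(b'\x00' * 8, dtype=np.uint8)              # bytes -> read-only view
    assert not ro.flags.writeable
    rw = np.frombuffer(bytearray(b'\x00' * 8), dtype=np.uint8)   # bytearray -> writeable
    rw.setflags(write=False)
    rw.setflags(write=True)                                      # allowed: buffer is mutable
    return ro, rw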
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 45341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def test_jagged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = np.array([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_jagged_shape_object(self):
# The jagged dimension of a list is turned into an object array
a = np.array([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2], [3, 3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(ValueError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
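# Illustrative helper, not one of the original tests: astype can re-order the bytes of a
# structured array's fields while preserving the values, as in test_casting above.
def _demo_structured_byteorder():
    a = np.array([(1,)], dtype=[('a', '<i4')])
    b = a.astype([('a', '>i4')])
    assert b['a'][0] == a['a'][0]     # values compare equal across byte orders
    assert b.dtype != a.dtype         # but the dtypes differ ('<i4' vs '>i4')
    return b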
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible(object):
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, unicode)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhm')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhm')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
assert_raises(TypeError, oned.choose,np.void(0), [oned])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
c.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser(object):
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
list(map(np.random.shuffle, d1))  # materialise the map so each row is actually shuffled
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p =
|
np.partition(d, (0, 3))
|
numpy.partition
|
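# Illustrative aside, not part of the dataset row above: a minimal sketch of the
# numpy.partition call the completion fills in, using a tuple of kth indices.
# The array below is hypothetical example data.
import numpy as np
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
# p[0] is the minimum and p[3] the maximum; the middle entries (2 and 3) may
# appear in either order but always lie between them.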
import numpy as np
import pytest
import scipy
import tensorflow as tf
import gpflow
import gpflow.inducing_variables.mo_inducing_variables as mf
import gpflow.kernels.mo_kernels as mk
from gpflow.conditionals import sample_conditional
from gpflow.conditionals.util import fully_correlated_conditional, fully_correlated_conditional_repeat, sample_mvn
from gpflow.inducing_variables import InducingPoints
from gpflow.kernels import SquaredExponential
from gpflow.likelihoods import Gaussian
from gpflow.models import SVGP
from gpflow.config import default_jitter, default_float
from gpflow.utilities import set_trainable
float_type = default_float()
rng = np.random.RandomState(99201)
# ------------------------------------------
# Helpers
# ------------------------------------------
def predict(model, Xnew, full_cov, full_output_cov):
m, v = model.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
return [m, v]
def predict_all(models, Xnew, full_cov, full_output_cov):
"""
Returns the mean and variance of f(Xnew) for each model in `models`.
"""
ms, vs = [], []
for model in models:
m, v = predict(model, Xnew, full_cov, full_output_cov)
ms.append(m)
vs.append(v)
return ms, vs
def assert_all_array_elements_almost_equal(arr, decimal):
"""
Check if consecutive elements of `arr` are almost equal.
"""
for i in range(len(arr) - 1):
np.testing.assert_allclose(arr[i], arr[i + 1], atol=1e-5)
def check_equality_predictions(data, models, decimal=3):
"""
Executes a couple of checks to compare the equality of predictions
of different models. The models should be configured with the same
training data (X, Y). The following checks are done:
- check if log_likelihood is (almost) equal for all models
- check if predicted mean is (almost) equal
- check if predicted variance is (almost) equal.
All possible variances over the inputs and outputs are calculated
and equality is checked.
- check if variances within model are consistent. Parts of the covariance
matrices should overlap, and this is tested.
"""
log_likelihoods = [m.log_likelihood(data) for m in models]
# Check equality of log likelihood
assert_all_array_elements_almost_equal(log_likelihoods, decimal=5)
# Predict: full_cov = True and full_output_cov = True
means_tt, vars_tt = predict_all(models, Data.Xs, full_cov=True, full_output_cov=True)
# Predict: full_cov = True and full_output_cov = False
means_tf, vars_tf = predict_all(models, Data.Xs, full_cov=True, full_output_cov=False)
# Predict: full_cov = False and full_output_cov = True
means_ft, vars_ft = predict_all(models, Data.Xs, full_cov=False, full_output_cov=True)
# Predict: full_cov = False and full_output_cov = False
means_ff, vars_ff = predict_all(models, Data.Xs, full_cov=False, full_output_cov=False)
# check equality of all the means
all_means = means_tt + means_tf + means_ft + means_ff
assert_all_array_elements_almost_equal(all_means, decimal=decimal)
# check equality of all the variances within a category
# (e.g. full_cov=True and full_output_cov=False)
all_vars = [vars_tt, vars_tf, vars_ft, vars_ff]
_ = [assert_all_array_elements_almost_equal(var, decimal=decimal) for var in all_vars]
# Here we check that the variance in different categories are equal
# after transforming to the right shape.
var_tt = vars_tt[0] # N x P x N x P
var_tf = vars_tf[0] # P x N x N
var_ft = vars_ft[0] # N x P x P
var_ff = vars_ff[0] # N x P
np.testing.assert_almost_equal(np.diagonal(var_tt, axis1=1, axis2=3),
np.transpose(var_tf, [1, 2, 0]),
decimal=decimal)
np.testing.assert_almost_equal(np.diagonal(var_tt, axis1=0, axis2=2),
np.transpose(var_ft, [1, 2, 0]),
decimal=decimal)
np.testing.assert_almost_equal(np.diagonal(np.diagonal(var_tt, axis1=0, axis2=2)), var_ff, decimal=decimal)
def expand_cov(q_sqrt, W):
"""
:param q_sqrt: Cholesky factors of the covariance matrices, [L, M, M]
:param W: mixing matrix (square), [L, L]; currently unused by this helper
:return: Cholesky factor of the [1, LM, LM] covariance matrix
"""
q_cov = np.matmul(q_sqrt, q_sqrt.transpose([0, 2, 1])) # [L, M, M]
q_cov_expanded = scipy.linalg.block_diag(*q_cov) # [LM, LM]
q_sqrt_expanded = np.linalg.cholesky(q_cov_expanded) # [LM, LM]
return q_sqrt_expanded[None, ...]
def create_q_sqrt(M, L):
""" returns an array of L lower triangular matrices of size M x M """
return np.array([np.tril(rng.randn(M, M)) for _ in range(L)]) # [L, M, M]
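# Illustrative aside (a sketch, not part of the original test module): how the
# two helpers above compose; the sizes M and L below are hypothetical.
def _demo_expand_cov():
    M, L = 3, 2
    q_sqrt = create_q_sqrt(M, L)                  # [L, M, M] lower-triangular factors
    q_sqrt_full = expand_cov(q_sqrt, np.eye(L))   # [1, L*M, L*M] block-diagonal Cholesky
    assert q_sqrt_full.shape == (1, L * M, L * M)
    return q_sqrt_full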
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
class Data:
N, Ntest = 20, 5
D = 1 # input dimension
M = 3 # inducing points
L = 2 # latent gps
P = 3 # output dimension
MAXITER = int(15e2)
X = tf.random.normal((N,), dtype=tf.float64)[:, None] * 10 - 5
G = np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X))
Ptrue = np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]]) # [L, P]
Y = tf.convert_to_tensor(G @ Ptrue)
G = tf.convert_to_tensor(np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X)))
Ptrue = tf.convert_to_tensor(np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]])) # [L, P]
Y += tf.random.normal(Y.shape, dtype=tf.float64) * [0.2, 0.2, 0.2]
Xs = tf.convert_to_tensor(np.linspace(-6, 6, Ntest)[:, None])
data = (X, Y)
class DataMixedKernelWithEye(Data):
""" Note in this class L == P """
M, L = 4, 3
W = np.eye(L)
G = np.hstack([0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X, 1.0 + Data.X]) # [N, P]
mu_data = tf.random.uniform((M, L), dtype=tf.float64) # [M, L]
sqrt_data = create_q_sqrt(M, L) # [L, M, M]
mu_data_full = tf.reshape(mu_data @ W, [-1, 1]) # [L, 1]
sqrt_data_full = expand_cov(sqrt_data, W) # [1, LM, LM]
Y = tf.convert_to_tensor(G @ W)
G = tf.convert_to_tensor(G)
W = tf.convert_to_tensor(W)
sqrt_data = tf.convert_to_tensor(sqrt_data)
sqrt_data_full = tf.convert_to_tensor(sqrt_data_full)
Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((L,), dtype=tf.float64) * 0.2
data = (Data.X, Y)
class DataMixedKernel(Data):
M = 5
L = 2
P = 3
W = rng.randn(P, L)
G = np.hstack([0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X]) # [N, L]
mu_data = tf.random.normal((M, L), dtype=tf.float64) # [M, L]
sqrt_data = create_q_sqrt(M, L) # [L, M, M]
Y = tf.convert_to_tensor(G @ W.T)
G = tf.convert_to_tensor(G)
W = tf.convert_to_tensor(W)
sqrt_data = tf.convert_to_tensor(sqrt_data)
Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((P,), dtype=tf.float64) * 0.1
data = (Data.X, Y)
# ------------------------------------------
# Test sample conditional
# ------------------------------------------
@pytest.mark.parametrize("cov_structure", ["full", "diag"])
def test_sample_mvn(cov_structure):
"""
Draws 10,000 samples from a distribution
with known mean and covariance. The test checks
if the mean and covariance of the samples is
close to the true mean and covariance.
"""
N, D = 10000, 2
means = tf.ones((N, D), dtype=float_type)
if cov_structure == "full":
covs = tf.eye(D, batch_shape=[N], dtype=float_type)
elif cov_structure == "diag":
covs = tf.ones((N, D), dtype=float_type)
else:
raise (NotImplementedError)
samples = sample_mvn(means, covs, cov_structure)
samples_mean = np.mean(samples, axis=0)
samples_cov = np.cov(samples, rowvar=False)
np.testing.assert_array_almost_equal(samples_mean, [1., 1.], decimal=1)
np.testing.assert_array_almost_equal(samples_cov, [[1., 0.], [0., 1.]], decimal=1)
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("full_cov", [True, False])
@pytest.mark.parametrize("full_output_cov", [True, False])
def test_sample_conditional(whiten, full_cov, full_output_cov):
if full_cov and full_output_cov:
return
q_mu = tf.random.uniform((Data.M, Data.P), dtype=tf.float64) # [M, P]
q_sqrt = tf.convert_to_tensor(
[np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.P)]) # [P, M, M]
Z = Data.X[:Data.M, ...] # [M, D]
Xs = np.ones((Data.N, Data.D), dtype=float_type)
inducing_variable = InducingPoints(Z)
kernel = SquaredExponential()
# Path 1
value_f, mean_f, var_f = sample_conditional(Xs,
inducing_variable,
kernel,
q_mu,
q_sqrt=q_sqrt,
white=whiten,
full_cov=full_cov,
full_output_cov=full_output_cov,
num_samples=int(1e5))
value_f = value_f.numpy().reshape((-1,) + value_f.numpy().shape[2:])
# Path 2
if full_output_cov:
pytest.skip("sample_conditional with X instead of inducing_variable does not support full_output_cov")
value_x, mean_x, var_x = sample_conditional(Xs,
Z,
kernel,
q_mu,
q_sqrt=q_sqrt,
white=whiten,
full_cov=full_cov,
full_output_cov=full_output_cov,
num_samples=int(1e5))
value_x = value_x.numpy().reshape((-1,) + value_x.numpy().shape[2:])
# check if mean and covariance of samples are similar
np.testing.assert_array_almost_equal(np.mean(value_x, axis=0),
|
np.mean(value_f, axis=0)
|
numpy.mean
|
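# Illustrative aside, not part of the dataset row above: a minimal sketch of
# numpy.mean with an axis argument, mirroring how the test averages Monte Carlo
# samples over axis 0. The data below are hypothetical.
import numpy as np
samples = np.random.randn(100000, 3) + np.array([1.0, 2.0, 3.0])
sample_mean = np.mean(samples, axis=0)   # shape (3,), close to [1.0, 2.0, 3.0]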
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 16:12:27 2019
@author: ravi
"""
import numpy as np
def generate_context(features, axis=0, context=1):
backward = features.copy()
forward = features.copy()
if axis==0:
for c in range(context):
backward = np.roll(backward, 1, axis=1)
forward = np.roll(forward, -1, axis=1)
backward[:,0] = 0
forward[:,-1] = 0
features = np.concatenate((backward, features, forward), axis=axis)
else:
for c in range(context):
backward = np.roll(backward, 1, axis=0)
forward = np.roll(forward, -1, axis=0)
backward[0,:] = 0
forward[-1,:] = 0
features = np.concatenate((backward, features, forward), axis=axis)
return features
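# Illustrative usage sketch, not part of the original file: with axis=0 and
# context=1 a (features x frames) array gains stacked backward/forward copies
# along the feature axis. The sizes below are hypothetical.
def _demo_generate_context():
    feats = np.arange(12, dtype=float).reshape(3, 4)           # 3 features x 4 frames
    with_context = generate_context(feats, axis=0, context=1)  # shape (9, 4)
    assert with_context.shape == (9, 4)                        # [backward; original; forward]
    return with_context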
def smooth(x,window_len=7,window='hanning'):
if x.ndim != 1:
raise(ValueError, "smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise(ValueError, "Input vector needs to be bigger than window size.")
if window_len<3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
#print(len(s))
if window == 'flat': #moving average
w =
|
np.ones(window_len,'d')
|
numpy.ones
|
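# Illustrative aside, not part of the dataset row above: a minimal sketch of the
# 'flat' (moving-average) branch of the smooth() helper; the convolution step is
# an assumption since the original is truncated, and all names are hypothetical.
import numpy as np
x = np.sin(np.linspace(0, 4 * np.pi, 50))
window_len = 7
w = np.ones(window_len, 'd')                                    # uniform float64 weights
s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]  # mirror-padded signal
y = np.convolve(w / w.sum(), s, mode='valid')                   # smoothed output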
# -*- coding: UTF-8 -*-
# File: imgproc.py
from .base import ImageAugmentor
from ...utils import logger
import numpy as np
import cv2
__all__ = ['Hue', 'Brightness', 'BrightnessScale', 'Contrast', 'MeanVarianceNormalize',
'GaussianBlur', 'Gamma', 'Clip', 'Saturation', 'Lighting', 'MinMaxNormalize']
class Hue(ImageAugmentor):
""" Randomly change color hue.
"""
def __init__(self, range=(0, 180), rgb=None):
"""
Args:
range(list or tuple): range from which the applied hue offset is selected (maximum [-90,90] or [0,180])
rgb (bool): whether input is RGB or BGR.
"""
super(Hue, self).__init__()
if rgb is None:
logger.warn("Hue() now assumes rgb=False, but will by default use rgb=True in the future!")
rgb = False
rgb = bool(rgb)
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, hue):
m = cv2.COLOR_BGR2HSV if not self.rgb else cv2.COLOR_RGB2HSV
hsv = cv2.cvtColor(img, m)
# https://docs.opencv.org/3.2.0/de/d25/imgproc_color_conversions.html#color_convert_rgb_hsv
if hsv.dtype.itemsize == 1:
# OpenCV uses 0-179 for 8-bit images
hsv[..., 0] = (hsv[..., 0] + hue) % 180
else:
# OpenCV uses 0-360 for floating point images
hsv[..., 0] = (hsv[..., 0] + 2 * hue) % 360
m = cv2.COLOR_HSV2BGR if not self.rgb else cv2.COLOR_HSV2RGB
img = cv2.cvtColor(hsv, m)
return img
class Brightness(ImageAugmentor):
"""
Adjust brightness by adding a random number.
"""
def __init__(self, delta, clip=True):
"""
Args:
delta (float): Randomly add a value within [-delta,delta]
clip (bool): clip results to [0,255].
"""
super(Brightness, self).__init__()
assert delta > 0
self._init(locals())
def _get_augment_params(self, _):
v = self._rand_range(-self.delta, self.delta)
return v
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img += v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class BrightnessScale(ImageAugmentor):
"""
Adjust brightness by scaling by a random factor.
"""
def __init__(self, range, clip=True):
"""
Args:
range (tuple): Randomly scale the image by a factor in (range[0], range[1])
clip (bool): clip results to [0,255].
"""
super(BrightnessScale, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
v = self._rand_range(*self.range)
return v
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img *= v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class Contrast(ImageAugmentor):
"""
Apply ``x = (x - mean) * contrast_factor + mean`` to each channel.
"""
def __init__(self, factor_range, clip=True):
"""
Args:
factor_range (list or tuple): an interval to randomly sample the `contrast_factor`.
clip (bool): clip to [0, 255] if True.
"""
super(Contrast, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self._rand_range(*self.factor_range)
def _augment(self, img, r):
old_dtype = img.dtype
img = img.astype('float32')
mean = np.mean(img, axis=(0, 1), keepdims=True)
img = (img - mean) * r + mean
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class MeanVarianceNormalize(ImageAugmentor):
"""
Linearly scales the image to have zero mean and unit variance.
``x = (x - mean) / adjusted_stddev``
where ``adjusted_stddev = max(stddev, 1.0/sqrt(num_pixels * channels))``
This augmentor always returns float32 images.
"""
def __init__(self, all_channel=True):
"""
Args:
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
mean = np.mean(img)
std = np.std(img)
else:
mean = np.mean(img, axis=(0, 1), keepdims=True)
std = np.std(img, axis=(0, 1), keepdims=True)
std = np.maximum(std, 1.0 / np.sqrt(np.prod(img.shape)))
img = (img - mean) / std
return img
class GaussianBlur(ImageAugmentor):
""" Gaussian blur the image with random window size"""
def __init__(self, max_size=3):
"""
Args:
max_size (int): max possible Gaussian window size would be 2 * max_size + 1
"""
super(GaussianBlur, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
sx, sy = self.rng.randint(self.max_size, size=(2,))
sx = sx * 2 + 1
sy = sy * 2 + 1
return sx, sy
def _augment(self, img, s):
return np.reshape(cv2.GaussianBlur(img, s, sigmaX=0, sigmaY=0,
borderType=cv2.BORDER_REPLICATE), img.shape)
class Gamma(ImageAugmentor):
""" Randomly adjust gamma """
def __init__(self, range=(-0.5, 0.5)):
"""
Args:
range(list or tuple): gamma range
"""
super(Gamma, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, gamma):
old_dtype = img.dtype
lut = ((np.arange(256, dtype='float32') / 255) ** (1. / (1. + gamma)) * 255).astype('uint8')
img = np.clip(img, 0, 255).astype('uint8')
ret = cv2.LUT(img, lut).astype(old_dtype)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
return ret
class Clip(ImageAugmentor):
""" Clip the pixel values """
def __init__(self, min=0, max=255):
"""
Args:
min, max: the clip range
"""
self._init(locals())
def _augment(self, img, _):
img = np.clip(img, self.min, self.max)
return img
class Saturation(ImageAugmentor):
""" Randomly adjust saturation.
Follows the implementation in `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L218>`__.
"""
def __init__(self, alpha=0.4, rgb=True):
"""
Args:
alpha(float): maximum saturation change.
rgb (bool): whether input is RGB or BGR.
"""
super(Saturation, self).__init__()
rgb = bool(rgb)
assert alpha < 1
self._init(locals())
def _get_augment_params(self, _):
return 1 + self._rand_range(-self.alpha, self.alpha)
def _augment(self, img, v):
old_dtype = img.dtype
m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
grey = cv2.cvtColor(img, m)
ret = img * v + (grey * (1 - v))[:, :, np.newaxis]
return ret.astype(old_dtype)
class Lighting(ImageAugmentor):
""" Lighting noise, as in the paper
`ImageNet Classification with Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_.
The implementation follows `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L184>`__.
"""
def __init__(self, std, eigval, eigvec):
"""
Args:
std (float): maximum standard deviation
eigval: a vector of (3,). The eigenvalues of 3 channels.
eigvec: a 3x3 matrix. Each column is one eigen vector.
"""
eigval = np.asarray(eigval)
eigvec = np.asarray(eigvec)
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self._init(locals())
def _get_augment_params(self, img):
assert img.shape[2] == 3
return self.rng.randn(3) * self.std
def _augment(self, img, v):
old_dtype = img.dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class MinMaxNormalize(ImageAugmentor):
"""
Linearly scales the image to the range [min, max].
This augmentor always returns float32 images.
"""
def __init__(self, min=0, max=255, all_channel=True):
"""
Args:
max (float): The new maximum value
min (float): The new minimum value
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
minimum =
|
np.min(img)
|
numpy.min
|
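# Illustrative aside, not part of the dataset row above: a minimal sketch of the
# per-image min-max rescaling that MinMaxNormalize performs when all_channel=True;
# the image array and target range below are hypothetical.
import numpy as np
img = np.random.randint(0, 256, size=(4, 4, 3)).astype('float32')
minimum, maximum = np.min(img), np.max(img)
new_min, new_max = 0.0, 255.0
scaled = (img - minimum) * (new_max - new_min) / (maximum - minimum) + new_min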
"""
test_linearisation.py
We test the linearisation.py module.
Besides checking that some common wrong inputs raise errors,
we do sanity checks regarding the equivalence of the linearisation
and a filter output.
Todo:
* Refactor these tests into some more elegant design. At the moment,
this is a lot of copy and paste.
Tests include:
* Make linearisation.compute_linearisation complain about non-scalar
uncerts (third element of derivative_data)
* Test linear ODE for one and two evaluation points
* Test logistic ODE for one and two evaluation points
* Test lotka-volterra ODE for one and two evaluation points
"""
import unittest
import numpy as np
from difflikelihoods import covariance as cov
from difflikelihoods import odesolver
from difflikelihoods import linearised_odesolver as linsolve
from difflikelihoods import linearised_ode as linode
from difflikelihoods import statespace
from difflikelihoods import linearisation
class TestWrongInputs(unittest.TestCase):
"""
Tests whether linearisation.compute_linearisation complains about
input arguments in the wrong shape.
"""
def test_uncert_not_scalar(self):
"""
We test whether the uncertainty (third element of derivative_data)
is only accepted as a scalar.
"""
# Set Model Parameters
odeparam = 1.
y0, y0_unc = 1.0, 0
t0, tmax = 0.0, 1.25
# Set Method Parameters
q = 1
h = 0.1
# Set up and solve ODE
ibm = statespace.IBM(q=q, dim=1)
solver = linsolve.LinearisedODESolver(ibm)
ivp = linode.LinearODE(t0, tmax, odeparam, y0, y0_unc)
tsteps, means, __, rhs_parts, should_not_work = solver.solve(ivp, stepsize=h)
self.mean = odesolver.get_trajectory(means, 0, 0)
# Set up BM and IBM covariance matrices
evalpt = np.array(tsteps[[-1]])
with self.assertRaises(TypeError):
derdat = (tsteps, rhs_parts, should_not_work)
linearisation.compute_linearisation(ssm=ibm, initial_value=y0,
derivative_data=derdat,
prdct_tsteps=evalpt)
class TestSanityCheckLinearOneData(unittest.TestCase):
"""
We check whether the mean as given out by the ODE-Filter
coincides with certain GP regression for ONE datapoint.
Based on: Linear ODE (one-dim, one parameter) and one evalpt.
"""
def setUp(self):
"""
Set up linear ODE (i.e. one-dim, one parameter) and one evalpt.
"""
# Set Model Parameters
odeparam = 1.
y0, y0_unc = 1.0, 0
t0, tmax = 0.0, 1.25
# Set Method Parameters
q = 1
h = 0.1
# Set up and solve ODE
ibm = statespace.IBM(q=q, dim=1)
solver = linsolve.LinearisedODESolver(ibm)
ivp = linode.LinearODE(t0, tmax, odeparam, y0, y0_unc)
tsteps, means, __, rhs_parts, uncerts = solver.solve(ivp, stepsize=h)
self.mean = odesolver.get_trajectory(means, 0, 0)
# Set up BM and IBM covariance matrices
evalpt = np.array([tsteps[-1]])
derdat = (tsteps, rhs_parts, 0.)
const, jacob = linearisation.compute_linearisation(
ssm=ibm, initial_value=y0,
derivative_data=derdat, prdct_tsteps=evalpt)
# Compute GP Estimation of filter mean at t=tmax
self.postmean = const + np.dot(jacob[:, 0], odeparam)
def test_equivalence_to_filter_output(self):
"""
Check whether filter output coincides with linearised version
up to threshold 1e-12.
"""
error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])
self.assertLess(error, 1e-12)
class TestSanityCheckLogisticOneData(unittest.TestCase):
"""
We check whether the mean as given out by the ODE-Filter
coincides with certain GP regression for ONE datapoint.
Based on: logistic ODE (one-dim, two parameters) and one evalpt.
"""
def setUp(self):
"""
Set up logistic ODE (i.e. one-dim, two parameters) and one evalpt.
"""
# Set Model Parameters
odeparam = np.array([1, 2])
y0, y0_unc = 1.0, 0
t0, tmax = 0.0, 1.25
# Set Method Parameters
q = 1
h = 0.1
# Set up and solve ODE
ibm = statespace.IBM(q=q, dim=1)
solver = linsolve.LinearisedODESolver(ibm)
ivp = linode.LogisticODE(t0, tmax, odeparam, y0, y0_unc)
tsteps, means, __, rhs_parts, uncerts = solver.solve(ivp, stepsize=h)
self.mean = odesolver.get_trajectory(means, 0, 0)
# Set up BM and IBM covariance matrices
evalpt = np.array(tsteps[[-1]])
derdat = (tsteps, rhs_parts, 0.)
const, jacob = linearisation.compute_linearisation(
ssm=ibm, initial_value=y0,
derivative_data=derdat, prdct_tsteps=evalpt)
# Compute GP Estimation of filter mean at t=tmax
self.postmean = const + np.dot(jacob, odeparam)
def test_equivalence_to_filter_output(self):
"""
Check whether filter output coincides with linearised version
up to threshold 1e-12.
"""
error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])
self.assertLess(error, 1e-12)
class TestSanityCheckLotkaVolterraOneData(unittest.TestCase):
"""
We check whether the mean as given out by the ODE-Filter
coincides with certain GP regression for ONE datapoint.
Based on: Lotka-Volterra ODE (two-dim, four parameters) and one evalpt.
"""
def setUp(self):
"""
Set up Lotka-Volterra ODE (i.e. two-dim, four parameter) and one evalpt.
"""
# Set Model Parameters
odeparam = np.array([0, 1, 1, 2])
y0, y0_unc = np.ones(2), 0 * np.ones(2)
t0, tmax = 0.0, 1.25
# Set Method Parameters
q = 1
h = 0.1
# Set up and solve ODE
ibm = statespace.IBM(q=q, dim=len(y0))
solver = linsolve.LinearisedODESolver(ibm)
ivp = linode.LotkaVolterra(t0, tmax, odeparam, y0, y0_unc)
tsteps, means, __, rhs_parts, uncerts = solver.solve(ivp, stepsize=h)
self.mean = odesolver.get_trajectory_multidim(means, [0, 1], 0)
# Set up BM and IBM covariance matrices
evalpt = np.array(tsteps[[-1]])
derdat = (tsteps, rhs_parts, 0.)
const, jacob = linearisation.compute_linearisation(
ssm=ibm, initial_value=y0,
derivative_data=derdat, prdct_tsteps=evalpt)
# Compute GP Estimation of filter mean at t=tmax
postmean = const +
|
np.dot(jacob, odeparam)
|
numpy.dot
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 22:51:47 2016
New class file
@author: Bonan
"""
import scipy as sp
import numpy as np
from scipy.interpolate import interp1d
from multiprocessing import Pool
from functools import partial
from os import cpu_count
import tm4.mathFunc as mfc
class customCall:
"""
A convenient class for overloading calls to an object.
It is used in the Material class in order to allow multiprocessing
compatibility.
"""
def __init__(self, value):
self.v = value
def __call__(self, *args):
return self.v
class OpticalProperties:
"""
Class for extracting optical properties from results
"""
C = 1 / np.sqrt(2) * np.array([[1, 1], [1j, -1j]])
# Transition from circular to plane for reflected wave
D = 1 / np.sqrt(2) * np.array([[1, 1], [-1j, 1j]])
TM = np.array([C, C])
invC = np.array(sp.linalg.inv(C))
invD = np.array(sp.linalg.inv(D))
invTM = np.array([invD, invC])
def __init__(self, TOverall):
"""
A convenient class to store/access optical properties.
Initialise the class by passing the overall transfer matrix
(including incident and reflected light).
"""
self.J = self.getJones(TOverall)
self.Jc = self.circularJones()
self.RP = self.J[0].conj()*self.J[0]
self.RC = self.Jc[0].conj()*self.Jc[0]
def getJones(self, T):
"""Returns the Jones matrices with linear polarisation basis
J_ri is the Jones matrix in reflexion : [[r_pp, r_ps],
[r_sp, r_ss]]
J_ti is the Jones matrix in transmission : [[t_pp, t_ps],
[t_sp, t_ss]]
basis: [p, s]
"""
# Extract element from the transfer matrix to form the Jones matrix
J_it = T[2::-2, 2::-2]
J_ti = sp.linalg.inv(J_it)
J_rt = T[3::-2, 2::-2]
# Then we have J_ri = J_rt * J_ti
J_ri = np.dot(J_rt, J_ti)
return (J_ri, J_ti)
def circularJones(self):
"""Returns the Jones matrices for circular polarization basis
The Jones matrices for circular polarization are Jr^c = D⁻¹ Jr C and
Jt^c = C⁻¹ Jt C.
The Jones matrix for circular polarisation is of the form:
[[r_LL, r_LR],
[r_RL, r_RR]]
Returns : array of the same shape.
"""
J = self.J
Jc_ri = np.linalg.solve(self.D, J[0].dot(self.C))
Jc_ti = np.linalg.solve(self.C, J[1].dot(self.C))
return (Jc_ri, Jc_ti)
def applyAnalyser(self, angle, i=0):
"""Return the Intensity after applying analyser. Assuming the incident
light is unpolarised.
i: set to 0 for reflection, 1 for transmission. Default is 0.
"""
Jp = mfc.polariserJ(angle)
v = Jp.dot(self.J[i].dot([1, 1]))
return np.linalg.norm(v)**2
# Propagator for a homogeneous slab of material.
# Note: the deprecated scipy matrix-exponential helpers (expm2/expm3) are
# avoided in the __call__ method below.
class Propagator():
"""
A propagator class for easy access
"""
def __init__(self, method='eig', inv=True):
"""the value of 'method':
"linear" -> first order approximation of exp()
"Padé" -> Padé approximation of exp()
"Taylor" -> Taylor development of exp()
"eig" -> calculation with eigenvalue decomposition
The default method is Pade. It is the default method in scipy
libray for matrix exponential
"""
self.method = method
if inv is True:
self._i = -1
else:
self._i = 1
self.q = 1
def setMethod(self, method):
self.method = method
def setQ(self, q):
self.q = q
def __call__(self, Delta, h, k0, q):
"""
'Delta' : Delta matrix of the homogeneous material
'h' : thickness of the homogeneous slab
'k0' : wave vector in vacuum, k0 = ω/c
'q' : order of the approximation method, if useful
Returns an ndarray of the propagator for the division
"""
q = self.q
if self.method == "linear":
return np.identity(4) + 1j * h * k0 * Delta * self._i
elif self.method == "Pade":
return sp.linalg.expm(1j * h * k0 * Delta * self._i, q)
elif self.method == "Taylor":
return sp.linalg.expm3(1j * h * k0 * Delta * self._i, q + 1)
elif self.method == "eig":
return sp.linalg.expm2(1j * h * k0 * Delta * self._i)
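# Illustrative sketch (added for clarity, not part of the original module): for a
# toy diagonal Delta matrix the 'Pade' and 'eig' propagators should agree, since
# both evaluate the same matrix exponential. The numbers below are arbitrary.
def _propagator_demo():
    delta = np.diag([1.5, 1.5, -1.5, -1.5])      # toy Delta, not a physical one
    h, k0 = 100e-9, 2 * np.pi / 500e-9           # 100 nm slab, 500 nm vacuum wavelength
    p_pade = Propagator(method='Pade')(delta, h, k0, q=None)
    p_eig = Propagator(method='eig')(delta, h, k0, q=None)
    return np.allclose(p_pade, p_eig)            # expected: True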
class Material:
"""
A class that represents any type of anisotropic material
"""
def __init__(self, a, b, c, kind='quadratic'):
"""
Input some known points of the refractive index with respect to
wavelength.
a, b and c should each be a 2 x N array of known values of the
principal refractive indices (or scalars for constant indices).
"""
if type(a) != list and type(a) != np.ndarray:
self.fa = customCall(a)
self.fb = customCall(b)
self.fc = customCall(c)
return
self.a, self.b, self.c = np.asarray(a), np.asarray(b), np.asarray(c)
self.kind = kind
self._build_interp()
def _build_interp(self):
# assign interpolation functions
self.fa = interp1d(self.a[0], self.a[1], self.kind)
self.fb = interp1d(self.b[0], self.b[1], self.kind)
self.fc = interp1d(self.c[0], self.c[1], self.kind)
def getTensor(self, wl):
"""
Return the calculated dielectric tensor at a given wavelength.
The optical axis is along the x direction by default.
"""
# construct the dielectric constant tensor
e = np.diag([self.fa(wl), self.fb(wl), self.fc(wl)])**2
return e
class UniaxialMaterial(Material):
"""
A sub-class representing a uniaxial material (a != b = c)
"""
def __init__(self, e, o, kind='quadratic'):
Material.__init__(self, e, o, o, kind)
class HomogeneousNondispersiveMaterial(Material):
def __init__(self, n):
self.n = n
def getTensor(self, wavelength):
return np.diag([self.n, self.n, self.n])**2
def getRefractiveIndex(self, wavelength):
return self.n
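# Illustrative sketch (added for clarity, not part of the original module): a
# non-dispersive material with n = 1.5 returns an isotropic dielectric tensor of
# n**2 = 2.25 at any wavelength; the wavelength value passed here is arbitrary.
def _material_demo():
    glass = HomogeneousNondispersiveMaterial(1.5)
    return glass.getTensor(500)                  # -> np.diag([2.25, 2.25, 2.25])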
class HomogeneousDispersiveMaterial(Material):
def __init__(self, n_array):
Material.__init__(self, n_array, n_array, n_array, kind='quadratic')
def getRefractiveIndex(self, wavelength):
return self.fa(wavelength)
# %%HalfSpace class used in the front and back layer
class HalfSpace:
"""Homogeneous half-space with arbitrary permittivity.
A HalfSpace must provide this method:
* getTransitionMatrix(k0, Kx) : return transition matrix
"""
material = None # Material object
def __init__(self, material=None):
"""Create a homogeneous half-space of the given material."""
self.setMaterial(material)
def setMaterial(self, material):
"""Defines the material for this half-space"""
self.material = material
def getTransitionMatrix(self, Kx, wl):
"""Returns transition matrix L.
'Kx' : reduced wavenumber in the x direction, Kx = kx/k0
'k0' : wavenumber in vacuum, k0 = ω/c
Sort eigenvectors of the Delta matrix according to propagation
direction first, then according to $y$ component.
Returns eigenvectors ordered like (s+,s-,p+,p-)
"""
epsilon = self.material.getTensor(wl)
Delta = mfc.buildDeltaMatrix(epsilon, Kx)
q, Psi = sp.linalg.eig(Delta)
# Sort according to z propagation direction, highest Re(q) first
i = np.argsort(-np.real(q))
# Result should be (+,+,-,-)
q, Psi = q[i], Psi[:, i]
# For each direction, sort according to Ey component,
# highest Ey first
i1 = np.argsort(-np.abs(Psi[1, :2]))
i2 = 2 + np.argsort(-np.abs(Psi[1, 2:]))
# Result should be (s+,p+,s-,p-)
i = np.hstack((i1, i2))
# Reorder
i[[1, 2]] = i[[2, 1]]
# Result should be(s+,s-,p+,p-)
q, Psi = q[i], Psi[:, i]
# Adjust Ey in ℝ⁺ for 's', and Ex in ℝ⁺ for 'p'
E = np.hstack((Psi[1, :2], Psi[0, 2:]))
nE = np.abs(E)
c = np.ones_like(E)
i = (nE != 0.0)
c[i] = E[i]/nE[i]
Psi = Psi * c
# Normalize so that Ey = c1 + c2, analog to Ey = Eis + Ers
# For an isotropic half-space, this should return the same matrix
# as IsotropicHalfSpace
c = Psi[1, 0] + Psi[1, 1]
if abs(c) == 0:
c = 1.
Psi = 2 * Psi / c
return Psi
class IsotropicHalfSpace(HalfSpace):
"""Homogeneous Isotropic HalfSpace.
* Provides transition matrix L and the inverse.
Can be equally used for the front half-space (Theta = Thetai) or
for the back half-space (Theta = Thetat).
* Provides relations between angle Theta and reduced wave vector Kx.
'Theta' is the angle of the plane wave traveling to the right
(angle measured with respect to z axis and oriented by y).
'-Theta' is the angle of the wave traveling to the left.
"""
def getKxFromTheta(self, Theta, wl):
"""Returns the value of Kx.
'Theta' : angle of the wave traveling to the right (radians)
'k0' : wavenumber in vacuum
kx = n k0 sin(Theta) : Real and constant throughout the structure.
If n ∈ ℂ, then Theta ∈ ℂ
Kx = kx/k0 = n sin(Theta) : Reduced wavenumber.
"""
n = self.material.getRefractiveIndex(wl)
Kx = n * np.sin(Theta)
return Kx
def getKzFromKx(self, Kx, wl):
"""Returns the value of Kz in the half-space, function of Kx
'Kx' : Reduced wavenumber, Kx = kx/k0 = n sin(Theta)
'k0' : wavenumber in vacuum, kx = n k0 sin(Theta)
Returns : reduced wave number Kz = kz/k0
"""
# Not vectorized. Could be?
# Test type(Kz2)
n = self.material.getRefractiveIndex(wl)
Kz2 = n**2 - Kx**2
return np.sqrt(complex(Kz2))
def getThetaFromKx(self, Kx, wl):
"""Returns the value of angle Phi according to the value of Kx.
'Kx' : Reduced wavenumber, Kx = kx/k0 = n sin(Theta)
'wkl' : wavelength, kx = n k0 sin(Theta)
Returns : angle Theta in radians.
"""
# May be vectorized when I have time?
n = self.material.getRefractiveIndex(wl)
sin_Phi = Kx/n
if abs(sin_Phi) > 1:
sin_Phi = complex(sin_Phi)
Phi = np.arcsin(sin_Phi)
return Phi
def getTransitionMatrix(self, Kx, wl, inv=False):
"""Returns transition matrix L.
'Kx' : Reduced wavenumber
'k0' : wavenumber in vacuum
'inv' : if True, returns inverse transition matrix L^-1
Returns : transition matrix L
"""
n = self.material.getRefractiveIndex(wl)
sin_Theta = Kx/n
if abs(sin_Theta) > 1:
sin_Theta = complex(sin_Theta)
cos_Theta = np.sqrt(1 - sin_Theta**2)
if inv:
return 0.5 * np.array(
[[0, 1, -1/(n*cos_Theta), 0],
[0, 1, 1/(n*cos_Theta), 0],
[1/cos_Theta, 0, 0, 1/n],
[1/cos_Theta, 0, 0, -1/n]])
else:
return np.array(
[[0, 0, cos_Theta, cos_Theta],
[1, 1, 0, 0],
[-n*cos_Theta, n*cos_Theta, 0, 0],
[0, 0, n, -n]])
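# Illustrative sketch (added for clarity, not part of the original module): the
# Kx <-> Theta relations for an isotropic half-space; n = 1.5 and 30 degrees are
# arbitrary example values, and the wavelength only selects the refractive index.
def _halfspace_demo():
    front = IsotropicHalfSpace(HomogeneousNondispersiveMaterial(1.5))
    kx = front.getKxFromTheta(np.radians(30), 500)    # n*sin(Theta) = 0.75
    theta = front.getThetaFromKx(kx, 500)             # back to ~0.5236 rad (30 deg)
    return kx, theta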
class Structure():
"""
A superclass representing a type of structure
"""
propagtor = Propagator(method='eig')
def __init__(self):
self.optParas = {}
self.phyParas = {}
def constructEpsilon(self):
    """A method to construct the relative dielectric tensor"""
    raise NotImplementedError
def constructDelta(self):
    """A method to build the delta matrix"""
    raise NotImplementedError
def constructPropogator(self):
    """A method to build the propagator matrices"""
    raise NotImplementedError
def getPartialTransfer(self):
    """A method to build the partial transfer matrix for the layer"""
    raise NotImplementedError
def getInfo(self):
    """A method to get info of the structure"""
    raise NotImplementedError
def setWl(self, wl):
self.optParas['wl'] = wl
def setThickness(self, t):
self.phyParas['t'] = t
def setPhi(self, Phi):
self.optParas['Phi'] = Phi
def setOptParas(self, wl, Kx, Phi=0):
"""Set up the optical parameters
wl: wavelength of the incident light
Kx: reduced wavenumber of the incident light, Kx = n*sin(Theta)
Phi: azimuthal angle
"""
self.optParas = {'wl': wl, 'Kx': Kx, 'Phi': Phi}
class HomogeneousStructure(Structure):
"""
A class for a homogeneous layer. Can be used as basic calculation
element for complex structures.
"""
sType = 'homo'
def __init__(self, material, t, aor=0):
super(HomogeneousStructure, self).__init__()
self.setPhyParas(material, t, aor)
self.info = {"Type": "Homogeneous",
"TotalThickness": self.phyParas['t']}
def setPhyParas(self, material, t, aor=0):
self.phyParas.update({'m': material, 't': t, 'aor': aor})
def constructDelta(self):
o = self.optParas
wl, Phi, Kx = o['wl'], o['Phi'], o['Kx']
e = self.phyParas['m'].getTensor(wl)
e = mfc.rotedEpsilon(e, self.phyParas['aor']-Phi)
self.delta = mfc.buildDeltaMatrix(e, Kx)
def getPartialTransfer(self):
self.constructDelta()
self.partialTransfer = self.propagtor(self.delta,
self.phyParas['t'],
2*np.pi/self.optParas['wl'],
q=None)
return self.partialTransfer
def getInfo(self):
"""Get infomation of the structure"""
return {"Type": "Homegeneous", "TotalThickness": self.t}
class Helix(Structure):
"""An class represent a CNC helix that is sliced into a stack of homogeneous
layers
"""
sType = 'helix'
def __init__(self, *argv, **phyParas):
"""
input arguments:
material: material of the structure
pitch: pitch of the helix
d: number of division of the helix. Default is 30
aor: intrinsic angle of rotation of the helix
"""
super(Helix, self).__init__()
self.phyParas.update(phyParas)
self._distort_A = 1.0
def setPhyParas(self, material, pitch, t, d, handness, aor=0, distort_A=None):
"""
Set the physical parameters of the class
:param distort_A: (p_2 / p_3), where p_2 is the pitch in the top
(before this helix), and p_3 is the pitch in the bottom.
See also: https://doi.org/10.1103/PhysRevMaterials.3.045601
"""
self.phyParas = {'Description': 'Helix', 'd': d,
'sliceThickness': t / d, 'p': pitch,
't': t,
'm': material, 'aor': aor, 'handness': handness,
'distort_A': distort_A}
def getInfo(self):
"""Get infomation of the structure"""
return self.phyParas
def setPitch(self, pitch):
"""set the pitch of the helix"""
self.phyParas.update({'p': pitch})
def setThickness(self, t):
"""set the thicknes of the helix"""
self.phyParas['t'] = t
def _checkFastCondition(self):
"""
Check the assumption is valid.
Requirement:
Material is uniaxial, normal incidence
"""
diag = np.diag(self.phyParas['m'].getTensor(self.optParas['wl']))
if not (diag[0] != diag[1] and diag[1] == diag[2]):
return False
if self.optParas['Kx'] != 0:
return False
return True
# CORE algorithm: calculating the partial transfer matrix
def getAngles(self):
"""Get the angle of roatation for each layer. Return an 1d
array of the angles to be rotated for each layer.
These are the physics angles to be rotated"""
p = self.phyParas
endAngle = p['t'] / p['p'] * p['handness'] * np.pi
angles = np.linspace(0, endAngle, p['d'], endpoint=False) + p['aor']
distort_A = p.get('distort_A')
if distort_A and ((distort_A - 1.0) > 1e-7):
angles = np.arctan(np.tan(angles) * distort_A)
return angles
def getSliceThickness(self):
"""
Return the slice thickness based on current setting of total thickness
and number of divisions
"""
return self.phyParas['t'] / self.phyParas['d']
def getPartialTransfer(self, q=None):
"""
Build the partial transfer matrix, need to input wl and q.
If the thickness is zero, return the identity as the partial
transfer matrix
"""
p = self.phyParas
o = self.optParas
if p['t'] == 0:
return np.identity(4)
# Check we can use the Fast Route
if self._checkFastCondition() is True:
raise RuntimeWarning('Can use the HelixFast '
'for faster calculation')
# Normal Calculation routine
# First we spawn a list of HomogeneousStructures
sliceT = self.getSliceThickness()
layerList = [HomogeneousStructure(p['m'], sliceT)
for i in range(p['d'])]
# Then we set the .Phi of each layer.
# Note the required .Phi is the opposite of the
# angle of the physical rotation
PhiList = -(self.getAngles() - o['Phi'])
PMatrices = []
for layer, phi in zip(layerList, PhiList):
layer.setOptParas(o['wl'], o['Kx'], phi)
PMatrices.append(layer.getPartialTransfer())
self.P = PMatrices
# Take the dot product of all 4x4 slices along the first axis
return mfc.stackDot(PMatrices)
class DistortedHelix(Helix):
"""
A class to represent distorted helix
The new phi is computed as:
phi_new = atan( tan(phi_old) * A )
where A = (p_2 / p_3). p_2 is the pitch in the top (before this helix),
and p_3 is the pitch in the bottom.
See also: https://doi.org/10.1103/PhysRevMaterials.3.045601
"""
def __init__(self, *args, **kwargs):
"""Instantiate a DistortedHelix object"""
super(DistortedHelix, self).__init__(*args, **kwargs)
self._distort_A = 1.0
def getAngles(self):
"""Get the angle with distortion"""
orig_angles = super(DistortedHelix, self).getAngles()
return np.arctan(np.tan(orig_angles) * self._distort_A)
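# Illustrative numeric sketch (added for clarity, not part of the original
# module): the distortion phi_new = atan(tan(phi_old) * A) leaves 0 fixed and
# skews intermediate angles; for A = 1.2, pi/4 (~0.785 rad) maps to
# atan(1.2) ~ 0.876 rad.
def _distortion_demo(distort_A=1.2):
    angles = np.array([0.0, np.pi / 4, np.pi / 3])
    return np.arctan(np.tan(angles) * distort_A)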
class HelixCustom(Helix):
"""
A class that allows custom orientation of each layer in a chiral medium
"""
def __init__(self, **phyParas):
super(HelixCustom, self).__init__(**phyParas)
self.angleList = []
def getAngles(self):
"""
Get a list of angles. This function should also update the thickness
"""
if self.angleList == []:
raise RuntimeError('angles not calculated')
return self.angleList
def calcStandardAngles(self):
"""
Calculate the standard angle list and store it in self.angleList
"""
self.angleList = Helix.getAngles(self)
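# Illustrative sketch (added for clarity, not part of the original module): seed
# HelixCustom with the standard helix angles and then edit them freely. The
# pitch/thickness/division numbers are arbitrary; 'material' is any Material
# instance supplied by the caller.
def _custom_helix_demo(material):
    h = HelixCustom()
    h.setPhyParas(material, pitch=300, t=900, d=30, handness=-1)
    h.calcStandardAngles()
    h.angleList = h.angleList + 0.1              # apply a custom offset per layer
    return h.angleList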
class HelixFast(Helix):
"""
A class that uses an alternative Yeh calculation routine when the incidence is
normal and each layer is a uniaxial slab
"""
def __init__(self, **phyParas):
super(HelixFast, self).__init__(**phyParas)
# Calculation routine
def getPQVectors(self):
"""
Return a list of polarisation vectors
return: (p, q) where p and q are 3xN array of 3x1 vectors
"""
angles = self.getAngles()
p = mfc.vectorFromTheta(angles)
q = mfc.rotZ(np.pi/2).dot(p)
return p, q
def getPandD(self):
"""
Calculate the P and D matrices for each layers
"""
wl = self.optParas['wl']
d = self.phyParas['d']
e = self.phyParas['m'].getTensor(wl)
e_o = e[1, 1] # Ordinary relative dielectric constant
e_e = e[0, 0] # Extra-ordinary
# Wave vectors for the two modes; note k_z = 1 is assumed during scaling
k_o, k_e = np.sqrt(e_o), np.sqrt(e_e)
p, q = self.getPQVectors()
# Initialise storage space for P and D matrix
P = np.zeros((4, 4, d), dtype=complex)
D = np.zeros((4, 4, d), dtype=float)
# We take the order of the (k, p, q) pairs to be: k_e, -k_e, k_o, -k_o
# Note an angle of rotation of pi is needed since q = k cross p
# r = mfc.rotZ(np.pi/2)
# The signs of the polarisation vectors are arbitrary
p1, p2, p3, p4 = p, p, q, q
# For the q vectors a minus sign is needed due to the negative kz;
# q -> -p is a rotation of pi/2
q1, q2, q3, q4 = q * k_e, -q * k_e, -p * k_o, p * k_o
# Assign values to the D matrices
D[:3:2, :, :] = np.array([p1[:2],
p2[:2],
p3[:2],
p4[:2]]).swapaxes(0, 1)
D[-1:0:-2] = np.array([q1[:2],
q2[:2],
q3[:2],
q4[:2]]).swapaxes(0, 1)
# Assign values to the P matrices
s = self.getSliceThickness() # get the slice thickness
k_0 = 2 * np.pi / wl # The magnitude of the k vector in vacuum
P = np.array([[np.exp(-1j * s * k_0 * k_e), 0, 0, 0],
[0, np.exp(1j * s * k_0 * k_e), 0, 0],
[0, 0, np.exp(-1j * s * k_0 * k_o), 0],
[0, 0, 0, np.exp(1j * s * k_0 * k_o)]])
# We now have P and D ready
return P, D.transpose((2, 0, 1))
def getPartialTransfer(self, q=None):
"""
Get the partial transfer matrix with basis Ex, Ey, Hx, Hy
"""
if self._checkFastCondition() is False:
raise RuntimeError('Condition for fast calculation is not satisfied')
P, D = self.getPandD()
DInv = np.linalg.inv(D)
# Calculate the transition to the basis of partial waves
# with k_e, -k_e, k_o, -k_o
D0, DEnd = D[0], D[-1]
# Rows: D has px, qy, py, qx; Tr needs: px, py, qx, qy
Tr0 = np.array([D0[0], D0[2], D0[3], D0[1]])
TrEnd = np.array([DEnd[0], DEnd[2], DEnd[3], DEnd[1]])
# Get the partial transfer matrix
# Now begin the loop to calculate the partial transfer matrix
# T for each layer is D[n] @ P[n] @ inv(D[n])
# @ is matrix multiplication, but for backward compatibility we still
# use the .dot syntax
n = self.phyParas['d']
for i in range(n):
if i == 0:
# Here
T = P.dot(DInv[0])
continue
if i == n-1:
# Here
T = T.dot(D[i]).dot(P)
continue
T = T.dot(D[i]).dot(P).dot(DInv[i])
self.T_eff = T # For debug
# Change basis to be compatible with the rest of the code
return Tr0.dot(T).dot(np.linalg.inv(TrEnd))
class HeliCoidalStructure(Helix):
"""
A class that speeds up the calculation by dividing itself into a repeating
unit and a remainder unit. The transfer matrix of the repeating unit can be
raised to a power to get the effective transfer matrix.
"""
sType = 'helix'
# wl = None #dont specify wavelength initially
def __init__(self, material, pitch, t, d=30, handness='left', aor=0,
distort_A=None):
"""
Constructor for the HeliCoidalStructure class
t: Total thickness of the structure
d: divisions per pitch/remainder unit
distort_A: ratio of distortion, default to None.
"""
# Set handness of the helix
if handness == 'left':
h = -1
elif handness == 'right':
h = 1
else:
raise RuntimeError('Handedness needs to be either left or right')
Helix.__init__(self)
self.setPhyParas(material, pitch, t, d, h, aor=0, distort_A=distort_A)
self.phyParas['sliceThickness'] = pitch / d
def getPartialTransfer(self, q=None, updateEpsilon=True):
"""
Build the partial transfer matrix, need to input wl and q
"""
p = self.phyParas
o = self.optParas
r = np.remainder(p['t'], p['p'])
if self._checkFastCondition() is True:
unit, remainder = HelixFast(), HelixFast()
else:
unit, remainder = Helix(), Helix()
# Need to use a copy for the sub-helices
unit.setPhyParas(p['m'], p['p'],
p['p'], p['d'],
p['handness'],
p['aor'], distort_A=p.get('distort_A'))
remainder.setPhyParas(p['m'],
p['p'],
r,
p['d'],
p['handness'],
p['aor'], distort_A=p.get('distort_A'))
# Copy properties
unit.optParas, remainder.optParas = o, o
self.unit, self.remainder = unit, remainder
unitT = unit.getPartialTransfer(None)
remainderT = remainder.getPartialTransfer(None)
n = int(p['t']/p['p'])
return
|
np.linalg.matrix_power(unitT, n)
|
numpy.linalg.matrix_power
|
import sys
import numpy as np
from itertools import combinations
from pyemto.utilities.utils import rotation_matrix
import spglib as spg
try:
from pymatgen import Lattice, Structure
from pymatgen.vis.structure_vtk import StructureVis
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.util.coord import get_angle
except ImportError:
# pymatgen has not been installed
raise ImportError('emto_input_generator requires pymatgen>=4.4.0 to be installed!')
import os
import pyemto
import pyemto.common.common as common
class EMTO:
"""This class can be used to create EMTO input files from
an arbitrary structure. What is needed as input:
-primitive lattice vectors,
-basis vectors,
-list of atomic species that occupy the basis sites.
"""
def __init__(self, folder=None, EMTOdir=None):
""" """
if folder is None:
self.folder = os.getcwd()
else:
self.folder = folder
if EMTOdir is None:
self.EMTOdir = '/home/EMTO'
else:
self.EMTOdir = EMTOdir
self.sg2ibz = {1:14, 2:14, 3:12, 4:12, 5:13, 6:12, 7:12, 8:13, 9:13, 10:12,
11:12, 12:13, 13:12, 14:12, 15:13, 16:8, 17:8, 18:8, 19:8, 20:9,
21:9, 22:11, 23:10, 24:10, 25:8, 26:8, 27:8, 28:8, 29:8, 30:8,
31:8, 32:8, 33:8, 34:8, 35:9, 36:9, 37:9, 38:9, 39:9, 40:9,
41:9, 42:11, 43:11, 44:10, 45:10, 46:10, 47:8, 48:8, 49:8, 50:8,
51:8, 52:8, 53:8, 54:8, 55:8, 56:8, 57:8, 58:8, 59:8, 60:8,
61:8, 62:8, 63:9, 64:9, 65:9, 66:9, 67:9, 68:9, 69:11, 70:11,
71:10, 72:10, 73:10, 74:10, 75:5, 76:5, 77:5, 78:5, 79:6, 80:6,
81:5, 82:6, 83:5, 84:5, 85:5, 86:5, 87:6, 88:6, 89:5, 90:5,
91:5, 92:5, 93:5, 94:5, 95:5, 96:5, 97:6, 98:6, 99:5, 100:5,
101:5, 102:5, 103:5, 104:5, 105:5, 106:5, 107:6, 108:6, 109:6, 110:6,
111:5, 112:5, 113:5, 114:5, 115:5, 116:5, 117:5, 118:5, 119:6, 120:6,
121:6, 122:6, 123:5, 124:5, 125:5, 126:5, 127:5, 128:5, 129:5, 130:5,
131:5, 132:5, 133:5, 134:5, 135:5, 136:5, 137:5, 138:5, 139:6, 140:6,
141:6, 142:6, 143:4, 144:4, 145:4, 146:7, 147:4, 148:7, 149:4, 150:4,
151:4, 152:4, 153:4, 154:4, 155:7, 156:4, 157:4, 158:4, 159:4, 160:7,
161:7, 162:4, 163:4, 164:4, 165:4, 166:7, 167:7, 168:4, 169:4, 170:4,
171:4, 172:4, 173:4, 174:4, 175:4, 176:4, 177:4, 178:4, 179:4, 180:4,
181:4, 182:4, 183:4, 184:4, 185:4, 186:4, 187:4, 188:4, 189:4, 190:4,
191:4, 192:4, 193:4, 194:4, 195:1, 196:2, 197:3, 198:1, 199:3, 200:1,
201:1, 202:2, 203:2, 204:3, 205:1, 206:3, 207:1, 208:1, 209:2, 210:2,
211:3, 212:1, 213:1, 214:3, 215:1, 216:2, 217:3, 218:1, 219:2, 220:3,
221:1, 222:1, 223:1, 224:1, 225:2, 226:2, 227:2, 228:2, 229:3, 230:3}
self.sg2bl = {1:'simple triclinic', 2:'simple triclinic',
3:'simple monoclinic', 4:'simple monoclinic',
5:'base-centered monoclinic', 6:'simple monoclinic',
7:'simple monoclinic', 8:'base-centered monoclinic',
9:'base-centered monoclinic', 10:'simple monoclinic',
11:'simple monoclinic', 12:'base-centered monoclinic',
13:'simple monoclinic', 14:'simple monoclinic',
15:'base-centered monoclinic', 16:'simple orthorhombic',
17:'simple orthorhombic', 18:'simple orthorhombic',
19:'simple orthorhombic', 20:'base-centered orthorhombic',
21:'base-centered orthorhombic', 22:'face-centered orthorhombic',
23:'body-centered orthorhombic', 24:'body-centered orthorhombic',
25:'simple orthorhombic', 26:'simple orthorhombic',
27:'simple orthorhombic', 28:'simple orthorhombic',
29:'simple orthorhombic', 30:'simple orthorhombic',
31:'simple orthorhombic', 32:'simple orthorhombic',
33:'simple orthorhombic', 34:'simple orthorhombic',
35:'base-centered orthorhombic', 36:'base-centered orthorhombic',
37:'base-centered orthorhombic', 38:'base-centered orthorhombic',
39:'base-centered orthorhombic', 40:'base-centered orthorhombic',
41:'base-centered orthorhombic', 42:'face-centered orthorhombic',
43:'face-centered orthorhombic', 44:'body-centered orthorhombic',
45:'body-centered orthorhombic', 46:'body-centered orthorhombic',
47:'simple orthorhombic', 48:'simple orthorhombic',
49:'simple orthorhombic', 50:'simple orthorhombic',
51:'simple orthorhombic', 52:'simple orthorhombic',
53:'simple orthorhombic', 54:'simple orthorhombic',
55:'simple orthorhombic', 56:'simple orthorhombic',
57:'simple orthorhombic', 58:'simple orthorhombic',
59:'simple orthorhombic', 60:'simple orthorhombic',
61:'simple orthorhombic', 62:'simple orthorhombic',
63:'base-centered orthorhombic', 64:'base-centered orthorhombic',
65:'base-centered orthorhombic', 66:'base-centered orthorhombic',
67:'base-centered orthorhombic', 68:'base-centered orthorhombic',
69:'face-centered orthorhombic', 70:'face-centered orthorhombic',
71:'body-centered orthorhombic', 72:'body-centered orthorhombic',
73:'body-centered orthorhombic', 74:'body-centered orthorhombic',
75:'simple tetragonal', 76:'simple tetragonal',
77:'simple tetragonal', 78:'simple tetragonal',
79:'body-centered tetragonal', 80:'body-centered tetragonal',
81:'simple tetragonal', 82:'body-centered tetragonal',
83:'simple tetragonal', 84:'simple tetragonal',
85:'simple tetragonal', 86:'simple tetragonal',
87:'body-centered tetragonal', 88:'body-centered tetragonal',
89:'simple tetragonal', 90:'simple tetragonal',
91:'simple tetragonal', 92:'simple tetragonal',
93:'simple tetragonal', 94:'simple tetragonal',
95:'simple tetragonal', 96:'simple tetragonal',
97:'body-centered tetragonal', 98:'body-centered tetragonal',
99:'simple tetragonal', 100:'simple tetragonal',
101:'simple tetragonal', 102:'simple tetragonal',
103:'simple tetragonal', 104:'simple tetragonal',
105:'simple tetragonal', 106:'simple tetragonal',
107:'body-centered tetragonal', 108:'body-centered tetragonal',
109:'body-centered tetragonal', 110:'body-centered tetragonal',
111:'simple tetragonal', 112:'simple tetragonal',
113:'simple tetragonal', 114:'simple tetragonal',
115:'simple tetragonal', 116:'simple tetragonal',
117:'simple tetragonal', 118:'simple tetragonal',
119:'body-centered tetragonal', 120:'body-centered tetragonal',
121:'body-centered tetragonal', 122:'body-centered tetragonal',
123:'simple tetragonal', 124:'simple tetragonal',
125:'simple tetragonal', 126:'simple tetragonal',
127:'simple tetragonal', 128:'simple tetragonal',
129:'simple tetragonal', 130:'simple tetragonal',
131:'simple tetragonal', 132:'simple tetragonal',
133:'simple tetragonal', 134:'simple tetragonal',
135:'simple tetragonal', 136:'simple tetragonal',
137:'simple tetragonal', 138:'simple tetragonal',
139:'body-centered tetragonal', 140:'body-centered tetragonal',
141:'body-centered tetragonal', 142:'body-centered tetragonal',
143:'hexagonal', 144:'hexagonal',
145:'hexagonal', 146:'rhombohedral',
147:'hexagonal', 148:'rhombohedral',
149:'hexagonal', 150:'hexagonal',
151:'hexagonal', 152:'hexagonal',
153:'hexagonal', 154:'hexagonal',
155:'rhombohedral', 156:'hexagonal',
157:'hexagonal', 158:'hexagonal',
159:'hexagonal', 160:'rhombohedral',
161:'rhombohedral', 162:'hexagonal',
163:'hexagonal', 164:'hexagonal',
165:'hexagonal', 166:'rhombohedral',
167:'rhombohedral', 168:'hexagonal',
169:'hexagonal', 170:'hexagonal',
171:'hexagonal', 172:'hexagonal',
173:'hexagonal', 174:'hexagonal',
175:'hexagonal', 176:'hexagonal',
177:'hexagonal', 178:'hexagonal',
179:'hexagonal', 180:'hexagonal',
181:'hexagonal', 182:'hexagonal',
183:'hexagonal', 184:'hexagonal',
185:'hexagonal', 186:'hexagonal',
187:'hexagonal', 188:'hexagonal',
189:'hexagonal', 190:'hexagonal',
191:'hexagonal', 192:'hexagonal',
193:'hexagonal', 194:'hexagonal',
195:'simple cubic', 196:'face-centered cubic',
197:'body-centered cubic', 198:'simple cubic',
199:'body-centered cubic', 200:'simple cubic',
201:'simple cubic', 202:'face-centered cubic',
203:'face-centered cubic', 204:'body-centered cubic',
205:'simple cubic', 206:'body-centered cubic',
207:'simple cubic', 208:'simple cubic',
209:'face-centered cubic', 210:'face-centered cubic',
211:'body-centered cubic', 212:'simple cubic',
213:'simple cubic', 214:'body-centered cubic',
215:'simple cubic', 216:'face-centered cubic',
217:'body-centered cubic', 218:'simple cubic',
219:'face-centered cubic', 220:'body-centered cubic',
221:'simple cubic', 222:'simple cubic',
223:'simple cubic', 224:'simple cubic',
225:'face-centered cubic', 226:'face-centered cubic',
227:'face-centered cubic', 228:'face-centered cubic',
229:'body-centered cubic', 230:'body-centered cubic'}
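# For example (illustrative note, not from the original source): space group
# 225 (Fm-3m) maps to EMTO IBZ 2 via self.sg2ibz[225] and to the
# 'face-centered cubic' Bravais lattice via self.sg2bl[225].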
# BMDL, KSTR, SHAPE, KGRN and KFCD class instances
self.input_system = pyemto.System(folder=self.folder, EMTOdir=self.EMTOdir)
#
self.fit_angle_tol = 5e-6
self.fit_norm_ratio_tol = 5e-6
return
def calc_ws_radius(self, struct):
bohr2angst = 0.52917721
vol_unit = struct.volume/struct.num_sites
sws = (3*vol_unit/4.0/np.pi)**(1.0/3)/bohr2angst
return sws
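# Worked example (illustrative, not from the original source): for a cell of
# volume 23.64 A^3 containing 2 atoms, vol_unit = 11.82 A^3, so
# sws = (3*11.82/(4*pi))**(1/3) A / 0.52917721 A/Bohr ~ 1.41/0.529 ~ 2.67 Bohr.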
def make_basis_array(self, struct):
"""Returns a 2D numpy array of the basis atom coordinates
in !!Cartesian!! coordinates.
"""
len_basis = struct.num_sites
emto_basis = np.zeros((len_basis, 3))
for i in range(len_basis):
emto_basis[i, :] = struct.sites[i].coords
return emto_basis
def make_sites_array(self, struct):
len_basis = struct.num_sites
emto_sites = []
for i in range(len_basis):
emto_sites.append(struct.sites[i].specie.number)
return emto_sites
def make_cpa_sites_array(self, struct):
len_basis = struct.num_sites
self.atoms_cpa = []
self.concs_cpa = []
self.splts_cpa = []
self.fixs_cpa = []
for i in range(len_basis):
atom_number = struct.sites[i].specie.number
for j in range(len(self.pmg_species)):
if atom_number == self.pmg_species[j]:
self.atoms_cpa.append(self.species[j])
self.concs_cpa.append(self.concs[j])
self.splts_cpa.append(self.splts[j])
self.fixs_cpa.append(self.fixs[j])
break
def get_equivalent_sites(self):
"""Find all the sites that have exactly the same species,
concentrations, and magnetic moments"""
splt_tol = 1e-6
conc_tol = 1e-6
species_sorted = []
splts_sorted = []
concs_sorted = []
for i in range(len(self.species)):
tmp1 = []
tmp2 = []
tmp3 = []
ind_sorted = np.argsort(self.species[i])
for ind in ind_sorted:
tmp1.append(self.species[i][ind])
tmp2.append(self.splts[i][ind])
tmp3.append(self.concs[i][ind])
species_sorted.append(tmp1)
splts_sorted.append(tmp2)
concs_sorted.append(tmp3)
eqv_sites = np.zeros((len(species_sorted), len(species_sorted)), dtype=int) + 9999
for i in range(len(species_sorted)-1):
for j in range(i+1, len(species_sorted)):
eqv_sites[i,j] = 1
if len(species_sorted[i]) != len(species_sorted[j]):
# Sites i and j contain different numbers of atoms.
# For now, take them to be non-equivalent, although
# they could still be equivalent in the case that
# some element has been split into two or more parts
# concentration-wise (whole and the parts should have
# identical magnetic moments).
eqv_sites[i, j] = 0
else:
for a1, a2, splt1, splt2, conc1, conc2 in zip(species_sorted[i], species_sorted[j],
splts_sorted[i], splts_sorted[j], concs_sorted[i], concs_sorted[j]):
if a1 != a2 or np.abs(splt1 - splt2) > splt_tol or np.abs(conc1 - conc2) > conc_tol:
# Some pair of atoms (in the sorted lists) were not
# the same => sites i and j are not equivalent.
eqv_sites[i, j] = 0
break
output_sites = np.ones(len(species_sorted), dtype=int) * 9999
next_available = 1
for i in range(len(species_sorted)-1):
if output_sites[i] == 9999:
output_sites[i] = next_available
next_available += 1
for j in range(i+1, len(species_sorted)):
if eqv_sites[i, j] == 1:
output_sites[j] = output_sites[i]
if output_sites[-1] == 9999:
output_sites[-1] = next_available
return output_sites
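# Illustrative example (not from the original source): with
#   self.species = [['Fe', 'Cr'], ['Cr', 'Fe'], ['Ni']]
# and identical concentrations and moments on the first two sites, the sorted
# per-site comparison marks sites 1 and 2 as equivalent, so this method returns
# array([1, 1, 2]).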
def prepare_input_files(self, prims=None, basis=None, latpath=None,
coords_are_cartesian=False, latname=None,
species=None, find_primitive=True,
concs=None, splts=None, its=None, ws_wsts=None,
make_supercell=None, fixs=None,
**kwargs):
if prims is None:
    sys.exit('EMTO.prepare_input_files(): \'prims\' has to be given!')
if basis is None:
    sys.exit('EMTO.prepare_input_files(): \'basis\' has to be given!')
if latpath is None:
self.latpath = os.getcwd()
else:
self.latpath = latpath
if latname is None:
self.latname = 'structure'
else:
self.latname = latname
self.prims = np.array(prims)
self.basis = np.array(basis)
self.len_basis = len(self.basis[:, 0])
if species is None:
sys.exit('EMTO.prepare_input_files(): \'species\' has to be given!')
else:
self.species = []
for i in range(len(species)):
if isinstance(species[i], list):
tmp = []
for j in range(len(species[i])):
tmp.append(species[i][j])
self.species.append(tmp)
else:
self.species.append([species[i]])
if splts is None:
# Assume a zero moments array
self.splts = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(0.0)
self.splts.append(tmp)
else:
self.splts.append([0.0])
else:
self.splts = []
for i in range(len(splts)):
if isinstance(splts[i], list):
tmp = []
for j in range(len(splts[i])):
tmp.append(splts[i][j])
self.splts.append(tmp)
else:
self.splts.append([splts[i]])
if fixs is None:
# Assume no fixed moments ('N' for every species)
self.fixs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append('N')
self.fixs.append(tmp)
else:
self.fixs.append(['N'])
else:
self.fixs = []
for i in range(len(fixs)):
if isinstance(fixs[i], list):
tmp = []
for j in range(len(fixs[i])):
tmp.append(fixs[i][j])
self.fixs.append(tmp)
else:
self.fixs.append([fixs[i]])
if concs is None:
# Assume equal concentrations for the species on each site
self.concs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(1.0/len(self.species[i]))
self.concs.append(tmp)
else:
self.concs.append([1.0])
else:
self.concs = []
for i in range(len(concs)):
if isinstance(concs[i], list):
tmp = []
tmp_sum = 0.0
for j in range(len(concs[i])):
tmp.append(concs[i][j])
tmp_sum += concs[i][j]
print(tmp_sum)
if tmp_sum < 1.1:
if np.abs(tmp_sum - 1.0) > 1.e-6:
sys.exit('Concentrations {0} for site {1} do not add up to 1.0!!!'.format(concs[i], i+1))
else:
if np.abs(tmp_sum - 100.0) > 1.e-3:
sys.exit('Concentrations {0} for site {1} do not add up to 100!!!'.format(concs[i], i+1))
self.concs.append(tmp)
else:
self.concs.append([concs[i]])
# Check that all species, concs, and splts arrays have the same dimensions
for a, b in combinations([self.basis, self.species, self.concs, self.splts, self.fixs], 2):
if len(a) != len(b):
print(a, 'len = ', len(a))
print(b, 'len = ', len(b))
sys.exit('The above input arrays have inconsistent lengths!!!')
for a, b in combinations([self.species, self.concs, self.splts, self.fixs], 2):
for sublist1, sublist2 in zip(a, b):
if len(sublist1) != len(sublist2):
print(sublist1, 'len = ', len(sublist1))
print(sublist2, 'len = ', len(sublist2))
sys.exit('The above input array elements have inconsistent lengths!!!')
self.find_primitive = find_primitive
if self.find_primitive:
self.pmg_species = self.get_equivalent_sites()
else:
self.pmg_species = np.linspace(1, len(self.species), len(self.species), dtype=int)
#
self.coords_are_cartesian = coords_are_cartesian
self.ibz = None
self.make_supercell = make_supercell
#
self.pmg_input_lattice = Lattice(self.prims)
self.pmg_input_struct = Structure(self.pmg_input_lattice, self.pmg_species, self.basis,
coords_are_cartesian=self.coords_are_cartesian)
#
if self.make_supercell is not None:
self.pmg_input_struct.make_supercell(self.make_supercell)
#
self.sws = self.calc_ws_radius(self.pmg_input_struct)
#
self.finder = SpacegroupAnalyzer(self.pmg_input_struct, symprec=0.0001, angle_tolerance=0.0001)
self.stm = StructureMatcher(ltol=0.001, stol=0.001, angle_tol=0.001, attempt_supercell=True)
#
print("Input structure information:")
print(self.pmg_input_struct)
print("Volume: ", self.pmg_input_struct.volume)
print("Lattice vectors:")
print(self.pmg_input_struct.lattice.matrix)
print("")
#
# spglib
spg_cell = (
self.pmg_input_lattice.matrix,
self.pmg_input_struct.frac_coords,
self.pmg_species
)
self.spg_space_group = spg.get_spacegroup(spg_cell)
self.spg_space_group_number = int(self.spg_space_group.split()[-1].lstrip('(').rstrip(')'))
self.spg_space_group_symbol = self.spg_space_group
self.spg_prim_lat, self.spg_prim_pos, self.spg_prim_species = spg.standardize_cell(spg_cell,
to_primitive=True)
self.prim_struct = Structure(Lattice(self.spg_prim_lat), self.spg_prim_species, self.spg_prim_pos)
self.spg_ibz = self.sg2ibz[self.spg_space_group_number]
self.ibz = self.spg_ibz
mesh = [kwargs['nkx'], kwargs['nky'], kwargs['nkz']]
#print()
#print('#'*60)
mapping, grid = spg.get_ir_reciprocal_mesh(mesh, spg_cell, is_time_reversal=True, is_shift=(0, 0, 0))
uniques, counts = np.unique(mapping, return_counts=True)
all_weights = []
kpoints = []
weights = []
for xx in mapping:
all_weights.append(counts[np.argwhere(uniques == xx).flatten()[0]])
for xx, yy in zip(uniques, counts):
kpoints.append(grid[np.argwhere(mapping == xx).flatten()[0]])
weights.append(yy)
#for xx, yy, zz in zip(mapping, grid, all_weights):
# print(xx, yy, zz)
#print()
#for kp, ww in zip(kpoints, weights):
# print(kp, ww)
#print()
#print('NKVEC = ', len(kpoints))
#print('#'*60)
#print()
#print(spg_prim_pos)
#print(spg_prim_species)
#
#print("Detected standard conventional structure:")
#print(self.conv_struct)
#print("Volume: ",self.conv_struct.volume)
#print("Lattice vectors:")
#print(self.conv_struct.lattice.matrix)
#print("")
print("Detected standardized structure:")
print(self.prim_struct)
print("Volume: ", self.prim_struct.volume)
print("Lattice vectors:")
print(self.prim_struct.lattice.matrix)
print("")
#
self.primaa = self.prim_struct.lattice.matrix[0, :]
self.primbb = self.prim_struct.lattice.matrix[1, :]
self.primcc = self.prim_struct.lattice.matrix[2, :]
self.output_basis = self.make_basis_array(self.prim_struct)
# Below we calculate the transformation that maps
# self.primaX to lattice vectors used by EMTO.
# This transform depends on the type of the Bravais lattice,
# so each case must be treated separately.
if self.spg_ibz == 1:
norm_tmp = np.linalg.norm(self.primaa)
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1, 0, 0])
self.emto_primb = np.array([0, 1, 0])
self.emto_primc = np.array([0, 0, 1])
self.emto_basis = self.output_basis
elif self.spg_ibz == 2:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, 0])
self.emto_primb = np.array([0, 0.5, 0.5])
self.emto_primc = np.array([0.5, 0, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 3:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, -0.5])
self.emto_primb = np.array([-0.5, 0.5, 0.5])
self.emto_primc = np.array([0.5, -0.5, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 4:
rot1 = rotation_matrix([0.0, 0.0, 1.0], 0./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([1., 0, 0])
self.emto_primb = np.array([-0.5, np.sqrt(3.)/2, 0])
self.emto_primc = np.array([0., 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 5:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1.0, 0.0, 0.0])
self.emto_primb = np.array([0.0, 1.0, 0.0])
self.emto_primc = np.array([0.0, 0.0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 6:
self.output_prima = self.primbb
self.output_primb = self.primcc
self.output_primc = self.primaa
# Apply transformation on the basis atoms
self.output_basis = self.output_basis
self.output_boa = 0.0
self.output_coa = 2*self.output_prima[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, -0.5, self.output_coa/2])
self.emto_primb = np.array([0.5, 0.5, -self.output_coa/2])
self.emto_primc = np.array([-0.5, 0.5, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 7:
alpha = self.prim_struct.lattice.alpha
kulma = np.arctan((self.primaa[0]+self.primbb[0]+self.primcc[0])/
(self.primaa[2]+self.primbb[2]+self.primcc[2]))
rot1 = rotation_matrix([0.0, -1.0, 0.0], kulma)
rot2 = np.array([[-np.sqrt(3.0)/2, -0.5, 0.0],
[0.5, -np.sqrt(3.0)/2, 0.0],
[0.0, 0.0, 1.0]])
self.output_prima = np.dot(rot2, np.dot(rot1, self.primaa))
self.output_primb = np.dot(rot2, np.dot(rot1, self.primbb))
self.output_primc = np.dot(rot2, np.dot(rot1, self.primcc))
scale_a = self.output_prima[1]
print('scale_a = ',scale_a)
self.output_prima = self.output_prima/scale_a
self.output_primb = self.output_primb/scale_a
self.output_primc = self.output_primc/scale_a
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot2, np.dot(rot1, self.output_basis[i, :]))/scale_a
self.output_boa = 1.0
self.output_coa = self.output_prima[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.0, 1.0, self.output_coa])
self.emto_primb = np.array([-np.sqrt(3.)/2, -0.5, self.output_coa])
self.emto_primc = np.array([np.sqrt(3.)/2, -0.5, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 8:
if (np.abs(self.primaa[0]) < np.abs(self.primbb[1])) and \
(np.abs(self.primbb[1]) < np.abs(self.primcc[2])):
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
elif np.abs(np.abs(self.primaa[0]) - np.abs(self.primbb[1])) < 1.e-6 and \
np.abs(self.primbb[1]) < np.abs(self.primcc[2]):
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
elif np.abs(self.primaa[0]) < np.abs(self.primcc[2]):
norm_tmp = self.primcc[2]
rot1 = rotation_matrix([0.0, 0.0, 1.0], -90./180*np.pi)
rot2 = rotation_matrix([-1.0, 0.0, 0.0], 90./180*np.pi)
self.output_prima = np.dot(rot2, np.dot(rot1, self.primbb))/norm_tmp
self.output_primb = np.dot(rot2, np.dot(rot1, self.primcc))/norm_tmp
self.output_primc = np.dot(rot2, np.dot(rot1, self.primaa))/norm_tmp
print(self.output_prima)
print(self.output_primb)
print(self.output_primc)
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot2, np.dot(rot1, self.output_basis[i, :]))/norm_tmp
else:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
#
self.output_boa = self.output_primb[1]
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1.0, 0.0, 0.0])
self.emto_primb = np.array([0.0, self.output_boa, 0.0])
self.emto_primc = np.array([0.0, 0.0 ,self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 9:
if np.abs(self.primbb[1] - 0.5) < 1e-12 and \
np.abs(self.primcc[1] + 0.5) < 1e-12:
rot1 = rotation_matrix([0.0, 1.0, 0.0], 90./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
# Redefine lattice vectors
tmp = np.copy(self.output_prima)
self.output_prima[:] = self.output_primc[:]
self.output_primc[:] = tmp
# Mirror along the xy-plane
self.output_primc *= -1
# Scale lattice vectors so that a1 and a2 x-components are 0.5
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot1, self.output_basis[i, :])
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,2] *= -1
self.output_basis /= norm_tmp
#print(self.output_prima)
#print(self.output_primb)
#print(self.output_primc)
else:
norm_tmp = 2*self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 2*self.output_primb[1]
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([0.5, -self.output_boa/2, 0])
self.emto_primb = np.array([0.5, self.output_boa/2, 0])
self.emto_primc = np.array([0, 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 10:
self.output_prima = np.zeros_like(self.primaa)
self.output_primb = np.zeros_like(self.primbb)
self.output_primc = np.zeros_like(self.primcc)
self.output_prima[0] = self.primaa[1]
self.output_prima[1] = self.primaa[0]
self.output_prima[2] = self.primaa[2]
self.output_primb[0] = self.primcc[1]
self.output_primb[1] = self.primcc[0]
self.output_primb[2] = self.primcc[2]
self.output_primc[0] = self.primbb[1]
self.output_primc[1] = self.primbb[0]
self.output_primc[2] = self.primbb[2]
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
basis_tmp = np.copy(self.output_basis)
self.output_basis[:, 0] = basis_tmp[:, 1]
self.output_basis[:, 1] = basis_tmp[:, 0]
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 2*self.output_primc[1]
self.output_coa = 2*self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, -self.output_boa/2, self.output_coa/2])
self.emto_primb = np.array([0.5, self.output_boa/2, -self.output_coa/2])
self.emto_primc = np.array([-0.5, self.output_boa/2, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 11:
rot1 = rotation_matrix([1, 1, 1], 120./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])
self.output_basis /= norm_tmp
self.output_boa = 2*self.output_primc[1]
self.output_coa = 2*self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([0.5, 0, self.output_coa/2])
self.emto_primb = np.array([0.5, self.output_boa/2, 0])
self.emto_primc = np.array([0, self.output_boa/2, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 12:
bc_norm = np.linalg.norm(self.primaa)
# Rotate 90 degrees counter-clockwise around the x-axis
rot1 = rotation_matrix([1, 0, 0], -90./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa/bc_norm)
self.output_primb = np.dot(rot1, self.primcc/bc_norm)
self.output_primc = np.dot(rot1, self.primbb/bc_norm)
# Mirror a3 from negative z-axis to positive side
self.output_primc *= -1.0
# spg uses gamma > 90, so we redefine the a2 lattice vector so that
# gamma < 90:
self.output_primb[0] *= -1.0
gamma = get_angle(self.output_prima, self.output_primb)
y_fac = self.output_primb[1]
shift = np.abs(2*self.output_primb[0])
#
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])/bc_norm
# Transform basis because self.output_primc was mirrored:
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, 2] *= -1.0
# Transform basis because gamma was changed above:
for i in range(len(self.output_basis[:, 0])):
#self.output_basis[i, :] = np.dot(shift_mat, self.output_basis[i, :])
if self.output_basis[i, 1] > 0:
self.output_basis[i, 0] += shift * np.abs(self.output_basis[i, 1] / y_fac)
else:
self.output_basis[i, 0] -= shift * np.abs(self.output_basis[i, 1] / y_fac)
self.output_boa = np.linalg.norm(self.output_primb)
self.output_coa = np.linalg.norm(self.output_primc)
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = gamma
self.emto_prima = np.array([1.0, 0, 0])
self.emto_primb = np.array([self.output_boa*np.cos(np.radians(self.output_gamma)),
self.output_boa*np.sin(np.radians(self.output_gamma)), 0])
self.emto_primc = np.array([0, 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 13:
gamma = get_angle(self.primcc, self.primaa+self.primbb)
switch_x_y = np.array([[0, -1, 0],
[1, 0, 0],
[0, 0, 1]])
rot1 = np.array([[1.0,0.0,0.0],
[0.0,np.cos(np.radians(180-gamma)),-np.sin(np.radians(180-gamma))],
[0.0,np.sin(np.radians(180-gamma)),np.cos(np.radians(180-gamma))]])
rot2 = np.array([[0.0,0.0,1.0],
[0.0,1.0,0.0],
[-1.0,0.0,0.0]])
bc_norm = np.linalg.norm(self.primaa+self.primbb)
self.output_prima = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primcc)))/bc_norm
self.output_primb = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primaa)))/bc_norm
self.output_primc = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primbb)))/bc_norm
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.output_basis[i, :])))/bc_norm
self.output_boa = np.abs(self.output_prima[1])
self.output_coa = np.abs(2*self.output_primc[2])
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = gamma
self.emto_prima = np.array([0.0, -self.output_boa, 0])
self.emto_primb = np.array([0.5*np.sin(np.radians(self.output_gamma)),
-0.5*np.cos(np.radians(self.output_gamma)),
-self.output_coa/2])
self.emto_primc = np.array([0.5*np.sin(np.radians(self.output_gamma)),
-0.5*np.cos(np.radians(self.output_gamma)),
self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 14:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
# This could be tested, should be OK:
#self.output_boa = np.sqrt(self.output_primb[0]**2+self.output_primb[1]**2)
self.output_boa = self.prim_struct.lattice.b/self.prim_struct.lattice.a
self.output_coa = self.prim_struct.lattice.c/self.prim_struct.lattice.a
self.output_alpha = self.prim_struct.lattice.alpha
self.output_beta = self.prim_struct.lattice.beta
self.output_gamma = self.prim_struct.lattice.gamma
self.emto_prima = np.array([1.0, 0, 0])
self.emto_primb = np.array([self.output_boa*np.cos(np.radians(self.output_gamma)),
self.output_boa*np.sin(np.radians(self.output_gamma)),
0])
self.emto_primc = np.array([self.output_coa*np.cos(np.radians(self.output_beta)),
self.output_coa*(np.cos(np.radians(self.output_alpha)) -
np.cos(np.radians(self.output_beta)) *
np.cos(np.radians(self.output_gamma))) / np.sin(np.radians(self.output_gamma)),
self.output_coa*np.sqrt(1 - np.cos(np.radians(self.output_gamma))**2 -
np.cos(np.radians(self.output_alpha))**2 -
np.cos(np.radians(self.output_beta))**2 +
2*np.cos(np.radians(self.output_alpha))*
np.cos(np.radians(self.output_beta))*
np.cos(np.radians(self.output_gamma)))/np.sin(np.radians(self.output_gamma))])
self.emto_basis = self.output_basis
self.output_sites = self.make_sites_array(self.prim_struct)
self.output_lattice = Lattice(np.array([self.emto_prima, self.emto_primb, self.emto_primc]))
self.output_struct = Structure(self.output_lattice, self.output_sites,
self.emto_basis, coords_are_cartesian=True)
#
# Print EMTO structure information
print("")
print("Generated EMTO structure:")
print(self.output_struct)
print("Volume: ", self.output_struct.volume)
print("WS-rad: ", self.sws)
print("Lattice vectors:")
print(self.output_struct.lattice.matrix)
print("Basis vectors:")
for i in range(len(self.output_struct.sites)):
print(self.output_struct.sites[i].coords)
print("")
# Print symmetry information
print("spglib reports the following information:")
print("The spacegroup symbol of input structure: {}".format(self.spg_space_group))
print("The spacegroup number of input structure: {}".format(self.spg_space_group_number))
print("The Bravais lattice of input structure : {}".format(self.sg2bl[self.spg_space_group_number]))
print("Number of basis atoms : {}".format(self.prim_struct.num_sites))
print("EMTO IBZ : {}".format(self.spg_ibz))
print("")
emto_cell = (
self.output_lattice.matrix,
self.output_struct.frac_coords,
self.output_sites
)
self.emto_space_group = spg.get_spacegroup(emto_cell)
self.emto_space_group_number = int(self.emto_space_group.split()[-1].lstrip('(').rstrip(')'))
self.emto_space_group_symbol = self.emto_space_group
self.emto_prim_lat, self.emto_prim_pos, self.emto_prim_species = spg.standardize_cell(emto_cell, to_primitive=True)
self.emto_struct = Structure(Lattice(self.emto_prim_lat), self.emto_prim_species, self.emto_prim_pos)
self.emto_ibz = self.sg2ibz[self.emto_space_group_number]
print("spglib reports the following information:")
print("The spacegroup symbol of EMTO structure : {}".format(self.emto_space_group))
print("The spacegroup number of EMTO structure : {}".format(self.emto_space_group_number))
print("The Bravais lattice of EMTO structure : {}".format(self.sg2bl[self.emto_space_group_number]))
print("Number of basis atoms : {}".format(self.output_struct.num_sites))
print("EMTO IBZ : {}".format(self.emto_ibz))
print("")
print(self.prim_struct)
print(self.emto_struct)
should_exit = False
if (self.spg_space_group != self.emto_space_group):
print("Input and output spacegroups are different!!!")
should_exit = True
if (self.spg_ibz != self.emto_ibz):
print("Input and output IBZ are different!!!")
should_exit = True
if should_exit:
sys.exit("Structure conversion went wrong! Check the symmetry information above.")
#
fitted_angles = [get_angle(self.output_prima, self.emto_prima),
get_angle(self.output_primb, self.emto_primb),
get_angle(self.output_primc, self.emto_primc)]
for i, angle in enumerate(fitted_angles):
#print(angle)
if angle > self.fit_angle_tol:
sys.exit('Error: Angle between lattice vectors {0} is {1} > {2}!!!'.format(i+1, angle, self.fit_angle_tol))
fitted_ratios = [np.linalg.norm(self.output_prima) / np.linalg.norm(self.emto_prima),
np.linalg.norm(self.output_primb) / np.linalg.norm(self.emto_primb),
|
np.linalg.norm(self.output_primc)
|
numpy.linalg.norm
|
# -*- coding: utf-8 -*-
import numpy as np
# Ornstein-Ulhenbeck Process
# Source: https://github.com/vitchyr/rlkit/blob/5274672e9ff6481def0ffed61cd1b1c52210a840/rlkit/exploration_strategies/ou_strategy.py#L7 # noqa: E501
class OUNoise:
def __init__(
self,
action_dim: int,
mu: float = 0.0,
theta: float = 0.15,
max_sigma: float = 0.3,
min_sigma: float = 0.3,
decay_period: int = 100000,
):
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self.max_sigma = max_sigma
self.min_sigma = min_sigma
self.decay_period = decay_period
self.action_dim = action_dim
self.low = -1
self.high = 1
self.reset()
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)
self.state = x + dx
return self.state
def get_action(self, action, t=0):
ou_state = self.evolve_state()
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(
1.0, t / self.decay_period
)
return np.clip(action + ou_state, self.low, self.high)
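# Editor's illustrative usage sketch (not part of the original file): add temporally
# correlated OU noise to a deterministic policy action. The action dimension and the
# zero "policy output" below are placeholders.
def _ou_noise_demo():
    noise = OUNoise(action_dim=2)
    noise.reset()
    action = np.zeros(2)                  # stand-in for a policy's raw action
    return noise.get_action(action, t=0)  # clipped to [-1, 1] with OU noise added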
# Plotting utilities from OpenAI baselines package
# Source: https://github.com/openai/baselines/blob/master/baselines/common/plot_util.py
# Plotting utility to smooth out a noisy series
def smooth(y, radius, mode='two_sided', valid_only=False):
"""
Smooth signal y, where radius determines the size of the window
mode='two_sided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
"""
assert mode in ('two_sided', 'causal')
if len(y) < 2 * radius + 1:
return np.ones_like(y) * y.mean()
elif mode == 'two_sided':
convkernel = np.ones(2 * radius + 1)
out = np.convolve(y, convkernel, mode='same') / np.convolve(
|
np.ones_like(y)
|
numpy.ones_like
|
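Editorial aside, not part of the dataset rows: the smoothing helper above averages with a ones kernel via np.convolve; here is a minimal standalone sketch of that two-sided moving average, with an invented series and radius.
import numpy as np

y = np.array([0.0, 1.0, 4.0, 9.0, 16.0, 25.0])
radius = 1
kernel = np.ones(2 * radius + 1)
# Dividing by the convolved ones-array corrects for the shorter windows at the edges.
smoothed = np.convolve(y, kernel, mode='same') / np.convolve(np.ones_like(y), kernel, mode='same')
print(smoothed)   # each entry is the mean over [i - radius, i + radius], clipped at the ends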
import unittest
from inferelator.single_cell_workflow import SingleCellWorkflow
from inferelator.preprocessing import single_cell, metadata_parser
from inferelator.tests.artifacts.test_stubs import TestDataSingleCellLike, create_puppet_workflow, TEST_DATA
from inferelator import default
from inferelator.utils import InferelatorData
import numpy as np
import pandas as pd
import os
my_dir = os.path.dirname(__file__)
test_count_data = pd.DataFrame([[0, 0, 0], [10, 0, 10], [4, 0, 5], [0, 0, 0]])
test_meta_data = metadata_parser.MetadataParserBranching.create_default_meta_data(test_count_data.index)
class SingleCellTestCase(unittest.TestCase):
def setUp(self):
self.data = TEST_DATA.copy()
self.prior = TestDataSingleCellLike.priors_data
self.gold_standard = self.prior.copy()
self.tf_names = TestDataSingleCellLike.tf_names
self.workflow = create_puppet_workflow(base_class=SingleCellWorkflow)(self.data, self.prior, self.gold_standard)
self.gene_data = TestDataSingleCellLike.gene_metadata
self.gene_list_index = TestDataSingleCellLike.gene_list_index
class SingleCellPreprocessTest(SingleCellTestCase):
def test_count_filter(self):
expr_filtered_1 = self.data.copy()
single_cell.filter_genes_for_count(expr_filtered_1)
self.assertEqual(expr_filtered_1.gene_names.tolist(), ["gene1", "gene2", "gene4", "gene6"])
expr_filtered_2 = self.data.copy()
single_cell.filter_genes_for_count(expr_filtered_2, count_minimum=4)
self.assertEqual(expr_filtered_2.gene_names.tolist(), ["gene1", "gene2", "gene4"])
expr_filtered_3 = self.data.copy()
single_cell.filter_genes_for_count(expr_filtered_3, count_minimum=20)
self.assertEqual(expr_filtered_3.gene_names.tolist(), ["gene2"])
with self.assertRaises(ValueError):
self.data.subtract(3)
single_cell.filter_genes_for_count(self.data, count_minimum=1)
def test_library_to_one_norm(self):
single_cell.normalize_expression_to_one(self.data)
np.testing.assert_almost_equal(self.data.expression_data.sum(axis=1).tolist(), [1] * 10)
def test_median_scaling_norm(self):
data = self.data.copy()
single_cell.normalize_medians_for_batch(data, batch_factor_column="Condition")
data.meta_data['umi'] = data.expression_data.sum(axis=1)
np.testing.assert_almost_equal(data.meta_data.groupby("Condition")['umi'].median().tolist(), [45, 45, 45])
data = self.data.copy()
single_cell.normalize_medians_for_batch(data, batch_factor_column="Genotype")
data.meta_data['umi'] = data.expression_data.sum(axis=1)
np.testing.assert_almost_equal(data.meta_data.groupby("Genotype")['umi'].median().tolist(), [45])
def test_size_factor_scaling_norm(self):
single_cell.normalize_sizes_within_batch(self.data, batch_factor_column="Condition")
test_umi = pd.Series({"A": 45.0, "B": 36.0, "C": 58.5})
meta_data1 = self.data.meta_data
meta_data1['umi'] = np.sum(self.data.expression_data, axis=1)
for group in meta_data1['Condition'].unique():
idx = meta_data1['Condition'] == group
np.testing.assert_almost_equal(meta_data1.loc[idx, 'umi'].tolist(), [test_umi[group]] * idx.sum(),
decimal=4)
def test_log_scaling(self):
data = self.data.copy()
single_cell.log10_data(data)
np.testing.assert_almost_equal(np.log10(self.data.expression_data + 1), data.expression_data)
data = self.data.copy()
single_cell.log2_data(data)
np.testing.assert_almost_equal(np.log2(self.data.expression_data + 1), data.expression_data)
data = self.data.copy()
single_cell.ln_data(data)
np.testing.assert_almost_equal(np.log(self.data.expression_data + 1), data.expression_data)
data = self.data.copy()
single_cell.tf_sqrt_data(data)
np.testing.assert_almost_equal(np.sqrt(self.data.expression_data + 1) +
|
np.sqrt(self.data.expression_data)
|
numpy.sqrt
|
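Editorial aside, not part of the dataset rows: the truncated assertion above appears to test a square-root count transform of the form sqrt(x) + sqrt(x + 1) (a Freeman-Tukey-style variance-stabilising transform). A small sketch with an invented count matrix:
import numpy as np

counts = np.array([[0.0, 10.0, 4.0], [2.0, 0.0, 5.0]])
transformed = np.sqrt(counts) + np.sqrt(counts + 1)   # elementwise transform of the counts
print(transformed.round(3))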
# --------------------------------------------------------
# Sparse Steerable Convolutions
# Dataloader of LineMOD dataset
# Modified from https://github.com/j96w/DenseFusion by <NAME>
# --------------------------------------------------------
import os
import numpy as np
import numpy.ma as ma
import math
import random
import yaml
from PIL import Image
from transforms3d.euler import euler2mat
import torch
import torchvision.transforms as transforms
class LinemodDataset():
def __init__(self, mode, cfg, root='Linemod_preprocessed'):
self.npoint = cfg.npoint
self.num_img_per_epoch = cfg.num_img_per_epoch
self.total_voxel_extent = cfg.total_voxel_extent
self.mode = mode
self.root = root
self.objlist = [1, 2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]
self.list_rgb = []
self.list_depth = []
self.list_label = []
self.list_obj = []
self.list_rank = []
self.meta = {}
self.pt = {}
item_count = 0
for item in self.objlist:
if self.mode == 'train':
input_file = open('{0}/data/{1}/train.txt'.format(self.root, '%02d' % item))
else:
input_file = open('{0}/data/{1}/test.txt'.format(self.root, '%02d' % item))
while 1:
item_count += 1
input_line = input_file.readline()
if self.mode == 'test' and item_count % 10 != 0:
continue
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
self.list_rgb.append('{0}/data/{1}/rgb/{2}.png'.format(self.root, '%02d' % item, input_line))
self.list_depth.append('{0}/data/{1}/depth/{2}.png'.format(self.root, '%02d' % item, input_line))
if self.mode == 'eval':
self.list_label.append('{0}/segnet_results/{1}_label/{2}_label.png'.format(self.root, '%02d' % item, input_line))
else:
self.list_label.append('{0}/data/{1}/mask/{2}.png'.format(self.root, '%02d' % item, input_line))
self.list_obj.append(item)
self.list_rank.append(int(input_line))
meta_file = open('{0}/data/{1}/gt.yml'.format(self.root, '%02d' % item), 'r')
self.meta[item] = yaml.load(meta_file, Loader=yaml.FullLoader)
self.pt[item] = self._ply_vtx('{0}/models/obj_{1}.ply'.format(self.root, '%02d' % item))
print("Object {0} buffer loaded".format(item))
self.length = len(self.list_rgb)
self.img_index = np.arange(self.length)
print("Total img num: {}".format(len(self.list_rgb)))
self.cam_cx = 325.26110
self.cam_cy = 242.04899
self.cam_fx = 572.41140
self.cam_fy = 573.57043
self.xmap = np.array([[j for i in range(640)] for j in range(480)])
self.ymap = np.array([[i for i in range(640)] for j in range(480)])
# self.img_width = 480
# self.img_length = 640
self.border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
self.num_pt_mesh_large = 500
self.num_pt_mesh_small = 500
self.symmetry_obj_idx = [7, 8]
def __len__(self):
if self.mode == 'train' and self.num_img_per_epoch != -1:
return self.num_img_per_epoch
else:
return self.length
def reset(self):
if self.mode == 'train':
valid_len = self.length
required_len = self.__len__()
if required_len >= valid_len:
self.img_index = np.random.choice(valid_len, required_len)
else:
self.img_index = np.random.choice(valid_len, required_len, replace=False)
else:
self.img_index = np.arange(self.length)
def __getitem__(self, index):
idx = self.img_index[index]
img = Image.open(self.list_rgb[idx])
depth = np.array(Image.open(self.list_depth[idx]))
label = np.array(Image.open(self.list_label[idx]))
obj = self.list_obj[idx]
rank = self.list_rank[idx]
if obj == 2:
for i in range(0, len(self.meta[obj][rank])):
if self.meta[obj][rank][i]['obj_id'] == 2:
meta = self.meta[obj][rank][i]
break
else:
meta = self.meta[obj][rank][0]
# mask
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
if self.mode == 'eval':
mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
else:
mask_label = ma.getmaskarray(ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]
mask = mask_label * mask_depth
rmin, rmax, cmin, cmax = self._get_bbox(meta['obj_bb'])
# choose
choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
if self.mode == 'train' and len(choose)<32:
index = np.random.randint(self.__len__())
return self.__getitem__(index)
if len(choose)==0:
return self._return_unvalid_output()
# gt
target_r = np.resize(np.array(meta['cam_R_m2c']), (3, 3))
target_t = np.array(meta['cam_t_m2c']) / 1000.0
# img
img = np.array(img)[:, :, :3]
img = img[rmin:rmax, cmin:cmax, :].astype(np.float32).reshape((-1, 3))[choose, :]
img = img/255.0 - np.array([0.485, 0.456, 0.406])[np.newaxis,:]
# pts
depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
cam_scale = 1.0
pt2 = depth_masked / cam_scale
pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
cloud = np.concatenate((pt0, pt1, pt2), axis=1)
cloud = cloud / 1000.0
centroid = np.mean(cloud, axis=0)
cloud = cloud - centroid[np.newaxis, :]
target_t = target_t - centroid
if self.mode == 'train':
a1 = np.random.uniform(-math.pi/36.0, math.pi/36.0)
a2 = np.random.uniform(-math.pi/36.0, math.pi/36.0)
a3 = np.random.uniform(-math.pi/36.0, math.pi/36.0)
aug_r = euler2mat(a1, a2, a3)
cloud = (cloud - target_t[np.newaxis, :]) @ target_r
target_t = target_t + np.array([random.uniform(-0.02, 0.02) for i in range(3)])
target_r = target_r @ aug_r
cloud = cloud @ target_r.T + target_t[np.newaxis, :]
# point selection
valid_idx = (
|
np.abs(cloud[:,0])
|
numpy.abs
|
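Editorial aside, not part of the dataset rows: the prompt above stops at a point-selection mask; assuming valid points are those whose coordinates lie inside the configured voxel extent, a sketch of that kind of np.abs mask (the cloud and extent are invented):
import numpy as np

cloud = 0.2 * np.random.randn(1000, 3)        # centred point cloud
total_voxel_extent = 0.5                      # assumed bounding-box size
valid_idx = (np.abs(cloud[:, 0]) < total_voxel_extent / 2) & \
            (np.abs(cloud[:, 1]) < total_voxel_extent / 2) & \
            (np.abs(cloud[:, 2]) < total_voxel_extent / 2)
cloud = cloud[valid_idx]                      # keep only points inside the box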
"""
Tools for in-depth analysis of SUNTANS output
Includes:
- Volume and tracer budget calculations
- ...
<NAME>
Stanford University
March 2014
"""
from .sunpy import Spatial
from .sunslice import MultiSliceEdge
import sfoda.utils.mypandas as mpd
from sfoda.utils.timeseries import timeseries
from sfoda.utils.maptools import maskShpPoly
import numpy as np
import matplotlib.pyplot as plt
from datetime import timedelta
from netCDF4 import Dataset
from scipy import sparse
import os
import pandas as pd
import pdb
# Global constants
RHO0 = 1000.
Cp = 4186 # specific heat of seawater
GRAV = 9.81
class Energetics(Spatial):
fluxvar = 'U_F' # U or U_F
etavar='eta_avg' # 'eta_avg' or 'eta'
verbose=False
def __init__(self,ncfile,**kwargs):
"""
Calculates the energy variables from suntans output
"""
# Initialize the spatial class
Spatial.__init__(self,ncfile,klayer=[-99])
def __call__(self,tstep,cellindex=None):
"""
Calculate the terms for tstep
"""
if self.verbose: print('Calculating energy at time step: %d'%tstep)
if cellindex is None:
self.cellindex=list(range(self.Nc))
else:
self.cellindex=cellindex
self.tstep=[tstep]
###
# Step 1: Load the flux variable and the vertical depths
# These are needed for depth integrals and upwind calculations
###
if self.verbose: print('Loading raw model data...')
self.dzf = self.loadData(variable='dzf')
# dzf is calculated using max free surface height
self.dzz = self.loadData(variable='dzz')
self.u=self.loadData(variable=self.fluxvar)
if self.fluxvar=='U':
if self.verbose: print('Calculating U to flow rate...')
#TBC
# Load the cell-centred variables used elsewhere, at all depths
self.eta = self.loadData(variable=self.etavar)
self.uc = self.loadData(variable='uc')
self.vc = self.loadData(variable='vc')
self.buoyancy = self.loadData(variable='buoyancy')
self.nu_v = self.loadData(variable='nu_v')
if self.hasVar('kappa_tv'):
self.kappa_tv = self.loadData(variable='kappa_tv')
else:
self.kappa_tv = self.nu_v
# Make sure that all variables = 0 in masked regions...
# (mask does not work with all operations)
self.u[self.u.mask]=0
self.uc[self.uc.mask]=0
self.vc[self.vc.mask]=0
self.buoyancy[self.buoyancy.mask]=0
self.nu_v[self.nu_v.mask]=0
self.kappa_tv[self.kappa_tv.mask]=0
# Put all of the terms in a dictionary called... energy
self.energy={}
###
# Term: Vertical PE flux
if self.verbose: print('Calculating vertical buoyancy flux...')
self.energy.update({'B_flux':self.calc_buoyflux()})
###
# Term: Wind work
if self.verbose: print('Calculating the wind work...')
self.energy.update({'W_work':self.calc_windwork()})
###
# Depth integrated KE and PE
if self.verbose: print('Calculating energy...')
self.KE = self.calc_KE(u=self.uc,v=self.vc)
self.energy.update({'KE':self.depthint(self.KE,dz=self.dzz)})
self.PE = self.calc_PE(b=self.buoyancy)
self.energy.update({'PE':self.depthint(self.PE,dz=self.dzz)})
###
# Dissipation
if self.verbose: print('Calculating dissipation...')
self.energy.update({'diss':self.calc_dissipation()})
###
# Flux terms
if self.verbose: print('Calculating energy flux divergence terms...')
# Pressure work flux
self.energy.update({'uKE':self.calc_fluxdivergence(self.KE)})
self.energy.update({'uP':self.calc_Pworkflux()})
self.energy.update({'uPE':self.calc_fluxdivergence(self.PE)})
# Tide only estimate
self.energy.update({'ueta':self.calc_fluxdivergence2d(-self.eta*GRAV)})
def write2netcdf(self,outfile,trange):
"""
Write all time steps in trange to an output file
!! Note that all terms are converted to Wm-2 (multiplied by rho0) !!
!! Divergent terms are divided by cell area (self.Ac) !!!
"""
tstep = list(range(0,self.Nt))[trange[0]:trange[1]]
# Write the output to netcdf
print('Writing the output to netcdf...')
self.writeNC(outfile)
nc = Dataset(outfile,'a')
nc.Title = 'SUNTANS energy output'
nc.close()
# Create the new variable names
self.create_nc_var(outfile, 'time', ('time',),\
{'long_name':'time','units':'seconds since 1990-01-01 00:00:00'})
self.create_nc_var(outfile, 'KEz', ('time','Nc'),\
{'long_name':'Depth-integrated kinetic energy',\
'units':'J m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'PEz', ('time','Nc'),\
{'long_name':'Depth-integrated potential energy',\
'units':'J m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'uP', ('time','Nc'),\
{'long_name':'Depth-integrated pressure work divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'uKE', ('time','Nc'),\
{'long_name':'Depth-integrated kinetic energy flux divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'uPE', ('time','Nc'),\
{'long_name':'Depth-integrated potential energy flux divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'ueta', ('time','Nc'),\
{'long_name':'Depth-integrated tidal energy flux divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'W_work', ('time','Nc'),\
{'long_name':'Wind work',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'B_flux', ('time','Nc'),\
{'long_name':'Turbulent vertical buoyancy flux (KE->PE)',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'diss', ('time','Nc'),\
{'long_name':'Dissipation rate',\
'units':'W m-2','coordinates':'yv xv'})
# Testing variables
self.create_nc_var(outfile, 'S2', ('time','Nk','Nc'),\
{'long_name':'Shear squared',\
'units':'s-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'Pressure', ('time','Nk','Nc'),\
{'long_name':'Pressure',\
'units':'Pa','coordinates':'yv xv'})
# Calculate the energy for each time step and write the output
print('Writing the variable data to netcdf...')
nc = Dataset(outfile,'a')
for ii, tt in enumerate(tstep):
# Call the object to calculate the variables
print('Writing energy for timestep %d of %d...'%(tt,tstep[-1]))
self.__call__(tt)
# Write the variable data out
nc.variables['time'][ii]=self.timeraw[tt]
nc.variables['KEz'][ii,:]=self.energy['KE']*RHO0
nc.variables['PEz'][ii,:]=self.energy['PE']*RHO0
nc.variables['uP'][ii,:]=self.energy['uP']/self.Ac*RHO0
nc.variables['uKE'][ii,:]=self.energy['uKE']/self.Ac*RHO0
nc.variables['uPE'][ii,:]=self.energy['uPE']/self.Ac*RHO0
nc.variables['ueta'][ii,:]=self.energy['ueta']/self.Ac*RHO0
nc.variables['W_work'][ii,:]=self.energy['W_work']*RHO0
nc.variables['B_flux'][ii,:]=self.energy['B_flux']*RHO0
nc.variables['diss'][ii,:]=self.energy['diss']*RHO0
# Testing variables
nc.variables['S2'][ii,:,:]=self.S2
nc.variables['Pressure'][ii,:,:]=self.pressure*RHO0
nc.close()
def gradZ(self,phi,dzz,dzmin=0.01):
"""
Overloaded vertical gradient calculation function
Make sure the calculation is consistent with turbulence.c
Gradients are evaluated at k-1/2
"""
Nc = phi.shape[1]
dphi_dz = np.zeros((self.Nkmax+1,Nc))
#dzz values less than dzmin are set to dzmin
dzz[dzz<dzmin]=dzmin
# Calculate mid-point gradients
dphi_dz[1:-1,:] = 2.0 * (phi[0:-1,:] - phi[1:,:])/ \
(dzz[0:-1,:]+dzz[1:,:])
# Specify the surface gradient the same as the next layer
ctop = self.getctop(self.eta)
j = list(range(Nc))
dphi_dz[ctop[j],j] = dphi_dz[ctop[j]+1,j]
# Specify the seabed gradients
dphi_dz[self.Nk[j]+1,j]=dphi_dz[self.Nk[j],j]
# Return the average at the mid-layer depth
return 0.5*(dphi_dz[1:,:] + dphi_dz[0:-1,:])
def calc_fluxdivergence(self,phi):
"""
Calculates the flux divergence of a cell-centered scalar, phi.
"""
# Map the data onto the edge
phi_e = np.zeros((self.Nkmax,self.Ne))
for k in range(self.Nkmax):
phi_e[k,:] = \
self.get_edgevar(phi[k,:],k=k,U=self.u[k,:],method='upwind')
face = self.face.copy()
normal = 1.0*self.normal # Convert to float
mask = face.mask.copy()
# Create a mask so that the masked face values are not included in the
# flux calculation
facemask = np.zeros((self.Nc,self.maxfaces))
facemask[mask==False]=1.0
face[mask]=0 # Index the mask to the first cell
# (this is multiplied by zero later..)
# Calculate the fluxes at all cells - dimensions: [Nk, Nc, nfaces]
flux_cell = phi_e[...,face] * self.u[...,face] * normal * facemask
# Sum along all faces - dimensions: [Nk, Nc]
flux_div = flux_cell.sum(axis=-1)
# Return the depth integrated divergence
return flux_div.sum(axis=0)
def calc_fluxdivergence2d(self,phi):
"""
Calculates the flux divergence of a cell-centered scalar, phi.
"""
#depth-integrate the flux rate
U = np.sum(self.u,axis=0)
de = self.get_edgevar(self.dv,method='max')
U/=de # Divide by the edge depth (u_bar)
# Map the data onto the edge
phi_e = self.get_edgevar(phi,k=0,U=U,method='upwind')
face = self.face.copy()
normal = 1.0*self.normal # Convert to float
mask = face.mask.copy()
# Create a mask so that the masked face values are not included in the
# flux calculation
facemask = np.zeros((self.Nc,self.maxfaces))
facemask[mask==False]=1.0
face[mask]=0 # Index the mask to the first cell
# (this is multiplied by zero later..)
# Calculate the fluxes at all cells - dimensions: [Nc, nfaces]
flux_cell = phi_e[face] * U[face] * normal * facemask
# Sum along all faces - dimensions: [Nc]
return flux_cell.sum(axis=-1)
def calc_Pworkflux(self):
"""
Calculate the pressure work flux divergence for all grid cells
"""
# Calculate pressure at the mid-point
# Note that this is already normalized by rho0
#rho = self.buoyancy/GRAV*RHO0+RHO0
#self.pressure = self.depthint(-GRAV*rho,dz=self.dzz,cumulative=True)
# Buoyancy only
self.pressure = self.depthint(self.buoyancy,dz=self.dzz,cumulative=True)
# Need to add the free-surface contribution???
# Shouldn't be necessary since dzz varies with the free-surface
#H = self.depthint(self.dzz,dz=self.dzz,cumulative=True) # total depth
#self.pressure += H*GRAV # H = eta - z
self.pressure += self.eta*GRAV
#return self.calc_fluxdivergence(self.pressure/RHO0)
return self.calc_fluxdivergence(self.pressure)
def calc_windwork(self):
"""
Calculate the wind work component
"""
u_surf = self.get_surfacevar(self.uc,self.eta)
v_surf = self.get_surfacevar(self.vc,self.eta)
tau_x = self.loadData(variable='tau_x')
tau_y = self.loadData(variable='tau_y')
return (u_surf*tau_x + v_surf*tau_y)/RHO0
def calc_buoyflux(self):
"""
Calculates the vertical flux of buoyancy:
B_f = K_v * db/dz
Returns the depth-integral.
"""
db_dz = self.gradZ(self.buoyancy,self.dzz)
return self.depthint(self.kappa_tv*db_dz,dz=self.dzz)
def calc_dissipation(self):
r"""
Calculates the depth-integrated dissipation
eps = nu_v * (du/dz^2 + dv/dz^2)
"""
du_dz = self.gradZ(self.uc,self.dzz)
dv_dz = self.gradZ(self.vc,self.dzz)
self.S2 = (du_dz**2 + dv_dz**2)
# Zero the seabed shear - it is too large??
self.S2[self.Nk,list(range(self.Nc))]=0
diss = self.nu_v * self.S2
return self.depthint(diss,dz=self.dzz)
########################
########################
def energy_budget(energyfile,polyfile,trange):
"""
# Area-integrate the energy terms
"""
varnames = ['KEz','PEz','uP','uKE','uPE','ueta','W_work','B_flux','diss']
# Load the energy file as a suntans object
sun = Spatial(energyfile)
# Create the mask
mask,maskpoly = maskShpPoly(sun.xv,sun.yv,polyfile)
# Initialise the output dictionary
tstep = list(range(0,sun.Nt))[trange[0]:trange[1]]
nt = len(tstep)
budget ={}
for vv in varnames:
budget.update({vv:np.zeros((nt,))})
for ii,tt in enumerate(tstep):
print('Area-integrating step: %d of %d...'%(ii,tstep[-1]))
for vv in varnames:
sun.tstep=[tt]
data = sun.loadData(variable=vv)
budget[vv][ii],areatotal = sun.areaint(data,mask)
budget.update({'time':sun.time[tstep]})
# Calculate the time-rate of change of KE and PE
dt = sun.timeraw[1]-sun.timeraw[0]
budget.update({'dKE_dt':np.zeros((nt,))})
budget.update({'dPE_dt':np.zeros((nt,))})
budget['dKE_dt'][1::] = (budget['KEz'][1::]-budget['KEz'][0:-1])/dt
budget['dPE_dt'][1::] = (budget['PEz'][1::]-budget['PEz'][0:-1])/dt
return budget
########################
########################
def calc_avg_budget(sun, trange, cellindex,plot=False):
"""
Calculate the volume, temperature and salt budgets from
an average output file.
These calculations are very specific to the variables
stored in the averages file.
"""
# Load the SUNTANS average file object
sun.klayer=[-99]
#sun = Spatial(avgfile,klayer=[-99])
# Calculate the time dimensions
tstep = list(range(0,sun.Nt))[trange[0]:trange[1]]
nt = len(tstep)
time = sun.time[tstep]
dt = sun.globalatts['dt']*sun.globalatts['ntaverage']
# Remove cells that are next to type-2 or 3 edges here
# ...
#facemark=sun.get_facemark()
#for cc in cellindex:
# if facemark[cc] in [2,3]:
# print 'Removing edge cell index = %d'%cc
# cellindex.remove(cc)
Nc = len(cellindex)
# Calculate some grid variables
area = sun.Ac[cellindex]
sumarea = np.sum(area)
face = sun.face[cellindex,:] # edge pointers for each cell
normal = 1.0*sun.normal[cellindex,:]
# Create a mask so that the masked face values are not included
# in the flux calculations
facemask = np.zeros_like(normal)
facemask[face.mask==False]=1.0
face[face.mask]=0 # Index masked cells to the first cell (this is multiplied
# by zero)
# Initialise the output variables
# Sum of fluxes
Mass_f = np.zeros((nt,), float)
Salt_f = np.zeros((nt,), float)
Temp_f = np.zeros((nt,), float)
# Volume integrals
V = np.zeros((nt,), float)
s_V = np.zeros((nt,), float)
T_V = np.zeros((nt,), float)
# Surface fluxes (T and S only)
s_surf = np.zeros((nt,), float)
T_surf = np.zeros((nt,), float)
###
# Start stepping through and read all variable time step by time step
###
for ii,tt in enumerate(tstep):
sun.tstep=[tt]
print('Calculating budget for time = %d of %d'%(tt,tstep[-1]))
# Load the depth-average and flux quantities
s_dz = sun.loadDataRaw(variable='s_dz')
T_dz = sun.loadDataRaw(variable='T_dz')
eta = sun.loadDataRaw(variable='eta')
Sflux = sun.loadDataRaw(variable='s_F')
Tflux = sun.loadDataRaw(variable='T_F')
Mflux = sun.loadDataRaw(variable='U_F') #[m3 s-1] * [s] = [m3]
# Subset the variables at cell index only
eta = eta[cellindex]
s_dz = s_dz[cellindex]
T_dz = T_dz[cellindex]
# Calculate the fluxes for each cell [Nk, Nc, maxfaces]
Mflux_cell = Mflux[...,face] * normal * facemask
Sflux_cell = Sflux[...,face] * normal * facemask
Tflux_cell = Tflux[...,face] * normal * facemask
# Compute the total mass/tracer flux in/out of each cell
# sum along all dimension edges
Mass = Mflux_cell.sum(axis=-1)
Salt = Sflux_cell.sum(axis=-1)
Temp = Tflux_cell.sum(axis=-1)
# Sum along all depth
Mass = Mass.sum(axis=0)
Salt = Salt.sum(axis=0)
Temp = Temp.sum(axis=0)
# Sum all cells
Mass_f[ii] = Mass.sum()
Salt_f[ii] = Salt.sum()
Temp_f[ii] = Temp.sum()
# Calculate the volume integrals
s_V[ii] = np.sum(s_dz*area,axis=-1).squeeze() # m3 S
T_V[ii] = np.sum(T_dz*area,axis=-1).squeeze() # m3 C
V[ii] = np.sum(eta*area,axis=-1).squeeze() # m3 [volume]
# Get the surface temp and salinity flux arrays
if sun.hasVar('Hs'):
# Load the surface flux quantities
Hs = sun.loadDataRaw(variable='Hs')
Hsw = sun.loadDataRaw(variable='Hsw')
Hl = sun.loadDataRaw(variable='Hl')
Hlw = sun.loadDataRaw(variable='Hlw')
# Convert heat flux [W m-2] -> temperature flux
Qs = (Hs+Hl+Hlw+Hsw)/(RHO0*Cp) # units [C m s-1]
# Surface flux contribution
T_surf[ii] = np.sum(Qs[...,cellindex]*area) # units [C m3 s-1]
else:
T_surf[ii] = 0
if sun.hasVar('EP'):
EPs0 = sun.loadDataRaw(variable='EP')
s_surf[ii] = np.sum(EPs0[...,cellindex]*area) # units [psu m3 s-1]
else:
s_surf[ii] = 0
##########
# units are:
##########
# s_V [ppt m3]
# T_V [C m3]
# eta [m3]
# Mass_f [m3 s-1]
# Salt_f [ppt m3 s-1]
# Temp_f [C m3 s-1]
###
# Compute each of the terms in the budget
# Tendency
Tend_V = (V[:-1]-V[1:]).squeeze()/dt # m3 s-1
Tend_s = (s_V[:-1]-s_V[1:]).squeeze()/dt # psu m3 s-1
Tend_T = (T_V[:-1]-T_V[1:]).squeeze()/dt # C m3 s-1
# Advective fluxes
Adv_V = Mass_f[1:]# m3 s-1
Adv_s = Salt_f[1:]# psu m3 s-1
Adv_T = Temp_f[1:]# C m3 s-1
# Surface fluxes (note change of sign)
Sflux_T = -T_surf[1:]# C m3 s-1
Sflux_s = s_surf[1:]# psu m3 s-1
# Compute the error (residual) in each budget
Err_V =(Tend_V - Adv_V)
Err_T = (Tend_T - Adv_T - Sflux_T)
Err_s = (Tend_s - Adv_s - Sflux_s)
# Output time
time = time[1:]
# Save the output as a dictionary
budget = {'time':time,\
'cellindex':cellindex,\
'Volume':{'Advection':Adv_V,'Tendency':Tend_V,'Residual':Err_V},\
'Temp':{'Advection':Adv_T,'Tendency':Tend_T,'Surface_Flux':Sflux_T,'Residual':Err_T},\
'Salt':{'Advection':Adv_s,'Tendency':Tend_s,'Surface_Flux':Sflux_s,'Residual':Err_s},\
}
if plot:
# Free-surface
fig1=plt.figure()
f1ax1=fig1.add_subplot(2,1,1)
plt.title('Volume budget')
plt.plot(time,Tend_V,'b',linewidth=2)
plt.plot(time,Adv_V,'r')
plt.ylabel('$m^3 \ s^{-1}$')
plt.ylim(Tend_V.min(),Tend_V.max())
plt.legend(('Tendency','Advection'))
ax2=fig1.add_subplot(2,1,2,sharex=f1ax1)
plt.plot(time,Err_V)
plt.ylabel('error')
fig2=plt.figure()
f2ax1=fig2.add_subplot(2,1,1)
plt.title('Temperature budget')
plt.plot(time,Tend_T,'b',linewidth=2)
plt.plot(time,Adv_T,'r')
plt.plot(time,Adv_T + Sflux_T,'k')
plt.grid(True)
plt.ylabel(r'$^\circ C \ m^3 \ s^{-1}$')
plt.legend(('Tendency','Advection','Adv. + Sflux'))
f2ax1=fig2.add_subplot(2,1,2,sharex=f2ax1)
plt.title('Temperature budget')
plt.plot(time,Err_T)
plt.ylabel('error')
fig3=plt.figure()
f3ax1=fig3.add_subplot(2,1,1)
plt.title('Salt budget')
plt.plot(time,Tend_s,'b',linewidth=2)
plt.plot(time,Adv_s,'r')
plt.plot(time,Adv_s + Sflux_s,'k')
plt.grid(True)
plt.ylabel('$psu \ m^3 \ s^{-1}$')
plt.legend(('Tendency','Advection','Adv. + Sflux'))
f2ax1=fig3.add_subplot(2,1,2,sharex=f3ax1)
plt.plot(time,Err_s)
plt.ylabel('error')
plt.figure()
sun.plotmesh()
plt.plot(sun.xv[cellindex],sun.yv[cellindex],'m.')
plt.show()
return budget
#
def calc_isopycnal_discharge(ncfile,xpt,ypt,saltbins,tstart,tend,scalarvar='salt'):
"""
Calculates the discharge as a function of salinity along
a transect, defined by xpt/ypt, in the suntans model
Returns a dictionary with the relevant variables
"""
nbins = saltbins.shape[0]
# Load the slice object and extract the data
SE = MultiSliceEdge(ncfile,xpt=xpt,ypt=ypt)
# if SE==None:
# SE = SliceEdge(ncfile,xpt=xpt,ypt=ypt)
# SE.tstep = range(SE.Nt)
# else:
# SE.update_xy(xpt,ypt)
#
SE.tstep = SE.getTstep(tstart,tend)
print('Loading the salt flux data...')
#s_F_all= SE.loadData(variable='s_F')
s_F_all= SE.loadData(variable=scalarvar)
print('Loading the flux data...')
Q_all = SE.loadData(variable='U_F')
def Q_S_flux(salt,Q,saltbins,normal):
# make sure masked values are zeroed
#s_F[s_F.mask]=0
#Q[Q.mask]=0
Q = Q*normal
Nt,Nk,Ne = Q.shape
#salt = np.abs(s_F)/np.abs(Q)
#salt[np.isnan(salt)]=0
Ns = saltbins.shape[0]
ds = np.diff(saltbins).mean()
###
# Calculate Q(s,x)
###
# Create an array
#Nt = len(SE.tstep)
#ne = len(SE.j) # number of edges
jindex = np.arange(0,Ne)
jindex = np.repeat(jindex[np.newaxis,np.newaxis,:],Nt,axis=0)
jindex = np.repeat(jindex,SE.Nkmax,axis=1)
# Groups the salt matrix into bins
sindex = np.searchsorted(saltbins,salt)
sindex[sindex>=Ns]=Ns-1
#tindex = np.arange(0,Nt)
#tindex = np.repeat(tindex[:,np.newaxis,np.newaxis],ne,axis=-1)
#tindex = np.repeat(tindex,SE.Nkmax,axis=1)
# Calculate the salt flux for each time step
Qs =
|
np.zeros((Nt,Ns,Ne))
|
numpy.zeros
|
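Editorial aside, not part of the dataset rows: the Q_S_flux helper above bins a flux array by salinity class with np.searchsorted and then accumulates into an array created with np.zeros. A standalone sketch of that binning step; the shapes, bin edges and random data are invented.
import numpy as np

Nt, Nk, Ne, Ns = 3, 4, 5, 6
saltbins = np.linspace(0.0, 35.0, Ns)
salt = np.random.uniform(0.0, 35.0, (Nt, Nk, Ne))
Q = np.random.randn(Nt, Nk, Ne)

sindex = np.searchsorted(saltbins, salt)      # salinity class of every (time, layer, edge) sample
sindex[sindex >= Ns] = Ns - 1

Qs = np.zeros((Nt, Ns, Ne))
for t in range(Nt):
    for k in range(Nk):
        for e in range(Ne):
            Qs[t, sindex[t, k, e], e] += Q[t, k, e]   # accumulate flux into its salinity bin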
import logging
import numpy
from pyscf import lib
from cqcpy import ft_utils
from cqcpy.ov_blocks import one_e_blocks
from cqcpy.ov_blocks import two_e_blocks
from cqcpy.ov_blocks import two_e_blocks_full
from .system import System
from .ueg_utils import UEGBasis
einsum = lib.einsum
# einsum = einsum
class UEGSystem(System):
"""The uniform electron gas in a plane-wave basis set.
Attributes:
T (float): Temperature.
L (float): Box-length.
basis: UEG plane-wave basis set.
mu (float): Chemical potential.
Na (float): Number of alpha electrons.
Nb (float): Number of beta electrons.
N (float): Total number of electrons.
den (float): Number density.
rs (float): Wigner-Seitz radius.
Ef (float): Fermi-energy (of non-interacting system).
Tf (float): Reduced temperature.
"""
def __init__(self, T, L, Emax, mu=None, na=None, nb=None,
norb=None, orbtype='u', madelung=None):
self.T = T
self.L = L
self.basis = UEGBasis(L, Emax, norb=norb)
if na is None:
assert(nb is None)
assert(mu is not None)
self.mu = mu
beta = 1.0 / self.T if self.T > 0.0 else 1.0e20
en = self.g_energies_tot()
fo = ft_utils.ff(beta, en, self.mu)
N = fo.sum()
self.Na = N/2.0
self.Nb = self.Na
else:
self.Na = na
self.Nb = nb
assert(na > 0)
assert(nb > 0)
mua = self.basis.Es[self.Na - 1] + 0.00001
mub = self.basis.Es[self.Nb - 1] + 0.00001
assert(mua == mub)
self.mu = mua
assert(self.T == 0.0)
self.N = self.Na + self.Nb
self.den = self.N/(L*L*L)
self.rs = (3/(4.0*numpy.pi*self.den))**(1.0/3.0)
pi2 = numpy.pi*numpy.pi
self.Ef = 0.5*(3.0*pi2*self.den)**(2.0/3.0)
self.Tf = self.T / self.Ef
self.orbtype = orbtype
self.madelung = madelung
self._mconst = 2.837297479 / (2*self.L)
def has_g(self):
return True
def has_u(self):
return (False if self.orbtype == 'g' else True)
def has_r(self):
return (True if self.orbtype == 'r' else False)
def verify(self, T, mu):
if T > 0.0:
s = T == self.T and mu == self.mu
else:
s = T == self.T
if not s:
return False
else:
return True
def const_energy(self):
if self.madelung == 'const':
return -(self.Na + self.Nb)*self._mconst
else:
return 0.0
def get_mp1(self):
if self.has_r():
if self.T > 0:
V = self.r_int_tot()
beta = 1.0 / self.T
en = self.r_energies_tot()
fo = ft_utils.ff(beta, en, self.mu)
E1 = einsum('ijij,i,j->', V - V.transpose((0, 1, 3, 2)), fo, fo)
E1 += einsum('ijij,i,j->', V, fo, fo)
return E1
else:
Va, Vb, Vabab = self.u_aint()
E1 = 0.5*numpy.einsum('ijij->', Va.oooo)
E1 += 0.5*numpy.einsum('ijij->', Vb.oooo)
E1 += numpy.einsum('ijij->', Vabab.oooo)
return E1
elif self.has_u():
if self.T > 0:
Va, Vb, Vabab = self.u_aint_tot()
beta = 1.0 / self.T
ea, eb = self.u_energies_tot()
foa = ft_utils.ff(beta, ea, self.mu)
fob = ft_utils.ff(beta, eb, self.mu)
E1 = 0.5*einsum('ijij,i,j->', Va, foa, foa)
E1 += 0.5*einsum('ijij,i,j->', Vb, fob, fob)
E1 += einsum('ijij,i,j->', Vabab, foa, fob)
return E1
else:
Va, Vb, Vabab = self.u_aint()
E1 = 0.5*numpy.einsum('ijij->', Va.oooo)
E1 += 0.5*numpy.einsum('ijij->', Vb.oooo)
E1 += numpy.einsum('ijij->', Vabab.oooo)
return E1
else:
if self.T > 0:
V = self.g_aint_tot()
beta = 1.0 / self.T
en = self.g_energies_tot()
fo = ft_utils.ff(beta, en, self.mu)
return 0.5*einsum('ijij,i,j->', V, fo, fo)
else:
V = self.g_aint()
return 0.5*einsum('ijij->', V.oooo)
def r_mp1_den(self):
if self.T > 0:
V = self.r_int_tot()
beta = 1.0 / self.T
en = self.r_energies_tot()
fo = ft_utils.ff(beta, en, self.mu)
fv = ft_utils.ffv(beta, en, self.mu)
vec = fo*fv
Den = -beta*einsum(
'ijij,i,j->i', 2.0*V - V.transpose((0, 1, 3, 2)), vec, fo)
return Den
else:
logging.warning("Derivative of MP1 energy is zero at OK")
return numpy.zeros(en.shape)
def u_d_mp1(self, dveca, dvecb):
if self.T > 0:
Va, Vb, Vabab = self.u_aint_tot()
beta = 1.0 / self.T
ea, eb = self.u_energies_tot()
foa = ft_utils.ff(beta, ea, self.mu)
fva = ft_utils.ffv(beta, ea, self.mu)
veca = dveca*foa*fva
fob = ft_utils.ff(beta, eb, self.mu)
fvb = ft_utils.ffv(beta, eb, self.mu)
vecb = dvecb*fob*fvb
D = -einsum('ijij,i,j->', Va, veca, foa)
D -= einsum('ijij,i,j->', Vb, vecb, fob)
D -= einsum('ijij,i,j->', Vabab, veca, fob)
D -= einsum('ijij,i,j->', Vabab, foa, vecb)
return D
else:
logging.warning("Derivative of MP1 energy is zero at OK")
return 0.0
def u_mp1_den(self):
if self.T > 0:
Va, Vb, Vabab = self.u_aint_tot()
beta = 1.0 / self.T
ea, eb = self.u_energies_tot()
foa = ft_utils.ff(beta, ea, self.mu)
fva = ft_utils.ffv(beta, ea, self.mu)
veca = foa*fva
fob = ft_utils.ff(beta, eb, self.mu)
fvb = ft_utils.ffv(beta, eb, self.mu)
vecb = fob*fvb
Da = -beta*einsum('ijij,i,j->i', Va, veca, foa)
Db = -beta*einsum('ijij,i,j->i', Vb, vecb, fob)
Da -= beta*einsum('ijij,i,j->i', Vabab, veca, fob)
Db -= beta*einsum('ijij,i,j->j', Vabab, foa, vecb)
return Da, Db
else:
logging.warning("Derivative of MP1 energy is zero at OK")
return numpy.zeros(ea.shape), numpy.zeros(eb.shape)
def g_d_mp1(self, dvec):
if self.T > 0:
V = self.g_aint_tot()
beta = 1.0 / self.T
en = self.g_energies_tot()
fo = ft_utils.ff(beta, en, self.mu)
fv = ft_utils.ffv(beta, en, self.mu)
vec = dvec*fo*fv
return -einsum('ijij,i,j->', V, vec, fo)
else:
logging.warning("Derivative of MP1 energy is zero at OK")
return 0.0
def g_mp1_den(self):
if self.T > 0:
V = self.g_aint_tot()
beta = 1.0 / self.T
en = self.g_energies_tot()
fo = ft_utils.ff(beta, en, self.mu)
fv = ft_utils.ffv(beta, en, self.mu)
vec = fo*fv
return -beta*einsum('ijij,i,j->i', V, vec, fo)
else:
logging.warning("Derivative of MP1 energy is zero at OK")
return numpy.zeros(self.g_energies_tot.shape)
def r_energies(self):
if self.T > 0.0:
raise Exception("Undefined ov blocks at FT")
if self.Na != self.Nb:
raise Exception("UEG system is not restricted")
d = self.basis.Es
na = int(self.Na)
eo = d[:na]
ev = d[na:]
if self.madelung == "orb":
eo -= self._mconst
return (eo, ev)
def u_energies(self):
d = self.basis.Es
na = int(self.Na)
nb = int(self.Nb)
eoa = numpy.asarray(d[:na])
eva = numpy.asarray(d[na:])
eob = numpy.asarray(d[:nb])
evb = numpy.asarray(d[nb:])
if self.madelung == "orb":
eoa -= self._mconst
eob -= self._mconst
return (eoa, eva, eob, evb)
def g_energies(self):
if self.T > 0.0:
raise Exception("Undefined ov blocks at FT")
d = self.g_energies_tot()
nbsf = self.basis.get_nbsf()
na = int(self.Na)
nb = int(self.Nb)
eoa = d[:na]
eva = d[na:nbsf]
eob = d[nbsf:nbsf+nb]
evb = d[-(nbsf-nb):]
eo = numpy.hstack((eoa, eob))
ev = numpy.hstack((eva, evb))
if self.madelung == "orb":
eo -= self._mconst
return (eo, ev)
def r_energies_tot(self):
return numpy.asarray(self.basis.Es)
def u_energies_tot(self):
return self.basis.u_build_diag()
def g_energies_tot(self):
return self.basis.g_build_diag()
def r_fock(self):
if self.T > 0.0:
raise Exception("Undefined ov blocks at FT")
F = self.r_hcore()
d = self.r_energies_tot()
n = d.shape[0]
occ = []
vir = []
for p in range(n):
if d[p] < self.mu:
occ.append(p)
if d[p] > self.mu:
vir.append(p)
oidx = numpy.r_[occ]
vidx = numpy.r_[vir]
V = self.r_int_tot()
Vd = V[numpy.ix_(numpy.arange(n), oidx, numpy.arange(n), oidx)]
Vx = V[numpy.ix_(numpy.arange(n), oidx, oidx, numpy.arange(n))]
F = F + 2*einsum('piri->pr', Vd) - einsum('piir->pr', Vx)
Foo = F[numpy.ix_(oidx, oidx)]
Fvv = F[numpy.ix_(vidx, vidx)]
Fov = F[numpy.ix_(oidx, vidx)]
Fvo = F[numpy.ix_(vidx, oidx)]
return one_e_blocks(Foo, Fov, Fvo, Fvv)
def u_fock(self):
if self.T > 0.0:
raise Exception("Undefined ov blocks at FT")
F = self.r_hcore()
d = self.r_energies_tot()
n = d.shape[0]
occ = []
vir = []
for p in range(n):
if d[p] < self.mu:
occ.append(p)
if d[p] > self.mu:
vir.append(p)
oidx = numpy.r_[occ]
vidx = numpy.r_[vir]
V = self.r_int_tot()
Vd = V[numpy.ix_(numpy.arange(n), oidx, numpy.arange(n), oidx)]
Vx = V[numpy.ix_(numpy.arange(n), oidx, oidx,
|
numpy.arange(n)
|
numpy.arange
|
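Editorial aside, not part of the dataset rows: the Fock-matrix code above slices occupied/virtual blocks with numpy.ix_ and numpy.arange. A tiny sketch of that block-extraction pattern on an invented matrix:
import numpy as np

n = 4
F = np.arange(n * n, dtype=float).reshape(n, n)
oidx = np.r_[[0, 1]]           # "occupied" indices (placeholders)
vidx = np.r_[[2, 3]]           # "virtual" indices (placeholders)
Foo = F[np.ix_(oidx, oidx)]    # occupied-occupied block
Fov = F[np.ix_(oidx, vidx)]    # occupied-virtual block
print(Foo, Fov, sep="\n")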
import smooth
import numpy as np
from datetime import datetime
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import scipy.optimize as opt
df=pd.read_excel("Texas COVID-19 Case Count Data by County.xlsx",header=2)
df=df.set_index("County Name")
cases=np.array(df.loc["Dallas"].values[1:],dtype=np.float64)
my=np.amax(np.diff(cases))
dates=[pd.to_datetime(df.loc["Dallas"].index[i].split("\n")[1]+"-2020") for i in range(1,len(df.loc["Dallas"]))]
dates=list(map(datetime.date,dates))
cases=df.loc["Dallas"].values[1:]
dates=dates[1:]
shelter_in_place=np.where(np.array(dates)==datetime(2020,3,23).date())[0][0]
plt.plot(np.diff(cases),linewidth=3.0)
plt.scatter(shelter_in_place,np.diff(cases)[shelter_in_place],color="orange",s=64)
plt.legend(["Cases per day","Shelter-in-place order"])
plt.title("Cases per day")
plt.xlabel("Days since first case")
plt.ylabel("Cases")
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,my))
plt.savefig("dallas_raw.svg")
plt.close()
ys=smooth.epi_smooth_dx(np.diff(cases))
plt.plot(ys,linewidth=3)
plt.scatter(shelter_in_place,ys[shelter_in_place],color="orange",s=64)
plt.legend(["Cases per day","Shelter-in-place order"])
plt.title("Cases per day")
plt.xlabel("Days since first case")
plt.ylabel("Cases")
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,my))
plt.savefig("dallas_smoothed.svg")
plt.close()
ys=smooth.moving_average(
|
np.diff(cases)
|
numpy.diff
|
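Editorial aside, not part of the dataset rows: the script above differences a cumulative case count and then smooths it; a minimal sketch of that np.diff plus moving-average step with an invented series (the project's smooth module is not assumed).
import numpy as np

cumulative = np.array([1.0, 1.0, 2.0, 4.0, 7.0, 12.0, 20.0, 33.0])
daily = np.diff(cumulative)                          # new cases per day
window = np.ones(3) / 3.0
smoothed = np.convolve(daily, window, mode='same')   # 3-day moving average
print(daily, smoothed.round(2), sep="\n")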
# --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
from __future__ import print_function, division
import os
import numpy as np
def load_object_points(point_path):
assert os.path.exists(point_path), "Path does not exist: {}".format(point_path)
points = np.loadtxt(point_path)
return points
def load_points_from_obj(obj_path):
from glumpy import data
assert os.path.exists(obj_path), "Path does not exist: {}".format(obj_path)
vertices, indices = data.objload("{}".format(obj_path), rescale=True)
vertices["position"] = vertices["position"] / 10.0
points =
|
np.array(vertices["position"])
|
numpy.array
|
"""Tests for the numerical lifting line grid."""
# pylint: disable=redefined-outer-name
import pytest
import numpy as np
import machup.geometry as geom
from machup import LLGrid
PLANE_DIR = "test/geometry/testairplanes/"
@pytest.fixture
def single_wing_grid():
"""Get a LLGrid for the single_wing.json example."""
filename = PLANE_DIR+"single_wing.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
@pytest.fixture
def small_wing_grid():
"""Get a LLGrid for the straight_wing_5sect.json example."""
filename = PLANE_DIR+"straight_wing_5sect.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
@pytest.fixture
def linear_wing_grid():
"""Get LLGrid w/ linear spacing using straight_wing_5sect.json example."""
filename = PLANE_DIR+"straight_wing_5sect.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane, cosine_spacing=False)
return grid
@pytest.fixture
def vertical_wing_grid():
"""Get a LLGrid for the vertical_wing_5sect.json example."""
filename = PLANE_DIR+"vertical_wing_5sect.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
@pytest.fixture
def swept_wing_grid():
"""Get a LLGrid from the swept_wing_5sect.json example."""
filename = PLANE_DIR+"swept_wing_5sect.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
@pytest.fixture
def tapered_wing_grid():
"""Get a LLGrid from the tapered_wing_5sect.json example."""
filename = PLANE_DIR+"tapered_wing_5sect.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
@pytest.fixture
def dihedral_sweep_wing_grid():
"""Get a LLGrid from the dihedral_sweep_wing.json example."""
filename = PLANE_DIR+"dihedral_sweep_wing.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
@pytest.fixture
def aero_twist_wing_grid():
"""Get a LLGrid from the aero twist example."""
filename = PLANE_DIR+"aerodynamic_twist_wing_5sect.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
@pytest.fixture
def yoffset_wing_grid():
"""Get a LLGrid from the yoffset wing example."""
filename = PLANE_DIR+"yoffset_wing_5sect.json"
plane = geom.Airplane(inputfile=filename)
grid = LLGrid(plane)
return grid
def test_integral_of_chord_squared(dihedral_sweep_wing_grid):
controls = {
"aileron": 10.,
"elevator": 0.,
"rudder": 0.
}
int_chord2 = dihedral_sweep_wing_grid.get_integral_chord2()
test = np.array([3.819660112501050975e-01,
1.000000000000000000e+00,
1.236067977499789805e+00,
1.000000000000000000e+00,
3.819660112501050975e-01,
3.819660112501050975e-01,
1.000000000000000000e+00,
1.236067977499789805e+00,
1.000000000000000000e+00,
3.819660112501050975e-01])
assert np.allclose(int_chord2, test, rtol=0., atol=1e-12) is True
def test_get_grid_position(single_wing_grid):
# get vortex positions from grid
r_pos = single_wing_grid.get_control_point_pos()
r_1_pos, r_2_pos = single_wing_grid.get_corner_point_pos()
# set up what control point positions should be for the single wing case
num_sections = 40
span = 8.
index = np.arange(num_sections+1)
s_cp = np.zeros((num_sections*2, 3))
s_1 = np.zeros((num_sections*2, 3))
s_2 = np.zeros((num_sections*2, 3))
r_cp = (span/4.)*(1. - np.cos((np.pi/num_sections)*(index-0.5)))[1:]
r_1 = (span/4.)*(1. - np.cos(np.pi*index/num_sections))[:-1]
r_2 = (span/4.)*(1. - np.cos(np.pi*index/num_sections))[1:]
s_cp[:, 1] = np.concatenate((-np.flip(r_cp, axis=0), r_cp))
s_1[:, 1] = np.concatenate((-np.flip(r_2, axis=0), r_1))
s_2[:, 1] = np.concatenate((-np.flip(r_1, axis=0), r_2))
assert len(s_cp) == len(r_pos)
assert len(s_1) == len(r_1_pos)
assert len(s_2) == len(r_2_pos)
assert np.allclose(r_pos, s_cp, rtol=0., atol=1e-15) is True
assert np.allclose(r_1_pos, s_1, rtol=0., atol=1e-15) is True
assert np.allclose(r_2_pos, s_2, rtol=0., atol=1e-15) is True
def test_get_grid_position_swept(swept_wing_grid):
# get vortex positions from grid
r_pos = swept_wing_grid.get_control_point_pos()
r_1_pos, r_2_pos = swept_wing_grid.get_corner_point_pos()
# set up what vortex positions should be for the swept wing case
num_sections = 5
length = 5./np.cos(np.pi/4.)
index = np.arange(num_sections+1)
comp_cp = (length/2.)*(1. - np.cos((np.pi/num_sections)*(index-0.5)))[1:]
comp_1 = (length/2.)*(1. - np.cos(np.pi*index/num_sections))[:-1]
comp_2 = (length/2.)*(1. - np.cos(np.pi*index/num_sections))[1:]
comp_cp *= np.sqrt(2.)/2.
comp_1 *= np.sqrt(2.)/2.
comp_2 *= np.sqrt(2.)/2.
# comp = np.array([0.12235870926211616, 1.0305368692688170, 2.5000000000000000,
# 3.9694631307311816, 4.8776412907378832])
r_cp = np.zeros((10, 3))
r_1 = np.zeros((10, 3))
r_2 = np.zeros((10, 3))
r_cp[:, 0] = np.concatenate((-1.*np.flipud(comp_cp), -1.*comp_cp))
r_cp[:, 1] = np.concatenate((-np.flipud(comp_cp), comp_cp))
r_1[:, 0] = np.concatenate((-1.*np.flipud(comp_2), -1.*comp_1))
r_1[:, 1] = np.concatenate((-np.flipud(comp_2), comp_1))
r_2[:, 0] = np.concatenate((-1.*np.flipud(comp_1), -1.*comp_2))
r_2[:, 1] = np.concatenate((-np.flipud(comp_1), comp_2))
assert len(r_cp) == len(r_pos)
assert len(r_1) == len(r_1_pos)
assert len(r_2) == len(r_2_pos)
assert np.allclose(r_pos, r_cp, rtol=0., atol=1e-15) is True
assert np.allclose(r_1_pos, r_1, rtol=0., atol=1e-15) is True
assert np.allclose(r_2_pos, r_2, rtol=0., atol=1e-15) is True
def test_get_grid_position_linear(linear_wing_grid):
# get vortex positions from grid
r_pos = linear_wing_grid.get_control_point_pos()
r_1_pos, r_2_pos = linear_wing_grid.get_corner_point_pos()
r_cp = np.zeros((10, 3))
r_1 =
|
np.zeros((10, 3))
|
numpy.zeros
|
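Editorial aside, not part of the dataset rows: the grid tests above build cosine-clustered spanwise stations; a standalone sketch of that spacing with an invented span and section count.
import numpy as np

num_sections = 5
span = 8.0
index = np.arange(num_sections + 1)
r_cp = (span / 4.0) * (1.0 - np.cos((np.pi / num_sections) * (index - 0.5)))[1:]   # control points
r_1 = (span / 4.0) * (1.0 - np.cos(np.pi * index / num_sections))[:-1]             # left corners
r_2 = (span / 4.0) * (1.0 - np.cos(np.pi * index / num_sections))[1:]              # right corners
print(r_cp.round(4), r_1.round(4), r_2.round(4), sep="\n")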
import math
import random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import matplotlib.pyplot as plt
from collections import deque
# Use CUDA
USE_CUDA = torch.cuda.is_available()
Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
# Define the Experience Replay Buffer
class ReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
self.buffer.append((state, action, reward, next_state, done))
def sample(self, batch_size):
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
return np.concatenate(state), action, reward,
|
np.concatenate(next_state)
|
numpy.concatenate
|
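Editorial aside, not part of the dataset rows: each state stored in the replay buffer above carries an added leading axis, so sampling typically finishes by stacking with np.concatenate. A sketch with invented shapes:
import numpy as np

states = [np.expand_dims(np.random.randn(4), 0) for _ in range(3)]   # three stored states, each (1, 4)
batch = np.concatenate(states)                                       # batch of shape (3, 4)
print(batch.shape)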
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
from mxnet import gluon
from mxnet import autograd
from mxnet.gluon import nn
import numpy as np
import cv2
class ReluOp(mx.operator.CustomOp):
"""Modified ReLU as described in section 3.4 in https://arxiv.org/abs/1412.6806.
This is used for guided backpropagation to get gradients of the image w.r.t activations.
This Operator will do a regular backpropagation if `guided_backprop` is set to False
and a guided backpropagation if `guided_backprop` is set to True. Check gradcam_demo.py
for an example usage."""
guided_backprop = False
def forward(self, is_train, req, in_data, out_data, aux):
x = in_data[0]
y = nd.maximum(x, nd.zeros_like(x))
self.assign(out_data[0], req[0], y)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
if ReluOp.guided_backprop:
# Get output and gradients of output
y = out_data[0]
dy = out_grad[0]
# Zero out the negatives in the gradients of the output
dy_positives = nd.maximum(dy, nd.zeros_like(dy))
# What output values were greater than 0?
y_ones = y.__gt__(0)
# Mask out the values for which at least one of dy or y is negative
dx = dy_positives * y_ones
self.assign(in_grad[0], req[0], dx)
else:
# Regular backward for ReLU
x = in_data[0]
x_gt_zero = x.__gt__(0)
dx = out_grad[0] * x_gt_zero
self.assign(in_grad[0], req[0], dx)
def set_guided_backprop(mode=True):
ReluOp.guided_backprop = mode
@mx.operator.register("relu")
class ReluProp(mx.operator.CustomOpProp):
def __init__(self):
super(ReluProp, self).__init__(True)
def infer_shape(self, in_shapes):
data_shape = in_shapes[0]
output_shape = data_shape
return (data_shape,), (output_shape,), ()
def create_operator(self, ctx, in_shapes, in_dtypes):
return ReluOp()
class Activation(mx.gluon.HybridBlock):
@staticmethod
def set_guided_backprop(mode=False):
ReluOp.guided_backprop = mode
def __init__(self, act_type, **kwargs):
assert act_type == 'relu'
super(Activation, self).__init__(**kwargs)
def hybrid_forward(self, F, x):
return F.Custom(x, op_type='relu')
class Conv2D(mx.gluon.HybridBlock):
"""Wrapper on top of gluon.nn.Conv2D to capture the output and gradients of output of a Conv2D
layer in a network. Use `set_capture_layer_name` to select the layer
whose outputs and gradients of outputs need to be captured. After the backward pass,
`conv_output` will contain the output and `conv_output.grad` will contain the
output's gradients. Check gradcam_demo.py for example usage."""
conv_output = None
capture_layer_name = None
def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, layout='NCHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
super(Conv2D, self).__init__(**kwargs)
self.conv = nn.Conv2D(channels, kernel_size, strides=strides, padding=padding,
dilation=dilation, groups=groups, layout=layout,
activation=activation, use_bias=use_bias, weight_initializer=weight_initializer,
bias_initializer=bias_initializer, in_channels=in_channels)
def hybrid_forward(self, F, x):
out = self.conv(x)
name = self._prefix[:-1]
if name == Conv2D.capture_layer_name:
out.attach_grad()
Conv2D.conv_output = out
return out
def set_capture_layer_name(name):
Conv2D.capture_layer_name = name
def _get_grad(net, image, class_id=None, conv_layer_name=None, image_grad=False):
"""This is an internal helper function that can be used for either of these
but not both at the same time:
1. Record the output and gradient of output of an intermediate convolutional layer.
2. Record the gradients of the image.
Parameters
----------
image : NDArray
Image to visualize. This is an NDArray with the preprocessed image.
class_id : int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
Name of the convolutional layer whose output and output's gradients need to be captured.
image_grad: bool
Whether to capture gradients of the image."""
if image_grad:
image.attach_grad()
Conv2D.capture_layer_name = None
Activation.set_guided_backprop(True)
else:
# Tell convviz.Conv2D which layer's output and gradient needs to be recorded
Conv2D.capture_layer_name = conv_layer_name
Activation.set_guided_backprop(False)
# Run the network
with autograd.record(train_mode=False):
out = net(image)
# If user didn't provide a class id, we'll use the class that the network predicted
if class_id is None:
model_output = out.asnumpy()
class_id = np.argmax(model_output)
# Create a one-hot target with class_id and backprop with the created target
one_hot_target = mx.nd.one_hot(mx.nd.array([class_id]), 1000)
out.backward(one_hot_target, train_mode=False)
if image_grad:
return image.grad[0].asnumpy()
else:
# Return the recorded convolution output and gradient
conv_out = Conv2D.conv_output
return conv_out[0].asnumpy(), conv_out.grad[0].asnumpy()
def get_conv_out_grad(net, image, class_id=None, conv_layer_name=None):
"""Get the output and gradients of output of a convolutional layer.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
Name of the convolutional layer whose output and output's gradients need to be captured."""
return _get_grad(net, image, class_id, conv_layer_name, image_grad=False)
def get_image_grad(net, image, class_id=None):
"""Get the gradients of the image.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used."""
return _get_grad(net, image, class_id, image_grad=True)
def grad_to_image(gradient):
"""Convert gradients of image obtained using `get_image_grad`
into image. This shows parts of the image that is most strongly activating
the output neurons."""
gradient = gradient - gradient.min()
gradient /= gradient.max()
gradient = np.uint8(gradient * 255).transpose(1, 2, 0)
gradient = gradient[..., ::-1]
return gradient
def get_cam(imggrad, conv_out):
"""Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details"""
weights = np.mean(imggrad, axis=(1, 2))
cam = np.ones(conv_out.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * conv_out[i, :, :]
cam = cv2.resize(cam, (imggrad.shape[1], imggrad.shape[2]))
cam = np.maximum(cam, 0)
cam = (cam -
|
np.min(cam)
|
numpy.min
|
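Editorial aside, not part of the dataset rows: the truncated line above starts the usual Grad-CAM post-processing, shifting the heatmap to be non-negative and rescaling it; a sketch on an invented 2x2 map (the epsilon is an assumption to avoid division by zero).
import numpy as np

cam = np.array([[-0.5, 0.0], [1.5, 3.5]])
cam = np.maximum(cam, 0)                             # keep only positive evidence
cam = (cam - np.min(cam)) / (np.max(cam) + 1e-8)     # rescale to roughly [0, 1]
print(cam)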
from __future__ import annotations
from typing import Tuple, Optional
import numpy as np
import matplotlib
from .ellipsoid import Ellipsoid
import time
class EllipsoidObj():
def __init__(self,
ellipsoid: Optional[Ellipsoid] = None,
q: np.ndarray = np.array([]),
Q: np.ndarray =
|
np.array([[], []])
|
numpy.array
|
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import time
import multiprocessing as mp
import ctypes as c
import numpy as np
from autograd import grad
EPS = 1e-9
def get_value(G, U, I, F, key):
tensor_value1 = np.einsum('abc,a->bc', G, U[key[0]])
tensor_value2 = np.einsum('bc,b->c', tensor_value1, I[key[1]])
return np.einsum('c,c-> ', tensor_value2, F[key[2]])
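# Editor's illustrative sketch (not part of the original module): the three chained
# einsum calls in get_value above amount to a single full contraction of the core
# tensor with one factor vector per mode. Shapes below are arbitrary placeholders.
def _check_get_value_contraction():
    G = np.random.randn(2, 3, 4)
    u, i, f = np.random.randn(2), np.random.randn(3), np.random.randn(4)
    direct = np.einsum('abc,a,b,c->', G, u, i, f)
    chained = np.einsum('c,c->', np.einsum('bc,b->c', np.einsum('abc,a->bc', G, u), i), f)
    assert np.isclose(direct, chained)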
def sign(a, b):
return 1 if a > b else -1
def grad_worker_mse(user_item_aspect, user_aspect_opinion, item_aspect_opinion,
G1, G2, G3, U, I, A, O,
error_square, error_bpr, lock, q_samples_mse,
del_g1, del_g2, del_g3, del_u, del_i, del_a, del_o, num_grad):
while 1:
if not q_samples_mse.empty():
sample = q_samples_mse.get()
if not sample:
break
uia_samples, uao_samples, iao_samples = sample
for [user_idx, item_idx, aspect_idx] in uia_samples:
key = (user_idx, item_idx, aspect_idx)
pred_rating = get_value(G1, U, I, A, key)
score = user_item_aspect[key]
del_sqerror = 2 * (pred_rating - score)
lock.acquire()
error_square.value += (pred_rating - score) ** 2
del_g1 += del_sqerror * \
np.einsum('ab,c->abc',
np.einsum('a,b->ab', U[user_idx], I[item_idx]),
A[aspect_idx])
del_u[user_idx] += del_sqerror * \
np.einsum('ac,c->a',
np.einsum('abc,b->ac', G1, I[item_idx]),
A[aspect_idx])
del_i[item_idx] += del_sqerror * \
np.einsum('bc,c->b',
np.einsum('abc,a->bc', G1, U[user_idx]),
A[aspect_idx])
del_a[aspect_idx] += del_sqerror * \
np.einsum('bc,b->c',
np.einsum('abc,a->bc', G1, U[user_idx]),
I[item_idx])
lock.release()
for [user_idx, aspect_idx, opinion_idx] in uao_samples:
key = (user_idx, aspect_idx, opinion_idx)
pred_rating = get_value(G2, U, A, O, key)
score = user_aspect_opinion[key]
del_sqerror = 2 * (pred_rating - score)
lock.acquire()
error_square.value += (pred_rating - score) ** 2
del_g2 += del_sqerror * \
np.einsum('ab,c->abc',
np.einsum('a,b->ab', U[user_idx], A[aspect_idx]),
O[opinion_idx])
del_u[user_idx] += del_sqerror * \
np.einsum('ac,c->a',
np.einsum('abc,b->ac', G2, A[aspect_idx]),
O[opinion_idx])
del_a[aspect_idx] += del_sqerror * \
np.einsum('bc,c->b',
np.einsum('abc,a->bc', G2, U[user_idx]),
O[opinion_idx])
del_o[opinion_idx] += del_sqerror * \
np.einsum('bc,b->c',
np.einsum('abc,a->bc', G2, U[user_idx]),
A[aspect_idx])
lock.release()
for [item_idx, aspect_idx, opinion_idx] in iao_samples:
key = (item_idx, aspect_idx, opinion_idx)
pred_rating = get_value(G3, I, A, O, key)
score = item_aspect_opinion[key]
del_sqerror = 2 * (pred_rating - score)
lock.acquire()
error_square.value += (pred_rating - score) ** 2
del_g3 += del_sqerror * \
np.einsum('ab,c->abc',
np.einsum('a,b->ab', I[item_idx], A[aspect_idx]),
O[opinion_idx])
del_i[item_idx] += del_sqerror * \
np.einsum('ac,c->a',
np.einsum('abc,b->ac', G3, A[aspect_idx]),
O[opinion_idx])
del_a[aspect_idx] += del_sqerror * \
np.einsum('bc,c->b',
np.einsum('abc,a->bc', G3, I[item_idx]),
O[opinion_idx])
del_o[opinion_idx] += del_sqerror * \
np.einsum('bc,b->c',
np.einsum('abc,a->bc', G3, I[item_idx]),
A[aspect_idx])
lock.release()
lock.acquire()
num_grad.value += 1
lock.release()
def grad_worker_bpr(rating_matrix, lambda_bpr,
G1, U, I, A, error_square, error_bpr, lock, q_samples_bpr,
del_g1, del_u, del_i, del_a, num_grad):
while 1:
if not q_samples_bpr.empty():
sample = q_samples_bpr.get()
if not sample:
break
bpr_sample_ele, item2_sample = sample
for i, [user_idx, item_idx1] in enumerate(bpr_sample_ele):
item_idx2 = item2_sample[i]
user_item_vector = rating_matrix[user_idx, :].A.flatten()
if user_item_vector[item_idx1] != user_item_vector[item_idx2]:
pred_x_ij = ((get_value(G1, U, I, A, (user_idx, item_idx1, -1)) - get_value(G1, U, I, A, (user_idx, item_idx2, -1)))
* sign(user_item_vector[item_idx1], user_item_vector[item_idx2]))
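                    # Gradient of the BPR log-likelihood term log(sigmoid(pred_x_ij)):
                    # d/dx log(sigmoid(x)) = exp(-x) / (1 + exp(-x)), scaled below by
                    # lambda_bpr and by the sign of the observed preference.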
del_bpr = (lambda_bpr
* (np.exp(-pred_x_ij) / (1 + np.exp(-pred_x_ij)))
* sign(user_item_vector[item_idx1], user_item_vector[item_idx2]))
lock.acquire()
error_bpr.value += np.log(1 / (1 + np.exp(-pred_x_ij)))
item_diff = I[item_idx1] - I[item_idx2]
del_g1 -= del_bpr * \
np.einsum('ab,c->abc',
np.einsum('a,b->ab', U[user_idx], item_diff),
A[-1])
del_u[user_idx] -= del_bpr * \
np.einsum('ac,c->a',
np.einsum('abc,b->ac', G1, item_diff),
A[-1])
del_i[item_idx1] -= del_bpr * \
np.einsum('bc,c->b',
np.einsum('abc,a->bc', G1, U[user_idx]),
A[-1])
del_i[item_idx2] += del_bpr * \
np.einsum('bc,c->b',
np.einsum('abc,a->bc', G1, U[user_idx]),
A[-1])
del_a[-1] -= del_bpr * \
np.einsum('bc,b->c',
np.einsum('abc,a->bc', G1, U[user_idx]),
item_diff)
lock.release()
lock.acquire()
num_grad.value += 1
lock.release()
def paraserver(user_item_pairs, user_item_aspect, user_aspect_opinion, item_aspect_opinion,
n_element_samples, n_bpr_samples, lambda_reg, n_epochs, lr,
G1, G2, G3, U, I, A, O,
error_square, error_bpr, q_samples_mse, q_samples_bpr,
del_g1, del_g2, del_g3, del_u, del_i, del_a, del_o, num_grad, n_threads, seed=None, verbose=False):
from ...utils import get_rng
rng = get_rng(seed)
sum_square_gradients_G1 = np.zeros_like(G1)
sum_square_gradients_G2 = np.zeros_like(G2)
sum_square_gradients_G3 = np.zeros_like(G3)
sum_square_gradients_U = np.zeros_like(U)
sum_square_gradients_I = np.zeros_like(I)
sum_square_gradients_A = np.zeros_like(A)
sum_square_gradients_O = np.zeros_like(O)
mse_per_proc = int(n_element_samples / n_threads)
bpr_per_proc = int(n_bpr_samples / n_threads)
user_item_aspect_keys = np.array(list(user_item_aspect.keys()))
user_aspect_opinion_keys = np.array(list(user_aspect_opinion.keys()))
item_aspect_opinion_keys = np.array(list(item_aspect_opinion.keys()))
user_item_pairs_keys = np.array(user_item_pairs)
for epoch in range(n_epochs):
start_time = time.time()
if verbose:
print('iteration:', epoch + 1, '/', n_epochs)
error_square.value = 0
error_bpr.value = 0
uia_samples = user_item_aspect_keys[
rng.choice(len(user_item_aspect_keys), size=n_element_samples)]
uao_samples = user_aspect_opinion_keys[
rng.choice(len(user_aspect_opinion_keys), size=n_element_samples)]
iao_samples = item_aspect_opinion_keys[
rng.choice(len(item_aspect_opinion_keys), size=n_element_samples)]
bpr_sample_ele = user_item_pairs_keys[
rng.choice(len(user_item_pairs_keys), size=n_bpr_samples)]
item2_sample = rng.choice(range(0, I.shape[0]), size=n_bpr_samples)
num_grad.value = 0
del_g1[:] = 0
del_g2[:] = 0
del_g3[:] = 0
del_u[:] = 0
del_i[:] = 0
del_a[:] = 0
del_o[:] = 0
for i in range(n_threads):
q_samples_mse.put(
(uia_samples[mse_per_proc * i:mse_per_proc * (i + 1)],
uao_samples[mse_per_proc * i:mse_per_proc * (i + 1)],
iao_samples[mse_per_proc * i:mse_per_proc * (i + 1)]))
q_samples_bpr.put(
(bpr_sample_ele[bpr_per_proc * i:bpr_per_proc * (i + 1)],
item2_sample[bpr_per_proc * i:bpr_per_proc * (i + 1)]))
while 1:
if num_grad.value == 2 * n_threads:
break
del_g1_reg = del_g1 + lambda_reg * G1 * (del_g1 != 0)
del_g2_reg = del_g2 + lambda_reg * G2 * (del_g2 != 0)
del_g3_reg = del_g3 + lambda_reg * G3 * (del_g3 != 0)
del_u_reg = del_u + lambda_reg * U * (del_u != 0)
del_i_reg = del_i + lambda_reg * I * (del_i != 0)
del_a_reg = del_a + lambda_reg * A * (del_a != 0)
del_o_reg = del_o + lambda_reg * O * (del_o != 0)
sum_square_gradients_G1 += EPS + np.square(del_g1_reg)
sum_square_gradients_G2 += EPS +
|
np.square(del_g2_reg)
|
numpy.square
|
## Bollinger Bands
import timeit
start_time = timeit.default_timer()
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import datetime
from datetime import timedelta
import io
def get_num_lines(fname):
with open(fname) as f:
for i, _ in enumerate(f):
pass
return i + 1
num_lines = get_num_lines("gsef_output/historical_gsef.csv")
n = 253 #load the data of 1 trading year (number of trading days per year is 253)
df = pd.read_csv("gsef_output/historical_gsef.csv", skiprows=range(1, num_lines-n), parse_dates=['Date'], usecols= ["Date", "Close"], dayfirst=True)
closing_prices = df['Close'] # Use only closing prices
def get_sma(prices, rate):
return prices.rolling(rate).mean()
def get_bollinger_bands(prices, rate= 20):
sma = get_sma(prices, rate)
std = prices.rolling(rate).std()
bollinger_up = sma + std * 2 # Calculate top band
bollinger_down = sma - std * 2 # Calculate bottom band
return bollinger_up, bollinger_down
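# Illustrative usage sketch (not part of the original pipeline): the bands are just
# the rolling mean +/- two rolling standard deviations, e.g.
#   upper, lower = get_bollinger_bands(closing_prices, rate=20)
#   plt.plot(closing_prices); plt.plot(upper); plt.plot(lower); plt.show()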
df.index =
|
np.arange(df.shape[0])
|
numpy.arange
|
"""Tests probflow.utils.plotting module and methods which use it"""
import matplotlib.pyplot as plt
import numpy as np
import pytest
import tensorflow as tf
import probflow as pf
def test_approx_kde(plot):
"""Tests utils.plotting.approx_kde"""
data = np.random.randn(1000)
x, y = pf.utils.plotting.approx_kde(data)
assert x.shape[0] == y.shape[0]
if plot:
plt.plot(x, y)
plt.title("should be kde density of samples from norm dist")
plt.show()
def test_get_next_color():
"""Tests utils.plotting.get_next_color"""
# default
col = pf.utils.plotting.get_next_color(None, 0)
assert isinstance(col, str)
assert col[0] == "#"
# list of colors
col = pf.utils.plotting.get_next_color(["#eeefff", "#gggaaa"], 1)
assert isinstance(col, str)
assert col[0] == "#"
# single color
col = pf.utils.plotting.get_next_color("#eeefff", 1)
assert isinstance(col, str)
assert col[0] == "#"
def test_get_ix_label():
"""Tests utils.plotting.get_ix_label"""
# 1d
lab = pf.utils.plotting.get_ix_label(2, [3])
assert isinstance(lab, str)
assert lab == "2"
# 2d
lab = pf.utils.plotting.get_ix_label(5, [3, 3])
assert isinstance(lab, str)
assert lab == "[2, 1]"
# 3d
lab = pf.utils.plotting.get_ix_label(5, [3, 3, 3])
assert isinstance(lab, str)
assert lab == "[2, 1, 0]"
def test_plot_dist(plot):
"""Tests utils.plotting.plot_dist"""
data = np.random.randn(1000)
# Should error on invalid ci
with pytest.raises(ValueError):
pf.utils.plotting.plot_dist(data, ci=-0.1)
with pytest.raises(ValueError):
pf.utils.plotting.plot_dist(data, ci=1.1)
pf.utils.plotting.plot_dist(data)
if plot:
plt.title("should be kde density (filled) of samples from norm dist")
plt.show()
pf.utils.plotting.plot_dist(data, ci=0.9)
if plot:
plt.title(
"should be kde density (filled) of samples from norm dist w/ ci"
)
plt.show()
pf.utils.plotting.plot_dist(data, style="line")
if plot:
plt.title("should be line plot of samples from norm dist")
plt.show()
pf.utils.plotting.plot_dist(data, style="line", ci=0.9)
if plot:
plt.title("should be line plot of samples from norm dist w/ ci")
plt.show()
pf.utils.plotting.plot_dist(data, style="hist")
if plot:
plt.title("should be line plot of samples from norm dist")
plt.show()
pf.utils.plotting.plot_dist(data, style="hist", ci=0.9)
if plot:
plt.title("should be line plot of samples from norm dist w/ ci")
plt.show()
# Should error on invalid style
with pytest.raises(ValueError):
pf.utils.plotting.plot_dist(data, style="lala")
# Should be able to show multiple distributions
data = np.random.randn(1000, 3) + np.array([[-2.0, 0.0, 2.0]])
pf.utils.plotting.plot_dist(data, ci=0.9)
if plot:
plt.title("should be 3 kde density (filled) w/ ci")
plt.show()
def test_plot_line(plot):
"""Tests utils.plotting.plot_line"""
x = np.linspace(0, 10, 100)
y = np.random.randn(100)
# Should error on invalid shapes
with pytest.raises(ValueError):
pf.utils.plotting.plot_line(x, np.random.randn(5))
pf.utils.plotting.plot_line(x, y)
if plot:
plt.title("should be noisy line")
plt.show()
pf.utils.plotting.plot_line(x, np.random.randn(100, 3))
if plot:
plt.title("should be 3 noisy lines w/ labels")
plt.show()
def test_fill_between(plot):
"""Tests utils.plotting.fill_between"""
x = np.linspace(0, 10, 100)
y1 = np.random.randn(100)
y2 = np.random.randn(100) + 5
# Should error on invalid shapes
with pytest.raises(ValueError):
pf.utils.plotting.fill_between(x, y1, np.random.randn(3))
with pytest.raises(ValueError):
pf.utils.plotting.fill_between(np.random.randn(3), y1, y2)
pf.utils.plotting.fill_between(x, y1, y2)
if plot:
plt.title("should be one filled area")
plt.show()
y1 = np.random.randn(100, 3)
y2 = np.random.randn(100, 3) + 3
y1 +=
|
np.array([0, 5, 10])
|
numpy.array
|
"""
A module for unit tests of the logic module
Todo:
* automate the creation of particle arrays
    so that it isn't hardcoded in each test func
"""
import unittest
import numpy as np
from unittest.mock import Mock
from ..sbelt import logic
ATTR_COUNT = 7 # Number of attributes associated with a Particle
# For reference:
# [0] = x-coord
# [1] = diameter,
# [2] = y-coord (elevation),
# [3] = uid,
# [4] = active (boolean)
# [5] = age counter
# [6] = loop age counter
class TestGetEventParticlesWithOneSubregion(unittest.TestCase):
"""
Test that getting event particles with one Subregion
returns a valid list of event particles.
A 'valid list' will change depending on the function.
See function docstrings for more details.
Attributes:
test_length: the length of the bed
num_particles: the number of model particles
mock_sub_list: list of Mock-type subregions
entrainment_events: number of entrainment events to request
per subregion
level_limit: random int representing level limit
"""
def setUp(self):
self.test_length = 10
self.num_particles = 3
mock_subregion = Mock()
mock_subregion.leftBoundary.return_value = 0
mock_subregion.rightBoundary.return_value = self.test_length
mock_subregion.getName.return_value = 'Mock_Subregion'
self.mock_sub_list = [mock_subregion]
self.entrainment_events = 3
self.level_limit = np.random.randint(0, np.random.randint(2, 10))
def test_all_active_returns_valid_list(self):
"""If there are N active particles in 1 subregion and N events requested
per subregion then a valid list will be a list of all particles.
"""
model_particles = np.zeros((self.num_particles, ATTR_COUNT))
model_particles[:,3] = np.arange(self.num_particles) # unique ids
model_particles[:,4] = np.ones(self.num_particles) # all active
model_particles[:,0] = np.random.randint(
self.test_length,
size=self.num_particles ) # random placement
list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
model_particles,
self.level_limit )
self.assertCountEqual(list, model_particles[:,3])
        # Height dependency should not affect list results here
hp_list = list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
model_particles,
self.level_limit,
height_dependant=True )
self.assertCountEqual(hp_list, model_particles[:,3])
self.assertCountEqual(hp_list, list)
def test_not_all_active_returns_list_of_2(self):
"""If there are N particles in 1 subregion and N-1 are _active_,
and if N events are requested per subregion then a valid list will be
a list of the two active particles.
"""
mp_one_inactive = np.zeros((self.num_particles, ATTR_COUNT))
mp_one_inactive[:,3] = np.arange(self.num_particles)
mp_one_inactive[0][4] = 1
mp_one_inactive[1][4] = 1
mp_one_inactive[:,0] = np.random.randint(self.test_length, size=self.num_particles)
list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
mp_one_inactive,
self.level_limit )
self.assertEqual(len(list), self.num_particles - 1)
active_list = mp_one_inactive[mp_one_inactive[:,4] != 0]
self.assertCountEqual(list, active_list[:,3])
def test_none_active_returns_empty_list(self):
"""If there are N particles in 1 subregion and 0 are _active_
and if N events are requested per subregion, then a valid list will be
an empty list.
"""
np_none_active = np.zeros((self.num_particles, ATTR_COUNT))
np_none_active[:,3] = np.arange(self.num_particles)
np_none_active[:,0] = np.random.randint(self.test_length, size=self.num_particles)
empty_list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
np_none_active,
self.level_limit )
self.assertEqual(len(empty_list), 0)
def test_all_ghost_particles_returns_ghost_particles(self):
"""If there are N particles in 1 subregion and all N particles
are 'ghost' particles (at -1), and if N particles are requested
per subregion, then a valid list will be a list of all the
ghost particles (all the particles).
"""
np_all_ghost = np.zeros((self.num_particles, ATTR_COUNT))
np_all_ghost[:,3] = np.arange(self.num_particles)
np_all_ghost[:,0] = -1
ghost_list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list,
np_all_ghost,
self.level_limit )
self.assertCountEqual(ghost_list, np_all_ghost[:,3])
class TestGetEventParticlesWithNSubregions(unittest.TestCase):
"""
    Test that getting event particles with N Subregions
returns a valid list of event particles.
A 'valid list' will change depending on the function.
See function docstrings for more details.
Attributes:
test_length: the length of the bed
num_particles: the number of model particles
mock_sub_list_2: list of Mock-type subregions
entrainment_events: number of entrainment events to request
per subregion
level_limit: random int representing level limit
"""
def setUp(self):
self.test_length = 20
self.num_particles = 6
mock_subregion_0 = Mock()
mock_subregion_0.leftBoundary.return_value = 0
mock_subregion_0.rightBoundary.return_value = self.test_length / 2
mock_subregion_0.getName.return_value = 'Mock_Subregion_0'
mock_subregion_1 = Mock()
mock_subregion_1.leftBoundary.return_value = self.test_length / 2
mock_subregion_1.rightBoundary.return_value = self.test_length
mock_subregion_1.getName.return_value = 'Mock_Subregion_1'
self.mock_sub_list_2 = [mock_subregion_0, mock_subregion_1]
self.entrainment_events = 3
self.level_limit = np.random.randint(0, np.random.randint(2, 10))
def test_all_active_returns_3_per_subregion(self):
"""If there are M active particles in each of the N subregions and there
are M events requested per subregion, then a valid list will be a
list of all M*N particles.
"""
model_particles = np.zeros((self.num_particles, ATTR_COUNT))
model_particles[:,3] = np.arange(self.num_particles) # unique ids
model_particles[:,4] = np.ones(self.num_particles) # all active
# Randomly place first three particles in Subregion 1
model_particles[0:3, 0] = np.random.randint(
9,
size=3 )
# Randomly place last three particles in Subregion 2
model_particles[3:6, 0] = np.random.randint(
11,
self.test_length,
size=3 )
list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list_2,
model_particles,
self.level_limit )
self.assertCountEqual(list, model_particles[:,3])
self.assertEqual(len(list), self.entrainment_events * 2)
        # Height dependency should not affect list results here
hp_list = list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list_2,
model_particles,
self.level_limit,
height_dependant=True )
self.assertCountEqual(hp_list, model_particles[:,3])
self.assertCountEqual(hp_list, list)
def test_active_in_1_subregion_returns_only_active(self):
"""If there are M active particles in each 1..K subregions and 0
active in K+1...N subregions, and there are M events requested per
subregion, then a valid list will be a list of the M*K active particles.
This is simplified down to only 2 subregions.
"""
mp_half_active = np.zeros((self.num_particles, ATTR_COUNT))
mp_half_active[:,3] = np.arange(self.num_particles)
mp_half_active[0:3, 4] = np.ones(int((self.num_particles/2))) # First half active
mp_half_active[0:3, 0] = np.random.randint(10,size=3 )
mp_half_active[3:6, 0] = np.random.randint(10, self.test_length, size=3 )
list = logic.get_event_particles(
self.entrainment_events,
self.mock_sub_list_2,
mp_half_active,
self.level_limit )
active_particles = mp_half_active[mp_half_active[:,4] != 0]
self.assertCountEqual(list, active_particles[:,3])
self.assertEqual(len(list), 3)
def test_particle_on_boundary_is_not_returned_twice(self):
""" Test that a particle resting on a boundary between
Subregions (recall, other than upstream and downstream
boundaries, Subregions share boundaries) will not
be selected for entrainment twice.
"""
one_particle_on_boundary = np.zeros((1, ATTR_COUNT))
one_particle_on_boundary[0][4] = 1
one_particle_on_boundary[0][0] = 10
# Use custom entrainment_event count for simplicity
entrainment_events = 1
list = logic.get_event_particles(
entrainment_events,
self.mock_sub_list_2,
one_particle_on_boundary,
self.level_limit )
self.assertEqual(len(list), 1)
# Test Define Subregions
class TestDefineSubregions(unittest.TestCase):
""" Test define subregions module
Attributes:
bed_length: the length of the bed the subregion
is being defined on/in
iterations: the number of iterations that a
subregion needs to maintain data for
"""
def setUp(self):
self.bed_length = 10
self.iterations = 10
def test_good_parameters_return_good_subregion_list(self):
""" If the bed length is divisible by the number of
subregions, and the iterations and bed length are valid
int inputs then the function should return a list of
Subregion objects whose boundaries overlap exactly
with the length of the stream and each other.
For example:
if bed_length = 2, num_subregions = 2 then a
valid list will be [subregion_1, subregion_2]
where:
subregion_1.leftBoundary() = 0
                subregion_1.rightBoundary() = 1
subregion_2.leftBoundary() = 1
subregion_2.rightBoundary() = 2
"""
subregion_count_even = 2
left_boundary = 0
middle_boundary = self.bed_length / 2
right_boundary = self.bed_length
subregion_list = logic.define_subregions(self.bed_length,
subregion_count_even,
self.iterations)
# Check number of subregions
self.assertEqual(len(subregion_list), 2)
# Check boundary definitions
self.assertEqual(subregion_list[0].leftBoundary(), left_boundary)
self.assertEqual(subregion_list[0].rightBoundary(), middle_boundary)
self.assertEqual(subregion_list[1].leftBoundary(), middle_boundary)
self.assertEqual(subregion_list[1].rightBoundary(), right_boundary)
subregion_count_odd = 5
sub_length = self.bed_length / subregion_count_odd
left_boundary = 0
middle_boundary_1 = left_boundary + sub_length*1
middle_boundary_2 = left_boundary + sub_length*2
middle_boundary_3 = left_boundary + sub_length*3
middle_boundary_4 = left_boundary + sub_length*4
right_boundary = self.bed_length
subregion_list_odd = logic.define_subregions(self.bed_length,
subregion_count_odd,
self.iterations)
# Check number of subregions
self.assertEqual(len(subregion_list_odd), 5)
# Check boundary definitions
self.assertEqual(subregion_list_odd[0].leftBoundary(), left_boundary)
self.assertEqual(subregion_list_odd[0].rightBoundary(), middle_boundary_1)
self.assertEqual(subregion_list_odd[1].leftBoundary(), middle_boundary_1)
self.assertEqual(subregion_list_odd[1].rightBoundary(), middle_boundary_2)
self.assertEqual(subregion_list_odd[2].leftBoundary(), middle_boundary_2)
self.assertEqual(subregion_list_odd[2].rightBoundary(), middle_boundary_3)
self.assertEqual(subregion_list_odd[3].leftBoundary(), middle_boundary_3)
self.assertEqual(subregion_list_odd[3].rightBoundary(), middle_boundary_4)
self.assertEqual(subregion_list_odd[4].leftBoundary(), middle_boundary_4)
self.assertEqual(subregion_list_odd[4].rightBoundary(), right_boundary)
def test_all_subregion_flux_are_init_0(self):
"""
All values in flux_lists should be 0 at initialization.
"""
subregion_count_even = 2
empty_list = np.zeros(self.iterations, dtype=np.int64)
subregion_list_even = logic.define_subregions(self.bed_length,
subregion_count_even,
self.iterations)
for subregion in subregion_list_even:
self.assertEqual(len(subregion.getFluxList()), self.iterations)
self.assertCountEqual(subregion.getFluxList(), empty_list)
# TODO: test incrementFlux()
# Test Build Streambed
class TestBuildStreambed(unittest.TestCase):
""" Test build_streambed module """
def test_compat_diam_returns_good_particles(self):
""" Test that bed is 'tightly packed' meaning
        that bed particles are placed beside each other
and that no part of any particle exists beyond
the range [0, bed_length]. Also test that
attributes are initialized to proper values.
"""
stream_length = 100
diameter = 0.5
expected_number_particles = stream_length / diameter
bed_particles = logic.build_streambed(stream_length, diameter)
self.assertEqual(len(bed_particles), expected_number_particles)
expected_centres = np.arange(diameter/2, expected_number_particles*diameter, step=diameter)
expected_ids = np.arange(1, int(expected_number_particles)+1)*-1
expected_attr = np.zeros(int(expected_number_particles))
expected_diam = np.ones(int(expected_number_particles))*diameter
# Reverse array when testing because bed_particles array is packed from index N to 0
self.assertIsNone(np.testing.assert_array_equal(expected_centres[::-1], bed_particles[:,0]))
self.assertIsNone(np.testing.assert_array_equal(expected_ids[::-1], bed_particles[:,3]))
self.assertIsNone(np.testing.assert_array_equal(expected_diam[::-1], bed_particles[:,1]))
# attributes y, active, age, and loop should all be 0
for attribute_idx in [2, 4, 5, 6]:
self.assertIsNone(np.testing.assert_array_equal(expected_attr, bed_particles[:,attribute_idx]))
final_particle_idx = -len(bed_particles)
final_particle_extent = bed_particles[final_particle_idx][0] + diameter/2
self.assertEqual(final_particle_extent, stream_length)
class TestSetModelParticles(unittest.TestCase):
""" Test the set_model_particles module
Attributes:
diam: diameter of all particles in the test
pack_fraction: float representing packing density value
h: float value derived from diam - used in geometric
            placement of particles on top of other particles
bed_particles: An n-7 array representing the bed particles
for this test
available_vertices: A numpy array of vertices a particle
is allowed to be placed at. Created with np.arange()
"""
def setUp(self):
stream_length = 10
self.diam = 0.5
self.pack_fraction = 0.8
# Directly from https://math.stackexchange.com/questions/2293201/
# Variables used for geometric placement
d = np.divide(np.multiply(np.divide(self.diam, 2),
self.diam),
self.diam)
self.h = np.sqrt(np.square(self.diam) - np.square(d))
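        # With d = diam/2 this gives h = sqrt(diam**2 - (diam/2)**2) = diam*sqrt(3)/2:
        # the vertical offset of a particle's centre when it rests in the valley
        # between two touching particles of the same diameter.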
# Mock a full bed_particles array
num_bed_particles = int(stream_length/self.diam)
bed_particles = np.zeros([num_bed_particles, ATTR_COUNT], dtype=float)
bed_particles[:,0] = np.arange(self.diam/2, stream_length+(self.diam/2), step=self.diam)
bed_particles[:,3] = np.arange(1, num_bed_particles+1)*-1
self.bed_particles = bed_particles
# Make all vertices created by the touching bed particles available
# -----> 0.5, 1.0, 1.5, ... , 9.5 (with stream length 10)
self.available_vertices = np.arange(self.diam, stream_length, step=self.diam)
def test_model_particles_placed_at_valid_locations(self):
""" Test that the function places particles
only at vectors provided by available_vertices
"""
model_particles, model_supports = logic.set_model_particles(self.bed_particles,
self.available_vertices,
self.diam,
self.pack_fraction,
self.h)
# Particles should only be placed at available vertices
self.assertTrue(set(model_particles[:,0]).issubset(self.available_vertices))
# All placements should be unique
self.assertTrue(len(model_particles[:,0]) == len(set(model_particles[:,0])))
        # All ids should be unique
        self.assertTrue(len(model_particles[:,3]) == len(set(model_particles[:,3])))
        # There should be no stacking
        self.assertEqual(len(set(model_particles[:,2])), 1)
def test_all_model_particles_have_valid_initial_attributes(self):
""" Test that the function produces particles
with valid initial attributes. Valid initial
attributes are unique IDs, correct diameter,
active = 1, age = 0, loop = 0, and that
all supports should be from the bed (id < 0)
"""
model_particles, model_supports = logic.set_model_particles(self.bed_particles,
self.available_vertices,
self.diam,
self.pack_fraction,
self.h)
# all diam = self.diam
expected_diam = np.ones(len(model_particles)) * self.diam
self.assertCountEqual(model_particles[:,1], expected_diam)
# unique id's
self.assertTrue(len(model_particles[:,3]) == len(set(model_particles[:,3])))
# all model are active
expected_activity = np.ones(len(model_particles))
self.assertCountEqual(model_particles[:,4], expected_activity)
# 0 age counter and loop age
expected_age_and_loop = np.zeros(len(model_particles))
self.assertCountEqual(model_particles[:,5], expected_age_and_loop)
self.assertCountEqual(model_particles[:,6], expected_age_and_loop)
# Supports should all be negative (resting on the bed)
self.assertEqual(0, len(model_supports[model_supports > 0]))
class TestComputeAvailableVerticesLifted(unittest.TestCase):
""" Test compute_available_vertices function with
the lifted argument set to True.
Attributes:
stream_length: int representing length of test stream
diam: float representing diam of test particles
bed_particles: An n-7 array representing the test bed particles
expected_bed_vertices: An np array of the available vertices
that an empty bed should produce if all is working well
"""
def setUp(self):
# make bed particles
self.stream_length = 5
self.diam = 0.5
# Mock a full bed_particles array
num_bed_particles = int(self.stream_length/self.diam) # 10 bed particles
bed_particles = np.zeros([num_bed_particles, ATTR_COUNT], dtype=float)
bed_particles[:,0] = np.arange(self.diam/2, self.stream_length+(self.diam/2), step=self.diam)
bed_particles[:,3] = np.arange(num_bed_particles) # unique ids
self.bed_particles = bed_particles
self.expected_bed_vertices = np.arange(self.diam, self.stream_length, step=self.diam)
def test_only_bed_and_empty_lifted_returns_expected_bed_vert(self):
"""If there are no model particles, and the lifted array is
empty, then the available vertices should = expected_bed_vertices
"""
level_limit = 3 # Arbitrary level limit
empty_model_particles = np.empty((0, ATTR_COUNT))
# Bed of length n should return n-1 available vertices
available_vertices = logic.compute_available_vertices(empty_model_particles, self.bed_particles, self.diam,
level_limit=level_limit, lifted_particles=[])
self.assertEqual(len(self.bed_particles)-1, len(available_vertices))
self.assertCountEqual(available_vertices, self.expected_bed_vertices)
def test_bed_and_all_model_lifted_returns_expected_bed_vertices(self):
"""If there are N model particles resting directly on the bed and
the lifted array has all N model particle ids in it, then the available
vertices should = expected_bed_vertices
"""
level_limit = 3
num_model_particles = 3
model_particles = np.zeros([num_model_particles, ATTR_COUNT], dtype=float)
# Particles will be at the first 3 available vertices
model_particles[:,0] = self.expected_bed_vertices[0:3]
model_particles[:,3] = np.arange(num_model_particles)
# Bed of length n should return n-1 available vertices
available_vertices = logic.compute_available_vertices(model_particles, self.bed_particles, self.diam,
level_limit=level_limit, lifted_particles=model_particles[:,3].astype(int))
self.assertEqual(len(available_vertices), len(self.bed_particles)-1)
self.assertCountEqual(available_vertices, self.expected_bed_vertices)
def test_not_touching_and_one_lifted_model_returns_valid_vertices(self):
""" If there are N model particles resting directly on the bed and K are
lifted then the available vertices should be
expected_bed_vertices - (x locations of the N-K unlifted particles)
"""
level_limit = 3
num_model_particles = 3
model_particles = np.zeros([num_model_particles, ATTR_COUNT], dtype=float)
# Particles will be at the first 3 available vertices
model_particles[:,0] = self.expected_bed_vertices[0:3]
model_particles[:,3] = np.arange(num_model_particles)
        # Lift the first particle and keep the later 2 particles -- therefore the location
        # of the first particle should be available and the locations of the second and
        # third particles should not be available
available_vertices = logic.compute_available_vertices(model_particles, self.bed_particles, self.diam,
level_limit=level_limit, lifted_particles=model_particles[0][3].astype(int))
expected_vertices = np.delete(self.expected_bed_vertices, [1,2])
self.assertEqual(len(available_vertices), len(expected_vertices))
self.assertCountEqual(available_vertices, expected_vertices)
class TestComputeAvailableVerticesNotLifted(unittest.TestCase):
""" Test compute_available_vertices function with
the lifted argument set to False.
Attributes:
stream_length: int representing length of test stream
diam: float representing diam of test particles
bed_particles: An n-7 array representing the test bed particles
expected_bed_vertices: An np array of the available vertices
that an empty bed should produce if all is working well
"""
def setUp(self):
# make bed particles
self.stream_length = 5
self.diam = 0.5
# Mock a full bed_particles array
num_bed_particles = int(self.stream_length/self.diam) # 10 bed particles
bed_particles = np.zeros([num_bed_particles, ATTR_COUNT], dtype=float)
bed_particles[:,0] = np.arange(self.diam/2, self.stream_length+(self.diam/2), step=self.diam)
bed_particles[:,3] = np.arange(num_bed_particles) # unique ids
self.bed_particles = bed_particles
self.expected_bed_vertices = np.arange(self.diam, self.stream_length, step=self.diam)
def test_only_bed_returns_expected_bed_vertices(self):
"""If there are no model particles then the
available vertices should = expected_bed_vertices
"""
level_limit = 3 # Arbitrary level limit
empty_model_particles = np.empty((0, ATTR_COUNT))
# Bed of length n should return n-1 available vertices
available_vertices = logic.compute_available_vertices(empty_model_particles, self.bed_particles, self.diam,
level_limit=level_limit)
self.assertEqual(len(self.bed_particles)-1, len(available_vertices))
self.assertCountEqual(available_vertices, self.expected_bed_vertices)
def test_one_model_particle_returns_bed_available_minus_one(self):
"""If there is 1 model particle resting directly on the bed
then the available vertices should be
expected_bed_vertices - (x location of the 1 particle)
"""
level_limit = 3 # Arbitrary level limit
one_particle = np.array([[self.diam, 0, 0, 0, 0, 0, 0]]) # at first resting spot
available_vertices = logic.compute_available_vertices(one_particle, self.bed_particles, self.diam,
level_limit=level_limit)
# Assert there is no available vertex at one_particle[0][0]
self.assertNotIn(one_particle[0][0], available_vertices)
self.assertEqual(len(self.bed_particles)-2, len(available_vertices))
expected_vertices = np.delete(self.expected_bed_vertices, 0) # bed minus first available vertex
self.assertCountEqual(available_vertices, expected_vertices)
def test_m_model_particles_return_bed_available_minus_m(self):
"""If there are M model particles resting directly on the bed
then the available vertices should be
expected_bed_vertices - (x location of the M particles)
"""
level_limit = 3 # Arbitrary level limit
m_particles = 4
model_particles = np.zeros([m_particles, ATTR_COUNT], dtype=float)
# Place m model particles 2 resting spots away from each other
placement_idxs = [0, 2, 4, 6]
model_particles[0][0] = self.expected_bed_vertices[placement_idxs[0]]
model_particles[1][0] = self.expected_bed_vertices[placement_idxs[1]]
model_particles[2][0] = self.expected_bed_vertices[placement_idxs[2]]
model_particles[3][0] = self.expected_bed_vertices[placement_idxs[3]]
available_vertices = logic.compute_available_vertices(model_particles, self.bed_particles, self.diam,
level_limit=level_limit)
self.assertEqual(len(self.bed_particles)-m_particles-1, len(available_vertices))
expected_vertices = np.delete(self.expected_bed_vertices, placement_idxs)
self.assertCountEqual(available_vertices, expected_vertices)
def test_no_available_vertices_returns_empty_array(self):
"""If the stream has no spots that a particle
could rest validly then the returned available vertices
should be an empty array. An example of this scenario is:
The bed resting spots are fully saturated with model particles
BUT none are touching so no new vertices are being made
"""
level_limit = 3 # Arbitrary level limit
model_particles = np.zeros([len(self.bed_particles)-1, ATTR_COUNT], dtype=float)
model_particles[:,0] = self.expected_bed_vertices
available_vertices = logic.compute_available_vertices(model_particles, self.bed_particles, self.diam,
level_limit=level_limit)
self.assertEqual(0, len(available_vertices))
def test_two_touching_model_and_empty_bed_return_one_valid_vertex(self):
""" If there are two particles at the same elevation and
their centres (x,y) are exactly a diam length away then
        the two particles are touching. Touching particles should create
        one new vertex at the location where the particles touch.
NOTE: these tests use an empty bed array to directly test the behaviour
of touching particles in a simpler manner (simpler elevation). If
the bed was not empty it would make no difference.
"""
level_limit = 3 # Arbitrary level limit
model_particles = np.zeros([2, ATTR_COUNT], dtype=float)
model_particles[:,0] = np.arange(0.5, 1.5, step=self.diam) # These particles will be touching
empty_bed = np.empty((0, ATTR_COUNT))
available_vertices = logic.compute_available_vertices(model_particles, empty_bed, self.diam,
level_limit=level_limit)
expected_new_vertex = 0.75
self.assertEqual(len(available_vertices), 1)
self.assertEqual(available_vertices, expected_new_vertex)
def test_two_model_touching_at_diff_elev_return_no_vertex(self):
""" If two particles have centres that are a diameter
length away from each other but their elevations are different
then they are NOT touching. They should not create a new vertex.
NOTE: these tests use an empty bed array to directly test the behaviour
of touching particles in a simpler manner (simpler elevation). If
the bed was not empty it would make no difference.
"""
level_limit = 3 # Arbitrary level limit
model_particles = np.zeros([2, ATTR_COUNT], dtype=float)
model_particles[:,0] = np.arange(0.5, 1.5, step=self.diam) # These particles will be touching
# Place them at different elevations
model_particles[0][2] = 0
model_particles[1][2] = 1
empty_bed = np.empty((0, ATTR_COUNT))
available_vertices = logic.compute_available_vertices(model_particles, empty_bed, self.diam,
level_limit=level_limit)
self.assertEqual(len(available_vertices), 0)
def test_3triangle_and_empty_bed_returns_empty_array(self):
""" A 3-triangle of particles is: two particles touching
and one particle resting on the vertex created by the two
touching particles. A 3-triangle should not create
any available vertices in the stream.
NOTE: these tests use an empty bed array to directly test the behaviour
of touching particles in a simpler manner (simpler elevation). If
the bed was not empty it would make no difference.
"""
level_limit = 3 # Level limit > 2
model_particles = np.zeros([3, ATTR_COUNT], dtype=float)
# 3 triangle: 2 particles touching, 1 particle resting above/between
model_particles[0:2][:,0] =
|
np.arange(0.5, 1.5, step=self.diam)
|
numpy.arange
|
import numpy as np
from math import pi
from random import random as r
from hqca.opts.core import *
class NelderMead(OptimizerInstance):
'''
Nelder-Mead Optimizer! Uses the general dimension simplex method, so should
be appropriate for arbitrary system size.
'''
def __init__(self,**kwargs):
OptimizerInstance.__init__(self,**kwargs)
OptimizerInstance._simplex_keywords(self,**kwargs)
self.set_parameters()
def set_parameters(self,
alpha=1,
beta=2,
gamma=0.5,
delta=0.5):
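        # Standard Nelder-Mead coefficients: alpha = reflection, beta = expansion,
        # gamma = contraction, delta = shrink. The defaults match the textbook
        # choices alpha=1, beta=2, gamma=delta=0.5.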
self.alpha=alpha
self.beta = beta
self.gamma= gamma
self.delta= delta
def initialize(self,start,initial=None):
'''
        Columns of simp_x are the problem dimensions; each row is one simplex point.
        Note there are N+1 points in the simplex for an N-dimensional problem.
        We generate the simplex according to the chosen initial method, then
        evaluate the function at each point.
'''
OptimizerInstance.initialize(self,start)
self.simp_x = np.zeros((self.N+1,self.N))
self.simp_f = np.zeros(self.N+1)
if self.initial=='old':
for i in range(1,self.N+1):
self.simp_x[i,:]=start[:]+self.shift
self.simp_x[i,i-1]+=0.99*self.unity
self.simp_x[0,:] = start[:]+self.shift
elif self.initial=='han':
ch = min(max(max(start),self.unity*0.99),10)
for i in range(1,self.N+1):
self.simp_x[i,:]=start[:]+self.shift
self.simp_x[i,i-1]+=ch
t = np.ones(self.N)*ch*(1-np.sqrt(self.N+1))/self.N
self.simp_x[0,:] = start[:]+t+self.shift
print(t)
elif self.initial=='varadhan':
cs = max(np.sqrt(np.sum(np.square(start))),1)
b1 = (cs/(self.N*np.sqrt(2)))*(np.sqrt(self.N+1)+self.N-1)
b2 = (cs/(self.N*np.sqrt(2)))*(np.sqrt(self.N+1)-1)
else:
self.simp_x = initial
for i in range(0,self.N+1):
self.simp_f[i] = self.f(self.simp_x[i,:])
self.energy_calls+=1
if self.verbose:
print(' step: 0, f: {:.8f} '.format(self.simp_f[0]))
self.order_points()
self.calc_centroid()
self.reassign()
self.stuck = np.zeros((3,self.N))
self.stuck_ind = 0
def _check_stuck(self):
v1 = self.stuck[0,:]
v2 = self.stuck[1,:]
v3 = self.stuck[2,:]
diff = np.sqrt(np.sum(np.square(v1-v3)))
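        # diff is the Euclidean distance between two of the stored reflection points;
        # if it is ~0 the reflections are repeating, so R_x is re-drawn with a random
        # coefficient to break the cycle.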
if diff<1e-10:
self.R_x = self.M_x+r()*(self.M_x-self.W_x)
if self.diagnostic:
print('Was stuck!')
print(self.R_x)
print(self.simp_x)
self.N_stuck+=1 #
def _reflect(self):
self.R_x = self.M_x+self.alpha*(self.M_x-self.W_x)
if self.stuck_ind==0:
self.stuck_ind = 1
self.stuck[0,:]= self.R_x
elif self.stuck_ind==1:
self.stuck_ind = 2
self.stuck[1,:]= self.R_x
elif self.stuck_ind==2:
self.stuck_ind=0
self.stuck[2,:]= self.R_x
self.N_stuck=0
self._check_stuck()
self.R_f = self.f(self.R_x)
self.energy_calls+=1
if self.diagnostic:
print(' NM: Reflection: {},{}'.format(self.R_x,self.R_f))
def _update(self,target):
if target=='reflect':
if self.diagnostic:
print(' NM: Reflected point is soso.')
# replace worst point
self.simp_x[-1,:]=self.R_x
self.simp_f[-1] =self.R_f
elif target=='extend':
if self.diagnostic:
print(' NM: Extended point better than best.')
print(self.E_x, self.E_f)
self.simp_x[-1,:]=self.E_x
self.simp_f[-1] =self.E_f
elif target=='contract':
self.simp_x[-1,:]=self.C_x
self.simp_f[-1] =self.C_f
if self.diagnostic:
print(' NM: Contracting the triangle.')
print(self.C_x, self.C_f)
print('New simplex: ')
print(self.simp_x)
print(self.simp_f)
def _extend(self):
self.E_x = self.R_x + self.beta*(self.R_x - self.M_x)
self.E_f = self.f(self.E_x)
self.energy_calls+=1
def _contract(self):
self.Cwm_x = self.W_x+self.gamma*(self.M_x-self.W_x)
self.Crm_x = self.M_x+self.gamma*(self.R_x-self.M_x)
self.Cwm_f = self.f(self.Cwm_x)
self.Crm_f = self.f(self.Crm_x)
self.energy_calls+=2
if self.Crm_f<=self.Cwm_f:
self.C_f = self.Crm_f
self.C_x = self.Crm_x
else:
self.C_f = self.Cwm_f
self.C_x = self.Cwm_x
def _shrink(self):
for i in range(1,self.N+1):
self.simp_x[i,:]=self.B_x+self.delta*(self.simp_x[i,:]-self.B_x)
self.simp_f[i]=self.f(self.simp_x[i,:])
self.energy_calls+=1
if self.diagnostic:
print(' NM: Had to shrink..')
for i in range(self.simp_x.shape[1]):
print(self.simp_x[:,i],self.simp_f[i])
def next_step(self):
'''
        Carries out the next step to generate a new simplex. Each step involves
        several energy evaluations, so a step rarely costs only one evaluation.
W_x is worst point
B_x is best point
X_x is the second-worst point
M_x is the centroid
'''
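        # Per-iteration decision tree: reflect first; if the reflected point beats
        # the best point, try to extend; if it merely beats the second-worst, accept
        # it; otherwise contract, and if even the contraction is no better than the
        # worst point, shrink the whole simplex towards the best point.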
self._reflect()
if self.R_f<=self.X_f: #note this is second worst
if self.R_f>self.B_f: #reflected point not better than best
self._update('reflect')
            else: # reflected point is best or better, so we extend it
self._extend()
if self.E_f<self.B_f:
self._update('extend')
else:
if self.diagnostic:
print('NM: Reflected point better than best.')
print(self.R_x,self.R_f)
self.simp_x[-1,:]=self.R_x
self.simp_f[-1] =self.R_f
        else: # reflected point is worse than the second-worst point
self._contract()
if self.C_f<self.W_f:
self._update('contract')
else:
self._shrink()
self.clean_up()
def clean_up(self):
self.order_points()
self.calc_centroid()
self.reassign()
self.check_criteria()
@property
def best_f(self):
return self.B_f
@property
def best_x(self):
return self.B_x
@property
def best_y(self):
return self.B_y
def check_criteria(self):
self.sd_f =
|
np.std(self.simp_f)
|
numpy.std
|
"""
PYFAB API using Fabber shared libraries
=======================================
This API uses the C API defined in the ``libfabbercore_shared.so`` shared library via
the Python ``ctypes`` module.
"""
import os
import collections
from ctypes import CDLL, c_int, c_char_p, c_void_p, c_uint, CFUNCTYPE, create_string_buffer
import six
import numpy as np
import numpy.ctypeslib as npct
import nibabel as nib
from .api import FabberApi, FabberException, FabberRun
class FabberShlib(FabberApi):
"""
Interface to Fabber in library mode using simplified C API
"""
def __init__(self, core_lib=None, model_libs=None, **kwargs):
FabberApi.__init__(self, core_lib=core_lib, model_libs=model_libs, **kwargs)
if self.core_lib is None or not os.path.isfile(self.core_lib):
raise FabberException("Invalid core library - file not found: %s" % self.core_lib)
self._errbuf = create_string_buffer(255)
self._outbuf = create_string_buffer(1000000)
self._progress_cb_type = CFUNCTYPE(None, c_int, c_int)
self._clib = self._init_clib()
self._handle = None
self._init_handle()
def get_methods(self):
self._init_handle()
self._trycall(self._clib.fabber_get_methods, self._handle, len(self._outbuf), self._outbuf, self._errbuf)
return self._outbuf.value.decode("UTF-8").splitlines()
def get_models(self, model_group=None):
self._init_handle(model_group)
self._trycall(self._clib.fabber_get_models, self._handle, len(self._outbuf), self._outbuf, self._errbuf)
return self._outbuf.value.decode("UTF-8").splitlines()
def get_options(self, generic=None, method=None, model=None):
self._init_handle()
if generic is None:
# For backwards compatibility - no params = generic
generic = not method and not model
ret, all_lines = [], []
if method:
self._trycall(self._clib.fabber_get_options, self._handle, "method", method, len(self._outbuf), self._outbuf, self._errbuf)
lines = self._outbuf.value.decode("UTF-8").split("\n")
ret.append(lines[0])
all_lines += lines[1:]
if model:
self._trycall(self._clib.fabber_get_options, self._handle, "model", model, len(self._outbuf), self._outbuf, self._errbuf)
lines = self._outbuf.value.decode("UTF-8").split("\n")
ret.append(lines[0])
all_lines += lines[1:]
if generic:
self._trycall(self._clib.fabber_get_options, self._handle, None, None, len(self._outbuf), self._outbuf, self._errbuf)
lines = self._outbuf.value.decode("UTF-8").split("\n")
ret.append(lines[0])
all_lines += lines[1:]
opt_keys = ["name", "description", "type", "optional", "default"]
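        # Each option reported by the C API is a single tab-separated line in the
        # order given by opt_keys; 'optional' arrives as "1"/"0" and is converted
        # to a bool below.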
opts = []
for opt in all_lines:
if opt:
opt = dict(zip(opt_keys, opt.split("\t")))
opt["optional"] = opt["optional"] == "1"
opts.append(opt)
ret.insert(0, opts)
return tuple(ret)
def get_model_params(self, options):
return self._init_run(options)[1]
def get_model_param_descs(self, options):
self._init_run(options)
# Set the arg types here because we do not know if this method will actually exist in the
# shared library but if the user calls this method we assume they know it does.
self._clib.fabber_get_model_param_descs.argtypes = [c_void_p, c_uint, c_char_p, c_char_p]
self._trycall(self._clib.fabber_get_model_param_descs, self._handle, len(self._outbuf), self._outbuf, self._errbuf)
return self._parse_params(self._outbuf.value.decode("UTF-8").splitlines())
def get_model_outputs(self, options):
return self._init_run(options)[2]
def model_evaluate(self, options, param_values, nvols, indata=None, output_name=""):
# Get model parameter names and form a sequence of the values provided for them
_, params, _ = self._init_run(options)
plist = []
for param in params:
if param not in param_values:
raise FabberException("Model parameter %s not specified" % param)
else:
plist.append(param_values[param])
if len(param_values) != len(params):
raise FabberException("Incorrect number of parameters specified: expected %i (%s)" % (len(params), ",".join(params)))
ret = np.zeros([nvols,], dtype=np.float32)
if indata is None:
indata = np.zeros([nvols,], dtype=np.float32)
# Call the evaluate function in the C API
self._trycall(self._clib.fabber_model_evaluate_output, self._handle, len(plist),
|
np.array(plist, dtype=np.float32)
|
numpy.array
|
from campus import campus
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys
import time
import json
import os
try:
import meerschaum as mrsm
print('IMPORTED Meerschaum')
except:
print("\n\nFAILED TO IMPORT MEERSCHAUM!\n\n")
class clock:
# not doing anything with the number of campuses yet
def __init__(self, number_campuses):
start_time = time.time()
num_args = len(sys.argv)
if num_args > 1:
if sys.argv[1] == '-l':
snapshot = self.loadSnapshot()
self.campus = campus(1, snapshot)
now = datetime.now()
dtime = now.strftime("%Y-%m-%d %H:%M:%S")
inputPower = self.generateInput()
newRow = {'datetime' : [dtime], 'AEP_MW' : [inputPower]}
inputData = pd.DataFrame(newRow)
self.age(inputData)
else:
inputData = self.loadData().iloc[:5000]
inputData['datetime'] = inputData['Datetime']
numRows = len(inputData.index)
self.campus = campus(numRows)
self.age(inputData)
self.writeDB()
self.writeSnapshot(self.makeSnapshot())
elapsed_time = time.time() - start_time
print('ELAPSED TIME:\t{}'.format(elapsed_time))
# age the campus using input data as base
def age(self, input_data):
for index, row in input_data.iterrows():
self.campus.getOlder(row, index)
''' Brainstorming with Bennett :)
self.campus.write()
'''
# read data in from raw_csv file
def loadData(self):
df = pd.read_csv('./data/raw_data/AEP_hourly.csv')
df['index'] = np.arange(len(df))
df.set_index('index')
return df
def loadSnapshot(self):
print('LOADING SNAPSHOT')
with open("./data/campus_snapshot.json", "r") as f:
snapshot = json.load(f)
return snapshot
def graphDF(self):
color_pal = ["#F8766D", "#D39200", "#93AA00", "#00BA38", "#00C19F", "#00B9E3", "#619CFF", "#DB72FB"]
self.inputData.plot(style='.', figsize=(15,5), color=color_pal[0], title='Data')
plt.show()
# make snapshot of the state of the simulation
def makeSnapshot(self):
snapshot = { }
snapshot['time'] = int(time.time())
snapshot['buildings'] = {}
for building in self.campus.buildings:
building_object = self.campus.buildings[building]
building_id = building_object.id
s_building_id = str(building_object.id)
snapshot['buildings'][s_building_id] = { }
snapshot['buildings'][s_building_id]['stdev'] = building_object.stdev
snapshot['buildings'][s_building_id]['adjustment'] = building_object.adjustment
snapshot['buildings'][s_building_id]['rooms'] = { }
snapshot['buildings'][s_building_id]['corridors'] = { }
for room in building_object.rooms:
room_object = building_object.rooms[room]
room_id = room_object.id
s_room_id = str(room_id)
snapshot['buildings'][s_building_id]['rooms'][s_room_id] = { }
for sensor in room_object.sensors:
sensor_object = room_object.sensors[sensor]
meter_id = sensor_object.id
s_meter_id = str(meter_id)
snapshot['buildings'][s_building_id]['rooms'][s_room_id][s_meter_id] = { }
snapshot['buildings'][s_building_id]['rooms'][s_room_id][s_meter_id]['age'] = sensor_object.age
snapshot['buildings'][s_building_id]['rooms'][s_room_id][s_meter_id]['latest_ttl'] = sensor_object.ttl
snapshot['buildings'][s_building_id]['rooms'][s_room_id][s_meter_id]['replacement_wait'] = sensor_object.replacement_wait
for corridor in building_object.corridors:
corridor_object = building_object.corridors[corridor]
corridor_id = corridor_object.id
s_corridor_id = str(corridor_id)
snapshot['buildings'][s_building_id]['corridors'][s_corridor_id] = { }
for sensor in corridor_object.sensors:
sensor_object = corridor_object.sensors[sensor]
meter_id = sensor_object.id
s_meter_id = str(meter_id)
snapshot['buildings'][s_building_id]['corridors'][s_corridor_id][s_meter_id] = { }
snapshot['buildings'][s_building_id]['corridors'][s_corridor_id][s_meter_id]['age'] = sensor_object.age
snapshot['buildings'][s_building_id]['corridors'][s_corridor_id][s_meter_id]['latest_ttl'] = sensor_object.ttl
snapshot['buildings'][s_building_id]['corridors'][s_corridor_id][s_meter_id]['replacement_wait'] = sensor_object.replacement_wait
return snapshot
# write the snapshot to a file
def writeSnapshot(self, snapshot):
with open('./data/campus_snapshot.json', 'w+') as f:
json.dump(snapshot, f, indent=4)
def write_output(self):
print('Writing output csv...')
try:
power_filename = './data/processed_data/power.csv'
temperature_filename = './data/processed_data/temperature.csv'
np.set_printoptions(suppress=True)
np_array = np.array([
|
np.array(xi)
|
numpy.array
|
import pandas as pd
import numpy as np
from . import LineMeasurement, GridMeasurement, otherfunctions
import os
from pathlib import Path
class Sample:
@property
def linemeasurements(self):
return self._linemeasurements
@property
def gridmeasurements(self):
return self._gridmeasurements
# Store flags identifying bad acquisitions
@property
def meas_acq_flags(self):
return self._meas_acq_flags
def __init__(self, path=None, gridSize=50, adjustphase=True):
self._linemeasurements = {}
self._gridmeasurements = {}
self._meas_acq_flags = {}
if path is not None:
self.addentiresample(path, gridSize, adjustphase)
def addmeasurement(self, meas, measType=None, measName=None, gridSize=None, adjustphase=True):
if measType is None:
measType = meas._measurementName
meas_flags = meas.clean()
if measType == 'Scan':
self._linemeasurements[measType] = meas
elif measType in ['SSPFM', 'NonLin', 'Relax']:
self._gridmeasurements[measType] = meas
else:
raise ValueError(measType+': Unknown measurement type')
else:
if measType.lower() in 'scan':
try:
newMeas = LineMeasurement(meas, name=measName, adjustphase=adjustphase)
except ValueError:
                    raise ValueError("Argument is not a path to measurement data. Maybe it's a Measurement Object?")
meas_flags = newMeas.clean()
self._linemeasurements[measName] = newMeas
else:
if measType.lower() in ['grid']:
measType = 'Relax'
try:
newMeas = GridMeasurement(meas, measType, gridSize, adjustphase=adjustphase)
except ValueError:
                    raise ValueError("Argument is not a path to measurement data. Maybe it's a Measurement Object?")
meas_flags = newMeas.clean()
self._gridmeasurements[measName] = newMeas
self._meas_acq_flags[measType] = meas_flags
def addentiresample(self, path, gridSize=50, adjustphase=True):
for folder in next(os.walk(path))[1]:
direc = Path(path+folder)
if os.path.isfile(direc / 'shofit.csv'):
parameters = pd.read_csv(direc / 'parameters.csv', header=None, index_col=0)
try:
measType = parameters.T['Measurement Type'].values[0]
measName = parameters.T['Measurement Name'].values[0]
except KeyError:
measType = 'Grid'
measName = 'Fail'
self.addmeasurement(direc, measType=measType, measName=measName, gridSize=gridSize, adjustphase=adjustphase)
def GetMeasStack(self, measstack = None, varstack = ['Amp', 'Phase', 'Res', 'Q'], inout=0.0, plotGroup=None,
insert=None, clean=False):
if measstack is None:
measstack = self._gridmeasurements.keys()
stackedCols = 0
stackreturn = 0
flags = []
for measure in measstack:
measurement = self._gridmeasurements[measure]
meas_flags = self._meas_acq_flags[measure]
data = measurement.GetDataSubset(inout=inout, plotGroup=plotGroup, insert=insert,
stack=varstack)
cols = data.columns.values
unzipped = np.asarray([list(t) for t in zip(*cols)])
measname = np.repeat(measure, unzipped.shape[1])
newcols = np.vstack([measname, unzipped])
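            # Prepend the measurement name as an extra column level; together with the
            # existing levels this produces the 6-level MultiIndex built below
            # (Meas, Var, Chirp, InOut, PlotGroup, xaxis).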
try:
stackreturn = pd.concat([stackreturn, data], axis=1)
stackedCols = np.hstack([stackedCols, newcols])
flags = np.hstack([flags, meas_flags])
except TypeError:
stackreturn = data
stackedCols = newcols
flags = meas_flags
tuples = list(zip(*stackedCols))
cols = pd.MultiIndex.from_tuples(tuples, names=['Meas', 'Var', 'Chirp', 'InOut', 'PlotGroup', 'xaxis'])
result = pd.DataFrame(stackreturn.values, index=stackreturn.index, columns=cols)
if clean:
try:
collapsed_flags = flags.any(axis=1)
except:
collapsed_flags = flags
result = result[~collapsed_flags]
return result, collapsed_flags
else:
return result
def plot(self, meas=None, variables=None, saveName=None, pointNum=None, InOut=0):
if meas == 'Scan':
try:
measObj = self._linemeasurements[meas]
except KeyError:
raise KeyError('That measurement does not exist')
measObj.plot(variables=variables, saveName=saveName)
elif meas in ['SSPFM', 'NonLin', 'Relax']:
try:
measObj = self._gridmeasurements[meas]
except KeyError:
raise KeyError('That measurement does not exist')
measObj.plot(variables=variables, pointNum=pointNum, InOut=InOut, saveName=saveName)
else:
raise ValueError('Please select which measurement to plot')
@staticmethod
def match_distance_to_sspfm(line_measurement, grid_measurement, distances=None):
if distances is None:
distances = line_measurement.find_distances()
grid = otherfunctions.generate_template_grid(distances[0].shape[0], grid_measurement.gridSize)
grid_measurement.analysis['x'] = pd.Series(
|
np.argwhere(grid)
|
numpy.argwhere
|
import numpy as np
from .baseMetric import BaseMetric
# A collection of commonly used simple metrics, operating on a single column and returning a float.
__all__ = ['PassMetric', 'Coaddm5Metric', 'MaxMetric', 'AbsMaxMetric', 'MeanMetric', 'AbsMeanMetric',
'MedianMetric', 'AbsMedianMetric', 'MinMetric', 'FullRangeMetric', 'RmsMetric', 'SumMetric',
'CountUniqueMetric', 'CountMetric', 'CountRatioMetric', 'CountSubsetMetric', 'RobustRmsMetric',
'MaxPercentMetric', 'AbsMaxPercentMetric', 'BinaryMetric', 'FracAboveMetric', 'FracBelowMetric',
'PercentileMetric', 'NoutliersNsigmaMetric', 'UniqueRatioMetric',
'MeanAngleMetric', 'RmsAngleMetric', 'FullRangeAngleMetric', 'CountExplimMetric']
twopi = 2.0*np.pi
class PassMetric(BaseMetric):
"""
Just pass the entire array through
"""
def __init__(self, cols=None, **kwargs):
if cols is None:
cols= []
super(PassMetric, self).__init__(col=cols, metricDtype='object', **kwargs)
def run(self, dataSlice, slicePoint=None):
return dataSlice
class Coaddm5Metric(BaseMetric):
"""Calculate the coadded m5 value at this gridpoint.
Parameters
----------
m5Col : `str`, optional
Name of the m5 column. Default fiveSigmaDepth.
metricName : `str`, optional
Name to associate with the metric output.
"""
def __init__(self, m5Col='fiveSigmaDepth', metricName='CoaddM5', **kwargs):
super(Coaddm5Metric, self).__init__(col=m5Col, metricName=metricName, **kwargs)
def run(self, dataSlice, slicePoint=None):
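        # 5-sigma depths combine in quadrature in signal-to-noise: each visit
        # contributes 10**(0.8*m5) to the summed (S/N)**2, and 1.25*log10(...)
        # converts the sum back into a limiting magnitude.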
return 1.25 * np.log10(np.sum(10.**(.8*dataSlice[self.colname])))
class MaxMetric(BaseMetric):
"""Calculate the maximum of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.max(dataSlice[self.colname])
class AbsMaxMetric(BaseMetric):
"""Calculate the max of the absolute value of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.max(np.abs(dataSlice[self.colname]))
class MeanMetric(BaseMetric):
"""Calculate the mean of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.mean(dataSlice[self.colname])
class AbsMeanMetric(BaseMetric):
"""Calculate the mean of the absolute value of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.mean(np.abs(dataSlice[self.colname]))
class MedianMetric(BaseMetric):
"""Calculate the median of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.median(dataSlice[self.colname])
class AbsMedianMetric(BaseMetric):
"""Calculate the median of the absolute value of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.median(np.abs(dataSlice[self.colname]))
class MinMetric(BaseMetric):
"""Calculate the minimum of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.min(dataSlice[self.colname])
class FullRangeMetric(BaseMetric):
"""Calculate the range of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.max(dataSlice[self.colname])-np.min(dataSlice[self.colname])
class RmsMetric(BaseMetric):
"""Calculate the standard deviation of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.std(dataSlice[self.colname])
class SumMetric(BaseMetric):
"""Calculate the sum of a simData column slice.
"""
def run(self, dataSlice, slicePoint=None):
return np.sum(dataSlice[self.colname])
class CountUniqueMetric(BaseMetric):
"""Return the number of unique values.
"""
def run(self, dataSlice, slicePoint=None):
return np.size(np.unique(dataSlice[self.colname]))
class UniqueRatioMetric(BaseMetric):
"""Return the number of unique values divided by the total number of values.
"""
def run(self, dataSlice, slicePoint=None):
ntot = float(np.size(dataSlice[self.colname]))
result = np.size(np.unique(dataSlice[self.colname])) / ntot
return result
class CountMetric(BaseMetric):
"""Count the length of a simData column slice. """
def __init__(self, col=None, **kwargs):
super(CountMetric, self).__init__(col=col, **kwargs)
self.metricDtype = 'int'
def run(self, dataSlice, slicePoint=None):
return len(dataSlice[self.colname])
class CountExplimMetric(BaseMetric):
"""Count the number of x second visits. Useful for rejecting very short exposures
and counting 60s exposures as 2 visits."""
def __init__(self, col=None, minExp=20., expectedExp=30., expCol='visitExposureTime', **kwargs):
self.minExp = minExp
self.expectedExp = expectedExp
self.expCol = expCol
super().__init__(col=[col, expCol], **kwargs)
self.metricDtype = 'int'
def run(self, dataSlice, slicePoint=None):
nv = dataSlice[self.expCol] / self.expectedExp
nv[np.where(dataSlice[self.expCol] < self.minExp)[0]] = 0
nv = np.round(nv)
return int(np.sum(nv))
class CountRatioMetric(BaseMetric):
"""Count the length of a simData column slice, then divide by 'normVal'.
"""
def __init__(self, col=None, normVal=1., metricName=None, **kwargs):
self.normVal = float(normVal)
if metricName is None:
metricName = 'CountRatio %s div %.1f'%(col, normVal)
super(CountRatioMetric, self).__init__(col=col, metricName=metricName, **kwargs)
def run(self, dataSlice, slicePoint=None):
return len(dataSlice[self.colname])/self.normVal
class CountSubsetMetric(BaseMetric):
"""Count the length of a simData column slice which matches 'subset'.
"""
def __init__(self, col=None, subset=None, **kwargs):
super(CountSubsetMetric, self).__init__(col=col, **kwargs)
self.metricDtype = 'int'
self.badval = 0
self.subset = subset
def run(self, dataSlice, slicePoint=None):
count = len(np.where(dataSlice[self.colname] == self.subset)[0])
return count
class RobustRmsMetric(BaseMetric):
"""Use the inter-quartile range of the data to estimate the RMS.
Robust since this calculation does not include outliers in the distribution.
"""
def run(self, dataSlice, slicePoint=None):
iqr = np.percentile(dataSlice[self.colname],75)-np.percentile(dataSlice[self.colname],25)
rms = iqr/1.349 #approximation
return rms
class MaxPercentMetric(BaseMetric):
"""Return the percent of the data which has the maximum value.
"""
def run(self, dataSlice, slicePoint=None):
nMax = np.size(np.where(dataSlice[self.colname] == np.max(dataSlice[self.colname]))[0])
percent = nMax / float(dataSlice[self.colname].size) * 100.
return percent
class AbsMaxPercentMetric(BaseMetric):
"""Return the percent of the data which has the absolute value of the max value of the data.
"""
def run(self, dataSlice, slicePoint=None):
maxVal = np.abs(np.max(dataSlice[self.colname]))
nMax = np.size(np.where(np.abs(dataSlice[self.colname]) == maxVal)[0])
percent = nMax / float(dataSlice[self.colname].size) * 100.0
return percent
class BinaryMetric(BaseMetric):
"""Return 1 if there is data.
"""
def run(self, dataSlice, slicePoint=None):
if dataSlice.size > 0:
return 1
else:
return self.badval
class FracAboveMetric(BaseMetric):
"""Find the fraction of data values above a given value.
"""
def __init__(self, col=None, cutoff=0.5, scale=1, metricName=None, **kwargs):
# Col could just get passed in bundle with kwargs, but by explicitly pulling it out
# first, we support use cases where the class is instantiated without an explicit 'col='.
if metricName is None:
metricName = 'FracAbove %.2f in %s' %(cutoff, col)
super(FracAboveMetric, self).__init__(col, metricName=metricName, **kwargs)
self.cutoff = cutoff
self.scale = scale
def run(self, dataSlice, slicePoint=None):
good = np.where(dataSlice[self.colname] >= self.cutoff)[0]
fracAbove = np.size(good)/float(np.size(dataSlice[self.colname]))
fracAbove = fracAbove * self.scale
return fracAbove
class FracBelowMetric(BaseMetric):
"""Find the fraction of data values below a given value.
"""
def __init__(self, col=None, cutoff=0.5, scale=1, metricName=None, **kwargs):
if metricName is None:
metricName = 'FracBelow %.2f %s' %(cutoff, col)
super(FracBelowMetric, self).__init__(col, metricName=metricName, **kwargs)
self.cutoff = cutoff
self.scale = scale
def run(self, dataSlice, slicePoint=None):
good = np.where(dataSlice[self.colname] <= self.cutoff)[0]
fracBelow = np.size(good)/float(np.size(dataSlice[self.colname]))
fracBelow = fracBelow * self.scale
return fracBelow
class PercentileMetric(BaseMetric):
"""Find the value of a column at a given percentile.
"""
def __init__(self, col=None, percentile=90, metricName=None, **kwargs):
if metricName is None:
metricName = '%.0fth%sile %s' %(percentile, '%', col)
super(PercentileMetric, self).__init__(col=col, metricName=metricName, **kwargs)
self.percentile = percentile
def run(self, dataSlice, slicePoint=None):
pval = np.percentile(dataSlice[self.colname], self.percentile)
return pval
class NoutliersNsigmaMetric(BaseMetric):
"""Calculate the # of visits less than nSigma below the mean (nSigma<0) or
more than nSigma above the mean of 'col'.
"""
def __init__(self, col=None, nSigma=3., metricName=None, **kwargs):
self.nSigma = nSigma
self.col = col
if metricName is None:
metricName = 'Noutliers %.1f %s' %(self.nSigma, self.col)
super(NoutliersNsigmaMetric, self).__init__(col=col, metricName=metricName, **kwargs)
self.metricDtype = 'int'
def run(self, dataSlice, slicePoint=None):
med = np.mean(dataSlice[self.colname])
std = np.std(dataSlice[self.colname])
boundary = med + self.nSigma*std
# If nsigma is positive, look for outliers above the mean.
if self.nSigma >=0:
outsiders = np.where(dataSlice[self.colname] > boundary)
# Else look for outliers below the mean.
else:
outsiders = np.where(dataSlice[self.colname] < boundary)
return len(dataSlice[self.colname][outsiders])
def _rotateAngles(angles):
"""Private utility for the '*Angle' Metrics below.
This takes a series of angles between 0-2pi and rotates them so that the
first angle is at 0, ensuring the biggest 'gap' is at the end of the series.
This simplifies calculations like the 'mean' and 'rms' or 'fullrange', removing
the discontinuity at 0/2pi.
"""
angleidx = np.argsort(angles)
diffangles = np.diff(angles[angleidx])
start_to_end = np.array([twopi-angles[angleidx][-1] + angles[angleidx][0]], float)
if start_to_end < -2.*np.pi:
raise ValueError('Angular metrics expect radians, this seems to be in degrees')
diffangles = np.concatenate([diffangles, start_to_end])
maxdiff = np.where(diffangles == diffangles.max())[0]
if len(maxdiff) > 1:
maxdiff = maxdiff[-1:]
if maxdiff == (len(angles)-1):
rotation = angles[angleidx][0]
else:
rotation = angles[angleidx][maxdiff+1][0]
return (rotation, (angles - rotation) % twopi)
class MeanAngleMetric(BaseMetric):
"""Calculate the mean of an angular (degree) simData column slice.
'MeanAngle' differs from 'Mean' in that it accounts for wraparound at 2pi.
"""
def run(self, dataSlice, slicePoint=None):
"""Calculate mean angle via unit vectors.
If unit vector 'strength' is less than 0.1, then just set mean to 180 degrees
(as this indicates nearly uniformly distributed angles).
"""
x = np.cos(np.radians(dataSlice[self.colname]))
y = np.sin(np.radians(dataSlice[self.colname]))
meanx = np.mean(x)
meany = np.mean(y)
angle = np.arctan2(meany, meanx)
radius =
|
np.sqrt(meanx**2 + meany**2)
|
numpy.sqrt
|
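Coaddm5Metric and RobustRmsMetric above reduce to one-line numpy expressions; a minimal standalone sketch, with made-up depth values and none of the BaseMetric machinery:

import numpy as np
m5 = np.array([24.4, 24.5, 24.3, 24.6])          # hypothetical five-sigma depths per visit
# coadded depth, as in Coaddm5Metric.run
coadd_m5 = 1.25 * np.log10(np.sum(10.**(0.8 * m5)))
# robust RMS via the inter-quartile range, as in RobustRmsMetric.run
iqr = np.percentile(m5, 75) - np.percentile(m5, 25)
robust_rms = iqr / 1.349                          # IQR -> sigma for a Gaussian
print(coadd_m5, robust_rms)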
from . import tree_algorithms as ta
import numpy as np
from ..update_rules import update_rules as ur
#from pulp import *
import copy
from scipy.sparse.linalg.eigen.arpack import eigsh as largest_eigsh
from itertools import cycle
########### ------------------------------ ADAPTIVE RULES
def select(rule, x, A, b, loss, args, iteration):
""" Adaptive selection rules """
n_params = x.size
block_size = args["block_size"]
it = iteration
lipschitz = loss.lipschitz
if "Tree" not in rule:
assert block_size > 0
else:
assert block_size == -1
g_func = loss.g_func
if rule == "all":
""" select all coordinates """
block = np.arange(n_params)
elif rule == "Random":
""" randomly select a coordinate"""
all_block = np.random.permutation(n_params)
block = all_block[:block_size]
#block = np.unravel_index(block, (n_features, n_classes))
elif rule in ["Perm", "Cyclic"]:
"""Select next coordinate"""
if iteration % n_params == 0:
args["perm_coors"] = np.random.permutation(n_params)
emod = it % int((n_params/block_size))
block = args["perm_coors"][emod*block_size: (emod + 1)*block_size]
#block = np.unravel_index(block, (n_features, n_classes))
elif rule == "Lipschitz":
"""non-uniform sample based on lipschitz values"""
L = lipschitz
block = np.random.choice(x.size, block_size, replace=False,
p=L/L.sum())
elif rule in ["GS"]:
""" select coordinates based on largest gradients"""
g = g_func(x, A, b, block=None)
s = np.abs(g)
block = np.argsort(s, axis=None)[-block_size:]
elif rule in ["GSDLi", "GSD"]:
""" select coordinates based on largest individual lipschitz"""
L = lipschitz
g = g_func(x, A, b, block=None)
s = np.abs(g) / np.sqrt(L)
block = np.argsort(s, axis=None)[-block_size:]
elif rule in ["GSDHb"]:
""" select coordinates based on the uper bound of the hessian"""
g = g_func(x, A, b, block=None)
if "GSD_L" not in args:
Hb = loss.Hb_func(x, A, b, block=None)
args["GSD_L"] = np.sum(np.abs(Hb), 1)
s = np.abs(g) / np.sqrt(args["GSD_L"])
block = np.argsort(s, axis=None)[-block_size:]
elif rule in ["GSQ-IHT", "IHT"]:
""" select coordinates based on largest individual lipschitz"""
L = lipschitz
if "Hb_IHT" not in args:
args["Hb_IHT"] = loss.Hb_func(x, A, b, block=None)
#args["mu_IHT"] = 1. / np.max(np.linalg.eigh(args["Hb_IHT"])[0])
args["mu_IHT"] = 1. / largest_eigsh(args["Hb_IHT"], 1, which='LM')[0]
Hb = args["Hb_IHT"]
mu = args["mu_IHT"]
G = g_func(x, A, b, block=None)
d = G / np.sqrt(L)
d_old = d.copy()
for i in range(10):
d = d - mu*(G + Hb.dot(d))
ind = np.argsort(np.abs(d))
d[ind[:-block_size]]= 0
if np.linalg.norm(d_old - d) < 1e-10:
block = ind[-block_size:]
break
#print "norm diff: %.3f" % np.linalg.norm(d_old - d)
d_old = d.copy()
block = ind[-block_size:]
#block = np.where(d != 0)
return np.array(block), args
elif rule == "gsq-nn":
""" select coordinates based on largest individual lipschitz"""
g = g_func(x, A, b, block=None)
L = lipschitz
d = -g / L
x_new = x + d
neg = x_new < 0
pos = (1 - neg).astype(bool)
# SANITY CHECK
assert x.size == (neg.sum() + pos.sum())
s = np.zeros(x.size)
d = -g[pos] / L[pos]
s[pos] = g[pos] * d + (L[pos]/2.) * d**2
d = - x[neg]
s[neg] = g[neg] * d + (L[neg]/2.) * d**2
block = np.argsort(s, axis=None)[:block_size]
elif rule in ["GSDTree", "GSTree","RTree", "GSLTree"]:
""" select coordinates that form a forest based on BGS or BGSC """
g_func = loss.g_func
if "GSDTree" == rule:
lipschitz = np.sum(np.abs(A), 1)
score_list = np.abs(g_func(x, A, b, None)) / np.sqrt(lipschitz)
sorted_indices = np.argsort(score_list)[::-1]
elif "GSLTree" == rule:
lipschitz = lipschitz
score_list = np.abs(g_func(x, A, b, None)) / np.sqrt(lipschitz)
sorted_indices = np.argsort(score_list)[::-1]
elif "GSTree" == rule:
score_list = np.abs(g_func(x, A, b, None))
sorted_indices = np.argsort(score_list)[::-1]
elif "RTree" == rule:
sorted_indices = np.random.permutation(np.arange(A.shape[0]))
block = ta.get_tree_slow(sorted_indices, adj=A)
if iteration == 0:
xr = np.random.randn(*x.shape)
xE, _ = ur.update("bpExact", xr.copy(),
A, b, loss, copy.deepcopy(args), block, iteration=iteration)
xG, _ = ur.update("bpGabp", xr.copy(),
A, b, loss, copy.deepcopy(args) , block, iteration=iteration)
np.testing.assert_array_almost_equal(xE, xG, 3)
print("Exact vs GaBP Test passed...")
elif rule == "GSExactTree":
""" select coordinates based on largest individual lipschitz"""
g = g_func(x, A, b, block=None)
s = np.abs(g)
block_size = int(loss.n_params**(1./3))
block = np.argsort(s, axis=None)[-block_size:]
elif rule == "GSLExactTree":
""" select coordinates based on largest individual lipschitz"""
l = lipschitz
g = g_func(x, A, b, block=None)
s = np.abs(g) / np.sqrt(l)
block_size = int(loss.n_params**(1./3))
block = np.argsort(s, axis=None)[-block_size:]
elif rule in ["TreePartitions", "RedBlackTree",
"TreePartitionsRandom",
"RedBlackTreeRandom"]:
""" select coordinates that form a forest based on BGS or BGSC """
g_func = loss.g_func
if "graph_blocks" not in args:
yb = args["data_y"]
unlabeled = np.where(yb == 0)[0]
Wb = args["data_W"][unlabeled][:, unlabeled]
#################### GET GRAPH BLOCKS
if args["data_lattice"] == False:
if rule == "RedBlackTree":
graph_blocks = ta.get_rb_general_graph(Wb, L=lipschitz)
elif rule == "TreePartitions":
graph_blocks = ta.get_tp_general_graph(Wb, L=lipschitz)
elif rule == "RedBlackTreeRandom":
graph_blocks = ta.get_rb_general_graph(Wb, L=np.ones(lipschitz.size))
elif rule == "TreePartitionsRandom":
graph_blocks = ta.get_tp_general_graph(Wb, L=
|
np.ones(lipschitz.size)
|
numpy.ones
|
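Away from the full select() dispatcher, the greedy rules above are just an argsort over a per-coordinate score; a minimal sketch of the "GS" and "GSDLi" scores on a random quadratic (problem sizes and data are made up):

import numpy as np
np.random.seed(0)
n_params, block_size = 50, 5
A = np.random.randn(n_params, n_params)
A = A.T @ A                                   # symmetric PSD quadratic
b = np.random.randn(n_params)
x = np.zeros(n_params)
g = A @ x - b                                 # gradient of 0.5*x'Ax - b'x
L = np.diag(A)                                # per-coordinate Lipschitz constants
gs_block = np.argsort(np.abs(g), axis=None)[-block_size:]                  # "GS": largest |gradient|
gsl_block = np.argsort(np.abs(g) / np.sqrt(L), axis=None)[-block_size:]    # "GSDLi": Lipschitz-weighted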
'''
MODULE: sparse_sensing.py
@Authors:
<NAME> [1]
[1]: Université Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Bruxelles, Belgium
@Contacts:
<EMAIL>
@Additional notes:
This code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
Please report any bug to: <EMAIL>
'''
import os
import numpy as np
import scipy.linalg as la
class SPR():
'''
Class used for Sparse Placement for Reconstruction (SPR)
Attributes
----------
X : numpy array
data matrix of dimensions (n,p) where n = n_features * n_points and p
is the number of operating conditions.
n_features : int
the number of features in the dataset (temperature, velocity, etc.).
Methods
----------
scale_data(scale_type='standard')
Scale the data.
unscale_data(x0, scale_type)
Unscale the data.
decomposition(X0, decomp_type='POD')
Finds the tailored basis.
reduction(U, exp_variance, select_modes, n_modes)
Perform the dimensionality reduction.
optimal_placement(scale_type='standard', select_modes='variance', n_modes=99)
Calculates the C matrix using QRCP decomposition.
fit_predict(C, y, scale_type='standard', select_modes='variance',
n_modes=99)
Calculates the Theta matrix, then predicts ar and reconstructs x.
predict(y, scale_type='standard'):
Predicts ar and reconstructs x.
'''
def __init__(self, X, n_features):
'''
Parameters
----------
X : numpy array
Data matrix of dimensions (nxm) where n = n_features * n_points and m
is the number of operating conditions.
n_features : int
The number of features in the dataset (temperature, velocity, etc.).
Returns
-------
None.
'''
if type(X) is not np.ndarray:
raise TypeError('The matrix X is not a numpy array.')
elif type(n_features) is not int:
raise TypeError('The parameter n_features is not an integer.')
else:
self.X = X
self.n_features = n_features
def scale_data(self, scale_type='standard'):
'''
Return the scaled data matrix. The default is to scale the data to
unit variance.
Parameters
----------
scale_type : str, optional
Type of scaling. The default is 'standard'. For now, it is the only
method implemented.
Returns
-------
X0 : numpy array
Scaled data matrix.
'''
n = self.X.shape[0]
self.n_points = n // self.n_features
if n % self.n_features != 0:
raise Exception('The number of rows of X is not a multiple of n_features')
X0 = np.zeros_like(self.X)
if scale_type == 'standard':
# Scale the matrix to unit variance
mean_vector = np.zeros((self.n_features))
std_vector = np.zeros((self.n_features))
for i in range(self.n_features):
x = self.X[i*self.n_points:(i+1)*self.n_points, :]
mean_vector[i] = np.average(x)
std_vector[i] = np.std(x)
X0[i*self.n_points:(i+1)*self.n_points, :] = (x -
mean_vector[i])/std_vector[i]
self.mean_vector = mean_vector
self.std_vector = std_vector
else:
raise NotImplementedError('The scaling method selected has not been '\
'implemented yet')
return X0
def scale_vector(self, y, scale_type):
'''
Return the scaled measurement vector.
Parameters
----------
y : numpy array
Measurement vector to scale, size (s,2). The first column contains
the measurements, the second column contains which feature is
measured.
scale_type : str
Type of scaling.
Returns
-------
y0: numpy array
The scaled measurement vector.
'''
y0 = np.zeros((y.shape[0],))
if scale_type == 'standard':
for i in range(y0.shape[0]):
y0[i] = (y[i,0] - self.mean_vector[int(y[i,1])]) / self.std_vector[int(y[i,1])]
else:
raise NotImplementedError('The scaling method selected has not been '\
'implemented yet')
return y0
def unscale_data(self, x0, scale_type):
'''
Return the unscaled vector.
Parameters
----------
x0 : numpy array
Scaled vector to unscale, size (n,).
scale_type : str
Type of scaling.
Returns
-------
x : numpy array
The unscaled vector.
'''
x = np.zeros_like(x0)
if scale_type == 'standard':
for i in range(self.n_features):
x[i*self.n_points:(i+1)*self.n_points] = self.std_vector[i] * \
x0[i*self.n_points:(i+1)*self.n_points] + self.mean_vector[i]
else:
raise NotImplementedError('The scaling method selected has not been '\
'implemented yet')
return x
def decomposition(self, X0, decomp_type='POD'):
'''
Return the tailored basis and the amount of variance of the modes.
Parameters
----------
X0 : numpy array
The scaled data matrix to be decomposed, size (n,p).
decomp_type : str, optional
Type of decomposition. The default is 'POD'.
Returns
-------
U : numpy array
The tailored basis used for SPR, size (n,p).
exp_variance : numpy array
Array containing the explained variance of the modes, size (p,).
'''
if decomp_type == 'POD':
# Compute the SVD of the scaled dataset
U, S, Vt = np.linalg.svd(X0, full_matrices=False)
L = S**2 # Compute the eigenvalues
exp_variance = 100*np.cumsum(L)/np.sum(L)
else:
raise NotImplementedError('The decomposition method selected has not been '\
'implemented yet')
return U, exp_variance
def reduction(self, U, exp_variance, select_modes, n_modes):
'''
Return the reduced tailored basis.
Parameters
----------
U : numpy array
The tailored basis to be reduced, size (n,p).
exp_variance : numpy array
The array containing the explained variance of the modes, size (p,).
select_modes : str
Method of modes selection.
n_modes : int or float
Parameter that controls the number of modes to be retained. If
select_modes = 'variance', n_modes can be a float between 0 and 100.
If select_modes = 'number', n_modes can be an integer between 1 and m.
Returns
-------
Ur : numpy array
Reduced taylored basis, size (n,r).
'''
if select_modes == 'variance':
if not 0 <= n_modes <= 100:
raise ValueError('The parameter n_modes is outside the [0-100] range.')
# The r-order truncation is selected based on the amount of variance recovered
for r in range(exp_variance.size):
if exp_variance[r] > n_modes:
break
elif select_modes == 'number':
if not type(n_modes) is int:
raise TypeError('The parameter n_modes is not an integer.')
if not 1 <= n_modes <= U.shape[1]:
raise ValueError('The parameter n_modes is outside the [1-m] range.')
r = n_modes
else:
raise ValueError('The select_mode value is wrong.')
# Reduce the dimensionality
Ur = U[:, :r]
return Ur
def optimal_placement(self, scale_type='standard', select_modes='variance', n_modes=99):
'''
Return the matrix C containing the optimal placement of the sensors.
Parameters
----------
scale_type : str, optional
Type of scaling. The default is 'standard'.
select_modes : str, optional
Type of mode selection. The default is 'variance'. The available
options are 'variance' or 'number'.
n_modes : int or float, optional
Parameters that control the amount of modes retained. The default is
99, which represents 99% of the variance. If select_modes='number',
n_modes represents the number of modes retained.
Returns
-------
C : numpy array
The measurement matrix C obtained using QRCP decomposition,
size (s,n).
'''
n = self.X.shape[0]
X0 = SPR.scale_data(self, scale_type)
U, exp_variance = SPR.decomposition(self, X0)
Ur = SPR.reduction(self, U, exp_variance, select_modes, n_modes)
r = Ur.shape[1]
# Calculate the QRCP
Q, R, P = la.qr(Ur.T, pivoting=True, mode='economic')
s = r
C = np.zeros((s, n))
for j in range(s):
C[j, P[j]] = 1
return C
def fit_predict(self, C, y, scale_type='standard', select_modes='variance',
n_modes=99):
'''
Fit the tailored basis and the measurement matrix.
Return the prediction vector.
Parameters
----------
C : numpy array
The measurement matrix, size (s,n).
y : numpy array
The measurement vector, size (s,2). The first column contains
the measurements, the second column contains which feature is
measured.
scale_type : str, optional
Type of scaling method. The default is 'standard'.
select_modes : str, optional
Type of mode selection. The default is 'variance'. The available
options are 'variance' or 'number'.
n_modes : int or float, optional
Parameters that control the amount of modes retained. The default is
99, which represents 99% of the variance. If select_modes='number',
n_modes represents the number of modes retained.
Returns
-------
ar : numpy array
The low-dimensional projection of the state of the system, size (r,)
x_rec : numpy array
The predicted state of the system, size (n,).
'''
if C.shape[0] != y.shape[0]:
raise ValueError('The number of rows of C does not match the number' \
' of rows of y.')
if C.shape[1] != self.X.shape[0]:
raise ValueError('The number of columns of C does not match the number' \
' of rows of X.')
if y.shape[1] != 2:
raise ValueError('The y array has the wrong number of columns. y has' \
' to have dimensions (s,2).')
self.scale_type = scale_type
X0 = SPR.scale_data(self, scale_type)
U, exp_variance = SPR.decomposition(self, X0)
Ur = SPR.reduction(self, U, exp_variance, select_modes, n_modes)
self.Ur = Ur
Theta = C @ Ur
self.Theta = Theta
# calculate the condition number
if Theta.shape[0] == Theta.shape[1]:
U_theta, S_theta, V_thetat = np.linalg.svd(Theta)
self.k = S_theta[0]/S_theta[-1]
else:
Theta_pinv =
|
np.linalg.pinv(Theta)
|
numpy.linalg.pinv
|
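The heart of optimal_placement() is the column-pivoted QR of the reduced basis; a minimal standalone sketch on a random snapshot matrix (sizes are made up, scaling omitted):

import numpy as np
import scipy.linalg as la
np.random.seed(0)
n, p, r = 200, 20, 5
X0 = np.random.randn(n, p)                    # stand-in for the scaled data matrix
U, S, Vt = np.linalg.svd(X0, full_matrices=False)
Ur = U[:, :r]                                 # reduced (tailored) basis
Q, R, P = la.qr(Ur.T, pivoting=True, mode='economic')
C = np.zeros((r, n))
C[np.arange(r), P[:r]] = 1                    # one sensor (a single 1) per row
sensor_locations = P[:r]                      # the r locations picked by QRCP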
from lightweaver.rh_atoms import H_6_atom, H_6_CRD_atom, H_3_atom, C_atom, O_atom, OI_ord_atom, \
Si_atom, Al_atom, CaII_atom, Fe_atom, FeI_atom, He_9_atom, He_atom, He_large_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import numpy as np
from random import shuffle
import scipy.io as io
def synth_spectrum(atmos, depthData=False, Nthreads=1, conserveCharge=False, prd=False):
# Configure the atmospheric angular quadrature
atmos.quadrature(5)
# Configure the set of atomic models to use.
aSet = lw.RadiativeSet([H_6_atom(), C_atom(), OI_ord_atom(), Si_atom(), Al_atom(), CaII_atom(),
Fe_atom(), He_9_atom(), MgII_atom(), N_atom(), Na_atom(), S_atom()])
# Set H and Ca to "active" i.e. NLTE, everything else participates as an
# LTE background.
# aSet.set_active('H', 'Ca')
aSet.set_active('Ca')
# Compute the necessary wavelength dependent information (SpectrumConfiguration).
spect = aSet.compute_wavelength_grid()
# compute the equilibrium populations at the fixed electron density provided in the model
eqPops = aSet.compute_eq_pops(atmos)
# Configure the Context which holds the state of the simulation for the
# backend, and provides the python interface to the backend.
ctx = lw.Context(atmos, spect, eqPops, Nthreads=Nthreads, conserveCharge=conserveCharge)
if depthData:
ctx.depthData.fill = True
# Iterate the Context to convergence
iterate_ctx_crd(ctx, prd=prd)
# Update the background populations based on the converged solution and
eqPops.update_lte_atoms_Hmin_pops(atmos, quiet=True)
# compute the final solution for mu=1 on the provided wavelength grid.
ctx.formal_sol_gamma_matrices(printUpdate=False)
if prd:
ctx.prd_redistribute(printUpdate=False)
return ctx
def iterate_ctx_crd(ctx, prd=False, Nscatter=10, NmaxIter=500):
'''
Iterate a Context to convergence.
'''
for i in range(NmaxIter):
# Compute the formal solution
dJ = ctx.formal_sol_gamma_matrices(printUpdate=False)
if prd:
ctx.prd_redistribute(printUpdate=False)
# Just update J for Nscatter iterations
if i < Nscatter:
continue
# Update the active populations under statistical equilibrium,
# conserving charge if this option was set on the Context.
delta = ctx.stat_equil(printUpdate=False)
# If we are converged in both relative change of J and populations return
if dJ < 3e-3 and delta < 1e-3:
return
bifrost = io.readsav('../data/models_atmos/snap385_rh.save')
bifrost['tg'] = np.reshape(bifrost['tg'], (bifrost['tg'].shape[0], -1))
bifrost['vlos'] = np.reshape(bifrost['vlos'], (bifrost['vlos'].shape[0], -1))
bifrost['nel'] = np.reshape(bifrost['nel'], (bifrost['nel'].shape[0], -1))
# Shuffle the bifrost columns
index = np.arange(0, bifrost['tg'].shape[-1])
shuffle(index)
bifrost['tg'] = bifrost['tg'][:, index]
bifrost['vlos'] = bifrost['vlos'][:, index]
tau500 = np.float64(bifrost['z'][::-1]*1e3)
temperature = np.float64(bifrost['tg'][:, index[0]][::-1])
vlos = np.float64(bifrost['vlos'][:, index[0]][::-1])
ne =
|
np.float64(bifrost['nel'][:, index[0]][::-1])
|
numpy.float64
|
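The column handling before the synthesis call is plain numpy plus random.shuffle; a minimal sketch with a synthetic cube standing in for the Bifrost snapshot (shape and values are made up):

import numpy as np
from random import shuffle
np.random.seed(0)
nz, nx, ny = 64, 4, 4
tg = np.random.uniform(4e3, 1e4, size=(nz, nx, ny))   # stand-in for bifrost['tg']
tg = np.reshape(tg, (tg.shape[0], -1))                 # flatten the horizontal axes into columns
index = np.arange(0, tg.shape[-1])
shuffle(index)                                          # shuffle the column order in place
temperature = np.float64(tg[:, index[0]][::-1])         # one column, reversed as in the script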
#makes a two color fair isle to change knit stiffness
import math
import numpy as np
#has an edge protect built into the function
def stiffFairIsle(k,stitcharray,width,length,c1,c2,side,offset=1,edgeProtect=4):
"""A function to make a two color fair isle as a manner to change widthwise stiffness
k: Writer
stitcharray: array of ones and zeros that decides which carriers are knit
width: the width of the knitting
length: number of rows (2 passes per row because two colors)
c1: first carrier (knits when array is 1)
c2: second carrier (knits when array is not 1)
side: side where both carriers are starting on
offset: how much the array changes each pass
edgeProtect: width of the edge regions knit as 1x1 fair isle instead of longer floats"""
repeatSize = len(stitcharray)
totalRepeatsHoriz=int(math.ceil(float(width)/repeatSize))
ref = np.tile(stitcharray,totalRepeatsHoriz+1)
#account for starting position and add first row of knitting
if side == 'l':
start=1
else:
start=2
length=length+1
#set counter to make offset
counter=0
for b in range(start, length+1):
if b%2==1:
#first handle first carrier
for w in range(0,edgeProtect):
if w%2==1:
k.knit('+',('f',w),c1)
else:
k.miss('+',('f',w),c1)
for w in range(edgeProtect,width-edgeProtect):
if ref[w+counter]==1:
k.knit('+',('f',w),c1)
else:
k.miss('+',('f',w),c1)
for w in range(width-edgeProtect,width):
if w%2==1:
k.knit('+',('f',w),c1)
else:
k.miss('+',('f',w),c1)
#next handle second carrier
for w in range(0,edgeProtect):
if w%2==1:
k.miss('+',('f',w),c2)
else:
k.knit('+',('f',w),c2)
for w in range(edgeProtect,width-edgeProtect):
if ref[w+counter]==1:
k.miss('+',('f',w),c2)
else:
k.knit('+',('f',w),c2)
for w in range(width-edgeProtect,width):
if w%2==1:
k.miss('+',('f',w),c2)
else:
k.knit('+',('f',w),c2)
else:
#first handle first carrier
for w in range(width-1,width-edgeProtect-1,-1):
if w%2==1:
k.miss('-',('f',w),c1)
else:
k.knit('-',('f',w),c1)
for w in range(width-edgeProtect-1,edgeProtect-1,-1):
if ref[w+counter]==1:
k.knit('-',('f',w),c1)
else:
k.miss('-',('f',w),c1)
for w in range(edgeProtect-1,-1,-1):
if w%2==1:
k.miss('-',('f',w),c1)
else:
k.knit('-',('f',w),c1)
#next handle second carrier
for w in range(width-1,width-edgeProtect-1,-1):
if w%2==1:
k.knit('-',('f',w),c2)
else:
k.miss('-',('f',w),c2)
for w in range(width-edgeProtect-1,edgeProtect-1,-1):
if ref[w+counter]==1:
k.miss('-',('f',w),c2)
else:
k.knit('-',('f',w),c2)
for w in range(edgeProtect-1,-1,-1):
if w%2==1:
k.knit('-',('f',w),c2)
else:
k.miss('-',('f',w),c2)
counter=counter+offset
if counter>=repeatSize:
counter=0
def stiffFairIsleArray(k,stitcharray,start,finish,length,c1,c2,side='l',bed='f',offset=1,current=0):
"""A function to make a two color fair isle as a manner to change widthwise stiffness within a set stitch range
k: Writer
stitcharray: array of ones and zeros that decides which carriers are knit
start: the first needle to be knit
finish: the needle after the last needle to be knit
length: number of rows (2 passes per row because two colors)
c1: first carrier (knits when array is 1)
c2: second carrier (knits when array is not 1)
side: side where both carriers are starting on ('l' is default)
bed: what bed the knitting will be on ('f' is default)
offset: how much the array changes each pass (1 is default)
current: current offset when beginning using the function (used when called multiple times) (0 is default)"""
#tells how much the checkerboard fair isle
#to protect edge should be
repeatSize = len(stitcharray)
totalRepeatsHoriz=int(math.ceil(float(finish-start)/repeatSize))
ref = np.tile(stitcharray,totalRepeatsHoriz+2)
#account for starting position and add first row of knitting
if side == 'l':
beg=1
else:
beg=2
length=length+1
#set counter to make offset
current=current%repeatSize
counter=offset+current-1
for b in range(beg, length+1):
if counter>=repeatSize:
counter=0
if b%2==1:
for w in range(start,finish):
if ref[w+counter]==1:
k.knit('+',(bed,w),c1)
else:
k.miss('+',(bed,w),c1)
#next handle second carrier
for w in range(start,finish):
if ref[w+counter]==1:
k.miss('+',(bed,w),c2)
else:
k.knit('+',(bed,w),c2)
else:
#first handle first carrier
for w in range(finish-1,start-1,-1):
if ref[w+counter]==1:
k.knit('-',(bed,w),c1)
else:
k.miss('-',(bed,w),c1)
#handle second carrier
for w in range(finish-1,start-1,-1):
if ref[w+counter]==1:
k.miss('-',(bed,w),c2)
else:
k.knit('-',(bed,w),c2)
counter=counter+offset
def stiffFairIsleArraySided(k,stitcharray,start,finish,length,c1,c2,c1side='l',c2side='l',bed='f',offset=1):
"""A function to make a two color fair isle as a manner
to change widthwise stiffness in a set range. Allows carrier to start on different sides
k: Writer
stitcharray: array of ones and zeros that decides which carriers are knit
start: the first needle to be knit
finish: the needle after the last needle to be knit
length: number of rows (2 passes per row because two colors)
c1: first carrier (knits when array is 1)
c2: second carrier (knits when array is not 1)
c1side: side where carrier 1 starts on ('l' is default)
c2side: side where carrier 2 starts on ('l' is default)
bed: what bed the knitting will be on ('f' is default)
offset: how much the array changes each pass (1 is default)
"""
#allows the fair isle to be sided
#tells how much the checkerboard fair isle
#to protect edge should be
repeatSize = len(stitcharray)
totalRepeatsHoriz=int(math.ceil(float(finish-start)/repeatSize))
ref =
|
np.tile(stitcharray,totalRepeatsHoriz+2)
|
numpy.tile
|
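The carrier choice in these functions boils down to indexing a tiled copy of the stitch array with a per-row offset; a minimal sketch of just that bookkeeping (the 4-stitch pattern and the width are made up):

import math
import numpy as np
stitcharray = [1, 1, 0, 0]                    # 1 -> knit with c1, 0 -> knit with c2
width, offset = 10, 1
repeatSize = len(stitcharray)
totalRepeatsHoriz = int(math.ceil(float(width) / repeatSize))
ref = np.tile(stitcharray, totalRepeatsHoriz + 1)
counter = 0
for row in range(3):
    picks_c1 = [w for w in range(width) if ref[w + counter] == 1]
    print(row, picks_c1)                      # needles knit with the first carrier this row
    counter = (counter + offset) % repeatSize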
import numpy as np
from src.data import Problem, Case, Matter
def is_line_symmetry_row_strict(x_arr: np.array, background: np.int) -> np.array:
# center row
res_arr =
|
np.zeros((x_arr.shape[0], 2), dtype=np.int64)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 00:37:03 2019
@author: davim, matheussfarias
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from keras.models import Sequential
from keras.layers import Dense, Dropout
from python_speech_features import mfcc
PATH1 = 'clap/training/'
PATH2 = 'not_clap/training/'
PATH3 = 'clap/test/'
PATH4 = 'not_clap/test/'
#Getting training files
dt=[]
lb=[]
for file in os.listdir(PATH1):
f = open(PATH1+file,'rt')
data = []
v1=[]
for i in f:
v1.append(int(i))
data = np.array(v1)
data = data-np.mean(data)
data = data/np.max(data)
mfc = mfcc(data).flatten().tolist()
dt.append(mfc)
#dt.append(abs(data[500:1500]))
#dt.append(data[500:1000])
lb.append(1)
for file in os.listdir(PATH2):
f = open(PATH2+file,'rt')
data = []
v1=[]
for i in f:
v1.append(int(i))
data = np.array(v1)
data = data-np.mean(data)
data = data/np.max(data)
mfc = mfcc(data).flatten().tolist()
dt.append(mfc)
#dt.append(abs(data[500:1500]))
#dt.append(data[500:1000])
lb.append(0)
#Getting test files
dt_test=[]
lb_test=[]
for file in os.listdir(PATH3):
f = open(PATH3+file,'rt')
data = []
v1=[]
for i in f:
v1.append(int(i))
data = np.array(v1)
data = data-np.mean(data)
data = data/np.max(data)
mfc = mfcc(data).flatten().tolist()
dt_test.append(mfc)
#dt_test.append(abs(data[500:1500]))
#dt_test.append(data[500:1000])
lb_test.append(1)
for file in os.listdir(PATH4):
f = open(PATH4+file,'rt')
data = []
v1=[]
for i in f:
v1.append(int(i))
data = np.array(v1)
data = data-np.mean(data)
data = data/np.max(data)
mfc = mfcc(data).flatten().tolist()
dt_test.append(mfc)
#dt_test.append(abs(data[500:1500]))
#dt_test.append(data[500:1000])
lb_test.append(0)
#to numpy array
x_train =
|
np.row_stack(dt)
|
numpy.row_stack
|
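The four loops above differ only in the directory and the label; a sketch of a helper that could replace them (mfcc is called with its defaults, exactly as in the original; the paths are whatever PATH1..PATH4 point to):

import os
import numpy as np
from python_speech_features import mfcc

def load_features(path, label):
    """Read each recording in `path`, normalise it, and return MFCC feature rows plus labels."""
    feats, labels = [], []
    for fname in os.listdir(path):
        with open(os.path.join(path, fname), 'rt') as f:
            data = np.array([int(line) for line in f])
        data = data - np.mean(data)
        data = data / np.max(data)
        feats.append(mfcc(data).flatten().tolist())
        labels.append(label)
    return feats, labels

# e.g. dt, lb = load_features(PATH1, 1); then extend with load_features(PATH2, 0), etc.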
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.exceptions import NotFittedError
import pytest
from mpi4py import MPI
def test_distributed_srm(): # noqa: C901
import brainiak.funcalign.srm
s = brainiak.funcalign.srm.SRM()
assert s, "Invalid SRM instance!"
import numpy as np
np.random.seed(0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nrank = comm.Get_size()
voxels = 100
samples = 500
subjects = 2
features = 3
s = brainiak.funcalign.srm.SRM(n_iter=5, features=features, comm=comm)
assert s, "Invalid SRM instance!"
# Create a Shared response S with K = 3
theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
z = np.linspace(-2, 2, samples)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
S = np.vstack((x, y, z))
# DSRM: broadcast S
S = comm.bcast(S)
X = []
W = []
# DSRM: only append on rank 0
Q, R = np.linalg.qr(np.random.random((voxels, features)))
tmp_noise = 0.1*np.random.random((voxels, samples))
if rank == 0:
W.append(Q)
X.append(Q.dot(S) + tmp_noise)
else:
W.append(None)
X.append(None)
# Check that transform does NOT run before fitting the model
with pytest.raises(NotFittedError):
s.transform(X)
if rank == 0:
print("Test: transforming before fitting the model")
# Check that it does NOT run with 1 subject
with pytest.raises(ValueError):
s.fit(X)
if rank == 0:
print("Test: running SRM with 1 subject")
# DSRM: cyclic distribution of subject data, otherwise None
for subject in range(1, subjects):
Q, R = np.linalg.qr(np.random.random((voxels, features)))
tmp_noise = 0.1*
|
np.random.random((voxels, samples))
|
numpy.random.random
|
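The synthetic subjects above follow the SRM generative model X_i = W_i S + noise with orthonormal W_i; a minimal sketch of one subject's data without the MPI plumbing (sizes follow the test):

import numpy as np
np.random.seed(0)
voxels, samples, features = 100, 500, 3
theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
z = np.linspace(-2, 2, samples)
r = z**2 + 1
S = np.vstack((r * np.sin(theta), r * np.cos(theta), z))          # shared response, K = 3
Q, _ = np.linalg.qr(np.random.random((voxels, features)))         # orthonormal spatial map W_i
X_subject = Q.dot(S) + 0.1 * np.random.random((voxels, samples))  # subject data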
# for stats on running time
import copy,time,sys,pickle
# for mathematical calculations and statistical distributions
from scipy.stats import truncnorm
from scipy.spatial.distance import cdist
from scipy.special import comb
import math
import itertools
import numpy as np
# my files
sys.path.insert(0, '/home/chana/Documents/Thesis/FromGitFiles/SearchAlgorithm/')
from aStarClosedEvents_v1 import *
# for graphics
import seaborn as sns
from matplotlib import pyplot as plt
sns.set()
class Logs():
def __init__(self,numEvents,numCars,simLength):
self.eventsAnswered = np.zeros(shape = (simLength,1))
self.eventsCanceled =
|
np.zeros_like(self.eventsAnswered)
|
numpy.zeros_like
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
from nni.networkmorphism_tuner.layers import (
StubDense,
StubReLU,
get_batch_norm_class,
get_conv_class,
get_n_dim,
)
NOISE_RATIO = 1e-4
def deeper_conv_block(conv_layer, kernel_size, weighted=True):
'''deeper conv layer.
'''
n_dim = get_n_dim(conv_layer)
filter_shape = (kernel_size,) * 2
n_filters = conv_layer.filters
weight = np.zeros((n_filters, n_filters) + filter_shape)
center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
for i in range(n_filters):
filter_weight = np.zeros((n_filters,) + filter_shape)
index = (i,) + center
filter_weight[index] = 1
weight[i, ...] = filter_weight
bias = np.zeros(n_filters)
new_conv_layer = get_conv_class(n_dim)(
conv_layer.filters, n_filters, kernel_size=kernel_size
)
bn = get_batch_norm_class(n_dim)(n_filters)
if weighted:
new_conv_layer.set_weights(
(add_noise(weight, np.array([0, 1])),
add_noise(bias, np.array([0, 1])))
)
new_weights = [
add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
add_noise(
|
np.zeros(n_filters, dtype=np.float32)
|
numpy.zeros
|
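deeper_conv_block initialises the new layer as an identity mapping: a 1 at the spatial centre connecting channel i to channel i, zeros elsewhere, before noise is added. A minimal numpy sketch of that weight construction (kernel size is made up):

import numpy as np
n_filters, kernel_size = 4, 3
filter_shape = (kernel_size,) * 2
center = tuple(int((k - 1) / 2) for k in filter_shape)
weight = np.zeros((n_filters, n_filters) + filter_shape)
for i in range(n_filters):
    weight[(i, i) + center] = 1               # pass channel i straight through
# output channel 0 copies input channel 0 at the kernel centre and ignores channel 1
assert weight[(0, 0) + center] == 1
assert weight[0, 1].sum() == 0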
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Node M&C info from the node's Redis database."""
from math import floor
from astropy.time import Time
import numpy as np
from sqlalchemy import Column, BigInteger, Integer, Float, Boolean, String
from . import MCDeclarativeBase
# the address of a redis database being used as a clearing house for meta-data
# and message passing which the node server has access to and watches
defaultServerAddress = 'redishost'
sensor_key_dict = {'top_sensor_temp': 'temp_top',
'middle_sensor_temp': 'temp_mid',
'bottom_sensor_temp': 'temp_bot',
'humidity_sensor_temp': 'temp_humid',
'humidity': 'humid'}
power_status_key_dict = {'snap_relay_powered': 'power_snap_relay',
'snap0_powered': 'power_snap_0',
'snap1_powered': 'power_snap_1',
'snap2_powered': 'power_snap_2',
'snap3_powered': 'power_snap_3',
'pam_powered': 'power_pam',
'fem_powered': 'power_fem'}
# key is part to command, value is function name in hera_node_mc
power_command_part_dict = {'snap_relay': 'power_snap_relay',
'snap0': 'power_snap_0', 'snap1': 'power_snap_1',
'snap2': 'power_snap_2', 'snap3': 'power_snap_3',
'pam': 'power_pam', 'fem': 'power_fem'}
wr_key_dict = {
'board_info_str': 'board_info_str',
'aliases': 'aliases',
'ip': 'ip',
'mode': 'mode',
'serial': 'serial',
'temperature': 'temp',
'build_date': 'sw_build_date',
'gw_date': 'wr_gw_date', # hack!
'gw_version': 'wr_gw_version',
'gw_id': 'wr_gw_id',
'build_hash': 'wr_build',
'manufacture_tag': 'wr_fru_custom',
'manufacture_device': 'wr_fru_device',
'manufacture_date': 'wr_fru_fid',
'manufacture_partnum': 'wr_fru_partnum',
'manufacture_serial': 'wr_fru_serial',
'manufacture_vendor': 'wr_fru_vendor',
'port0_ad': 'wr0_ad',
'port0_link_asymmetry_ps': 'wr0_asym',
'port0_manual_phase_ps': 'wr0_aux',
'port0_clock_offset_ps': 'wr0_cko',
'port0_cable_rt_delay_ps': 'wr0_crtt',
'port0_master_slave_delay_ps': 'wr0_dms',
'port0_master_rx_phy_delay_ps': 'wr0_drxm',
'port0_slave_rx_phy_delay_ps': 'wr0_drxs',
'port0_master_tx_phy_delay_ps': 'wr0_dtxm',
'port0_slave_tx_phy_delay_ps': 'wr0_dtxs',
'port0_hd': 'wr0_hd',
'port0_link': 'wr0_lnk',
'port0_lock': 'wr0_lock',
'port0_md': 'wr0_md',
'port0_rt_time_ps': 'wr0_mu',
'port0_nsec': 'wr0_nsec',
'port0_packets_received': 'wr0_rx',
'port0_phase_setpoint_ps': 'wr0_setp',
'port0_servo_state': 'wr0_ss',
'port0_sv': 'wr0_sv',
'port0_sync_source': 'wr0_syncs',
'port0_packets_sent': 'wr0_tx',
'port0_update_counter': 'wr0_ucnt',
'port0_time': 'wr0_sec',
'port1_ad': 'wr1_ad',
'port1_link_asymmetry_ps': 'wr1_asym',
'port1_manual_phase_ps': 'wr1_aux',
'port1_clock_offset_ps': 'wr1_cko',
'port1_cable_rt_delay_ps': 'wr1_crtt',
'port1_master_slave_delay_ps': 'wr1_dms',
'port1_master_rx_phy_delay_ps': 'wr1_drxm',
'port1_slave_rx_phy_delay_ps': 'wr1_drxs',
'port1_master_tx_phy_delay_ps': 'wr1_dtxm',
'port1_slave_tx_phy_delay_ps': 'wr1_dtxs',
'port1_hd': 'wr1_hd',
'port1_link': 'wr1_lnk',
'port1_lock': 'wr1_lock',
'port1_md': 'wr1_md',
'port1_rt_time_ps': 'wr1_mu',
'port1_nsec': 'wr1_nsec',
'port1_packets_received': 'wr1_rx',
'port1_phase_setpoint_ps': 'wr1_setp',
'port1_servo_state': 'wr1_ss',
'port1_sv': 'wr1_sv',
'port1_sync_source': 'wr1_syncs',
'port1_packets_sent': 'wr1_tx',
'port1_update_counter': 'wr1_ucnt',
'port1_time': 'wr1_sec',
}
wr_datetime_keys = ['build_date', 'gw_date', 'manufacture_date']
wr_tai_sec_keys = ['port0_time', 'port1_time']
def get_node_list(nodeServerAddress=defaultServerAddress):
"""
Get the list of active nodes from redis.
Parameters
----------
nodeServerAddress : str
Node redis address.
"""
import nodeControl
if nodeServerAddress is None:
nodeServerAddress = defaultServerAddress
return nodeControl.get_valid_nodes(serverAddress=nodeServerAddress)
class NodeSensor(MCDeclarativeBase):
"""
Definition of node sensor table.
Attributes
----------
time : BigInteger Column
GPS time of the node data, floored. Part of the primary key.
node : Integer Column
Node number. Part of the primary key.
top_sensor_temp : Float Column
Temperature of top sensor reported by node in Celsius.
middle_sensor_temp : Float Column
Temperature of middle sensor reported by node in Celsius.
bottom_sensor_temp : Float Column
Temperature of bottom sensor reported by node in Celsius.
humidity_sensor_temp : Float Column
Temperature of the humidity sensor reported by node in Celsius.
humidity : Float Column
Percent humidity measurement reported by node.
"""
__tablename__ = 'node_sensor'
time = Column(BigInteger, primary_key=True)
node = Column(Integer, primary_key=True)
top_sensor_temp = Column(Float)
middle_sensor_temp = Column(Float)
bottom_sensor_temp = Column(Float)
humidity_sensor_temp = Column(Float)
humidity = Column(Float)
@classmethod
def create(cls, time, node, top_sensor_temp, middle_sensor_temp,
bottom_sensor_temp, humidity_sensor_temp, humidity):
"""
Create a new node sensor object.
Parameters
----------
time : astropy Time object
Astropy time object based on a timestamp reported by node
node : int
Node number (within 1 to 30).
top_sensor_temp : float
Temperature of top sensor reported by node in Celsius.
middle_sensor_temp : float
Temperature of middle sensor reported by node in Celsius.
bottom_sensor_temp : float
Temperature of bottom sensor reported by node in Celsius.
humidity_sensor_temp : float
Temperature of the humidity sensor reported by node in Celsius.
humidity : float
Percent humidity measurement reported by node.
Returns
-------
NodeSensor object
"""
if not isinstance(time, Time):
raise ValueError('time must be an astropy Time object')
node_time = floor(time.gps)
return cls(time=node_time, node=node, top_sensor_temp=top_sensor_temp,
middle_sensor_temp=middle_sensor_temp,
bottom_sensor_temp=bottom_sensor_temp,
humidity_sensor_temp=humidity_sensor_temp,
humidity=humidity)
def _get_sensor_dict(node, nodeServerAddress=defaultServerAddress):
"""
Get node sensor information from a nodeControl object.
Parameters
----------
node : int
Node number.
nodeServerAddress : str
Node redis address.
Returns
-------
datetime timestamp
Time of the status
dict
keys are values in `sensor_key_dict`, values are sensor readings.
"""
import nodeControl
node_controller = nodeControl.NodeControl(
node, serverAddress=nodeServerAddress)
# Get the sensor data for this node, returned as a dict
return node_controller.get_sensors()
def create_sensor_readings(nodeServerAddress=defaultServerAddress,
node_list=None, sensor_dict=None):
"""
Return a list of node sensor objects with data from the nodes.
Parameters
----------
nodeServerAddress : str
Address of server where the node redis database can be accessed.
node_list : list of int
A list of integers specifying which nodes to get data for,
primarily for testing purposes. If None, get_node_list() is called.
sensor_dict : dict
A dict spoofing the return dict from _get_sensor_dict for testing
purposes.
Returns
-------
A list of NodeSensor objects
"""
if node_list is None:
node_list = get_node_list(nodeServerAddress=nodeServerAddress)
node_sensor_list = []
for node in node_list:
if sensor_dict is None:
timestamp, sensor_data = _get_sensor_dict(
node, nodeServerAddress=nodeServerAddress)
else:
sensor_data = sensor_dict[str(node)]
timestamp = sensor_data.pop('timestamp')
time = Time(timestamp, format='datetime', scale='utc')
top_sensor_temp = sensor_data.get(
sensor_key_dict['top_sensor_temp'], None)
middle_sensor_temp = sensor_data.get(
sensor_key_dict['middle_sensor_temp'], None)
bottom_sensor_temp = sensor_data.get(
sensor_key_dict['bottom_sensor_temp'], None)
humidity_sensor_temp = sensor_data.get(
sensor_key_dict['humidity_sensor_temp'], None)
humidity = sensor_data.get(sensor_key_dict['humidity'], None)
node_sensor_list.append(NodeSensor.create(
time, node, top_sensor_temp, middle_sensor_temp,
bottom_sensor_temp, humidity_sensor_temp, humidity))
return node_sensor_list
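# Editor's sketch: create_sensor_readings can be exercised without redis or a real node by
# passing node_list and a spoof sensor_dict. The helper name and the readings below are
# illustrative only; the keys follow sensor_key_dict above.
def _example_spoofed_sensor_readings():
    from datetime import datetime
    spoof = {'1': {'timestamp': datetime.utcnow(),
                   'temp_top': 25.1, 'temp_mid': 24.8, 'temp_bot': 24.5,
                   'temp_humid': 24.9, 'humid': 31.0}}
    # bypasses get_node_list() and _get_sensor_dict(), so no redis/nodeControl needed
    return create_sensor_readings(node_list=[1], sensor_dict=spoof)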
class NodePowerStatus(MCDeclarativeBase):
"""
Definition of node power status table.
Attributes
----------
time : BigInteger Column
GPS time of the node data, floored. Part of the primary key.
node : Integer Column
Node number. Part of the primary key.
snap_relay_powered : Boolean Column
Power status of the snap relay, True=powered.
snap0_powered : Boolean Column
Power status of the SNAP 0 board, True=powered.
snap1_powered : Boolean Column
Power status of the SNAP 1 board, True=powered.
snap2_powered : Boolean Column
Power status of the SNAP 2 board, True=powered.
snap3_powered : Boolean Column
Power status of the SNAP 3 board, True=powered.
fem_powered : Boolean Column
Power status of the FEM, True=powered.
pam_powered : Boolean Column
Power status of the PAM, True=powered.
"""
__tablename__ = 'node_power_status'
time = Column(BigInteger, primary_key=True)
node = Column(Integer, primary_key=True)
snap_relay_powered = Column(Boolean, nullable=False)
snap0_powered = Column(Boolean, nullable=False)
snap1_powered = Column(Boolean, nullable=False)
snap2_powered = Column(Boolean, nullable=False)
snap3_powered = Column(Boolean, nullable=False)
fem_powered = Column(Boolean, nullable=False)
pam_powered = Column(Boolean, nullable=False)
@classmethod
def create(cls, time, node, snap_relay_powered, snap0_powered,
snap1_powered, snap2_powered, snap3_powered,
fem_powered, pam_powered):
"""
Create a new node power status object.
Parameters
----------
time : astropy Time object
Astropy time object based on a timestamp reported by node.
node : int
Node number (within 1 to 30).
snap_relay_powered: boolean
Power status of the snap relay, True=powered.
snap0_powered: boolean
Power status of the SNAP 0 board, True=powered.
snap1_powered: boolean
Power status of the SNAP 1 board, True=powered.
snap2_powered: boolean
Power status of the SNAP 2 board, True=powered.
snap3_powered: boolean
Power status of the SNAP 3 board, True=powered.
fem_powered: boolean
Power status of the FEM, True=powered.
pam_powered: boolean
Power status of the PAM, True=powered.
Returns
-------
NodePowerStatus object
"""
if not isinstance(time, Time):
raise ValueError('time must be an astropy Time object')
node_time = floor(time.gps)
return cls(time=node_time, node=node,
snap_relay_powered=snap_relay_powered,
snap0_powered=snap0_powered, snap1_powered=snap1_powered,
snap2_powered=snap2_powered, snap3_powered=snap3_powered,
fem_powered=fem_powered, pam_powered=pam_powered)
def _get_power_dict(node, nodeServerAddress=defaultServerAddress):
"""
Get node power status information from a nodeControl object.
Parameters
----------
node : int
Node number.
nodeServerAddress : str
Node redis address.
Returns
-------
datetime timestamp
Time of the status
dict
keys are values in `power_status_key_dict`, values are power states.
"""
import nodeControl
node_controller = nodeControl.NodeControl(node, serverAddress=nodeServerAddress)
# Get the sensor data for this node, returned as a dict
return node_controller.get_power_status()
def create_power_status(nodeServerAddress=defaultServerAddress, node_list=None,
power_dict=None):
"""
Return a list of node power status objects with data from the nodes.
Parameters
----------
nodeServerAddress : str
Address of server where the node redis database can be accessed.
node_list : list of int
A list of integers specifying which nodes to get data for,
primarily for testing purposes. If None, get_node_list() is called.
power_dict : dict
A dict containing info as in the return dict from _get_power_dict()
for testing purposes. If None, _get_power_dict() is called.
Returns
-------
A list of NodePowerStatus objects
"""
if node_list is None:
node_list = get_node_list(nodeServerAddress=nodeServerAddress)
node_power_list = []
for node in node_list:
if power_dict is None:
timestamp, power_data = _get_power_dict(node, nodeServerAddress=nodeServerAddress)
else:
power_data = power_dict[str(node)]
timestamp = power_data.pop('timestamp')
time = Time(timestamp, format='datetime', scale='utc')
# All items in this dictionary are strings.
snap_relay_powered = power_data[power_status_key_dict['snap_relay_powered']]
snap0_powered = power_data[power_status_key_dict['snap0_powered']]
snap1_powered = power_data[power_status_key_dict['snap1_powered']]
snap2_powered = power_data[power_status_key_dict['snap2_powered']]
snap3_powered = power_data[power_status_key_dict['snap3_powered']]
pam_powered = power_data[power_status_key_dict['pam_powered']]
fem_powered = power_data[power_status_key_dict['fem_powered']]
node_power_list.append(NodePowerStatus.create(time, node, snap_relay_powered,
snap0_powered, snap1_powered,
snap2_powered, snap3_powered,
fem_powered, pam_powered))
return node_power_list
class NodePowerCommand(MCDeclarativeBase):
"""
Definition of node power command table.
Attributes
----------
time : BigInteger Column
GPS time of the command, floored. Part of the primary key.
node : Integer Column
Node number. Part of the primary key.
part : String Column
Part to be powered on/off. Part of the primary key.
command : String Column
Command, one of 'on' or 'off'.
"""
__tablename__ = 'node_power_command'
time = Column(BigInteger, primary_key=True)
node = Column(Integer, primary_key=True)
part = Column(String, primary_key=True)
command = Column(String, nullable=False)
@classmethod
def create(cls, time, node, part, command):
"""
Create a new node power command object.
Parameters
----------
time : astropy Time object
Astropy time object for time command was sent.
node : int
Node number (within 1 to 30).
part : str
One of the keys in power_command_part_dict.
command : {'on', 'off'}
The command that was sent.
Returns
-------
NodePowerCommand object
"""
if not isinstance(time, Time):
raise ValueError('time must be an astropy Time object')
node_time = floor(time.gps)
if part not in list(power_command_part_dict.keys()):
raise ValueError('part must be one of: '
+ ', '.join(list(power_command_part_dict.keys()))
+ '. part is actually {}'.format(part))
if command not in ['on', 'off']:
raise ValueError('command must be one of: on, off')
return cls(time=node_time, node=node, part=part, command=command)
class NodeWhiteRabbitStatus(MCDeclarativeBase):
"""
Definition of node white rabbit status table.
Attributes
----------
node_time: BigInteger Column
GPS time of the status reported by the node, floored. Part of the primary key.
node: Integer Column
Node number. Part of the primary key.
board_info_str : String Column
A raw string representing the WR-LEN's response to the `ver` command.
Relevant parts of this string are individually unpacked in other entries.
aliases : String Column
Hostname aliases of this node's WR-LEN (comma separated if more than one).
ip : String Column
IP address of this node's WR-LEN
mode : String Column
WR-LEN operating mode (eg. "WRC_SLAVE_WR0")
serial : String Column
Canonical HERA hostname (~= serial number) of this node's WR-LEN
temperature : Float Column
WR-LEN temperature in degrees C
build_date : BigInteger Column
Build date of WR-LEN software in floored GPS seconds.
gw_date : BigInteger Column
WR-LEN gateware build date in floored GPS seconds.
gw_version : String Column
WR-LEN gateware version number
gw_id : String Column
WR-LEN gateware ID number
build_hash : String Column
WR-LEN build git hash
manufacture_tag : String Column
Custom manufacturer tag
manufacture_device : String Column
Manufacturer device name designation
manufacture_date : BigInteger Column
Manufacturer invoice(?) date
manufacture_partnum : String Column
Manufacturer part number
manufacture_serial : String Column
Manufacturer serial number
manufacture_vendor : String Column
Vendor name
port0_ad : Integer Column
???
port0_link_asymmetry_ps : Integer Column
Port 0 total link asymmetry in picosec
port0_manual_phase_ps : Integer Column
??? Port 0 manual phase adjustment in picosec
port0_clock_offset_ps : Integer Column
Port 0 Clock offset in picosec
port0_cable_rt_delay_ps : Integer Column
Port 0 Cable round-trip delay in picosec
port0_master_slave_delay_ps : Integer Column
Port 0 Master-Slave delay in picosec
port0_master_rx_phy_delay_ps : Integer Column
Port 0 Master RX PHY delay in picosec
port0_slave_rx_phy_delay_ps : Integer Column
Port 0 Slave RX PHY delay in picosec
port0_master_tx_phy_delay_ps : Integer Column
Port 0 Master TX PHY delay in picosec
port0_slave_tx_phy_delay_ps : Integer Column
Port 0 Slave TX PHY delay in picosec
port0_hd : Integer Column
???
port0_link : Boolean Column
Port 0 link up state
port0_lock : Boolean Column
Port 0 timing lock state
port0_md : Integer Column
???
port0_rt_time_ps : Integer Column
Port 0 round-trip time in picosec
port0_nsec : Integer Column
???
port0_packets_received : Integer Column
Port 0 number of packets received
port0_phase_setpoint_ps : Integer Column
Port 0 phase setpoint in picosec
port0_servo_state : String Column
Port 0 servo state
port0_sv : Integer Column
???
port0_sync_source : String Column
Port 0 source of synchronization (either 'wr0' or 'wr1')
port0_packets_sent : Integer Column
Port 0 number of packets transmitted
port0_update_counter : Integer Column
Port 0 update counter
port0_time : BigInteger Column
Port 0 current time in GPS seconds.
port1_ad : Integer Column
???
port1_link_asymmetry_ps : Integer Column
Port 1 total link asymmetry in picosec
port1_manual_phase_ps : Integer Column
??? Port 1 manual phase adjustment in picosec
port1_clock_offset_ps : Integer Column
Port 1 Clock offset in picosec
port1_cable_rt_delay_ps : Integer Column
Port 1 Cable round-trip delay in picosec
port1_master_slave_delay_ps : Integer Column
Port 1 Master-Slave delay in picosec
port1_master_rx_phy_delay_ps : Integer Column
Port 1 Master RX PHY delay in picosec
port1_slave_rx_phy_delay_ps : Integer Column
Port 1 Slave RX PHY delay in picosec
port1_master_tx_phy_delay_ps : Integer Column
Port 1 Master TX PHY delay in picosec
port1_slave_tx_phy_delay_ps : Integer Column
Port 1 Slave TX PHY delay in picosec
port1_hd : Integer Column
???
port1_link : Boolean Column
Port 1 link up state
port1_lock : Boolean Column
Port 1 timing lock state
port1_md : Integer Column
???
port1_rt_time_ps : Integer Column
Port 1 round-trip time in picosec
port1_nsec : Integer Column
???
port1_packets_received : Integer Column
Port 1 number of packets received
port1_phase_setpoint_ps : Integer Column
Port 1 phase setpoint in picosec
port1_servo_state : String Column
Port 1 servo state
port1_sv : Integer Column
???
port1_sync_source : String Column
Port 1 source of synchronization (either 'wr0' or 'wr1')
port1_packets_sent : Integer Column
Port 1 number of packets transmitted
port1_update_counter : Integer Column
Port 1 update counter
port1_time : BigInteger Column
Port 1 current time in GPS seconds.
"""
__tablename__ = 'node_white_rabbit_status'
node_time = Column(BigInteger, primary_key=True)
node = Column(Integer, primary_key=True)
board_info_str = Column(String)
aliases = Column(String)
ip = Column(String)
mode = Column(String)
serial = Column(String)
temperature = Column(Float)
build_date = Column(BigInteger)
gw_date = Column(BigInteger)
gw_version = Column(String)
gw_id = Column(String)
build_hash = Column(String)
manufacture_tag = Column(String)
manufacture_device = Column(String)
manufacture_date = Column(BigInteger)
manufacture_partnum = Column(String)
manufacture_serial = Column(String)
manufacture_vendor = Column(String)
port0_ad = Column(Integer)
port0_link_asymmetry_ps = Column(Integer)
port0_manual_phase_ps = Column(Integer)
port0_clock_offset_ps = Column(Integer)
port0_cable_rt_delay_ps = Column(Integer)
port0_master_slave_delay_ps = Column(Integer)
port0_master_rx_phy_delay_ps = Column(Integer)
port0_slave_rx_phy_delay_ps = Column(Integer)
port0_master_tx_phy_delay_ps = Column(Integer)
port0_slave_tx_phy_delay_ps = Column(Integer)
port0_hd = Column(Integer)
port0_link = Column(Boolean)
port0_lock = Column(Boolean)
port0_md = Column(Integer)
port0_rt_time_ps = Column(Integer)
port0_nsec = Column(Integer)
port0_packets_received = Column(Integer)
port0_phase_setpoint_ps = Column(Integer)
port0_servo_state = Column(String)
port0_sv = Column(Integer)
port0_sync_source = Column(String)
port0_packets_sent = Column(Integer)
port0_update_counter = Column(Integer)
port0_time = Column(BigInteger)
port1_ad = Column(Integer)
port1_link_asymmetry_ps = Column(Integer)
port1_manual_phase_ps = Column(Integer)
port1_clock_offset_ps = Column(Integer)
port1_cable_rt_delay_ps = Column(Integer)
port1_master_slave_delay_ps = Column(Integer)
port1_master_rx_phy_delay_ps = Column(Integer)
port1_slave_rx_phy_delay_ps = Column(Integer)
port1_master_tx_phy_delay_ps = Column(Integer)
port1_slave_tx_phy_delay_ps = Column(Integer)
port1_hd = Column(Integer)
port1_link = Column(Boolean)
port1_lock = Column(Boolean)
port1_md = Column(Integer)
port1_rt_time_ps = Column(Integer)
port1_nsec = Column(Integer)
port1_packets_received = Column(Integer)
port1_phase_setpoint_ps = Column(Integer)
port1_servo_state = Column(String)
port1_sv = Column(Integer)
port1_sync_source = Column(String)
port1_packets_sent = Column(Integer)
port1_update_counter = Column(Integer)
port1_time = Column(BigInteger)
@classmethod
def create(cls, col_dict):
"""
Create a new node white rabbit status object.
Parameters
----------
col_dict : dict
dictionary that must contain the following entries:
node_time : astropy Time object
Astropy time object based on a timestamp reported by node.
node : int
Node number (within 1 to 30).
board_info_str : str
A raw string representing the WR-LEN's response to the `ver` command.
Relevant parts of this string are individually unpacked in other entries.
aliases : str
Hostname aliases of this node's WR-LEN (comma separated if more than one).
ip : str
IP address of this node's WR-LEN
mode : str
WR-LEN operating mode (eg. "WRC_SLAVE_WR0")
serial : str
Canonical HERA hostname (~= serial number) of this node's WR-LEN
temperature : float
WR-LEN temperature in degrees C
build_date : astropy Time object
Build date of WR-LEN software in floored GPS seconds.
gw_date : astropy Time object
WR-LEN gateware build date in floored GPS seconds.
gw_version : str
WR-LEN gateware version number
gw_id : str
WR-LEN gateware ID number
build_hash : str
WR-LEN build git hash
manufacture_tag : str
Custom manufacturer tag
manufacture_device : str
Manufacturer device name designation
manufacture_date : astropy Time object
Manufacturer invoice(?) date
manufacture_partnum : str
Manufacturer part number
manufacture_serial : str
Manufacturer serial number
manufacture_vendor : str
Vendor name
port0_ad : int
???
port0_link_asymmetry_ps : int
Port 0 total link asymmetry in picosec
port0_manual_phase_ps : int
??? Port 0 manual phase adjustment in picosec
port0_clock_offset_ps : int
Port 0 Clock offset in picosec
port0_cable_rt_delay_ps : int
Port 0 Cable round-trip delay in picosec
port0_master_slave_delay_ps : int
Port 0 Master-Slave delay in picosec
port0_master_rx_phy_delay_ps : int
Port 0 Master RX PHY delay in picosec
port0_slave_rx_phy_delay_ps : int
Port 0 Slave RX PHY delay in picosec
port0_master_tx_phy_delay_ps : int
Port 0 Master TX PHY delay in picosec
port0_slave_tx_phy_delay_ps : int
Port 0 Slave TX PHY delay in picosec
port0_hd : int
???
port0_link : bool
Port 0 link up state
port0_lock : bool
Port 0 timing lock state
port0_md : int
???
port0_rt_time_ps : int
Port 0 round-trip time in picosec
port0_nsec : int
???
port0_packets_received : int
Port 0 number of packets received
port0_phase_setpoint_ps : int
Port 0 phase setpoint in picosec
port0_servo_state : str
Port 0 servo state
port0_sv : int
???
port0_sync_source : str
Port 0 source of synchronization (either 'wr0' or 'wr1')
port0_packets_sent : int
Port 0 number of packets transmitted
port0_update_counter : int
Port 0 update counter
port0_time : astropy Time object
Astropy Time object based on Port 0 current TAI time in seconds from UNIX epoch.
port1_ad : int
???
port1_link_asymmetry_ps : int
Port 1 total link asymmetry in picosec
port1_manual_phase_ps : int
??? Port 1 manual phase adjustment in picosec
port1_clock_offset_ps : int
Port 1 Clock offset in picosec
port1_cable_rt_delay_ps : int
Port 1 Cable round-trip delay in picosec
port1_master_slave_delay_ps : int
Port 1 Master-Slave delay in picosec
port1_master_rx_phy_delay_ps : int
Port 1 Master RX PHY delay in picosec
port1_slave_rx_phy_delay_ps : int
Port 1 Slave RX PHY delay in picosec
port1_master_tx_phy_delay_ps : int
Port 1 Master TX PHY delay in picosec
port1_slave_tx_phy_delay_ps : int
Port 1 Slave TX PHY delay in picosec
port1_hd : int
???
port1_link : bool
Port 1 link up state
port1_lock : bool
Port 1 timing lock state
port1_md : int
???
port1_rt_time_ps : int
Port 1 round-trip time in picosec
port1_nsec : int
???
port1_packets_received : int
Port 1 number of packets received
port1_phase_setpoint_ps : int
Port 1 phase setpoint in picosec
port1_servo_state : str
Port 1 servo state
port1_sv : int
???
port1_sync_source : str
Port 1 source of synchronization (either 'wr0' or 'wr1')
port1_packets_sent : int
Port 1 number of packets transmitted
port1_update_counter : int
Port 1 update counter
port1_time : astropy Time object
Astropy Time object based on Port 1 current TAI time in seconds from UNIX epoch.
Returns
-------
NodeWhiteRabbitStatus object
"""
params_dict = {}
for col, value in col_dict.items():
if col == 'node_time':
if not isinstance(value, Time):
print(col)
raise ValueError(col + ' must be an astropy Time object')
params_dict[col] = floor(value.gps)
elif ((col in wr_datetime_keys or col in wr_tai_sec_keys)
and value is not None):
if not isinstance(value, Time):
print(col)
raise ValueError(col + ' must be an astropy Time object')
params_dict[col] = floor(value.gps)
else:
params_dict[col] = value
return cls(**params_dict)
def _get_wr_status_dict(node, nodeServerAddress=defaultServerAddress):
"""
Get node white rabbit status information from a nodeControl object.
Parameters
----------
node: int
Node number.
nodeServerAddress: str
Node redis address.
Returns
-------
datetime timestamp
Time of the status
dict
keys are values in `wr_key_dict`, values are sensor readings.
from hera_node_mc nodeControl.get_wr_status docstring:
If no stats exist for this White Rabbit endpoint, returns `None`.
Otherwise Returns a tuple `(timestamp, statii)`, where `timestamp` is a
python `datetime` object describing when the values were last updated
in redis, and `statii` is a dictionary of status values.
If a status value is not available it will be `None`
Valid status keywords are:
'board_info_str' (str) : A raw string representing the WR-LEN's
response to the `ver` command.
Relevant parts of this string are
individually unpacked in other entries.
'aliases' (list of strings) : Hostname aliases of this node's WR-LEN
'ip' (str) : IP address of this node's WR-LEN
'mode' (str) : WR-LEN operating mode (eg. "WRC_SLAVE_WR0")
'serial' (str) : Canonical HERA hostname (~= serial number)
of this node's WR-LEN
'temp' (float) : WR-LEN temperature in degrees C
'sw_build_date' (datetime) : Build date of WR-LEN software
'wr_gw_date' (datetime) : WR-LEN gateware build date
'wr_gw_version' (str) : WR-LEN gateware version number
'wr_gw_id' (str) : WR-LEN gateware ID number
'wr_build' (str) : WR-LEN build git hash
'wr_fru_custom' (str) : Custom manufacturer tag'
'wr_fru_device' (str) : Manufacturer device name designation
'wr_fru_fid' (datetime) : Manufacturer invoice(?) date
'wr_fru_partnum' (str) : Manufacturer part number
'wr_fru_serial' (str) : Manufacturer serial number
'wr_fru_vendor' (str) : Vendor name
The following entries are prefixed `wr0` or `wr1` for WR-LEN
ports 0 and 1, respectively.
Most values will only be not None for one of the two ports.
'wr[0|1]_ad' (int) : ???
'wr[0|1]_asym' (int) : Total link asymmetry (ps)
'wr[0|1]_aux' (int) : ??? Manual phase adjustment (ps)
'wr[0|1]_cko' (int) : Clock offset (ps)
'wr[0|1]_crtt' (int) : Cable round-trip delay (ps)
'wr[0|1]_dms' (int) : Master-Slave delay (ps)
'wr[0|1]_drxm' (int) : Master RX PHY delay (ps)
'wr[0|1]_drxs' (int) : Slave RX PHY delay (ps)
'wr[0|1]_dtxm' (int) : Master TX PHY delay (ps)
'wr[0|1]_dtxs' (int) : Slave TX PHY delay (ps)
'wr[0|1]_hd' (int) : ???
'wr[0|1]_lnk' (bool) : Link up state
'wr[0|1]_lock' (bool) : Timing lock state
'wr[0|1]_md' (int) : ???
'wr[0|1]_mu' (int) : Round-trip time (ps)
'wr[0|1]_nsec' (int) : ???
'wr[0|1]_rx' (int) : Number of packets received
'wr[0|1]_setp' (int) : Phase setpoint (ps)
'wr[0|1]_ss' (str) : Servo state
'wr[0|1]_sv' (int) : ???
'wr[0|1]_syncs' (str) : Source of synchronization (either 'wr0' or 'wr1')
'wr[0|1]_tx' (int) : Number of packets transmitted
'wr[0|1]_ucnt' (int) : Update counter
'wr[0|1]_sec' (int) : Current TAI time in seconds from UNIX epoch
"""
import nodeControl
node_controller = nodeControl.NodeControl(
node, serverAddress=nodeServerAddress)
# Get the sensor data for this node, returned as a dict
return node_controller.get_wr_status()
def create_wr_status(nodeServerAddress=defaultServerAddress,
node_list=None, wr_status_dict=None):
"""
Return a list of node white rabbit status objects with data from the nodes.
Parameters
----------
nodeServerAddress: str
Address of server where the node redis database can be accessed.
node_list: list of int
A list of integers specifying which nodes to get data for,
primarily for testing purposes. If None, get_node_list() is called.
wr_status_dict: dict
A dict spoofing the return dict from _get_wr_status_dict for testing
purposes.
Returns
-------
A list of NodeWhiteRabbitStatus objects
"""
if node_list is None:
node_list = get_node_list(nodeServerAddress=nodeServerAddress)
wr_status_list = []
for node in node_list:
if wr_status_dict is None:
wr_retval = _get_wr_status_dict(node, nodeServerAddress=nodeServerAddress)
if wr_retval is not None:
timestamp, wr_data = wr_retval
else:
# No info for this node.
continue
else:
wr_data = wr_status_dict[str(node)]
timestamp = wr_data.pop('timestamp')
node_time = Time(timestamp, format='datetime', scale='utc')
col_dict = {'node_time': node_time, 'node': node}
for key, value in wr_key_dict.items():
# key is column name, value is related key into wr_data
wr_data_value = wr_data[value]
if isinstance(wr_data_value, float) and
|
np.isnan(wr_data_value)
|
numpy.isnan
|
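A minimal, self-contained sketch (illustrative only, not from the row above) of the guard pattern the completion relies on: np.isnan accepts only numeric input, so the isinstance check keeps string or None sensor values from raising a TypeError.
import numpy as np

def clean_reading(value):
    # Hypothetical helper: map NaN float readings to None, pass everything else through.
    if isinstance(value, float) and np.isnan(value):
        return None
    return value

assert clean_reading(float("nan")) is None
assert clean_reading(3.5) == 3.5
assert clean_reading("WRC_SLAVE_WR0") == "WRC_SLAVE_WR0"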
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from copy import deepcopy
import torch
from torch.nn import Linear, MSELoss, CrossEntropyLoss, LogSoftmax, NLLLoss, functional as F
from torch.optim import SGD, Adam, RMSprop
from sklearn.model_selection import KFold
df = pd.read_excel("Supplementary file 2-2. Clinical data.xlsx", 'Sheet1')
df_train = df.iloc[:, 2:7]
df_train = pd.get_dummies(df_train)
label_columns = ['Histology_' + i for i in ['Normal', 'Inflammation', 'LGIN', 'HGIN', 'SM1', 'MM', 'SM2 or deeper']]
labels = df_train.loc[:, label_columns]
labels_gt = np.argmax(np.array(labels), 1)
data = df_train.drop(label_columns, axis=1)
data = np.array(data)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.20, random_state=10000, shuffle=True)
df_IPCLsNet = pd.read_excel(p + "\\Clinical_results.xlsx", 'Sheet1')
df_IPCLsNet = df_IPCLsNet.iloc[:, 2:6]
df_IPCLsNet = np.array(df_IPCLsNet)
# df_IPCLsNet = torch.from_numpy(df_IPCLsNet).to(device).float()
# df_IPCLsNet = df_IPCLsNet / torch.sum(df_IPCLsNet, 1, keepdim=True)
def random_shuffle(data, df_IPCLsNet, label):
randnum = np.random.randint(0, 1200)
np.random.seed(randnum)
np.random.shuffle(data)
np.random.seed(randnum)
np.random.shuffle(df_IPCLsNet)
np.random.seed(randnum)
np.random.shuffle(label)
return data, df_IPCLsNet, label
def cagegrary_normalize(x_train, y_train):
x_train = torch.from_numpy(x_train).to(device).float()
x_train = x_train / torch.sum(x_train, 1, keepdim=True)
y_train = torch.from_numpy(y_train).to(device).long()
return x_train, y_train
# test the model
def eval_model(model, x_test, y_test):
model.eval()
reg_accuracy = torch.mean((torch.argmax(model(x_test), 1) == y_test) * 1.0)
print('Accuracy: {}'.format(reg_accuracy))
return torch.mean(reg_accuracy)
# define the model
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = Linear(4, 64)
self.fc2 = Linear(64, 28)
self.fc3 = Linear(28, 28)
self.fc4 = Linear(28, 7)
def forward(self, x):
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
x = self.fc4(x)
return x
def data_augumentation(x_train, y_train):
for i in torch.unique(y_train):
id = i == y_train
A = torch.rand((torch.sum(id), torch.sum(id)), device=device)
A = A / torch.sum(A, 1, keepdim=True)
x_train[id] = torch.mm(A, x_train[id])
return x_train, y_train
# define the number of epochs and the data set size
nb_epochs = 1000
step = 25
data_size = 1000
def train_model(foldi, x_train, x_test, y_train, y_test, rec):
model = Net()
model = model.to(device)
print('# generator parameters:', sum(param.numel() for param in model.parameters()))
# define the loss function
critereon = CrossEntropyLoss()
# define the optimizer
optimizer = Adam(model.parameters(), lr=0.0003, weight_decay=0.01)
accuracy_best = 0
for i, epoch in enumerate(range(nb_epochs)):
# break
model.train()
epoch_loss = 0
for ix in range(x_train.shape[0]):
N = 64
idx = np.random.randint(0, x_train.shape[0], N * 2)
x = x_train[idx[:N], :]
y = y_train[idx[:N]]
x, y = data_augumentation(x, y)
y_pred = model(x)
loss = critereon(y_pred, y)
epoch_loss = loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Epoch: {} Loss: {}".format(epoch, epoch_loss))
if epoch % step == 0:
train_loss = critereon(model(x_train), y_train)
rec['train_loss'].append(train_loss.detach().cpu().numpy().item())
test_loss = critereon(model(x_test), y_test)
rec['test_loss'].append(test_loss.detach().cpu().numpy().item())
eval_model(model, x_train, y_train)
accuracy = eval_model(model, x_test, y_test)
train_accuracy = eval_model(model, x_train, y_train)
rec['train_accuracy'].append(train_accuracy.detach().cpu().numpy().item())
test_accuracy = eval_model(model, x_test, y_test)
rec['test_accuracy'].append(test_accuracy.detach().cpu().numpy().item())
if accuracy_best < accuracy:
best_val_model = deepcopy(model.state_dict())
torch.save(model, 'trained_models/NetFCN_Fold{}.pt'.format(foldi))
accuracy_best = accuracy
model.load_state_dict(best_val_model)
return model
data, df_IPCLsNet, labels = random_shuffle(data, df_IPCLsNet, labels_gt)
data, _ = cagegrary_normalize(data, labels)
df_IPCLsNet, labels = cagegrary_normalize(df_IPCLsNet, labels)
kf = KFold(n_splits=5)
fold_accuracy = []
IPCLsNet_accuracy = []
rec = {'train_step': [], 'train_loss': [], 'test_loss': [], 'train_accuracy': [], 'test_accuracy': []}
for i, (train, test) in enumerate(kf.split(X=data, y=labels)):
print("fold: %d/10" % i)
x_train, x_test = data[train, :], data[test, :]
y_train, y_test = labels[train], labels[test]
IPCLsNet_test = df_IPCLsNet[test, :]
# x_train, x_test, y_train, y_test = torch_from_numpy(x_train, x_test, y_train, y_test)
model = train_model(i, x_train, x_test, y_train, y_test, rec)
rec['train_step'].extend(np.arange(nb_epochs // step))
fold_accuracy.append(eval_model(model, x_test, y_test).item())
IPCLsNet_accuracy.append(eval_model(model, IPCLsNet_test, y_test).item())
|
np.save('rec.npy', rec)
|
numpy.save
|
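A hedged usage sketch (dummy values, not the snippet's data): np.save pickles a plain Python dict such as rec, and reading it back needs allow_pickle=True plus .item() to recover the dict object.
import numpy as np

rec = {'train_loss': [1.9, 1.2], 'test_loss': [2.0, 1.4],
       'train_accuracy': [0.4, 0.7], 'test_accuracy': [0.35, 0.6]}
np.save('rec.npy', rec)                      # stored as a 0-d object array
loaded = np.load('rec.npy', allow_pickle=True).item()
assert loaded['train_accuracy'] == [0.4, 0.7]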
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes to represent the density of states, etc.
"""
from __future__ import annotations
import functools
import warnings
import numpy as np
from monty.json import MSONable
from scipy.constants import value as _cd
from scipy.signal import hilbert
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.spectrum import Spectrum
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Orbital, OrbitalType, Spin
from pymatgen.util.coord import get_linear_interpolated_value
from pymatgen.util.typing import ArrayLike, SpeciesLike
class DOS(Spectrum):
"""
Replacement basic DOS object. All other DOS objects are extended versions
of this object. Work in progress.
.. attribute: energies
The sequence of energies
.. attribute: densities
A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]}
.. attribute: efermi
Fermi level
"""
XLABEL = "Energy"
YLABEL = "Density"
def __init__(self, energies: ArrayLike, densities: ArrayLike, efermi: float):
"""
Args:
energies: A sequence of energies
densities (ndarray): Either a Nx1 or a Nx2 array. If former, it is
interpreted as a Spin.up only density. Otherwise, the first column
is interpreted as Spin.up and the other is Spin.down.
efermi: Fermi level energy.
"""
super().__init__(energies, densities, efermi)
self.efermi = efermi
def get_interpolated_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
Expects a DOS object and finds the gap
Args:
tol: tolerance in occupations for determining the gap
abs_tol: Set to True for an absolute tolerance and False for a
relative one.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
(gap, cbm, vbm):
Tuple of floats in eV corresponding to the gap, cbm and vbm.
"""
if spin is None:
tdos = self.y if len(self.ydim) == 1 else
|
np.sum(self.y, axis=1)
|
numpy.sum
|
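An illustrative sketch with assumed array shapes (not pymatgen's actual internals): when the density array carries one column per spin channel, np.sum(..., axis=1) collapses the spin axis into the total DOS used by the gap search.
import numpy as np

densities = np.array([[0.0, 0.1],   # columns: Spin.up, Spin.down at each energy
                      [0.3, 0.2],
                      [0.0, 0.0]])
tdos = densities if len(densities.shape) == 1 else np.sum(densities, axis=1)
print(tdos)  # [0.1 0.5 0. ]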
"""
A minimal interface to ecCodes based on CFFI
"""
import cffi
import numpy as np
import xarray
import struct
import threading
import platform
import logging
import os
# initialize the interface to the C-Library
ffi = cffi.FFI()
# type definition. No need to specify internals of the structs. just name them
ffi.cdef("typedef struct codes_handle codes_handle;")
ffi.cdef("typedef struct codes_context codes_context;")
ffi.cdef("typedef struct codes_keys_iterator codes_keys_iterator;")
# definition of the used functions
ffi.cdef("long codes_get_api_version (void);")
ffi.cdef("codes_handle* codes_handle_new_from_message(codes_context* c, const void* data, size_t data_len);")
ffi.cdef("int codes_handle_delete(codes_handle* h);")
ffi.cdef("int codes_get_long(codes_handle* h, const char* key, long* value);")
ffi.cdef("int codes_get_double(codes_handle* h, const char* key, double* value);")
ffi.cdef("int codes_get_string(codes_handle* h, const char* key, char* mesg, size_t* length);")
ffi.cdef("int codes_get_size(codes_handle* h, const char* key, size_t* size);")
ffi.cdef("int codes_get_long_array(codes_handle* h, const char* key, long* vals, size_t* length);")
ffi.cdef("int codes_get_double_array(codes_handle* h, const char* key, double* vals, size_t* length);")
ffi.cdef("int grib_get_native_type(codes_handle* h, const char* name, int* type);")
# functions for key-iterators
ffi.cdef("codes_keys_iterator* codes_keys_iterator_new(codes_handle *h, unsigned long filter_flags, const char* name_space);")
ffi.cdef("int codes_keys_iterator_next (codes_keys_iterator *kiter);")
ffi.cdef("const char* codes_keys_iterator_get_name(codes_keys_iterator *kiter);")
ffi.cdef("int codes_keys_iterator_delete(codes_keys_iterator *kiter);")
def __find_lib_ld_library_path(name):
"""
find a library on a Linux system using LD_LIBRARY_PATH, if it is defined.
Parameters
----------
name: str
name of the library, e.g. libeccodes
Returns
-------
str:
absolute path of the library if found. If not found, the name is returned
"""
LD_LIBRARY_PATH = os.getenv("LD_LIBRARY_PATH")
if LD_LIBRARY_PATH is not None:
components = LD_LIBRARY_PATH.split(":")
for one_component in components:
lib_path = os.path.join(one_component, name)
if os.path.exists(lib_path + ".so"):
return lib_path
return name
# load the actual c-library
if platform.system() == "Linux":
__libext = "so"
__libname = __find_lib_ld_library_path("libeccodes")
elif platform.system() == "Darwin":
__libext = "dylib"
__libname = "libeccodes"
elif platform.system() == "Windows":
__libext = "dll"
__libname = "eccodes"
else:
raise OSError("Unknown platform: %s" % platform.system())
try:
_eccodes = ffi.dlopen("{name}.{ext}".format(name=__libname, ext=__libext))
except OSError:
logging.warning("eccodes c-library not found, grib file support not available!")
# Constants for 'missing'
CODES_MISSING_DOUBLE = -1e+100
CODES_MISSING_LONG = 2147483647
# list of staggered variables in U-direction
# FIXME: this is COSMO-specific and related to issue #39
staggered_u = ["u", "aumfl_s"] #, "u_10m", "umfl_s"]
staggered_v = ["v", "avmfl_s"] #, "v_10m", "vmfl_s"]
# standard keys required by read_grib_file
standard_keys = ['bitsPerValue',
'cfName',
'cfVarName',
'dataDate',
'dataTime',
'discipline',
'editionNumber',
'gridDefinitionDescription',
'gridType',
'iDirectionIncrementInDegrees',
'indicatorOfParameter',
'jDirectionIncrementInDegrees',
'latitudeOfFirstGridPointInDegrees',
'latitudeOfLastGridPointInDegrees',
'latitudeOfSouthernPoleInDegrees',
'level',
'localActualNumberOfEnsembleNumber',
'longitudeOfFirstGridPointInDegrees',
'longitudeOfLastGridPointInDegrees',
'longitudeOfSouthernPoleInDegrees',
'missingValue',
'Ni',
'Nj',
'numberOfDataPoints',
'parameterCategory',
'parameterName',
'parameterNumber',
'parameterUnits',
'perturbationNumber',
'scaledValueOfFirstFixedSurface',
'scaledValueOfSecondFixedSurface',
'shortName',
'table2Version',
'typeOfLevel',
'validityDate',
'validityTime']
# allow only one read per time
read_msg_lock = threading.Lock()
# A representation of one grib message
class GribMessage():
def __init__(self, file, offset=0, read_data=False):
"""
create a message from the data buffer object
Parameters
----------
file : file-object
a file object which points already to the beginning of the message
offset : int
position of the file where the message starts
read_data : bool
False: read only the header of the message.
"""
# cache for all read operations of keys
self.cache = {}
self.has_data = read_data
# read the content of the message
self.buffer = _read_message_raw_data(file, offset, read_data=read_data)
# was there a message?
if self.buffer is None:
self.handle = ffi.NULL
return
# decode the message
with read_msg_lock:
# read the message itself
self.handle = _eccodes.codes_handle_new_from_message(ffi.NULL, ffi.from_buffer(self.buffer), len(self.buffer))
# pre-read common keys and don't care for errors
for one_key in standard_keys:
try:
self.__getitem__(one_key, use_lock=False)
except:
pass
# pre-read the values also if we read the data in memory
if read_data:
self.__getitem__("values", use_lock=False)
# was the reading successful?
if self.handle == ffi.NULL:
raise ValueError("unable to read grib message from buffer!")
def __getitem__(self, item, use_lock=True):
if item in self.cache:
result = self.cache[item]
if result is None:
raise KeyError("key '%s' not found in grib message!" % item)
return result
else:
try:
# lock if we do not yet have a lock from a calling function
if use_lock:
read_msg_lock.acquire()
# read the key
ckey = _cstr(item)
nelements = self.__codes_get_size(ckey)
if nelements > 1:
value = self.__codes_get_array(ckey, nelements)
else:
value = self.__codes_get(ckey)
self.cache[item] = value
except ValueError:
# store the error
self.cache[item] = None
# nothing found? Any error is interpreted as Key not found.
raise KeyError("key '%s' not found in grib message!" % item)
finally:
# unlock if locked
if use_lock:
read_msg_lock.release()
return value
def __contains__(self, item):
# is the value already in cache?
if item in self.cache:
if self.cache[item] is None:
return False
return True
else:
# The value is not cached, try to read it from the grib message
try:
self.__getitem__(item)
except KeyError:
return False
return True
def keys(self):
"""
returns all GRIB keys of this GRIB message
Returns
-------
list :
list of strings with the names of the keys
"""
result = []
with read_msg_lock:
# 128 is the value of the C-constant GRIB_KEYS_ITERATOR_DUMP_ONLY and reduces the set of keys to those
# really available
kiter = _eccodes.codes_keys_iterator_new(self.handle, 128, ffi.NULL)
while _eccodes.codes_keys_iterator_next(kiter) == 1:
result.append(ffi.string(_eccodes.codes_keys_iterator_get_name(kiter)).decode("utf-8"))
return result
def is_valid(self):
"""
returns true if the content of a message was readable
"""
return self.buffer is not None
def get_name(self, prefer_cf=True):
"""
find a name for this variable.
Parameters
----------
prefer_cf : bool
if True, the search order for the name is "cfName", "cfVarName", "shortName", otherwise it is
"shortName", "cfName", "cfVarName".
Returns
-------
string
name of the variable.
"""
if prefer_cf:
name_keys = ["cfName", "cfVarName", "shortName"]
else:
name_keys = ["shortName", "cfName", "cfVarName"]
for key in name_keys:
result = self.__getitem__(key)
if result != "unknown":
break
return result
def get_dimension(self, dimensions=None, dimension_names=None):
"""
get the shape of one message depending on the grid type
Returns
-------
tuple
(shape, dim-names)
"""
if self["gridType"] == "rotated_ll":
shape = (self["Nj"], self["Ni"])
# the dimension names differ for staggered variables like u and v
var_name = self["shortName"].lower()
if var_name in staggered_u and self["typeOfLevel"] not in ["heightAboveSea", "isobaricInhPa"]:
#breakpoint()
dim_names = ["rlat", "srlon"]
elif var_name in staggered_v and self["typeOfLevel"] not in ["heightAboveSea", "isobaricInhPa"]:
dim_names = ["srlat", "rlon"]
else:
dim_names = ["rlat", "rlon"]
elif self["gridType"] == "regular_ll":
shape = (self["Nj"], self["Ni"])
dim_names = ["lat", "lon"]
elif self["gridType"] in ["sh", "reduced_gg", "unstructured_grid"]:
shape = (self["numberOfDataPoints"],)
dim_names = ["cell"]
else:
raise ValueError("don't know how to calculate the shape for grid type %s" % self["gridType"])
# loop over all already used dims for comparison
if dimensions is not None and dimension_names is not None:
for one_var in dimensions.keys():
if dimension_names[one_var] == dim_names and dimensions[one_var] != shape:
for id, dn in enumerate(dim_names):
dim_names[id] = "%s%d" % (dn, 2)
return shape, dim_names
def get_coordinates(self, dimension_names):
"""
get the longitude and latitude coordinates for one message
Returns
-------
tuple:
((lon-dim-names, lon-coord), (lat-dim-names), lat-coord)
"""
# are coordinates available?
if "longitudes" not in self or "latitudes" not in self:
return None, None
if self["gridType"] == "rotated_ll":
lon = (dimension_names, np.array(self["longitudes"].reshape(self["Nj"], self["Ni"]), dtype=np.float32))
lat = (dimension_names, np.array(self["latitudes"].reshape(self["Nj"], self["Ni"]), dtype=np.float32))
elif self["gridType"] in ["sh", "reduced_gg", "unstructured_grid"]:
lon = (dimension_names[0], np.array(self["longitudes"], dtype=np.float32))
lat = (dimension_names[0], np.array(self["latitudes"], dtype=np.float32))
elif self["gridType"] == "regular_ll":
lon = (dimension_names[1], np.array(self["longitudes"].reshape(self["Nj"], self["Ni"])[0, :], dtype=np.float32))
lat = (dimension_names[0], np.array(self["latitudes"].reshape(self["Nj"], self["Ni"])[:, 0], dtype=np.float32))
else:
lon = (dimension_names[1], np.array(self["longitudes"], dtype=np.float32))
lat = (dimension_names[0], np.array(self["latitudes"], dtype=np.float32))
return lon, lat
def get_rotated_ll_info(self, dim_names):
"""
get the rotated pole and the rotated lon/lat coordinates
Parameters
----------
dim_names : list
names of the rlat and rlon dimensions
Returns
-------
"""
if self["gridType"] != "rotated_ll":
raise ValueError("The gridType '%s' has not rotated pole!" % self["gridType"])
rotated_pole_name = "rotated_pole"
if not dim_names[0].endswith("t"):
rotated_pole_name += dim_names[0][-1]
# create rotated pole description
rotated_pole = xarray.DataArray(np.zeros(1, dtype=np.int8), dims=(rotated_pole_name,))
rotated_pole.attrs["grid_mapping_name"] = "rotated_latitude_longitude"
rotated_pole.attrs["grid_north_pole_latitude"] = self["latitudeOfSouthernPoleInDegrees"] * -1
rotated_pole.attrs["grid_north_pole_longitude"] = self["longitudeOfSouthernPoleInDegrees"] - 180
# create rotated coordinate arrays
# perform calculations on large integers to avoid rounding errors
factor = 10 ** 10
first_lon = int(self["longitudeOfFirstGridPointInDegrees"] * factor)
last_lon = int(self["longitudeOfLastGridPointInDegrees"] * factor)
first_lat = int(self["latitudeOfFirstGridPointInDegrees"] * factor)
last_lat = int(self["latitudeOfLastGridPointInDegrees"] * factor)
if last_lon < first_lon and first_lon > 180 * factor:
first_lon -= 360 * factor
# using linspace instead of array and the stored increment to ensure the correct number of values.
rlon_int = np.linspace(first_lon, last_lon, self["Ni"], dtype=np.int64)
rlon = xarray.DataArray(np.asarray(rlon_int / factor, dtype=np.float32), dims=(dim_names[-1],))
rlon.attrs["long_name"] = "longitude in rotated pole grid"
rlon.attrs["units"] = "degrees"
rlon.attrs["standard_name"] = "grid_longitude"
rlat_int = np.linspace(first_lat, last_lat, self["Nj"], dtype=np.int64)
rlat = xarray.DataArray(np.asarray(rlat_int / factor, dtype=np.float32), dims=(dim_names[-2],))
rlat.attrs["long_name"] = "latitude in rotated pole grid"
rlat.attrs["units"] = "degrees"
rlat.attrs["standard_name"] = "grid_latitude"
return rotated_pole_name, rotated_pole, rlat, rlon
def get_level(self):
"""
gets the center value of the level coordinate, or if available first and second layer
"""
if self["typeOfLevel"] in ["generalVerticalLayer", "isobaricInhPa"]:
return self["level"]
if not "scaledValueOfFirstFixedSurface" in self or not "scaledValueOfSecondFixedSurface" in self:
return self["level"]
first_surface = self["scaledValueOfFirstFixedSurface"]
second_surface = self["scaledValueOfSecondFixedSurface"]
first_missing = first_surface == CODES_MISSING_LONG or first_surface == CODES_MISSING_DOUBLE
second_missing = second_surface == CODES_MISSING_LONG or second_surface == CODES_MISSING_DOUBLE
if first_missing and not second_missing:
return second_surface
elif not first_missing and second_missing:
return first_surface
elif first_missing and second_missing:
return self["level"]
else:
return first_surface, second_surface
def get_values(self, shape=None, dtype=None, missing=None):
"""
read the encoded values from the message
Parameters
----------
dtype : np.dtype
values are returned in an array of the specified type
missing : float
value used within the grib message the mark missing values. The returned array will contain NaN at this
locations.
Returns
-------
np.ndarray
"""
# do we have data in this message?
if not self.has_data:
raise ValueError("this message was created from the header only. No data is available!")
values = self["values"]
if shape is not None:
values = values.reshape(shape)
if dtype is not None and dtype != np.float64:
values = np.array(values, dtype=dtype)
# replace fill values with nan
values = np.where(values == missing, np.nan, values)
return values
def __grib_get_native_type(self, key):
"""
Get the native type of a specific grib key
"""
itype = ffi.new("int[1]")
err = _eccodes.grib_get_native_type(self.handle, key, itype)
if err != 0:
raise ValueError("unable to get type of key '%s'" % ffi.string(key))
if itype[0] == 1:
return int
elif itype[0] == 2:
return float
else:
return str
def __codes_get_size(self, key):
"""
get the number of elements for a given key
Parameters
----------
key : cstr
name of the key
Returns
-------
int :
number of elements
"""
size = ffi.new("size_t[1]")
err = _eccodes.codes_get_size(self.handle, key, size)
if err != 0:
raise ValueError("unable to get number of elements for key '%s'" % ffi.string(key))
return size[0]
def __codes_get(self, key):
"""
get the value of a non-array key
Parameters
----------
key : cstr
name of the key
Returns
-------
int or float or str
"""
key_type = self.__grib_get_native_type(key)
if key_type == int:
value_ptr = ffi.new("long[1]")
err = _eccodes.codes_get_long(self.handle, key, value_ptr)
value = value_ptr[0]
elif key_type == float:
value_ptr = ffi.new("double[1]")
err = _eccodes.codes_get_double(self.handle, key, value_ptr)
value = value_ptr[0]
else:
value_buffer = np.zeros(1024, dtype=np.uint8)
value_buffer_length = ffi.new("size_t[1]", init=[1024])
err = _eccodes.codes_get_string(self.handle, key, ffi.from_buffer(value_buffer), value_buffer_length)
if value_buffer_length[0] == 1024:
value_buffer_length[0] = np.where(value_buffer == 0)[0][0]
value = value_buffer[:value_buffer_length[0]-1].tobytes().decode("utf-8")
if err != 0:
raise ValueError("unable to get value for key '%s'" % ffi.string(key))
return value
def __codes_get_array(self, key, nelements):
"""
Get a values for a key with multiple values
Parameters
----------
key : cstr
name of the key
nelements : int
size the array to retrieve
Returns
-------
np.ndarray
"""
key_type = self.__grib_get_native_type(key)
length = ffi.new("size_t[1]")
length[0] = nelements
if key_type == int:
values = np.empty(nelements, dtype=np.int64)
err = _eccodes.codes_get_long_array(self.handle, key, ffi.cast("long*", ffi.from_buffer(values)), length)
elif key_type == float:
values = np.empty(nelements, dtype=np.float64)
err = _eccodes.codes_get_double_array(self.handle, key, ffi.cast("double*", ffi.from_buffer(values)), length)
else:
raise ValueError("string arrays are not yet supported!")
if err != 0:
raise ValueError("unable to get value for key '%s'" % ffi.string(key))
return values
def __del__(self):
"""
free up the memory
"""
with read_msg_lock:
if self.handle != ffi.NULL:
err = _eccodes.codes_handle_delete(self.handle)
self.handle = ffi.NULL
if err != 0:
raise ValueError("unable to free memory of grib message!")
def _cstr(pstr):
"""
convert a python string object into a c string object (copy).
Parameters
----------
pstr : str
python string
Returns
-------
const char*
"""
buffer = np.frombuffer(pstr.encode("utf8") + b"\x00", dtype=np.uint8)
result = ffi.from_buffer(buffer)
return result
def _read_message_raw_data(infile, offset, read_data=False):
"""
Read the header of a grib message and return a byte array with the length of the full message, but without
the actual data
Parameters
----------
infile
Returns
-------
"""
# find the start word GRIB. Allow up to 1k junk in front of the actual message
infile.seek(offset)
start = infile.read(1024)
istart = start.find(b"GRIB")
if istart == -1:
return None
offset += istart
# find at first the grib edition to account for different formats
infile.seek(offset + 7)
edition = struct.unpack(">B", infile.read(1))[0]
# get the length of the total message
if edition == 1:
# read the first section
infile.seek(offset)
section0 = infile.read(8)
length_total = struct.unpack(">I", b'\x00' + section0[4:7])[0]
# check if the length is correct, the message is supposed to end with 7777
# this is a workaround, apparently, the length of grib1 messages is sometimes wrong.
infile.seek(offset + length_total - 4)
section5 = infile.read(4)
if section5 != b"7777":
# the maximal length of a grib1 message is 16MB. Read this amount of data and search for the end
infile.seek(offset)
maxdata = infile.read(16777216)
endpos = maxdata.find(b"7777")
if endpos == -1:
return None
else:
length_total = endpos + 4
read_data = True
infile.seek(offset + 8)
# create a numpy array with the total size of the message
bytes =
|
np.zeros(length_total, dtype=np.uint8)
|
numpy.zeros
|
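A simplified, hypothetical sketch of the buffering step (BytesIO stands in for a real GRIB file): np.zeros preallocates a uint8 buffer for the whole message, and the header bytes already read are copied into its front.
import io
import numpy as np

infile = io.BytesIO(b"GRIB" + bytes(28))   # fake 32-byte "message"
length_total = 32
raw = np.zeros(length_total, dtype=np.uint8)
infile.seek(0)
header = infile.read(8)
raw[:len(header)] = np.frombuffer(header, dtype=np.uint8)
assert raw[:4].tobytes() == b"GRIB"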
import vnmrjpy as vj
import numpy as np
from math import sin, cos
def make_cubic_affine(data):
"""Make Nifti affine assuming imaging volume is a cube
Args:
data (np.ndarray) -- 3D or 4D input data to make affine for
Return:
affine (np.ndarray) -- 4x4 or 3x3? diagonal matrix
Useful for easy viewing, but don't use it for serious work.
"""
x = 1/data.shape[0]
y = 1/data.shape[1]
z = 1/data.shape[2]
dim_arr = np.array([x,y,z,1])
affine = np.eye(4)*dim_arr
return affine
def make_rat_anatomical_affine(procpar):
"""Make rat_anatomical affine for Nifti header.
Almost the same as make_scanner_affine
Args:
data (np.ndarray) -- input data, 4D or 5D
procpar
Return:
affine (np.ndarray) -- 4x4 diagonal matrix
"""
p = vj.io.ProcparReader(procpar).read()
#arr = dimensions of (phase, read, slice/phase2)
arr = vj.util.get_local_pixdim(p)
# params to transform to scanner from local
swaparr, flipaxis, sliceaxis = get_swap_array(p['orient'])
sortedarr = sorted(zip(swaparr,arr))
arr = [sortedarr[i][1] for i in range(3)]
# params to transform from scanner to rat_anat
swaparr2rat = [0,2,1]
sortedarr2 = sorted(zip(swaparr2rat,arr))
arr = [sortedarr2[i][1] for i in range(3)]
arr = arr + [1]
#TODO make translations
affine = np.array(np.eye(4,dtype=float)*arr,dtype=float)
return affine
def make_scanner_affine(procpar):
"""Make appropriate scanner affine for Nifti header.
This is needed for viewing and saving nifti in scanner space
Probably safer to do it from scratch than transforming...
Translations are accounted for. Gaps are added to thickness
Args:
data (np.ndarray) -- input data, 4D or 5D
procpar
Return:
affine (np.ndarray) -- 4x4 diagonal matrix
"""
p = vj.io.ProcparReader(procpar).read()
orient = p['orient']
arr = get_local_pixdim(p)
swaparr, flipaxis, sliceaxis = get_swap_array(p['orient'])
sortedarr = sorted(zip(swaparr,arr))
arr = [sortedarr[i][1] for i in range(3)]+[1.0]
#TODO make translations
affine = np.array(np.eye(4,dtype=float)*arr,dtype=float)
return affine
def scanner_to_rat_anatomical(data):
"""Transform from scanner to rat anatomical
"""
datadims = len(data.shape)
olddims = [0,1,2] # scanner [x,y,z]
newdims = [0,2,1]
if datadims == 4:
olddims = olddims+[3]
newdims = newdims+[3]
if datadims == 5:
olddims = [0,1,2,3,4]
newdims = [0,1,3,2,4]
# change axis
if datadims == 6: # [x,y,z,time,rcvrs,etc]
olddims = [0,1,2,3,4,5]
newdims = [0,2,1,3,4,5]
data = np.moveaxis(data,olddims, newdims)
return data
def to_rat_anatomical_space(indata, procpar, flip_gradients=True):
"""Transform data to rat_anatomical coordinates, similar to scanner space.
Args:
indata
procpar
flip_gradients
Return:
data -- data with swapped axes
"""
data = to_scanner_space(indata,procpar,flip_gradients)
return scanner_to_rat_anatomical(data)
def to_scanner_space(indata, procpar, flip_gradients=True):
"""Transform data to scanner coordinate space by properly swapping axes
Standard vnmrj orientation - meaning rotations are 0,0,0 - is axial, with
x,y,z axes (as global gradient axes) corresponding to phase, readout, slice.
vnmrjpy defaults to handling numpy arrays of:
(receivers, phase, readout, slice/phase2, time*echo)
but arrays of
(receivers, x,y,z, time*echo)
is also desirable in some cases (for example registration in FSL flirt)
Euler angles of rotations are psi, phi, theta,
Also corrects reversed X gradient and sliceorder
Args:
data (3,4, or 5D np ndarray) -- input data to transform
procpar (path/to/file) -- Varian procpar file of data
Return:
swapped_data (np.ndarray)
"""
def _orthotransform(data,p):
"""Simple swaping of axes in case of orthogonal scanner-image axes
This was done to avoid thinking. Would benefit from rewriting.
"""
orient = p['orient']
sliceorder = int(p['sliceorder'])
dims = len(data.shape)
newarr, flipaxis, sliceaxis = get_swap_array(orient)
#flipping slices before axis transform
if '2D' in p['apptype'] or '2d' in p['apptype']:
# sliceorder can be 0,1,2,3 odd means interleaved,
# > 2 means reversed order
if int(p['sliceorder']) < 2:
data = flip_sliceaxis(data)
# flipping gradients before axis transform
if flip_gradients==True:
data = flip_peaxis(data)
data = flip_roaxis(data)
if '3D' in p['apptype'] or '3d' in p['apptype']:
data = flip_pe2axis(data)
# moving around axes according to orient
if dims == 3:
data = np.moveaxis(data, [0,1,2], newarr)
elif dims == 4:
newarr = newarr+[3]
data = np.moveaxis(data, [0,1,2,3], newarr)
#data = np.moveaxis(data, newarr,[0,1,2,3])
elif dims == 5:
if flipaxis != None:
flipaxis = np.array(flipaxis)+1
newarr = [i+1 for i in newarr]
newarr = [0]+newarr+[4]
data = np.moveaxis(data, [0,1,2,3,4], newarr)
elif dims == 6:
if flipaxis != None:
flipaxis = flipaxis
newarr = newarr+[3,4,5]
print(newarr)
data =
|
np.moveaxis(data, [0,1,2,3,4,5], newarr)
|
numpy.moveaxis
|
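A minimal sketch with dummy data (not vnmrjpy's real arrays) showing what the completed np.moveaxis call does: the old axis indices are mapped onto new positions in a single call, here swapping the phase and read axes of a 6-D array.
import numpy as np

data = np.zeros((2, 64, 128, 16, 5, 3))   # (rcvrs, phase, read, slice, time, etc)
newarr = [0, 2, 1, 3, 4, 5]               # swap the phase and read axes, keep the rest
out = np.moveaxis(data, [0, 1, 2, 3, 4, 5], newarr)
print(out.shape)                          # (2, 128, 64, 16, 5, 3)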
import numpy as np
import math
import random
import cv2
import torch
from assets.detectors.detector import LocalDetector
def ssc(keypoints, num_ret_points, tolerance, cols, rows):
# https://github.com/BAILOOL/ANMS-Codes
exp1 = rows + cols + 2 * num_ret_points
exp2 = (4 * cols + 4 * num_ret_points + 4 * rows * num_ret_points + rows * rows + cols * cols -
2 * rows * cols + 4 * rows * cols * num_ret_points)
exp3 = math.sqrt(exp2)
exp4 = num_ret_points - 1
sol1 = -round(float(exp1 + exp3) / exp4) # first solution
sol2 = -round(float(exp1 - exp3) / exp4) # second solution
high = sol1 if (sol1 > sol2) else sol2 # binary search range initialization with positive solution
low = math.floor(math.sqrt(len(keypoints) / num_ret_points))
prev_width = -1
selected_keypoints = []
result_list = []
result = []
complete = False
k = num_ret_points
k_min = round(k - (k * tolerance))
k_max = round(k + (k * tolerance))
while not complete:
width = low + (high - low) / 2
if width == prev_width or low > high: # needed to ensure the same radius is not repeated again
result_list = result # return the keypoints from the previous iteration
break
c = width / 2 # initializing Grid
num_cell_cols = int(math.floor(cols / c))
num_cell_rows = int(math.floor(rows / c))
covered_vec = [[False for _ in range(num_cell_cols + 1)] for _ in range(num_cell_rows + 1)]
result = []
for i in range(len(keypoints)):
'''
row = int(math.floor(keypoints[i].pt[1] / c)) # get position of the cell current point is located at
col = int(math.floor(keypoints[i].pt[0] / c))
'''
row = int(math.floor(keypoints[i][0] / c)) # get position of the cell current point is located at
col = int(math.floor(keypoints[i][1] / c))
if not covered_vec[row][col]: # if the cell is not covered
result.append(i)
# get range which current radius is covering
row_min = int((row - math.floor(width / c)) if ((row - math.floor(width / c)) >= 0) else 0)
row_max = int(
(row + math.floor(width / c)) if (
(row + math.floor(width / c)) <= num_cell_rows) else num_cell_rows)
col_min = int((col - math.floor(width / c)) if ((col - math.floor(width / c)) >= 0) else 0)
col_max = int(
(col + math.floor(width / c)) if (
(col + math.floor(width / c)) <= num_cell_cols) else num_cell_cols)
for row_to_cover in range(row_min, row_max + 1):
for col_to_cover in range(col_min, col_max + 1):
if not covered_vec[row_to_cover][col_to_cover]:
# cover cells within the square bounding box with width w
covered_vec[row_to_cover][col_to_cover] = True
if k_min <= len(result) <= k_max: # solution found
result_list = result
complete = True
elif len(result) < k_min:
high = width - 1 # update binary search range
else:
low = width + 1
prev_width = width
for i in range(len(result_list)):
selected_keypoints.append(keypoints[result_list[i]])
return selected_keypoints
class HomographyAugmenter(object):
""" Homography augmentation class """
def __init__(self, crop_hw=None,
crop_min_scale_factor=0.8,
crop_max_rotation=np.pi*(15./180),
crop_min_distort_factors_xy=(0.6, 0.6)):
self.crop_height, self.crop_width = crop_hw
self.crop_min_scale_factor = crop_min_scale_factor
self.crop_max_rotation = crop_max_rotation
self.crop_min_distort_factors_xy = crop_min_distort_factors_xy
self.crop_points = np.float32(
[
[0, 0],
[self.crop_width-1, 0],
[0, self.crop_height-1],
[self.crop_width-1, self.crop_height-1]
])
self.crop_corners = np.array([
[-(self.crop_width-1)/2, -(self.crop_height-1)/2],
[(self.crop_width-1)/2, -(self.crop_height-1)/2],
[-(self.crop_width-1)/2, (self.crop_height-1)/2],
[(self.crop_width-1)/2, (self.crop_height-1)/2]
]).astype(np.float32)
def get_min_image_hw(self):
""" Minimum height and width of an image for cropping """
# assumes 45 degree rotation
crop_max_diameter = np.sqrt(self.crop_width ** 2 + self.crop_height ** 2)
crop_max_diameter += 1
# TODO incorporate crop_max_rotation
crop_max_diameter = int(crop_max_diameter+0.5)
return crop_max_diameter, crop_max_diameter
def get_random_homography(self, image_hw, crop_center=None, jitter=None):
""" Generate random homography transformation """
image_height, image_width = image_hw
if jitter is None:
scale_jitter = torch.rand(1).numpy().astype(np.float32)[0]
center_jitter = torch.rand(2).numpy().astype(np.float32)
perspective_jitter = torch.rand(2).numpy().astype(np.float32)
rotation_jitter = torch.rand(1).numpy().astype(np.float32)[0]
else:
scale_jitter = jitter[0]
center_jitter = jitter[1:3]
perspective_jitter = jitter[3:5]
rotation_jitter = jitter[5]
# decide on scale of the crop. We can only zoom into the crop
crop_zoom_factor = self.crop_min_scale_factor * scale_jitter + 1 * (1 - scale_jitter)
# decide if distorting horizontal or vertical sides, i.e.
# sides parallel to x-axis or sides parallel to y-axis
# horver = 0 means distorting top and bottom sides
# horver = 1 means distorting left and right sides
horver = np.int32(torch.rand(1).numpy()[0] < 0.5)
# depending on horver and scale_factor, compute the maximum radius of the crop
crop_max_radius = 0.5 * np.sqrt(
(crop_zoom_factor*self.crop_width)**2 + (crop_zoom_factor*self.crop_height)**2)
if crop_center is None:
# decide on crop center
crop_center = crop_max_radius + center_jitter * (
np.array([image_width, image_height])-2*crop_max_radius)
else:
def rnd_sign():
return 1 if random.random() < .5 else -1
# apply a 10% jitter in pixels to crop center
crop_center += 0.1 * np.array([self.crop_width * rnd_sign(),
self.crop_height * rnd_sign()])
# decide on scale of the crop's left/top and right/bottom side
crop_distort_factors = self.crop_min_distort_factors_xy[horver] * perspective_jitter + \
1 * (1-perspective_jitter)
# decide on crop rotation
rotation_jitter = 2*rotation_jitter - 1
rotation_jitter *= self.crop_max_rotation
cosa = np.cos(rotation_jitter)
sina = np.sin(rotation_jitter)
# zoom into crop
scaled_crop_corners = self.crop_corners.copy() * crop_zoom_factor
# perspective distort
if horver == 0:
# distort in x-axis
scaled_crop_corners[0, 0] *= crop_distort_factors[0]
scaled_crop_corners[1, 0] *= crop_distort_factors[0]
scaled_crop_corners[2, 0] *= crop_distort_factors[1]
scaled_crop_corners[3, 0] *= crop_distort_factors[1]
else:
# distort in y-axis
scaled_crop_corners[0, 1] *= crop_distort_factors[0]
scaled_crop_corners[2, 1] *= crop_distort_factors[0]
scaled_crop_corners[1, 1] *= crop_distort_factors[1]
scaled_crop_corners[3, 1] *= crop_distort_factors[1]
# rotate crop corners
rotated_crop_corners = scaled_crop_corners.copy()
rotated_crop_corners[:, 0] = (cosa*scaled_crop_corners[:, 0] -
sina*scaled_crop_corners[:, 1])
rotated_crop_corners[:, 1] = (sina*scaled_crop_corners[:, 0] +
cosa*scaled_crop_corners[:, 1])
# shift crop center
image_points = np.float32(crop_center + rotated_crop_corners)
# make source and destination points
cv_image_to_crop = cv2.getPerspectiveTransform(image_points, self.crop_points)
cv_crop_to_image = cv2.getPerspectiveTransform(self.crop_points, image_points)
# in pytorch crop to image
hmats_toimage =
|
np.array(cv_image_to_crop, dtype=np.float32)
|
numpy.array
|
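A small hedged sketch (synthetic matrix, not the augmenter above): cv2.getPerspectiveTransform returns float64, so wrapping the result in np.array(..., dtype=np.float32) keeps it consistent with the float32 crop corners before applying it to homogeneous points.
import numpy as np

cv_image_to_crop = np.eye(3, dtype=np.float64)                 # stand-in for the OpenCV result
cv_image_to_crop[0, 2], cv_image_to_crop[1, 2] = -10.0, -5.0   # pure translation
hmat = np.array(cv_image_to_crop, dtype=np.float32)

pt = np.array([100.0, 50.0, 1.0], dtype=np.float32)            # homogeneous image point
mapped = hmat @ pt
print(mapped[:2] / mapped[2])                                  # [90. 45.]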
# coding=utf-8
"""
<NAME>, CC3501-Tarea2c
Catmull-Rom splines using python and numpy and matplotlib
"""
import numpy as np
import matplotlib.pyplot as mpl
from mpl_toolkits.mplot3d import Axes3D
def generateT(t):
return np.array([[1, t, t**2, t**3]]).T # .T means transpose
def catmullRomMatrix(P0, P1, P2, P3):
# Generate a matrix concatenating the columns
G = np.concatenate((P0, P1, P2, P3), axis=1)
# Bezier base matrix is a constant
Mcr = np.array([[0, -1/2, 1, -1/2], [1, 0, -5/2, 3/2], [0, 1/2, 2, -3/2], [0, 0, -1/2, 1/2]])
return np.matmul(G, Mcr)
# M is the cubic curve matrix, N is the number of samples between 0 and 1
def evalCurve(M, N):
# The parameter t should move between 0 and 1
ts = np.linspace(0.0, 1.0, N)
# The computed value in R3 for each sample will be stored here
curve = np.ndarray(shape=(N, 3), dtype=float)
for i in range(len(ts)): # For each ts, generate a vector T
T = generateT(ts[i])
curve[i, 0:3] = np.matmul(M, T).T
# Remember that in this case curve is an Nx3 matrix (N points with their x,y,z coordinates)
return curve
def plotCurve(ax, curve, label, color=(0,0,1)):
# Remember that in this case curve is a 50x3 matrix (50 points with their x,y,z coordinates)
xs = curve[:, 0] # The whole first column
ys = curve[:, 1] # The whole second column
zs = curve[:, 2] # The whole third column
ax.plot(xs, ys, zs, label=label, color=color) # ax.plot(xs, ys, zs, "-or") -> line with dots, in red
# The N points that define the curve
def curvePoints(curve,N):
points = []
for i in range(N):
points.append(curve[i])
return points
# Function that takes a matrix of transposed points in R^3 and, from there,
# generates the corresponding splines and returns the N points that define that set of splines
def merchSplinesPoints(pointsList, N=1000):
largo = len(pointsList)
trayectoria = []
for k in range(largo-3):
GMcr = catmullRomMatrix(pointsList[k], pointsList[k+1], pointsList[k+2], pointsList[k+3])
catmullRomSpline = evalCurve(GMcr, N)
points = curvePoints(catmullRomSpline, N)
trayectoria += points
return trayectoria
if __name__ == "__main__":
"""
Example for Catmull-Rom splines
"""
P0 =
|
np.array([[0, 1, 0]])
|
numpy.array
|
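A hedged continuation sketch with made-up control points, mirroring the snippet's convention: each point is written as a 1x3 row and transposed to a 3x1 column so np.concatenate(..., axis=1) can build the 3x4 geometry matrix expected by catmullRomMatrix.
import numpy as np

P0 = np.array([[0, 1, 0]]).T
P1 = np.array([[1, 0, 0]]).T
P2 = np.array([[2, 0, 1]]).T
P3 = np.array([[3, 1, 1]]).T
G = np.concatenate((P0, P1, P2, P3), axis=1)   # geometry matrix of column points
print(G.shape)                                 # (3, 4)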
import numpy as np
from numpy import matlib
from trlib.policies.policy import Policy
from trlib.policies.qfunction import QFunction
class ValueBased(Policy):
"""
A value-based policy is a policy that chooses actions based on their value.
The action-space is always discrete for this kind of policy.
"""
def __init__(self,actions,Q):
self._actions = np.array(actions)
self._n_actions = len(actions)
self.Q = Q
@property
def actions(self):
return self._actions
@property
def Q(self):
return self._Q
@Q.setter
def Q(self,value):
if not isinstance(value, QFunction):
raise TypeError("The argument must be a QFunction")
self._Q = value
def __call__(self, state):
"""
Computes the policy value in the given state
Parameters
----------
state: S-dimensional vector
Returns
-------
An A-dimensional vector containing the probabilities pi(.|s)
"""
raise NotImplementedError
def _q_values(self, state):
if len(self._actions.shape) > 1:
action_vec = self._actions
else:
action_vec = self._actions[:,np.newaxis]
return self._Q.values(np.concatenate((matlib.repmat(state, self._n_actions, 1), action_vec), 1))
def _multiple_q_values(self, state):
if len(self._actions.shape) > 1:
action_vec = self._actions
else:
action_vec = self._actions[:,np.newaxis]
sa_arrays = np.concatenate([np.concatenate((matlib.repmat(s, self._n_actions, 1), action_vec), 1)
for s in state])
q_values = self._Q.values(sa_arrays)
q_values = q_values.reshape(state.shape[0], self._n_actions)
return q_values # hardcoded for discrete actions!
class EpsilonGreedy(ValueBased):
"""
The epsilon-greedy policy.
The parameter epsilon defines the probability of taking a random action.
Set epsilon to zero to have a greedy policy.
"""
def __init__(self,actions,Q,epsilon):
super().__init__(actions, Q)
self.epsilon = epsilon
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self,value):
if value < 0 or value > 1:
raise AttributeError("Epsilon must be in [0,1]")
self._epsilon = value
def __call__(self, state):
probs = np.ones(self._n_actions) * self._epsilon / self._n_actions
probs[np.argmax(self._q_values(state))] += 1 - self._epsilon
return probs
def sample_action(self, state):
if
|
np.random.uniform()
|
numpy.random.uniform
|
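A minimal hedged sketch (toy Q-values, not trlib's QFunction) of the usual epsilon-greedy body: draw u with np.random.uniform and explore when u < epsilon, otherwise exploit the argmax of the Q-values.
import numpy as np

def sample_action(q_values, actions, epsilon):
    # Hypothetical standalone version of the method cut off above.
    if np.random.uniform() < epsilon:
        return actions[np.random.randint(len(actions))]   # explore
    return actions[int(np.argmax(q_values))]              # exploit

actions = np.array([0, 1, 2])
print(sample_action(np.array([0.1, 0.9, 0.3]), actions, epsilon=0.1))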
# Python modules
# 3rd party modules
import numpy as np
import xml.etree.cElementTree as ElementTree
# Our modules
import vespa.datasim.util_datasim as util_datasim
import vespa.common.minf_parabolic_info as minf
import vespa.common.constants as common_constants
import vespa.common.mrs_experiment as mrs_experiment
import vespa.common.util.xml_ as util_xml
import vespa.common.util.generic_spectral as util_spectral
from vespa.common.constants import Deflate
from vespa.datasim.util_datasim import calc_lw
DEFAULT_MMOL_FLAGS = [False,False,False,False,False,True,False]
DEFAULT_MMOL_PPMS = [2.346,2.89,2.142,1.638,1.357,0.9,3.81]
DEFAULT_MMOL_AREAS = [0.5,0.5,1,1,1,1,6.0]
DEFAULT_MMOL_WIDTHS = [0.1575,0.1575,0.2363,0.2756,0.2756,0.3543,0.9449] # in ppm, in hz [20,20,30,35,35,45,120]
DEFAULT_MMOL_PHASES = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
DEFAULT_BASE_FLAGS = [False,False]
DEFAULT_BASE_PPMS = [4.69,1.0]
DEFAULT_BASE_AREAS = [10.0,20.0]
DEFAULT_BASE_WIDTHS = [0.3543,0.7] # damping coeff in [sec] ~ 45 Hz
DEFAULT_BASE_PHASES = [0.0,0.0]
class Datasim(object):
""" A container for simulated magnetic resonance spectroscopy data. """
XML_VERSION = "1.0.0"
def __init__(self, attributes=None):
"""
Define parameters to describe how MRS data is simulated.
"""
self.datasim_filename = ''
# Spectral parameter settings
self.dims = [2048,1,1,1]
self.frequency = 123.9 # in MHz
self.sw = 2048.0 # in Hz
self.linewidth = 3.0 # in Hz
self.resppm = 4.7 # in ppm
self.ta = 0.300 # in sec - only for Tab LW display calc, Metab Ta vals control indiv T2
self.tb = 0.105 # in sec - controls T2* globally
self.phase0 = 0.0 # in deg
self.phase1 = 0.0 # in deg
self.phase_1_pivot = 4.7 # in ppm
self.b0shift = 0.0 # in Hz
self.left_shift = 0 # in points of FID
self.zero_fill_multiplier = 1.0 # placeholder for completeness, not read or saved
self.echopeak = 0.0 # placeholder for completeness, not read or saved
self.comment = ''
# simulated metabolite signal basis settings
self.loop = [0,0,0] # selected loop indices
self.experiment = None
self.mets_flags = None
self.mets_scales = None
self.mets_decays = None
self.mets_ppm_start = self.pts2ppm(self.dims[0]-1)
self.mets_ppm_end = self.pts2ppm(0)
# macromolecule signal contributions
self.mmol_flags = np.array(DEFAULT_MMOL_FLAGS)
self.mmol_ppms = np.array(DEFAULT_MMOL_PPMS)
self.mmol_areas =
|
np.array(DEFAULT_MMOL_AREAS)
|
numpy.array
|
import time
import unittest
import gym
import numpy as np
from tqdm import tqdm
class Efficiency(unittest.TestCase):
boardsize = 9
iterations = 64
def setUp(self) -> None:
self.env = gym.make('gym_go:go-v0', size=self.boardsize, reward_method='real')
def testOrderedTrajs(self):
durs = []
for _ in tqdm(range(self.iterations)):
start = time.time()
self.env.reset()
for a in range(self.boardsize ** 2 - 2):
self.env.step(a)
end = time.time()
dur = end - start
durs.append(dur)
avg_time =
|
np.mean(durs)
|
numpy.mean
|
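A hedged sketch where time.sleep stands in for the environment rollout: per-iteration wall-clock durations collected in a list reduce cleanly with np.mean (and np.std), the same pattern the test above uses to report average trajectory time.
import time
import numpy as np

durs = []
for _ in range(5):
    start = time.time()
    time.sleep(0.01)                 # placeholder for env.reset() plus stepping
    durs.append(time.time() - start)
print(np.mean(durs), np.std(durs))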
# Scalar Quantized Matrix implementation
# This file contains an improved implementation of a column-based Scalar Quantization matrix.
# This implementation avoids the excessive computation of the previous implementation by computing all levels of quantization at once.
import numpy as np
class QMatrix:
# Construction
# Parameters:
# - func: Compute matrix-vector product with the matrix to quantize
# - m, n: Problem size
# - bit_list: Number of bits to use in each level
# - norm_tol: A stopping tolerance for the Frobenius norm
def __init__(self, func, m, n, bit_list=[4, 4], norm_tol=1e-5):
# Problem size
self.m = m
self.n = n
# Number of packs
self.p = n//8
# Number of levels
self.n_levels = 0
# List of diagonal scaling matrices
self.D_list = []
# Frobenius norm of the quantized matrix, the original matrix and the approximation
self.qmat_norm = 0
self.mat_norm = 0
self.error_norm = 0
# Bit list
self.bit_list = []
# Shift list
self.S_list = []
# List of compression matrices
self.M_list = []
# Fill initial data for the hypothetical number of levels
self.n_levels = len(bit_list)
for i in range(self.n_levels):
self.D_list.append(np.zeros(n))
self.bit_list.append(bit_list[i])
self.S_list.append( 2**(bit_list[i]-1)-1 )
self.M_list.append( np.zeros((self.m, self.p, self.bit_list[i]), dtype=np.uint8) )
# List of norms to track progress
qmat_norm_list = self.n_levels*[0]
self.error_norm_list = self.n_levels*[0]
# Canonical vector
e = np.zeros(n)
# Counter for number of columns, current pack and a matrix to compress
count_cols = 0
current_pack = 0
data_to_compress = []
for i in range(self.n_levels):
data_to_compress.append(np.zeros((self.m, 8, self.bit_list[i]), dtype=np.uint8))
# Iteration to quantize each column
for k in range(n):
# Get the k-th column
e[k] = 1
aux = func(e)
e[k] = 0
# Sum vector for norm computation
sum_aux = np.zeros(m)
# Increase columns processed
count_cols += 1
# Compute norm of original matrix
self.mat_norm += np.linalg.norm(aux)**2
# Compute vectors for each level
for i in range(self.n_levels):
current_aux = np.zeros(m, dtype=np.uint8)
# Get max value
P = np.max(np.abs(aux))
# Discard small values
if not np.allclose(P, 0):
coef = P/self.S_list[i]
current_aux = np.rint(aux/coef + self.S_list[i])
self.D_list[i][k] = coef
# Compute norms
approx_aux = self.D_list[i][k]*(current_aux - self.S_list[i])
sum_aux = sum_aux + approx_aux
qmat_norm_list[i] = qmat_norm_list[i] + np.linalg.norm(sum_aux)**2
error_col = aux - approx_aux
self.error_norm_list[i] = self.error_norm_list[i] + np.linalg.norm(error_col)**2
# Compress the data
current_aux = current_aux.reshape((current_aux.shape[0], 1)).astype(np.uint8)
# Get the binary representation
M = np.unpackbits(current_aux, axis=1)[:, -self.bit_list[i]:]
data_to_compress[i][:, count_cols-1, :] = M
# The error is the new aux to quantize
aux = error_col.copy()
# Compress once 8 columns have been accumulated
if count_cols == 8:
count_cols = 0
for i in range(self.n_levels):
self.M_list[i][:, current_pack, :] = np.packbits(data_to_compress[i], axis=1)[:,0,:]
current_pack += 1
# Evaluate the achieved norm
self.mat_norm = np.sqrt(self.mat_norm)
idx = self.n_levels
for i in range(self.n_levels):
if np.sqrt(self.error_norm_list[i])/self.mat_norm < norm_tol:
idx = i+1
break
# Keep only the number of levels needed to reach the tolerance
if idx != self.n_levels:
self.n_levels = idx
self.D_list = self.D_list[:idx]
self.bit_list = self.bit_list[:idx]
self.S_list = self.S_list[:idx]
self.M_list = self.M_list[:idx]
self.error_norm_list = np.sqrt(np.array(self.error_norm_list))
self.error_norm = self.error_norm_list[idx-1]
self.qmat_norm = np.sqrt(qmat_norm_list[idx-1])
# Computes J^T * v. Vector v size is m
def tdot(self, vector, nlayers = None):
y = np.zeros(self.n)
s = np.sum(vector)
if nlayers == None:
nlayers = self.n_levels
for i in range(nlayers):
aux = np.zeros(self.n)
for k in range(self.p):
H = np.unpackbits(self.M_list[i][:, k, :], axis=1).T
r = np.dot(H, vector)
for j in range(self.bit_list[i]):
aux[8*k:8*(k+1)] = aux[8*k:8*(k+1)] + 2**(self.bit_list[i] - 1 - j) * r[8*j:8*(j+1)]
aux = aux - s*self.S_list[i]
aux = ( self.D_list[i] * aux)
y = y + aux
return y
# Computes J*v. Vector v size is n
def dot(self, vector, nlayers = None):
y = np.zeros(self.m)
if nlayers == None:
nlayers = self.n_levels
for i in range(nlayers):
Dv = self.D_list[i]*vector
s = np.sum(Dv)
aux =
|
np.zeros(self.m)
|
numpy.zeros
|
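The header comments in the QMatrix record above describe column-wise scalar quantization with a per-column scale and a shift S = 2**(bits-1) - 1. Since the dot method in that record is cut off mid-statement, the following is a minimal, self-contained sketch of the same idea under those assumptions; quantize_columns and dequantize_columns are illustrative names, not part of the original class.
import numpy as np

def quantize_columns(A, bits=4):
    # Quantize each column of A to unsigned integer codes with a per-column scale.
    S = 2 ** (bits - 1) - 1                       # shift, analogous to S_list above
    scales = np.max(np.abs(A), axis=0) / S        # per-column scale (the diagonal D)
    scales[scales == 0] = 1.0                     # avoid division by zero for all-zero columns
    codes = np.rint(A / scales + S).astype(np.uint8)  # codes lie in [0, 2*S]
    return codes, scales, S

def dequantize_columns(codes, scales, S):
    # Reconstruct an approximation of the original matrix from the codes.
    return (codes.astype(np.float64) - S) * scales

rng = np.random.default_rng(0)
A = rng.standard_normal((16, 8))
codes, scales, S = quantize_columns(A, bits=4)
A_hat = dequantize_columns(codes, scales, S)
print(np.linalg.norm(A - A_hat) / np.linalg.norm(A))  # relative quantization error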
# -*- coding: utf-8 -*-
# @Time : 2019/3/19 20:19
# @Author : Alan
# @Email : <EMAIL>
# @File : train.py
# @Software: PyCharm
import time
import logging
import numpy as np
import tensorflow as tf
import os
import tqdm
import sys
from copy import deepcopy
stdout = sys.stdout
from data_helper import *
from model import SiameseQACNN
from model_utils import *
# Create a logger
logger = logging.getLogger('mylogger')
logger.setLevel(logging.DEBUG)
# Create a handler that writes to a log file
timestamp = str(int(time.time()))
fh = logging.FileHandler('./log/log_' + timestamp +'.txt')
fh.setLevel(logging.DEBUG)
# Define the handler's output format
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ## %(message)s')
fh.setFormatter(formatter)
# ch.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(fh)
# logger.addHandler(ch)
class NNConfig(object):
def __init__(self, embeddings=None):
# Input question (sentence) length
self.ques_length = 25
# Input answer length
self.ans_length = 90
# Number of training epochs
self.num_epochs = 100
# Batch size
self.batch_size = 128
# Different filter types, each with a different window size
self.window_sizes = [1, 2, 3, 5, 7, 9]
# Hidden layer size
self.hidden_size = 128
self.output_size = 128
self.keep_prob = 0.5
# Number of filters per window size
self.n_filters = 128
# Margin size
self.margin = 0.5
# Word embedding size
self.embeddings =
|
np.array(embeddings)
|
numpy.array
|
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, <EMAIL>, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Classes for sampling grasps.
Author: <NAME>
"""
"""
This is the PointnetGPD sampler with only the GPG sampler for real-world grasping.
The code is from the author's repo of PointnetGPD https://github.com/lianghongzhuo/PointNetGPD
We used it for benchmarking purposes only.
"""
from abc import ABCMeta, abstractmethod
import itertools
import logging
import numpy as np
# import os, IPython, sys
import random
import time
import scipy.stats as stats
import pcl
import scipy
from scipy.spatial.transform import Rotation as sciRotation
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
'''
try:
from mayavi import mlab
except ImportError:
mlab = None
logger.warning('Do not have mayavi installed, please set the vis to False')
'''
mlab = None
class GraspSampler:
""" Base class for various methods to sample a number of grasps on an object.
Should not be instantiated directly.
"""
__metaclass__ = ABCMeta
def __init__(self, config):
self.config = config
@abstractmethod
def sample_grasps(self, graspable, num_grasps_generate, vis, **kwargs):
"""
Create a list of candidate grasps for a given object.
Must be implemented for all grasp sampler classes.
Parameters
---------
graspable : :obj:`GraspableObject3D`
object to sample grasps on
num_grasps_generate : int
vis : bool
"""
grasp = []
return grasp
# pass
def show_points(self, point, color='lb', scale_factor=.0005):
if color == 'b':
color_f = (0, 0, 1)
elif color == 'r':
color_f = (1, 0, 0)
elif color == 'g':
color_f = (0, 1, 0)
elif color == 'lb': # light blue
color_f = (0.22, 1, 1)
else:
color_f = (1, 1, 1)
if point.size == 3:  # vis for a single point; shape must be (3,), since shape (1, 3) does not work
point = point.reshape(3, )
mlab.points3d(point[0], point[1], point[2], color=color_f, scale_factor=scale_factor)
else: # vis for multiple points
mlab.points3d(point[:, 0], point[:, 1], point[:, 2], color=color_f, scale_factor=scale_factor)
def show_line(self, un1, un2, color='g', scale_factor=0.0005):
if color == 'b':
color_f = (0, 0, 1)
elif color == 'r':
color_f = (1, 0, 0)
elif color == 'g':
color_f = (0, 1, 0)
else:
color_f = (1, 1, 1)
mlab.plot3d([un1[0], un2[0]], [un1[1], un2[1]], [un1[2], un2[2]], color=color_f, tube_radius=scale_factor)
def show_grasp_norm_oneside(self, grasp_bottom_center,
grasp_normal, grasp_axis, minor_pc, scale_factor=0.001):
un2 = grasp_bottom_center
self.show_points(grasp_bottom_center, color='g', scale_factor=scale_factor * 4)
# self.show_points(un1, scale_factor=scale_factor * 4)
# self.show_points(un3, scale_factor=scale_factor * 4)
# self.show_points(un5, scale_factor=scale_factor * 4)
# self.show_line(un1, un2, color='g', scale_factor=scale_factor) # binormal/ major pc
# self.show_line(un3, un4, color='b', scale_factor=scale_factor) # minor pc
# self.show_line(un5, un6, color='r', scale_factor=scale_factor) # approach normal
mlab.quiver3d(un2[0], un2[1], un2[2], grasp_axis[0], grasp_axis[1], grasp_axis[2],
scale_factor=.03, line_width=0.25, color=(0, 1, 0), mode='arrow')
mlab.quiver3d(un2[0], un2[1], un2[2], minor_pc[0], minor_pc[1], minor_pc[2],
scale_factor=.03, line_width=0.1, color=(0, 0, 1), mode='arrow')
mlab.quiver3d(un2[0], un2[1], un2[2], grasp_normal[0], grasp_normal[1], grasp_normal[2],
scale_factor=.03, line_width=0.05, color=(1, 0, 0), mode='arrow')
def get_hand_points(self, grasp_bottom_center, approach_normal, binormal):
hh = self.config['thickness_side'] #self.gripper.hand_height
fw = self.config['thickness'] #self.gripper.finger_width
hod = self.config['gripper_width'] + 2 * self.config['thickness'] #self.gripper.hand_outer_diameter
hd = self.config['hand_height'] #self.gripper.hand_depth
open_w = hod - fw * 2
minor_pc = np.cross(approach_normal, binormal)
minor_pc = minor_pc / np.linalg.norm(minor_pc)
p5_p6 = minor_pc * hh * 0.5 + grasp_bottom_center
p7_p8 = -minor_pc * hh * 0.5 + grasp_bottom_center
p5 = -binormal * open_w * 0.5 + p5_p6
p6 = binormal * open_w * 0.5 + p5_p6
p7 = binormal * open_w * 0.5 + p7_p8
p8 = -binormal * open_w * 0.5 + p7_p8
p1 = approach_normal * hd + p5
p2 = approach_normal * hd + p6
p3 = approach_normal * hd + p7
p4 = approach_normal * hd + p8
p9 = -binormal * fw + p1
p10 = -binormal * fw + p4
p11 = -binormal * fw + p5
p12 = -binormal * fw + p8
p13 = binormal * fw + p2
p14 = binormal * fw + p3
p15 = binormal * fw + p6
p16 = binormal * fw + p7
p17 = -approach_normal * hh + p11
p18 = -approach_normal * hh + p15
p19 = -approach_normal * hh + p16
p20 = -approach_normal * hh + p12
p = np.vstack([np.array([0, 0, 0]), p1, p2, p3, p4, p5, p6, p7, p8, p9, p10,
p11, p12, p13, p14, p15, p16, p17, p18, p19, p20])
return p
def show_grasp_3d(self, hand_points, color=(0.003, 0.50196, 0.50196)):
# for i in range(1, 21):
# self.show_points(p[i])
if color == 'd':
color = (0.003, 0.50196, 0.50196)
triangles = [(9, 1, 4), (4, 9, 10), (4, 10, 8), (8, 10, 12), (1, 4, 8), (1, 5, 8),
(1, 5, 9), (5, 9, 11), (9, 10, 20), (9, 20, 17), (20, 17, 19), (17, 19, 18),
(14, 19, 18), (14, 18, 13), (3, 2, 13), (3, 13, 14), (3, 6, 7), (3, 6, 2),
(3, 14, 7), (14, 7, 16), (2, 13, 15), (2, 15, 6), (12, 20, 19), (12, 19, 16),
(15, 11, 17), (15, 17, 18), (6, 7, 8), (6, 8, 5)]
mlab.triangular_mesh(hand_points[:, 0], hand_points[:, 1], hand_points[:, 2],
triangles, color=color, opacity=0.5)
def check_collision_square(self, grasp_bottom_center, approach_normal, binormal,
minor_pc, graspable, p, way, vis=False):
approach_normal = approach_normal.reshape(1, 3)
approach_normal = approach_normal / np.linalg.norm(approach_normal)
binormal = binormal.reshape(1, 3)
binormal = binormal / np.linalg.norm(binormal)
minor_pc = minor_pc.reshape(1, 3)
minor_pc = minor_pc /
|
np.linalg.norm(minor_pc)
|
numpy.linalg.norm
|
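As a quick illustration of the gripper frame used by get_hand_points and check_collision_square above: the minor axis is the normalized cross product of the approach normal and the binormal. A minimal sketch with made-up vectors (not taken from any real grasp):
import numpy as np

approach_normal = np.array([0.0, 0.0, 1.0])   # illustrative approach direction
binormal = np.array([1.0, 1.0, 0.0])          # illustrative closing direction

approach_normal = approach_normal / np.linalg.norm(approach_normal)
binormal = binormal / np.linalg.norm(binormal)

# Minor principal axis completes the gripper frame.
minor_pc = np.cross(approach_normal, binormal)
minor_pc = minor_pc / np.linalg.norm(minor_pc)

# minor_pc is orthogonal to both input axes (both dot products print as 0.0).
print(np.dot(approach_normal, minor_pc), np.dot(binormal, minor_pc))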
import math
from .common import *
from torchtrainer.meters.aggregators import batch, scale
class BaseMetricsTests(unittest.TestCase):
def measure_once(self, meter, batchs):
meter.reset()
for x_batch, y_batch in batchs:
meter.measure(x_batch, y_batch)
return meter.value()
def assertMeasureEqual(self, meter, batchs, measure):
self.assertEqual(self.measure_once(meter, batchs), measure)
def assertMeasureAlmostEqual(self, meter, batchs, measure):
self.assertAlmostEqual(self.measure_once(meter, batchs), measure)
class AccuracyMetricsTests(BaseMetricsTests):
def test_classification_metter_only_checks_indices_of_maximum_value(self):
a = torch.Tensor([[1]])
t = torch.LongTensor([0])
a1 = torch.Tensor([[0.5, 0.3, 0.2]])
t1 = torch.LongTensor([0])
a2 = torch.Tensor([[-1.0, 2.0, 1.0]])
t2 = torch.LongTensor([1])
a3 = torch.Tensor([[1.0, 2.0, 3.0]])
t3 = torch.LongTensor([2])
meter = meters.CategoricalAccuracy()
self.assertMeasureEqual(meter, [(a, t)], 1.0)
for i, a in enumerate([a1, a2, a3]):
for j, t in enumerate([t1, t2, t3]):
self.assertMeasureEqual(meter, [(a, t)], 1.0 if i == j else 0.0)
def test_classification_with_k_greater_than_one_search_top_k_indices(self):
a1 = torch.Tensor([[0.5, 0.4, 0.1]])
a2 = torch.Tensor([[20, 5, 10]])
a3 = torch.Tensor([[2, 3, 5]])
t1 = torch.LongTensor([2])
t2 = torch.LongTensor([1])
t3 = torch.LongTensor([0])
meter = meters.CategoricalAccuracy(k=2)
for i, a in enumerate([a1, a2, a3]):
for j, t in enumerate([t1, t2, t3]):
self.assertMeasureEqual(meter, [(a, t)], 0.0 if j == i else 1.0)
def test_aggregators_works_over_the_batch_dimension(self):
a = torch.Tensor([[0.55, 0.45],
[-1.0, 2.0]])
t1 = torch.LongTensor([0, 0])
t2 = torch.LongTensor([0, 1])
t3 = torch.LongTensor([1, 0])
t4 = torch.LongTensor([1, 1])
meter_normalized = meters.CategoricalAccuracy(aggregator=batch.Average())
meter_sum = meters.CategoricalAccuracy(aggregator=batch.Sum())
meter_percentage = meters.CategoricalAccuracy(aggregator=scale.percentage(batch.Average()))
meter_maximum = meters.CategoricalAccuracy(aggregator=batch.Maximum())
meter_minimum = meters.CategoricalAccuracy(aggregator=batch.Minimum())
self.assertMeasureAlmostEqual(meter_normalized, [(a, t1)], 1/2)
self.assertMeasureAlmostEqual(meter_normalized, [(a, t2)], 1)
self.assertMeasureAlmostEqual(meter_normalized, [(a, t3)], 0)
self.assertMeasureAlmostEqual(meter_normalized, [(a, t4)], 1/2)
self.assertMeasureAlmostEqual(meter_sum, [(a, t1)], 1)
self.assertMeasureAlmostEqual(meter_sum, [(a, t2)], 2)
self.assertMeasureAlmostEqual(meter_sum, [(a, t3)], 0)
self.assertMeasureAlmostEqual(meter_sum, [(a, t4)], 1)
self.assertMeasureAlmostEqual(meter_percentage, [(a, t1)], 50.0)
self.assertMeasureAlmostEqual(meter_percentage, [(a, t2)], 100.0)
self.assertMeasureAlmostEqual(meter_percentage, [(a, t3)], 0.0)
self.assertMeasureAlmostEqual(meter_percentage, [(a, t4)], 50.0)
self.assertMeasureAlmostEqual(meter_maximum, [(a, t1)], 1)
self.assertMeasureAlmostEqual(meter_maximum, [(a, t2)], 1)
self.assertMeasureAlmostEqual(meter_maximum, [(a, t3)], 0)
self.assertMeasureAlmostEqual(meter_maximum, [(a, t4)], 1)
self.assertMeasureAlmostEqual(meter_minimum, [(a, t1)], 0)
self.assertMeasureAlmostEqual(meter_minimum, [(a, t2)], 1)
self.assertMeasureAlmostEqual(meter_minimum, [(a, t3)], 0)
self.assertMeasureAlmostEqual(meter_minimum, [(a, t4)], 0)
def test_cannot_measure_with_1d_tensors(self):
a = torch.Tensor([0.1])
t = torch.LongTensor([0])
meter = meters.CategoricalAccuracy()
try:
meter.measure(a,t)
self.fail()
except ValueError as e:
self.assertEqual(str(e), meter.INVALID_BATCH_DIMENSION_MESSAGE)
def test_cannot_measure_with_different_number_of_classes(self):
a = torch.Tensor([[0.1]])
b = torch.LongTensor([0, 0])
meter = meters.CategoricalAccuracy()
try:
meter.measure(a, b)
self.fail()
except ValueError as e:
self.assertEqual(str(e), meter.INVALID_BATCH_DIMENSION_MESSAGE)
def test_cannot_measure_inputs_other_than_tensors(self):
a = torch.Tensor([[0.1]])
t = torch.FloatTensor([0])
meter = meters.CategoricalAccuracy()
try:
meter.measure(a, t)
self.fail()
except TypeError as e:
self.assertEqual(str(e), meter.INVALID_INPUT_TYPE_MESSAGE)
def test_size_average_option_average_results_over_the_batch_dimension_on_multiples_passes(self):
a1 = torch.Tensor([[0.5, 0.3, 0.2],
[-1.0, 2.0, 1.0]])
a2 = torch.Tensor([[1.0, 2.0, 3.0]])
t1 = torch.LongTensor([0, 0])
t2 = torch.LongTensor([2])
meter_normalized = meters.CategoricalAccuracy(aggregator=batch.Average())
self.assertMeasureAlmostEqual(meter_normalized, [(a1, t1), (a2, t2)], 2/3)
def test_cannot_get_value_with_no_measures(self):
meter = meters.CategoricalAccuracy()
try:
meter.value()
self.fail()
except meters.ZeroMeasurementsError as e:
pass
def test_binary_meters_with_incresing_threholds_change_region_decision(self):
a1 = torch.Tensor([0.3])
a2 = torch.Tensor([0.5])
a3 = torch.Tensor([0.7])
t1 = torch.LongTensor([1])
t2 = torch.LongTensor([0])
meter_th_p2 = meters.BinaryAccuracy(aggregator=batch.Average(), threshold=0.2)
meter_th_p5 = meters.BinaryAccuracy(aggregator=batch.Average(), threshold=0.5)
meter_th_p8 = meters.BinaryAccuracy(aggregator=batch.Average(), threshold=0.8)
self.assertMeasureEqual(meter_th_p2, [(a1, t1)], 1.0)
self.assertMeasureEqual(meter_th_p5, [(a1, t1)], 0.0)
self.assertMeasureEqual(meter_th_p8, [(a1, t1)], 0.0)
self.assertMeasureEqual(meter_th_p2, [(a2, t1)], 1.0)
self.assertMeasureEqual(meter_th_p5, [(a2, t1)], 1.0)
self.assertMeasureEqual(meter_th_p8, [(a2, t1)], 0.0)
self.assertMeasureEqual(meter_th_p2, [(a3, t2)], 0.0)
self.assertMeasureEqual(meter_th_p5, [(a3, t2)], 0.0)
self.assertMeasureEqual(meter_th_p8, [(a3, t2)], 1.0)
def test_binary_accuracy_with_logits_applies_activation_applies_activation_before_regiion_decision(self):
a1 = torch.Tensor([-1])
a2 = torch.Tensor([0])
a3 = torch.Tensor([1])
t1 = torch.LongTensor([1])
t2 = torch.LongTensor([0])
meter_th_p2 = meters.BinaryWithLogitsAccuracy(aggregator=batch.Average(), threshold=0.2)
meter_th_p5 = meters.BinaryWithLogitsAccuracy(aggregator=batch.Average(), threshold=0.5)
meter_th_p8 = meters.BinaryWithLogitsAccuracy(aggregator=batch.Average(), threshold=0.8)
self.assertMeasureEqual(meter_th_p2, [(a1, t1)], 1.0)
self.assertMeasureEqual(meter_th_p5, [(a1, t1)], 0.0)
self.assertMeasureEqual(meter_th_p8, [(a1, t1)], 0.0)
self.assertMeasureEqual(meter_th_p2, [(a2, t1)], 1.0)
self.assertMeasureEqual(meter_th_p5, [(a2, t1)], 1.0)
self.assertMeasureEqual(meter_th_p8, [(a2, t1)], 0.0)
self.assertMeasureEqual(meter_th_p2, [(a3, t2)], 0.0)
self.assertMeasureEqual(meter_th_p5, [(a3, t2)], 0.0)
self.assertMeasureEqual(meter_th_p8, [(a3, t2)], 1.0)
def test_binary_accuracy_with_custom_activation_applies_that_activation(self):
a1 = torch.Tensor([-1])
a2 = torch.Tensor([0])
a3 = torch.Tensor([1])
t1 = torch.LongTensor([1])
t2 = torch.LongTensor([0])
meter_th_p2 = meters.BinaryWithLogitsAccuracy(aggregator=batch.Average(), threshold=-0.8, activation=nn.Tanh())
meter_th_p5 = meters.BinaryWithLogitsAccuracy(aggregator=batch.Average(), threshold=0, activation=nn.Tanh())
meter_th_p8 = meters.BinaryWithLogitsAccuracy(aggregator=batch.Average(), threshold=0.8, activation=nn.Tanh())
self.assertMeasureEqual(meter_th_p2, [(a1, t1)], 1.0)
self.assertMeasureEqual(meter_th_p5, [(a1, t1)], 0.0)
self.assertMeasureEqual(meter_th_p8, [(a1, t1)], 0.0)
self.assertMeasureEqual(meter_th_p2, [(a2, t1)], 1.0)
self.assertMeasureEqual(meter_th_p5, [(a2, t1)], 1.0)
self.assertMeasureEqual(meter_th_p8, [(a2, t1)], 0.0)
self.assertMeasureEqual(meter_th_p2, [(a3, t2)], 0.0)
self.assertMeasureEqual(meter_th_p5, [(a3, t2)], 0.0)
self.assertMeasureEqual(meter_th_p8, [(a3, t2)], 1.0)
class MSETests(BaseMetricsTests):
def test_meter_measure_is_the_square_of_the_difference(self):
meter = meters.MSE()
sqrt_meter = meters.RMSE()
self.assertMeasureEqual(meter, [(torch.ones(1,1), torch.ones(1,1))], 0)
self.assertMeasureEqual(meter, [(torch.ones(1,1), torch.zeros(1,1))], 1)
self.assertMeasureEqual(meter, [(torch.zeros(1,1), torch.ones(1,1))], 1)
self.assertMeasureEqual(meter, [(-torch.ones(1,2), torch.zeros(1,2))], 2/2)
self.assertMeasureEqual(meter, [(torch.zeros(1,2), -torch.ones(1,2))], 2/2)
self.assertMeasureEqual(meter, [(2*torch.ones(1,2), torch.zeros(1,2))], 2**2*2/2)
self.assertMeasureEqual(meter, [(torch.zeros(1,2), 2*torch.ones(1,2))], 2**2*2/2)
self.assertMeasureEqual(meter, [(-2*torch.ones(1,2), torch.zeros(1,2))], 2**2*2/2)
self.assertMeasureEqual(meter, [(torch.zeros(1,2), -2*torch.ones(1,2))], 2**2*2/2)
self.assertMeasureAlmostEqual(sqrt_meter, [(torch.zeros(1,2), -2*torch.ones(1,2))], math.sqrt(2**2*2/2))
def test_cannot_measure_with_1d_tensors(self):
a = torch.Tensor([0.2])
t = torch.Tensor([0.1])
meter = meters.MSE()
try:
meter.measure(a,t)
self.fail()
except ValueError as e:
self.assertEqual(str(e), meter.INVALID_BATCH_DIMENSION_MESSAGE)
def test_cannot_measure_with_different_shape_tensors(self):
a = torch.Tensor([[0.2, 0.1]])
t = torch.Tensor([[0.1]])
meter = meters.MSE()
try:
meter.measure(a,t)
self.fail()
except ValueError as e:
self.assertEqual(str(e), meter.INVALID_BATCH_DIMENSION_MESSAGE)
def test_meter_value_average_over_batch_dimention(self):
meter = meters.MSE()
sqrt_meter = meters.RMSE()
msle_meter = meters.MSLE()
rmsle_meter = meters.RMSLE()
self.assertMeasureEqual(meter, [(torch.ones(2,1), torch.zeros(2,1))], 1)
self.assertMeasureEqual(meter, [(torch.zeros(2,1), torch.ones(2,1))], 1)
self.assertMeasureEqual(meter, [(2*torch.ones(2,1), torch.zeros(2,1))], 4)
self.assertMeasureEqual(meter, [(torch.zeros(2,1), 2*torch.ones(2,1))], 4)
self.assertMeasureEqual(meter, [(torch.zeros(2,1), 2*torch.ones(2,1))], 4)
self.assertMeasureAlmostEqual(meter, [(torch.arange(0, 3).float().view(3, 1), torch.arange(3, 6).float().view(3, 1))], 3**2)
self.assertMeasureAlmostEqual(sqrt_meter, [(torch.arange(0, 3).float().view(3, 1), torch.arange(3, 6).float().view(3, 1))], 3)
self.assertMeasureAlmostEqual(msle_meter, [(torch.Tensor([[math.exp(2)-1, math.exp(1)-1]]),
torch.Tensor([[math.exp(4)-1, math.exp(2)-1]]))],
((2-4)**2 + (1-2)**2) / 2)
self.assertMeasureAlmostEqual(rmsle_meter, [(torch.Tensor([[math.exp(2)-1, math.exp(1)-1]]),
torch.Tensor([[math.exp(4)-1, math.exp(2)-1]]))],
math.sqrt(((2-4)**2 + (1-2)**2)/2))
def test_meter_value_average_over_sum_of_measured_batch_dimentions(self):
meter = meters.MSE()
sqrt_meter = meters.RMSE()
self.assertMeasureAlmostEqual(meter, [(torch.ones(2,1), torch.zeros(2,1)),
(2*torch.ones(2,1), torch.zeros(2,1))],
(2*1**2 + 2*2**2)/4)
self.assertMeasureAlmostEqual(sqrt_meter, [(torch.ones(2,1), torch.zeros(2,1)),
(2*torch.ones(2,1), torch.zeros(2,1))],
math.sqrt((2*1**2 + 2*2**2)/4))
def test_cannot_measure_with_different_type_of_tensors(self):
import numpy as np
a = [[0.2]]
meter = meters.MSE()
try:
meter.measure(
|
np.array(a)
|
numpy.array
|
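The accuracy tests above all rest on the same check: a prediction counts as correct when the target index is among the k largest scores in its row. A minimal PyTorch sketch of that check (topk_accuracy is an illustrative helper, not the torchtrainer API):
import torch

def topk_accuracy(scores, targets, k=1):
    # Fraction of rows whose target index appears among the k highest scores.
    topk = scores.topk(k, dim=1).indices
    hits = (topk == targets.unsqueeze(1)).any(dim=1)
    return hits.float().mean().item()

a = torch.tensor([[0.5, 0.4, 0.1],
                  [20.0, 5.0, 10.0]])
t = torch.tensor([1, 2])
print(topk_accuracy(a, t, k=1))  # 0.0: the argmax of each row is index 0
print(topk_accuracy(a, t, k=2))  # 1.0: targets 1 and 2 fall inside the top two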
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 qizai <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
This script takes a bedgraph file as input and processes it to produce the binned intensity output.
"""
import os
from pyBedGraph import BedGraph
import numpy as np
import pandas as pd
import scipy
from scipy.stats import binom_test
import ipdb
import argparse
import matplotlib.pyplot as plt
import matplotlib as mpl
def get_max_intensity_in_same_len_bins(bedGraph, nbins, left_start, chrom_left, right_end,
chrom_right=None, chrom_size = np.infty, flank_per=5):
'''
if chrom_right != None, then check if chrom_left == chrom_right.
pyBedGraph can only query [chr, start, end] tuple.
----
left_start: left anchor starting site
right_end: right anchor ending site
nbins: number of bins in the loop
flank_per: percent of the loop length to extend on each side.
'''
if chrom_right != None:
if chrom_left != chrom_right:
raise ValueError('row has anchors in different chromosome {}, {}'.format(chrom_left,
left_start))
loop_length = right_end - left_start
assert loop_length > 0
flank_length = int(loop_length * flank_per / 100)
start_idx = max(left_start - flank_length, 0)
# ipdb.set_trace()
end_idx = min(right_end + flank_length, chrom_size.values[0] - 1)
if start_idx < 0 or start_idx > chrom_size.values[0] - 1:
ipdb.set_trace()
nbins_edges = np.linspace(start_idx, end_idx, nbins + 1, dtype=np.int32)
start_list = nbins_edges[:-1]
end_list = nbins_edges[1:]
try:
bin_values = bedGraph.stats(start_list=start_list,
end_list=end_list,
chrom_name=chrom_left,
stat='max')
except:
print(chrom_left)
print(end_list)
print(start_idx)
ipdb.set_trace()
return bin_values
def get_aggregated_inten_for_each_class(df_binned_intensity_per_loop, nbins, catag):
'''
nbins \in {100, 500, 1000}
catag \in {'bias', 'convergence', 'NULL motif'}
'''
bin_name = '{} binned intensity'.format(nbins)
set_of_label = set(df_binned_intensity_per_loop[catag])
label_list = list([x for x in set_of_label if x != 'na'])
label_list.sort()
total_num_loops_in_catag = (
df_binned_intensity_per_loop[catag] != 'na').sum()
chrom_list = list(set(df_binned_intensity_per_loop['chrom']))
chrom_list.sort(key=lambda x: int(x[3:]) if x != 'chrX' else 24)
chrom_list.append('whole genome')
df_aggregate_sum = pd.DataFrame(columns=label_list, index=chrom_list)
df_aggregate_mean = pd.DataFrame(columns=label_list, index=chrom_list)
df_aggregate_var = pd.DataFrame(columns=label_list, index=chrom_list)
for label in label_list:
label_loop_idx = (df_binned_intensity_per_loop[catag] == label)
for chrom in chrom_list[:-1]:
# avoid whole genome.
chrom_loop_idx = (df_binned_intensity_per_loop['chrom'] == chrom)
tmp_df = df_binned_intensity_per_loop.loc[chrom_loop_idx &
label_loop_idx]
sum_of_intensity = tmp_df[bin_name].sum()
mean_of_intensity = tmp_df[bin_name].mean()
var_of_intensity =
|
np.stack(tmp_df[bin_name])
|
numpy.stack
|
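The docstring of get_max_intensity_in_same_len_bins above describes the binning scheme: extend the loop by flank_per percent on each side, split the window into nbins equal bins, and take a per-bin statistic. A numpy-only sketch of that scheme on a synthetic signal (the array stands in for the bedgraph query; max_per_bin is an illustrative name):
import numpy as np

def max_per_bin(signal, left_start, right_end, nbins=10, flank_per=5):
    # Per-bin maxima over the loop extended by flank_per percent on each side.
    loop_length = right_end - left_start
    flank = int(loop_length * flank_per / 100)
    start = max(left_start - flank, 0)
    end = min(right_end + flank, len(signal) - 1)
    edges = np.linspace(start, end, nbins + 1, dtype=np.int32)
    return np.array([signal[s:e].max() if e > s else signal[s]
                     for s, e in zip(edges[:-1], edges[1:])])

rng = np.random.default_rng(1)
signal = rng.random(1000)                     # synthetic per-base intensity track
print(max_per_bin(signal, left_start=200, right_end=800, nbins=10))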
import sys
if sys.version_info < (3,):
range = xrange
import numpy as np
import pandas as pd
import scipy.stats as ss
from scipy import optimize
from .. import inference as ifr
from .. import families as fam
from .. import output as op
from .. import tsm as tsm
from .. import data_check as dc
from .. import covariances as cov
from .. import results as res
from .. import gas as gas
from .kalman import *
from .llm import *
class NLLEV(tsm.TSM):
""" Inherits time series methods from TSM class.
**** NON-GAUSSIAN LOCAL LEVEL MODEL ****
Parameters
----------
data : pd.DataFrame or np.array
Field to specify the time series data that will be used.
integ : int (default : 0)
Specifies how many times to difference the time series.
target : str (pd.DataFrame) or int (np.array)
Specifies which column name or array index to use. By default, first
column/array will be selected as the dependent variable.
family :
e.g. pf.Normal(0,1)
"""
def __init__(self, data, family, integ=0, target=None):
# Initialize TSM object
super(NLLEV,self).__init__('NLLEV')
# Latent Variables
self.integ = integ
self.target = target
self.max_lag = 0
self._z_hide = 0 # Whether to cutoff variance latent variables from results
self.supported_methods = ["MLE","PML","Laplace","M-H","BBVI"]
self.default_method = "MLE"
self.multivariate_model = False
self.state_no = 1
# Format the data
self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data,target)
self.data = self.data.astype(np.float)
self.data_original = self.data
# Difference data
X = self.data
for order in range(self.integ):
X = np.diff(X)
self.data_name = "Differenced " + self.data_name
self.data = X
self.cutoff = 0
self.data_length = self.data.shape[0]
self._create_latent_variables()
self.family = family
self.model_name2, self.link, self.scale, self.shape, self.skewness, self.mean_transform, self.cythonized = self.family.setup()
self.model_name = self.model_name2 + " Local Level Model"
# Build any remaining latent variables that are specific to the family chosen
for no, i in enumerate(self.family.build_latent_variables()):
self.latent_variables.add_z(i[0],i[1],i[2])
self.latent_variables.z_list[no+1].start = i[3]
self.z_no = len(self.latent_variables.z_list)
def _get_scale_and_shape(self, parm):
""" Obtains appropriate model scale and shape latent variables
Parameters
----------
parm : np.array
Transformed latent variables vector
Returns
----------
None (changes model attributes)
"""
if self.scale is True:
if self.shape is True:
model_shape = parm[-1]
model_scale = parm[-2]
else:
model_shape = 0
model_scale = parm[-1]
else:
model_scale = 0
model_shape = 0
if self.skewness is True:
model_skewness = parm[-3]
else:
model_skewness = 0
return model_scale, model_shape, model_skewness
def neg_loglik(self, beta):
""" Creates negative loglikelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
- Negative loglikelihood
"""
states = np.zeros([self.state_no, self.data_length])
states[0,:] = beta[self.z_no:self.z_no+self.data_length]
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(self.z_no)]) # transformed distribution parameters
scale, shape, skewness = self._get_scale_and_shape(parm)
return self.state_likelihood(beta, states) + self.family.neg_loglikelihood(self.data, self.link(states[0,:]), scale, shape, skewness) # negative loglikelihood for model
def likelihood_markov_blanket(self, beta):
""" Creates likelihood markov blanket of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
- Negative loglikelihood
"""
states = beta[self.z_no:self.z_no+self.data_length] # the local level (untransformed)
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(self.z_no)]) # transformed distribution parameters
scale, shape, skewness = self._get_scale_and_shape(parm)
return self.family.markov_blanket(self.data, self.link(states), scale, shape, skewness) # negative loglikelihood for model
def state_likelihood(self, beta, alpha):
""" Returns likelihood of the states given the variance latent variables
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
alpha : np.array
State matrix
Returns
----------
State likelihood
"""
_, _, _, Q = self._ss_matrices(beta)
residuals = alpha[0][1:]-alpha[0][:-1]
return np.sum(ss.norm.logpdf(residuals, loc=0, scale=np.power(Q.ravel(),0.5)))
def state_likelihood_markov_blanket(self,beta,alpha,col_no):
""" Returns Markov blanket of the states given the variance latent variables
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
alpha : np.array
State matrix
Returns
----------
State likelihood
"""
_, _, _, Q = self._ss_matrices(beta)
blanket = np.append(0, ss.norm.logpdf(alpha[col_no][1:]-alpha[col_no][:-1], loc=0, scale=np.sqrt(Q[col_no][col_no])))
blanket[:-1] = blanket[:-1] + blanket[1:]
return blanket
def neg_logposterior(self, beta):
""" Returns negative log posterior
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
alpha : np.array
State matrix
Returns
----------
- Negative log posterior
"""
post = self.neg_loglik(beta)
for k in range(0,self.z_no):
post += -self.latent_variables.z_list[k].prior.logpdf(beta[k])
return post
def markov_blanket(self, beta, alpha):
""" Creates total Markov blanket for states
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
alpha : np.array
A vector of states
Returns
----------
Markov blanket for states
"""
likelihood_blanket = self.likelihood_markov_blanket(beta)
state_blanket = self.state_likelihood_markov_blanket(beta,alpha,0)
for i in range(self.state_no-1):
likelihood_blanket = np.append(likelihood_blanket,self.likelihood_markov_blanket(beta))
state_blanket = np.append(state_blanket,self.state_likelihood_markov_blanket(beta,alpha,i+1))
return likelihood_blanket + state_blanket
def evo_blanket(self, beta, alpha):
""" Creates Markov blanket for the variance latent variables
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
alpha : np.array
A vector of states
Returns
----------
Markov blanket for variance latent variables
"""
# Markov blanket for each state
evo_blanket = np.zeros(self.state_no)
for i in range(evo_blanket.shape[0]):
evo_blanket[i] = self.state_likelihood_markov_blanket(beta, alpha, i).sum()
# If the family has additional parameters, add their markov blankets
if self.z_no > 1:
evo_blanket = np.append([self.likelihood_markov_blanket(beta).sum()]*(self.z_no-1),evo_blanket)
return evo_blanket
def log_p_blanket(self, beta):
""" Creates complete Markov blanket for latent variables
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
Markov blanket for latent variables
"""
states = np.zeros([self.state_no, self.data_length])
for state_i in range(self.state_no):
states[state_i,:] = beta[(self.z_no + (self.data_length*state_i)):(self.z_no + (self.data_length*(state_i+1)))]
return np.append(self.evo_blanket(beta,states),self.markov_blanket(beta,states))
def _animate_bbvi(self,stored_latent_variables,stored_predictive_likelihood):
""" Produces animated plot of BBVI optimization
Returns
----------
None (changes model attributes)
"""
from matplotlib.animation import FuncAnimation, writers
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ud = BBVINLLMAnimate(ax,self.data,stored_latent_variables,self.index,self.z_no,self.link)
anim = FuncAnimation(fig, ud, frames=np.arange(stored_latent_variables.shape[0]), init_func=ud.init,
interval=10, blit=True)
plt.plot(self.data)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show()
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
self.latent_variables.add_z('Sigma^2 level', fam.Flat(transform='exp'), fam.Normal(0,3))
def _model(self,data,beta):
""" Creates the structure of the model
Parameters
----------
data : np.array
Contains the time series
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
a,P,K,F,v : np.array
Filted states, filtered variances, Kalman gains, F matrix, residuals
"""
T, Z, R, Q, H = self._ss_matrices(beta)
return univariate_kalman(data,Z,H,T,Q,R,0.0)
def _preoptimize_model(self):
""" Preoptimizes the model by estimating a Gaussian state space models
Returns
----------
- Gaussian model latent variable object
"""
gaussian_model = LLEV(self.data, integ=self.integ, target=self.target)
gaussian_model.fit()
self.latent_variables.z_list[0].start = gaussian_model.latent_variables.get_z_values()[1]
if self.model_name2 == 't':
def temp_function(params):
return -np.sum(ss.t.logpdf(x=self.data, df=np.exp(params[0]),
loc=np.ones(self.data.shape[0])*params[1], scale=np.exp(params[2])))
p = optimize.minimize(temp_function,np.array([2.0,0.0,-1.0]),method='L-BFGS-B')
self.latent_variables.z_list[1].start = p.x[2]
self.latent_variables.z_list[2].start = p.x[0]
elif self.model_name2 == 'Skewt':
def temp_function(params):
return -np.sum(fam.Skewt.logpdf_internal(x=self.data,df=np.exp(params[0]),
loc=np.ones(self.data.shape[0])*params[1], scale=np.exp(params[2]),gamma=np.exp(params[3])))
p = optimize.minimize(temp_function,np.array([2.0,0.0,-1.0,0.0]),method='L-BFGS-B')
self.latent_variables.z_list[1].start = p.x[3]
self.latent_variables.z_list[2].start = p.x[2]
self.latent_variables.z_list[3].start = p.x[0]
return gaussian_model.latent_variables
def _ss_matrices(self, beta):
""" Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
"""
T = np.identity(1)
R = np.identity(1)
Z = np.identity(1)
Q = np.identity(1)*self.latent_variables.z_list[0].prior.transform(beta[0])
return T, Z, R, Q
def _general_approximating_model(self,beta,T,Z,R,Q,h_approx):
""" Creates simplest kind of approximating Gaussian model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
Value to use for the H matrix
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(self.data_length)*h_approx
mu = np.zeros(self.data_length)
return H, mu
def fit(self, optimizer='RMSProp', iterations=1000, print_progress=True, start_diffuse=False, **kwargs):
""" Fits the model
Parameters
----------
optimizer : string
Stochastic optimizer: either RMSProp or ADAM.
iterations: int
How many iterations to run
print_progress : bool
Whether to print the ELBO progress or not
start_diffuse : bool
Whether to start from diffuse values (if not: use approx Gaussian)
Returns
----------
BBVI fit object
"""
return self._bbvi_fit(optimizer=optimizer,print_progress=print_progress,
start_diffuse=start_diffuse,iterations=iterations,**kwargs)
def initialize_approx_dist(self, phi, start_diffuse, gaussian_latents):
""" Initializes the approximate distribution for the model
Parameters
----------
phi : np.ndarray
Latent variables
start_diffuse: boolean
Whether to start from diffuse values or not
gaussian_latents: LatentVariables object
Latent variables for the Gaussian approximation
Returns
----------
BBVI fit object
"""
# Starting values for approximate distribution
for i in range(len(self.latent_variables.z_list)):
approx_dist = self.latent_variables.z_list[i].q
if isinstance(approx_dist, fam.Normal):
self.latent_variables.z_list[i].q.mu0 = phi[i]
self.latent_variables.z_list[i].q.sigma0 = np.exp(-3.0)
q_list = [k.q for k in self.latent_variables.z_list]
# Get starting values for states
T, Z, R, Q = self._ss_matrices(phi)
H, mu = self.family.approximating_model(phi, T, Z, R, Q, gaussian_latents.get_z_values(transformed=True)[0], self.data)
a, V = self.smoothed_state(self.data, phi, H, mu)
V[0][0][0] = V[0][0][-1]
for item in range(self.data_length):
if start_diffuse is False:
q_list.append(fam.Normal(a[0][item], np.sqrt(np.abs(V[0][0][item]))))
else:
q_list.append(fam.Normal(self.family.itransform(
|
np.mean(self.data)
|
numpy.mean
|
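The state_likelihood method in the record above scores the latent level as a Gaussian random walk: each increment alpha[t] - alpha[t-1] is N(0, Q). A small sketch of that computation on a synthetic state path (the variance value is illustrative):
import numpy as np
import scipy.stats as ss

rng = np.random.default_rng(2)
sigma2_level = 0.1                                              # illustrative level-innovation variance
alpha = np.cumsum(rng.normal(0.0, np.sqrt(sigma2_level), 200))  # synthetic random-walk states

residuals = alpha[1:] - alpha[:-1]
state_loglik = np.sum(ss.norm.logpdf(residuals, loc=0, scale=np.sqrt(sigma2_level)))
print(state_loglik)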
#!/usr/bin/env python3
#coding=utf-8
#includes
import numpy as np
import pandas as pd
from evaluation import evaluate,remove_wrong_words,greedy_action
# constants
TERMO_POSSIBLE_WORDS_PATH = "docs/termo/all_possibles_words.txt"
TERMO_ALL_WORDS_PATH = "docs/termo/all_words.txt"
TERMO_INITIAL_SOLUTION_PATH = "results/termo/initial_rank_termo_words.csv"
def generate_initial_evaluate_list():
with open(TERMO_POSSIBLE_WORDS_PATH) as f:
possi_words_list = [line.rstrip('\n') for line in f]
possi_words = np.array(possi_words_list)
with open(TERMO_ALL_WORDS_PATH) as f:
all_words_list = [line.rstrip('\n') for line in f]
all_words = np.array(all_words_list)
all_words = np.append(all_words,possi_words)
# transform dataframe to csv file
rank_words = evaluate(possi_words,all_words)
rank_words = rank_words.sort_values(by=['good_word_prob'],ascending=False) #sort values
rank_words.to_csv(TERMO_INITIAL_SOLUTION_PATH,index=False)
print(rank_words)
def main():
with open(TERMO_POSSIBLE_WORDS_PATH) as f:
possible_solutions_list = [line.rstrip('\n') for line in f]
possible_solutions = np.array(possible_solutions_list)
with open(TERMO_ALL_WORDS_PATH) as f:
all_words_list = [line.rstrip('\n') for line in f]
all_words =
|
np.array(all_words_list)
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Testing suite for ParamSpatial.
"""
from __future__ import print_function, division
import unittest as ut
import numpy as np
import numpy.testing as npt
import scipy.linalg as scl
from bekk import ParamSpatial
class ParamSpatialSpatialTestCase(ut.TestCase):
"""Test spatial parameter class."""
def test_get_weight(self):
"""Test construction of spatial weights from groups.
"""
groups = [[(0, 1), (2, 3)]]
weight = ParamSpatial.get_weight(groups=groups)
weight_exp = np.zeros((1, 4, 4))
weight_exp[0, 0, 1] = 1
weight_exp[0, 1, 0] = 1
weight_exp[0, 2, 3] = 1
weight_exp[0, 3, 2] = 1
npt.assert_almost_equal(weight, weight_exp)
groups = [[(0, 1, 2)]]
weight = ParamSpatial.get_weight(groups=groups)
weight_exp = np.array([[[0, .5, .5], [.5, 0, .5], [.5, .5, 0]]])
npt.assert_almost_equal(weight, weight_exp)
groups = [[(0, 1), (2, 3)], [(0, 2), (1, 3)]]
weight = ParamSpatial.get_weight(groups=groups)
weight_exp = np.zeros((len(groups), 4, 4))
weight_exp[0, :2, :2] = np.array([[0, 1], [1, 0]])
weight_exp[0, 2:, 2:] = np.array([[0, 1], [1, 0]])
weight_exp[1, 0:3:2, 0:3:2] = np.array([[0, 1], [1, 0]])
weight_exp[1, 1:4:2, 1:4:2] = np.array([[0, 1], [1, 0]])
npt.assert_almost_equal(weight, weight_exp)
groups = [[(0, 1), (2, 3, 4)]]
weight = ParamSpatial.get_weight(groups=groups)
weight_exp = np.zeros((len(groups), 5, 5))
weight_exp[0, :2, :2] = np.array([[0, 1], [1, 0]])
weight_exp[0, 2:, 2:] = np.array([[[0, .5, .5], [.5, 0, .5],
[.5, .5, 0]]])
npt.assert_almost_equal(weight, weight_exp)
def test_init_empty(self):
"""Test spatial specification."""
nstocks = 3
param = ParamSpatial(nstocks=nstocks)
self.assertEqual(param.amat.shape, (nstocks, nstocks))
self.assertEqual(param.bmat.shape, (nstocks, nstocks))
self.assertEqual(param.cmat.shape, (nstocks, nstocks))
self.assertEqual(param.avecs.shape, (2, nstocks))
self.assertEqual(param.bvecs.shape, (2, nstocks))
self.assertEqual(param.dvecs.shape, (2, nstocks))
self.assertEqual(param.weights.shape, (1, nstocks, nstocks))
def test_from_abdv(self):
"""Test spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
weights = ParamSpatial.get_weight(groups)
ncat = 1
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs,
dvecs=dvecs, groups=groups)
amat = np.diag(avecs[0]) + np.diag(avecs[0]).dot(weights[0])
bmat = np.diag(bvecs[0]) + np.diag(bvecs[0]).dot(weights[0])
dmat = np.eye(nstocks) - np.diag(dvecs[1]).dot(weights[0])
dmat_inv = scl.inv(dmat)
ccmat = dmat_inv.dot(np.diag(dvecs[0])).dot(dmat_inv)
cmat = scl.cholesky(ccmat, 1)
npt.assert_array_equal(amat, param.amat)
npt.assert_array_equal(bmat, param.bmat)
npt.assert_array_equal(cmat, param.cmat)
npt.assert_array_equal(avecs, param.avecs)
npt.assert_array_equal(bvecs, param.bvecs)
npt.assert_array_equal(dvecs, param.dvecs)
npt.assert_array_equal(weights, param.weights)
mats = ParamSpatial.from_vecs_to_mat(avecs=avecs, bvecs=bvecs,
dvecs=dvecs, weights=weights)
amat_new, bmat_new, dmat_new = mats
npt.assert_array_equal(amat, amat_new)
npt.assert_array_equal(bmat, bmat_new)
npt.assert_array_equal(dmat, dmat_new)
def test_from_abcmat(self):
"""Test spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
weights = ParamSpatial.get_weight(groups)
ncat = 1
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
amat = np.diag(avecs[0]) + np.diag(avecs[0]).dot(weights[0])
bmat = np.diag(bvecs[0]) + np.diag(bvecs[0]).dot(weights[0])
dmat = np.eye(nstocks) - np.diag(dvecs[1]).dot(weights[0])
dmat_inv = scl.inv(dmat)
cmat = dmat_inv.dot(np.diag(dvecs[0])).dot(dmat_inv)
param = ParamSpatial.from_abcmat(avecs=avecs, bvecs=bvecs, cmat=cmat,
groups=groups)
npt.assert_array_equal(amat, param.amat)
npt.assert_array_equal(bmat, param.bmat)
npt.assert_array_equal(cmat, param.cmat)
npt.assert_array_equal(avecs, param.avecs)
npt.assert_array_equal(bvecs, param.bvecs)
npt.assert_array_equal(None, param.dvecs)
npt.assert_array_equal(weights, param.weights)
def test_from_abt(self):
"""Test spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
weights = ParamSpatial.get_weight(groups)
ncat = 1
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
amat = np.diag(avecs[0]) + np.diag(avecs[0]).dot(weights[0])
bmat = np.diag(bvecs[0]) + np.diag(bvecs[0]).dot(weights[0])
dmat = np.eye(nstocks) - np.diag(dvecs[1]).dot(weights[0])
dmat_inv = scl.inv(dmat)
ccmat = dmat_inv.dot(np.diag(dvecs[0])).dot(dmat_inv)
cmat = scl.cholesky(ccmat, 1)
target = ParamSpatial.find_stationary_var(amat=amat, bmat=bmat,
cmat=cmat)
cmat_new = ParamSpatial.find_cmat(amat=amat, bmat=bmat, target=target)
npt.assert_array_almost_equal(cmat[np.tril_indices(nstocks)],
cmat_new[np.tril_indices(nstocks)])
param = ParamSpatial.from_abt(avecs=avecs, bvecs=bvecs, target=target,
groups=groups, restriction='hetero')
npt.assert_array_equal(amat, param.amat)
npt.assert_array_equal(bmat, param.bmat)
npt.assert_array_almost_equal(cmat, param.cmat)
npt.assert_array_equal(avecs, param.avecs)
npt.assert_array_equal(bvecs, param.bvecs)
# npt.assert_array_equal(None, param.dvecs)
npt.assert_array_equal(weights, param.weights)
def test_get_theta_from_ab(self):
"""Test theta vector for spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
ncat = 1
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
restriction = 'hetero'
theta = np.concatenate([avecs.flatten(), bvecs.flatten()])
theta_exp = param.get_theta_from_ab(restriction=restriction)
npt.assert_array_equal(theta, theta_exp)
restriction = 'ghomo'
theta = np.concatenate([avecs[0], avecs[1:, :2].flatten(),
bvecs[0], bvecs[1:, :2].flatten()])
theta_exp = param.get_theta_from_ab(restriction=restriction)
npt.assert_array_equal(theta, theta_exp)
restriction = 'homo'
theta = np.concatenate([avecs[0], avecs[1:, 0],
bvecs[0], bvecs[1:, 0]])
theta_exp = param.get_theta_from_ab(restriction=restriction)
npt.assert_array_equal(theta, theta_exp)
restriction = 'shomo'
theta = np.concatenate([avecs[:, 0], bvecs[:, 0]])
theta_exp = param.get_theta_from_ab(restriction=restriction)
npt.assert_array_equal(theta, theta_exp)
def test_get_theta_hetero(self):
"""Test theta vector for spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
ncat = 1
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
restriction = 'hetero'
use_target = True
theta = np.concatenate([avecs.flatten(), bvecs.flatten()])
nparams = 2 * nstocks * (1 + ncat)
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
use_target = False
theta = [avecs.flatten(), bvecs.flatten(), dvecs.flatten()]
theta = np.concatenate(theta)
nparams = 3 * nstocks * (1 + ncat)
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
cfree = True
theta = [avecs.flatten(), bvecs.flatten(),
param.cmat[np.tril_indices(param.cmat.shape[0])]]
theta = np.concatenate(theta)
nparams = 2 * nstocks * (1 + ncat) + nstocks * (nstocks + 1) // 2
theta_exp = param.get_theta(restriction=restriction, cfree=cfree)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
def test_get_theta_ghomo(self):
"""Test theta vector for spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
ncat = len(groups)
alpha = [.01, .02, .03]
beta = [.04, .05, .06]
delta = [.07, .08]
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks))
bvecs = np.ones((ncat+1, nstocks))
dvecs = np.ones((ncat+1, nstocks))
avecs[0, :] *= alpha[0]
avecs[1, :2] *= alpha[1]
avecs[1, 2:] *= alpha[2]
bvecs[0, :] *= beta[0]
bvecs[1, :2] *= beta[1]
bvecs[1, 2:] *= beta[2]
dvecs[1, :2] *= delta[0]
dvecs[1, 2:] *= delta[1]
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
restriction = 'ghomo'
use_target = True
theta = [avecs[0], [avecs[1, 0]], [avecs[1, 2]],
bvecs[0], [bvecs[1, 0]], [bvecs[1, 2]]]
theta = np.concatenate(theta)
nparams = 2 * (nstocks + 2 * ncat)
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
use_target = False
theta = [avecs[0], [avecs[1, 0]], [avecs[1, 2]],
bvecs[0], [bvecs[1, 0]], [bvecs[1, 2]],
dvecs[0], [dvecs[1, 0]], [dvecs[1, 2]]]
theta = np.concatenate(theta)
nparams = 3 * (nstocks + 2 * ncat)
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
cfree = True
theta = [avecs[0], [avecs[1, 0]], [avecs[1, 2]],
bvecs[0], [bvecs[1, 0]], [bvecs[1, 2]],
param.cmat[np.tril_indices(param.cmat.shape[0])]]
theta = np.concatenate(theta)
nparams = 2 * (nstocks + 2 * ncat) + nstocks * (nstocks + 1) // 2
theta_exp = param.get_theta(restriction=restriction, cfree=cfree)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
def test_get_theta_homo(self):
"""Test theta vector for spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
ncat = 1
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
restriction = 'homo'
use_target = True
theta = [avecs[0], avecs[1:, 0], bvecs[0], bvecs[1:, 0]]
theta = np.concatenate(theta)
nparams = 2 * (nstocks + ncat)
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
use_target = False
theta = [avecs[0], avecs[1:, 0], bvecs[0], bvecs[1:, 0],
dvecs[0], dvecs[1:, 0]]
theta = np.concatenate(theta)
nparams = 3 * (nstocks + ncat)
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
cfree = True
theta = [avecs[0], avecs[1:, 0], bvecs[0], bvecs[1:, 0],
param.cmat[np.tril_indices(param.cmat.shape[0])]]
theta = np.concatenate(theta)
nparams = 2 * (nstocks + ncat) + nstocks * (nstocks + 1) // 2
theta_exp = param.get_theta(restriction=restriction, cfree=cfree)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
def test_get_theta_shomo(self):
"""Test theta vector for spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
ncat = 1
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
restriction = 'shomo'
use_target = True
theta = [avecs[:, 0], bvecs[:, 0]]
theta = np.concatenate(theta)
nparams = 2 * (1 + ncat)
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
use_target = False
theta = [avecs[:, 0], bvecs[:, 0], dvecs[0], dvecs[1:, 0]]
theta = np.concatenate(theta)
nparams = nstocks + 3 * ncat + 2
theta_exp = param.get_theta(restriction=restriction,
use_target=use_target)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
cfree = True
theta = [avecs[:, 0], bvecs[:, 0],
param.cmat[np.tril_indices(param.cmat.shape[0])]]
theta = np.concatenate(theta)
nparams = 2 * (1 + ncat) + nstocks * (nstocks + 1) // 2
theta_exp = param.get_theta(restriction=restriction, cfree=cfree)
self.assertEqual(nparams, theta_exp.size)
self.assertEqual(nparams, theta.size)
npt.assert_array_equal(theta, theta_exp)
def test_from_theta_hetero(self):
"""Test init from theta for spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
ncat = len(groups)
alpha, beta, gamma = .01, .16, .09
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks)) * alpha**.5
bvecs = np.ones((ncat+1, nstocks)) * beta**.5
dvecs = np.vstack([np.ones((1, nstocks)),
np.ones((ncat, nstocks)) * gamma**.5])
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
restriction = 'hetero'
target = None
theta = [avecs.flatten(), bvecs.flatten(), dvecs.flatten()]
theta = np.concatenate(theta)
param_new = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
target=target)
npt.assert_array_equal(param.amat, param_new.amat)
npt.assert_array_equal(param.bmat, param_new.bmat)
npt.assert_array_equal(param.cmat, param_new.cmat)
npt.assert_array_equal(param.avecs, param_new.avecs)
npt.assert_array_equal(param.bvecs, param_new.bvecs)
npt.assert_array_equal(param.dvecs, param_new.dvecs)
target = param.get_uvar()
theta = [avecs.flatten(), bvecs.flatten()]
theta = np.concatenate(theta)
cmat = param.find_cmat(amat=param.amat, bmat=param.bmat, target=target)
param_new = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
target=target)
npt.assert_array_equal(param.amat, param_new.amat)
npt.assert_array_equal(param.bmat, param_new.bmat)
npt.assert_array_equal(cmat, param_new.cmat)
npt.assert_array_equal(param.avecs, param_new.avecs)
npt.assert_array_equal(param.bvecs, param_new.bvecs)
# npt.assert_array_equal(None, param_new.dvecs)
target = None
theta = [avecs.flatten(), bvecs.flatten(), dvecs.flatten()]
theta = np.concatenate(theta)
param_new = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
target=target)
npt.assert_array_equal(param.amat, param_new.amat)
npt.assert_array_equal(param.bmat, param_new.bmat)
npt.assert_array_equal(param.cmat, param_new.cmat)
npt.assert_array_equal(param.avecs, param_new.avecs)
npt.assert_array_equal(param.bvecs, param_new.bvecs)
npt.assert_array_equal(param.dvecs, param_new.dvecs)
target = param.get_uvar()
theta = [avecs.flatten(), bvecs.flatten()]
theta = np.concatenate(theta)
cmat = param.find_cmat(amat=param.amat, bmat=param.bmat, target=target)
param_new = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
target=target)
npt.assert_array_equal(param.amat, param_new.amat)
npt.assert_array_equal(param.bmat, param_new.bmat)
npt.assert_array_equal(cmat, param_new.cmat)
npt.assert_array_equal(param.avecs, param_new.avecs)
npt.assert_array_equal(param.bvecs, param_new.bvecs)
# npt.assert_array_equal(None, param_new.dvecs)
cfree = True
theta = [avecs.flatten(), bvecs.flatten(),
param.cmat[np.tril_indices(nstocks)]]
theta = np.concatenate(theta)
param_new = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
cfree=cfree)
npt.assert_array_equal(param.amat, param_new.amat)
npt.assert_array_equal(param.bmat, param_new.bmat)
npt.assert_array_equal(np.tril(param.cmat), param_new.cmat)
npt.assert_array_equal(param.avecs, param_new.avecs)
npt.assert_array_equal(param.bvecs, param_new.bvecs)
npt.assert_array_equal(None, param_new.dvecs)
cfree = True
theta = [avecs.flatten(), bvecs.flatten(),
param.cmat[np.tril_indices(nstocks)]]
theta = np.concatenate(theta)
param_new = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
cfree=cfree)
npt.assert_array_equal(param.amat, param_new.amat)
npt.assert_array_equal(param.bmat, param_new.bmat)
npt.assert_array_equal(np.tril(param.cmat), param_new.cmat)
npt.assert_array_equal(param.avecs, param_new.avecs)
npt.assert_array_equal(param.bvecs, param_new.bvecs)
npt.assert_array_equal(None, param_new.dvecs)
def test_from_theta_ghomo(self):
"""Test group init from theta for spatial specification."""
nstocks = 4
groups = [[(0, 1), (2, 3)]]
ncat = len(groups)
alpha = [.01, .02, .03, .04, .05, .06]
beta = [.07, .08, .09, .1, .11, .12]
delta = [.13, .14, .15, .16, .17, .18]
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks))
bvecs = np.ones((ncat+1, nstocks))
dvecs = np.ones((ncat+1, nstocks))
avecs[0] = alpha[:4]
avecs[1, :2] *= alpha[-2]
avecs[1, 2:] *= alpha[-1]
bvecs[0] = beta[:4]
bvecs[1, :2] *= beta[-2]
bvecs[1, 2:] *= beta[-1]
dvecs[0] = delta[:4]
dvecs[1, :2] *= delta[-2]
dvecs[1, 2:] *= delta[-1]
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
restriction = 'ghomo'
target = param.get_uvar()
theta = np.concatenate([alpha, beta])
cmat = param.find_cmat(amat=param.amat, bmat=param.bmat, target=target)
uvar = param.find_stationary_var(amat=param.amat, bmat=param.bmat,
cmat=cmat)
npt.assert_array_almost_equal(target, uvar)
param_new = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
target=target)
npt.assert_array_equal(param.avecs, param_new.avecs)
npt.assert_array_equal(param.bvecs, param_new.bvecs)
# npt.assert_array_equal(None, param_new.dvecs)
npt.assert_array_equal(param.amat, param_new.amat)
npt.assert_array_equal(param.bmat, param_new.bmat)
npt.assert_array_equal(cmat, param_new.cmat)
npt.assert_array_almost_equal(cmat, param.cmat)
def test_from_theta_ghomo_special(self):
"""Test group init from theta for spatial specification."""
nstocks = 4
groups = [[(0, 1, 2, 3)]]
ncat = len(groups)
alpha = [.01, .02, .03, .04, .05]
beta = [.07, .08, .09, .1, .11]
delta = [.13, .14, .15, .16, .17]
# A, B, C - n x n matrices
avecs = np.ones((ncat+1, nstocks))
bvecs = np.ones((ncat+1, nstocks))
dvecs = np.ones((ncat+1, nstocks))
avecs[0] = alpha[:4]
avecs[1] *= alpha[-1]
bvecs[0] = beta[:4]
bvecs[1] *= beta[-1]
dvecs[0] = delta[:4]
dvecs[1] *= delta[-1]
param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs,
groups=groups)
target = param.get_uvar()
theta = np.concatenate([alpha, beta])
cmat = param.find_cmat(amat=param.amat, bmat=param.bmat, target=target)
uvar = param.find_stationary_var(amat=param.amat, bmat=param.bmat,
cmat=cmat)
npt.assert_array_almost_equal(target, uvar)
restriction = 'homo'
param_homo = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
target=target)
theta_homo = param_homo.get_theta(restriction=restriction,
use_target=False)
restriction = 'ghomo'
param_ghomo = ParamSpatial.from_theta(theta=theta, groups=groups,
restriction=restriction,
target=target)
theta_ghomo = param_ghomo.get_theta(restriction=restriction,
use_target=False)
npt.assert_array_almost_equal(cmat, param.cmat)
|
npt.assert_array_almost_equal(theta_ghomo, theta_homo, decimal=3)
|
numpy.testing.assert_array_almost_equal
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transport module."""
import numpy as np
from ravens import utils
from ravens.models.resnet import ResNet43_8s
import tensorflow as tf
import tensorflow_addons as tfa
class Transport:
"""Transport module."""
def __init__(self, in_shape, n_rotations, crop_size, preprocess):
"""Transport module for placing.
Args:
in_shape: shape of input image.
n_rotations: number of rotations of convolving kernel.
crop_size: crop size around pick argmax used as convolving kernel.
preprocess: function to preprocess input images.
"""
self.iters = 0
self.n_rotations = n_rotations
self.crop_size = crop_size # crop size must be N*16 (e.g. 96)
self.preprocess = preprocess
self.pad_size = int(self.crop_size / 2)
self.padding = np.zeros((3, 2), dtype=int)
self.padding[:2, :] = self.pad_size
in_shape = np.array(in_shape)
in_shape[0:2] += self.pad_size * 2
in_shape = tuple(in_shape)
# Crop before network (default for Transporters in CoRL submission).
# kernel_shape = (self.crop_size, self.crop_size, in_shape[2])
if not hasattr(self, 'output_dim'):
self.output_dim = 3
if not hasattr(self, 'kernel_dim'):
self.kernel_dim = 3
# 2 fully convolutional ResNets; ResNet43_8s builds a 43-layer, 8-stride network
in0, out0 = ResNet43_8s(in_shape, self.output_dim, prefix='s0_')
in1, out1 = ResNet43_8s(in_shape, self.kernel_dim, prefix='s1_')
self.model = tf.keras.Model(inputs=[in0, in1], outputs=[out0, out1])
self.optim = tf.keras.optimizers.Adam(learning_rate=1e-4)
self.metric = tf.keras.metrics.Mean(name='loss_transport')
# if not self.six_dof:
# in0, out0 = ResNet43_8s(in_shape, output_dim, prefix="s0_")
# if self.crop_bef_q:
# # Passing in kernels: (64,64,6) --> (64,64,3)
# in1, out1 = ResNet43_8s(kernel_shape, kernel_dim, prefix="s1_")
# else:
# # Passing in original images: (384,224,6) --> (394,224,3)
# in1, out1 = ResNet43_8s(in_shape, output_dim, prefix="s1_")
# else:
# in0, out0 = ResNet43_8s(in_shape, output_dim, prefix="s0_")
# # early cutoff just so it all fits on GPU.
# in1, out1 = ResNet43_8s(
# kernel_shape, kernel_dim, prefix="s1_", cutoff_early=True)
# def set_bounds_pixel_size(self, bounds, pixel_size):
# self.bounds = bounds
# self.pixel_size = pixel_size
def correlate(self, in0, in1, softmax):
"""Correlate two input tensors."""
output = tf.nn.convolution(in0, in1, data_format='NHWC')
if softmax:
output_shape = output.shape
output = tf.reshape(output, (1, np.prod(output.shape)))
output = tf.nn.softmax(output)
output = np.float32(output).reshape(output_shape[1:])
return output
def forward(self, in_img, p, softmax=True):
"""Forward pass."""
img_unprocessed = np.pad(in_img, self.padding, mode='constant')
input_data = self.preprocess(img_unprocessed.copy())
in_shape = (1,) + input_data.shape
input_data = input_data.reshape(in_shape)
in_tensor = tf.convert_to_tensor(input_data, dtype=tf.float32)
# Rotate crop.
pivot = np.array([p[1], p[0]]) + self.pad_size
rvecs = self.get_se2(self.n_rotations, pivot)
# Crop before network (default for Transporters in CoRL submission).
# crop = tf.convert_to_tensor(input_data.copy(), dtype=tf.float32)
# crop = tf.repeat(crop, repeats=self.n_rotations, axis=0)
# crop = tfa.image.transform(crop, rvecs, interpolation="NEAREST")
# crop = crop[:, p[0]:(p[0] + self.crop_size),
# p[1]:(p[1] + self.crop_size), :]
# logits, kernel_raw = self.model([in_tensor, crop])
# Crop after network (for receptive field, and more elegant).
logits, crop = self.model([in_tensor, in_tensor])
# crop = tf.identity(kernel_bef_crop)
crop = tf.repeat(crop, repeats=self.n_rotations, axis=0)
crop = tfa.image.transform(crop, rvecs, interpolation='NEAREST')
kernel_raw = crop[:, p[0]:(p[0] + self.crop_size),
p[1]:(p[1] + self.crop_size), :]
# Obtain kernels for cross-convolution.
kernel_paddings = tf.constant([[0, 0], [0, 1], [0, 1], [0, 0]])
kernel = tf.pad(kernel_raw, kernel_paddings, mode='CONSTANT')
kernel = tf.transpose(kernel, [1, 2, 3, 0])
return self.correlate(logits, kernel, softmax)
def train(self, in_img, p, q, theta, backprop=True):
"""Transport pixel p to pixel q.
Args:
in_img: input image.
p: pixel (y, x)
q: pixel (y, x)
theta: rotation label in radians.
backprop: True if backpropagating gradients.
Returns:
loss: training loss.
"""
self.metric.reset_states()
with tf.GradientTape() as tape:
output = self.forward(in_img, p, softmax=False)
itheta = theta / (2 * np.pi / self.n_rotations)
itheta = np.int32(
|
np.round(itheta)
|
numpy.round
|
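# --- Illustrative sketch (added; not part of the original Transport source) ---
# forward() above scores placements by cross-correlating the query feature map
# with a stack of rotated crop kernels via tf.nn.convolution. Toy shapes only;
# the 80x80 map, crop_size 64 padded to 65, and 36 rotations are assumptions here.
import tensorflow as tf
feat = tf.random.normal([1, 80, 80, 3])      # query feature map, NHWC
kernels = tf.random.normal([65, 65, 3, 36])  # one output channel per rotation
out = tf.nn.convolution(feat, kernels, data_format='NHWC')
print(out.shape)                             # (1, 16, 16, 36) with default VALID padding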
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Source separation algorithms attempt to extract recordings of individual
sources from a recording of a mixture of sources. Evaluation methods for
source separation compare the extracted sources from reference sources and
attempt to measure the perceptual quality of the separation.
See also the bss_eval MATLAB toolbox:
http://bass-db.gforge.inria.fr/bss_eval/
Conventions
-----------
An audio signal is expected to be in the format of a 1-dimensional array where
the entries are the samples of the audio signal. When providing a group of
estimated or reference sources, they should be provided in a 2-dimensional
array, where the first dimension corresponds to the source number and the
second corresponds to the samples.
Metrics
-------
* :func:`mir_eval.separation.bss_eval_sources`: Computes the bss_eval_sources
metrics from bss_eval, which optionally optimally match the estimated sources
to the reference sources and measure the distortion and artifacts present in
the estimated sources as well as the interference between them.
* :func:`mir_eval.separation.bss_eval_sources_framewise`: Computes the
bss_eval_sources metrics on a frame-by-frame basis.
* :func:`mir_eval.separation.bss_eval_images`: Computes the bss_eval_images
metrics from bss_eval, which includes the metrics in
:func:`mir_eval.separation.bss_eval_sources` plus the image to spatial
distortion ratio.
* :func:`mir_eval.separation.bss_eval_images_framewise`: Computes the
bss_eval_images metrics on a frame-by-frame basis.
References
----------
.. [#vincent2006performance] <NAME>, <NAME>, and <NAME>, "Performance measurement in blind audio source separation," IEEE
Trans. on Audio, Speech and Language Processing, 14(4):1462-1469, 2006.
This code is licensed under the MIT License:
The MIT License (MIT)
Copyright (c) 2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Please see http://craffel.github.io/mir_eval/ for more information
'''
import numpy as np
import scipy.fftpack
from scipy.linalg import toeplitz
from scipy.signal import fftconvolve
import collections
import itertools
import warnings
import inspect
#from . import util
# The maximum allowable number of sources (prevents insane computational load)
MAX_SOURCES = 100
def filter_kwargs(_function, *args, **kwargs):
"""Given a function and args and keyword args to pass to it, call the function
but using only the keyword arguments which it accepts. This is equivalent
to redefining the function with an additional \*\*kwargs to accept slop
keyword args.
If the target function already accepts \*\*kwargs parameters, no filtering
is performed.
Parameters
----------
_function : callable
Function to call. Can take in any number of args or kwargs
"""
# Pass everything straight through if the function already accepts **kwargs
argspec = inspect.getfullargspec(_function)
if argspec.varkw is not None:
return _function(*args, **kwargs)
# Get the list of function arguments
function_args = argspec.args
# Construct a dict of those kwargs which appear in the function
filtered_kwargs = {}
for kwarg, value in list(kwargs.items()):
if kwarg in function_args:
filtered_kwargs[kwarg] = value
# Call the function with the supplied args and the filtered kwarg dict
return _function(*args, **filtered_kwargs)
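# Illustrative example (added; not part of mir_eval): given
#     def metric(a, b, scale=1.0):
#         return scale * (a + b)
# ``filter_kwargs(metric, 1, 2, scale=3, verbose=True)`` returns 9; the
# unaccepted ``verbose`` keyword is silently discarded.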
def validate(reference_sources, estimated_sources):
"""Checks that the input data to a metric are valid, and throws helpful
errors if not.
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources
"""
if reference_sources.shape != estimated_sources.shape:
raise ValueError('The shape of estimated sources and the true '
'sources should match. reference_sources.shape '
'= {}, estimated_sources.shape '
'= {}'.format(reference_sources.shape,
estimated_sources.shape))
if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
raise ValueError('The number of dimensions is too high (must be less '
'than 3). reference_sources.ndim = {}, '
'estimated_sources.ndim '
'= {}'.format(reference_sources.ndim,
estimated_sources.ndim))
if reference_sources.size == 0:
warnings.warn("reference_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(reference_sources):
raise ValueError('All the reference sources should be non-silent (not '
'all-zeros), but at least one of the reference '
'sources is all 0s, which introduces ambiguity to the'
' evaluation. (Otherwise we can add infinitely many '
'all-zero sources.)')
if estimated_sources.size == 0:
warnings.warn("estimated_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(estimated_sources):
raise ValueError('All the estimated sources should be non-silent (not '
'all-zeros), but at least one of the estimated '
'sources is all 0s. Since we require each reference '
'source to be non-silent, having a silent estimated '
'source will result in an underdetermined system.')
if (estimated_sources.shape[0] > MAX_SOURCES or
reference_sources.shape[0] > MAX_SOURCES):
raise ValueError('The supplied matrices should be of shape (nsrc,'
' nsampl) but reference_sources.shape[0] = {} and '
'estimated_sources.shape[0] = {} which is greater '
'than mir_eval.separation.MAX_SOURCES = {}. To '
'override this check, set '
'mir_eval.separation.MAX_SOURCES to a '
'larger value.'.format(reference_sources.shape[0],
estimated_sources.shape[0],
MAX_SOURCES))
def _any_source_silent(sources):
"""Returns true if the parameter sources has any silent first dimensions"""
return np.any(np.all(np.sum(
sources, axis=tuple(range(2, sources.ndim))) == 0, axis=1))
def bss_eval_sources(reference_sources, estimated_sources,
compute_permutation=True, fft_window=512):
"""
Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
... perm) = mir_eval.separation.bss_eval_sources(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have same shape as
estimated_sources)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have same shape as
reference_sources)
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,
nsrc-1]`` if ``compute_permutation`` is ``False``.
References
----------
.. [#] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012.
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = estimated_sources.shape[0]
# does user desire permutations?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[jest],
jtrue, fft_window)
sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(list(range(nsrc))))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
for j in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[j],
j, fft_window)
sdr[j], sir[j], sar[j] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, sir, sar, popt)
def bss_eval_sources_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
"""Framewise computation of bss_eval_sources
Please be aware that this function does not compute permutations (by
default) on the possible relations between reference_sources and
estimated_sources due to the dangers of a changing permutation. Therefore
(by default), it assumes that ``reference_sources[i]`` corresponds to
``estimated_sources[i]``. To enable computing permutations please set
``compute_permutation`` to be ``True`` and check that the returned ``perm``
is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of :func:`mir_eval.separation.bss_eval_sources` called on
``reference_sources`` and ``estimated_sources`` (with the
``compute_permutation`` parameter passed to
:func:`mir_eval.separation.bss_eval_sources`) is returned.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
...  perm) = mir_eval.separation.bss_eval_sources_framewise(
...     reference_sources, estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int, optional
Window length for framewise evaluation (default value is 30s at a
sample rate of 44.1kHz)
hop : int, optional
Hop size for framewise evaluation (default value is 15s at a
sample rate of 44.1kHz)
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``range(nsrc)`` for
all windows if ``compute_permutation`` is ``False``
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
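# e.g. 60 s of audio at 44.1 kHz with the default 30 s window and 15 s hop gives
# nwin = floor((2646000 - 1323000 + 661500) / 661500) = 3 windows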
# if fewer than 2 windows would be evaluated, return the sources result
if nwin < 2:
result = bss_eval_sources(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice]
est_slice = estimated_sources[:, win_slice]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, sir, sar, perm
def bss_eval_images(reference_sources, estimated_sources,
compute_permutation=True):
"""Implementation of the bss_eval_images function from the
BSS_EVAL Matlab toolbox.
Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
This method also provides the ISR measure.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_images.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
... perm) = mir_eval.separation.bss_eval_images(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc,)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``(0, 1, ..., nsrc-1)``
if ``compute_permutation`` is ``False``.
References
----------
.. [#] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012.
"""
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), \
np.array([]), np.array([])
# determine size parameters
nsrc = estimated_sources.shape[0]
nsampl = estimated_sources.shape[1]
nchan = estimated_sources.shape[2]
# does the user desire permutation?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
isr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt_images(
reference_sources,
np.reshape(
estimated_sources[jest],
(nsampl, nchan),
order='F'
),
jtrue,
512
)
sdr[jest, jtrue], isr[jest, jtrue], \
sir[jest, jtrue], sar[jest, jtrue] = \
_bss_image_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(range(nsrc)))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], isr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
isr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
Gj = [0] * nsrc # prepare G matrices with zeros
G = np.zeros(1)
for j in range(nsrc):
# save G matrix to avoid recomputing it every call
s_true, e_spat, e_interf, e_artif, Gj_temp, G = \
_bss_decomp_mtifilt_images(reference_sources,
np.reshape(estimated_sources[j],
(nsampl, nchan),
order='F'),
j, 512, Gj[j], G)
Gj[j] = Gj_temp
sdr[j], isr[j], sir[j], sar[j] = \
_bss_image_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, isr, sir, sar, popt)
def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
"""Framewise computation of bss_eval_images
Please be aware that this function does not compute permutations (by
default) on the possible relations between ``reference_sources`` and
``estimated_sources`` due to the dangers of a changing permutation.
Therefore (by default), it assumes that ``reference_sources[i]``
corresponds to ``estimated_sources[i]``. To enable computing permutations
please set ``compute_permutation`` to be ``True`` and check that the
returned ``perm`` is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of ``bss_eval_sources`` called on ``reference_sources`` and
``estimated_sources`` (with the ``compute_permutation`` parameter passed to
``bss_eval_images``) is returned
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
...  perm) = mir_eval.separation.bss_eval_images_framewise(
...     reference_sources, estimated_sources, window, hop)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int
Window length for framewise evaluation
hop : int
Hop size for framewise evaluation
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc, nframes)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number perm[j] corresponds to
true source number j)
Note: perm will be range(nsrc) for all windows if compute_permutation
is False
"""
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the images result
if nwin < 2:
result = bss_eval_images(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
isr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice, :]
est_slice = estimated_sources[:, win_slice, :]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
bss_eval_images(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = isr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, isr, sir, sar, perm
def _bss_decomp_mtifilt(reference_sources, estimated_source, j, flen):
"""Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters.
"""
nsampl = estimated_source.size
# decomposition
# true source image
s_true = np.hstack((reference_sources[j], np.zeros(flen - 1)))
# spatial (or filtering) distortion
e_spat = _project(reference_sources[j, np.newaxis, :], estimated_source,
flen) - s_true
# interference
e_interf = _project(reference_sources,
estimated_source, flen) - s_true - e_spat
# artifacts
e_artif = -s_true - e_spat - e_interf
e_artif[:nsampl] += estimated_source
return (s_true, e_spat, e_interf, e_artif)
def _bss_decomp_mtifilt_images(reference_sources, estimated_source, j, flen,
Gj=None, G=None):
"""Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters.
Adapted version to work with multichannel sources.
Improved performance can be gained by passing Gj and G parameters initially
as all zeros. These parameters store the results from the computation of
the G matrix in _project_images and then return them for subsequent calls
to this function. This only works when not computing permutations.
"""
nsampl = np.shape(estimated_source)[0]
nchan = np.shape(estimated_source)[1]
# are we saving the Gj and G parameters?
saveg = Gj is not None and G is not None
# decomposition
# true source image
s_true = np.hstack((np.reshape(reference_sources[j],
(nsampl, nchan),
order="F").transpose(),
np.zeros((nchan, flen - 1))))
# spatial (or filtering) distortion
if saveg:
e_spat, Gj = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen, Gj)
else:
e_spat = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen)
e_spat = e_spat - s_true
# interference
if saveg:
e_interf, G = _project_images(reference_sources,
estimated_source, flen, G)
else:
e_interf = _project_images(reference_sources,
estimated_source, flen)
e_interf = e_interf - s_true - e_spat
# artifacts
e_artif = -s_true - e_spat - e_interf
e_artif[:, :nsampl] += estimated_source.transpose()
# return Gj and G only if they were passed in
if saveg:
return (s_true, e_spat, e_interf, e_artif, Gj, G)
else:
return (s_true, e_spat, e_interf, e_artif)
def _project(reference_sources, estimated_source, flen):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros((nsrc * flen, nsrc * flen))
for i in range(nsrc):
for j in range(nsrc):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros(nsrc * flen)
for i in range(nsrc):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
except np.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc):
sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1]
return sproj
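# The projection above solves the normal equations G C = D: G is the Gram matrix of
# all delayed reference sources (inner products computed via FFT and laid out as
# Toeplitz blocks), D holds the inner products between the estimate and those delayed
# references, and C contains the resulting distortion-filter taps.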
def _project_images(reference_sources, estimated_source, flen, G=None):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1.
Passing G as all zeros will populate the G matrix and return it so it can
be passed into the next call to avoid recomputing G (this only works
if not computing permutations).
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
nchan = reference_sources.shape[2]
reference_sources = np.reshape(np.transpose(reference_sources, (2, 0, 1)),
(nchan*nsrc, nsampl), order='F')
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nchan*nsrc, flen - 1))))
estimated_source = \
np.hstack((estimated_source.transpose(), np.zeros((nchan, flen - 1))))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
if G is None:
saveg = False
G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
for i in range(nchan * nsrc):
for j in range(i+1):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
else: # avoid recomputing G (only works if no permutation is desired)
saveg = True # return G
if np.all(G == 0): # only compute G if passed as 0
G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
for i in range(nchan * nsrc):
for j in range(i+1):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros((nchan * nsrc * flen, nchan))
for k in range(nchan * nsrc):
for i in range(nchan):
ssef = sf[k] * np.conj(sef[i])
ssef = np.real(scipy.fftpack.ifft(ssef))
D[k * flen: (k+1) * flen, i] = \
np.hstack((ssef[0], ssef[-1:-flen:-1])).transpose()
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nchan*nsrc, nchan, order='F')
except np.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nchan*nsrc, nchan,
order='F')
# Filtering
sproj = np.zeros((nchan, nsampl + flen - 1))
for k in range(nchan * nsrc):
for i in range(nchan):
sproj[i] += fftconvolve(C[:, k, i].transpose(),
reference_sources[k])[:nsampl + flen - 1]
# return G only if it was passed in
if saveg:
return sproj, G
else:
return sproj
def _bss_source_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given source in terms of
filtered true source, interference and artifacts.
"""
# energy ratios
s_filt = s_true + e_spat
sdr = _safe_db(
|
np.sum(s_filt**2)
|
numpy.sum
|
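# Note (added for clarity; not part of the original file): _bss_source_crit is
# truncated at the fill-in blank above. In the standard bss_eval criteria
# (Vincent et al., 2006) the three energy ratios are, in dB:
#     SDR = 10*log10( ||s_true + e_spat||^2 / ||e_interf + e_artif||^2 )
#     SIR = 10*log10( ||s_true + e_spat||^2 / ||e_interf||^2 )
#     SAR = 10*log10( ||s_true + e_spat + e_interf||^2 / ||e_artif||^2 )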
from scipy.fftpack import fft,ifft
from scipy.io.wavfile import read,write
from scipy.signal import get_window
import numpy as np
from pdb import set_trace
from os.path import join,split
from glob import glob
def triang_win(width,center=0.5):
win = []
cpos = center * width
for i in range(width + 1):
if i <= cpos:
win.append(1.0 / cpos * i)
else:
win.append(float(width - i) / (width - cpos))
return np.array(win)[0:width]
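# e.g. triang_win(4) -> array([0. , 0.5, 1. , 0.5]); passing center=0.25 moves the
# peak to one quarter of the window width.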
class Signal:
def __init__(self,filename=None,data=None,rate=None):
if filename is not None:
self.rate, self.data = read(filename)
self.dtype = self.data.dtype
# Duration of the signal in seconds
self.during = float(len(self.data))/self.rate
else:
self.rate = rate
self.data = data
self.dtype = self.data.dtype
self.during = float(len(self.data))/self.rate
def write(self,filename):
write(filename,self.rate,self.data)
# Cut signal. start and end are in sec
def cut(self,start,end):
self.data = self.data[int(start*self.rate):int(end*self.rate)]
def amplify(self,gain):
gain = max(1.0,gain)
self.data = np.array(self.data,dtype=np.float64)*gain
self.data = np.array(self.data,dtype=self.dtype)
def attenuate(self,gain):
gain = max(0.0,gain)
gain = min(1.0,gain)
self.data = np.array(self.data,dtype=np.float64)*gain
self.data = np.array(self.data,dtype=self.dtype)
def moving_average_filter(self,N=5):
x = self.data
N = max(2,N)
N = min(len(x),N)
y = []
cum = sum(x[0:N])
for i in range(len(x)):
y.append(cum/float(N))
cum -= x[i]
cum += x[(i+N)%len(x)]
self.data = np.array(y,x.dtype)
def get_band_energy(self,num_bands,fft_during=None):
if fft_during is None:
if len(self.data)%2==0:
band_width = (len(self.data)+2)/(num_bands+1)
else:
band_width = (len(self.data)+1)/(num_bands+1)
if len(self.data)%2==0:
spectrum = fft(self.data)[0:len(self.data)//2+1]
else:
spectrum = fft(self.data)[0:(len(self.data)-1)//2+1]
linear_step = self.rate/2/num_bands
linear_center = [0.0]+list(map(lambda i:(i+0.5)*linear_step,range(num_bands)))+[self.rate/2]
banks = []
if len(self.data)%2==0:
freq_unit = self.rate/(len(self.data)+2)
else:
freq_unit = self.rate/(len(self.data)+1)
for i in range(num_bands):
length = linear_center[i+2]-linear_center[i]
center = (linear_center[i+1]-linear_center[i])/length
win_size = int(length/freq_unit)
banks.append(triang_win(win_size,center))
energy = []
for i in range(num_bands):
start = int(linear_center[i]/freq_unit)
energy.append(sum(list(map(lambda x:np.power(np.abs(x),2),spectrum[start:start + len(banks[i])] * banks[i]))))
return np.array(energy)
else:
num_frames = int(2*self.during/fft_during-1)
fft_size = int(fft_during*self.rate)
time_step = fft_size//2
hann_win = get_window('hann',fft_size)
energy = np.zeros(num_bands)
for i in range(num_frames):
start = i*time_step
frame = self.data[start:start+fft_size]*hann_win
signal = Signal(data=frame,rate=self.rate)
energy += signal.get_band_energy(num_bands)
return energy/num_frames
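# Band layout example (added comment): with rate=16000 and num_bands=4 the linear
# step is 2000 Hz, so linear_center = [0, 1000, 3000, 5000, 7000, 8000]; band i then
# spans linear_center[i]..linear_center[i+2] with a triangular weight that peaks at
# linear_center[i+1] and overlaps its neighbouring bands.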
def noise_removal(self,noise,sensitivity=2.0):
fft_size = 256
hann_win = get_window('hann',fft_size)
band_width = 16
# the triangular window returned by scipy.signal.get_window has a non-zero start point, which is undesired here
triang_bank = triang_win(band_width)
freq_step = band_width//2
freq_supp_size = fft_size//2+1
num_bands = (freq_supp_size-band_width-1)//freq_step+1
num_bands += 2
# Get threshold for each frequency band
thresholds = np.log(noise.get_band_energy(num_bands,fft_during=fft_size/self.rate))+np.ones(num_bands)*sensitivity
# Pad the original signal to its end
time_step = fft_size//2
pad_size = time_step-len(self.data)%time_step+1
num_frames = (len(self.data)-fft_size+pad_size)//time_step+1
zeros =
|
np.zeros(pad_size,self.dtype)
|
numpy.zeros
|
import re
import cv2
import numpy as np
from .model import Model
class DetectionWithLandmarks:
def __init__(self, xmin, ymin, xmax, ymax, score, id, landmarks):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.score = score
self.id = id
self.landmarks = landmarks
class RetinaFace(Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert len(self.net.input_info) == 1, "Expected 1 input blob"
assert len(self.net.outputs) == 12 or len(self.net.outputs) == 9, "Expected 12 or 9 output blobs"
self.image_blob_name = next(iter(self.net.input_info))
self._output_layer_names = self.net.outputs
self.n, self.c, self.h, self.w = self.net.input_info[self.image_blob_name].input_data.shape
@staticmethod
def _resize_image(frame, size, keep_aspect_ratio=False):
if not keep_aspect_ratio:
resized_frame = cv2.resize(frame, size)
else:
h, w = frame.shape[:2]
scale = min(size[1] / h, size[0] / w)
resized_frame = cv2.resize(frame, None, fx=scale, fy=scale)
return resized_frame
def unify_inputs(self, inputs) -> dict:
if not isinstance(inputs, dict):
inputs_dict = {self.image_blob_name: inputs}
else:
inputs_dict = inputs
return inputs_dict
def preprocess(self, inputs):
img = self._resize_image(inputs[self.image_blob_name], (self.w, self.h))
h, w = img.shape[:2]
meta = {'original_shape': inputs[self.image_blob_name].shape,
'resized_shape': img.shape}
if h != self.h or w != self.w:
img = np.pad(img, ((0, self.h - h), (0, self.w - w), (0, 0)),
mode='constant', constant_values=0)
img = img.transpose((2, 0, 1)) # Change data layout from HWC to CHW
img = img.reshape((self.n, self.c, self.h, self.w))
inputs[self.image_blob_name] = img
return inputs, meta
def postprocess(self, outputs, meta):
postprocessor = RetinaFacePostprocessor(True)
scale_x = meta['resized_shape'][1] / meta['original_shape'][1]
scale_y = meta['resized_shape'][0] / meta['original_shape'][0]
outputs = postprocessor.process_output(outputs, scale_x, scale_y, 0.8)
return outputs
class RetinaFacePostprocessor(object):
def __init__(self, detect_masks=False):
self._detect_masks = detect_masks
_ratio = (1.,)
self._anchor_cfg = {
32: {'SCALES': (32, 16), 'BASE_SIZE': 16, 'RATIOS': _ratio},
16: {'SCALES': (8, 4), 'BASE_SIZE': 16, 'RATIOS': _ratio},
8: {'SCALES': (2, 1), 'BASE_SIZE': 16, 'RATIOS': _ratio}
}
self._features_stride_fpn = [32, 16, 8]
self._anchors_fpn = dict(zip(self._features_stride_fpn, self.generate_anchors_fpn(cfg=self._anchor_cfg)))
self._num_anchors = dict(zip(
self._features_stride_fpn, [anchors.shape[0] for anchors in self._anchors_fpn.values()]
))
self.landmark_std = 0.2 if detect_masks else 1.0
@staticmethod
def generate_anchors_fpn(cfg):
def generate_anchors(base_size=16, ratios=(0.5, 1, 2), scales=2 ** np.arange(3, 6)):
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales) for i in range(ratio_anchors.shape[0])])
return anchors
def _ratio_enum(anchor, ratios):
w, h, x_ctr, y_ctr = _generate_wh_ctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _make_anchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
w, h, x_ctr, y_ctr = _generate_wh_ctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _make_anchors(ws, hs, x_ctr, y_ctr)
return anchors
def _generate_wh_ctrs(anchor):
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _make_anchors(ws, hs, x_ctr, y_ctr):
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((
x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
))
return anchors
rpn_feat_stride = [int(k) for k in cfg]
rpn_feat_stride.sort(reverse=True)
anchors = []
for stride in rpn_feat_stride:
feature_info = cfg[stride]
bs = feature_info['BASE_SIZE']
__ratios = np.array(feature_info['RATIOS'])
__scales = np.array(feature_info['SCALES'])
anchors.append(generate_anchors(bs, __ratios, __scales))
return anchors
@staticmethod
def nms(x1, y1, x2, y2, scores, thresh):
areas = (x2 - x1) * (y2 - y1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
intersection = w * h
union = (areas[i] + areas[order[1:]] - intersection)
overlap = np.divide(intersection, union, out=np.zeros_like(intersection, dtype=float), where=union != 0)
order = order[np.where(overlap <= thresh)[0] + 1] # pylint: disable=W0143
return keep
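# Quick sanity example (added): with boxes A=(0, 0, 10, 10) score 0.9,
# B=(1, 1, 10, 10) score 0.8 and C=(20, 20, 30, 30) score 0.7 and thresh=0.5,
# B overlaps A with IoU 0.81 and is suppressed, so nms(...) keeps indices [0, 2].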
def process_output(self, raw_output, scale_x, scale_y, face_prob_threshold):
bboxes_outputs = [raw_output[name][0] for name in raw_output if re.search('.bbox.', name)]
bboxes_outputs.sort(key=lambda x: x.shape[1])
scores_outputs = [raw_output[name][0] for name in raw_output if re.search('.cls.', name)]
scores_outputs.sort(key=lambda x: x.shape[1])
landmarks_outputs = [raw_output[name][0] for name in raw_output if re.search('.landmark.', name)]
landmarks_outputs.sort(key=lambda x: x.shape[1])
if self._detect_masks:
type_scores_outputs = [raw_output[name][0] for name in raw_output if re.search('.type.', name)]
type_scores_outputs.sort(key=lambda x: x.shape[1])
proposals_list = []
scores_list = []
landmarks_list = []
mask_scores_list = []
for idx, s in enumerate(self._features_stride_fpn):
anchor_num = self._num_anchors[s]
scores = self._get_scores(scores_outputs[idx], anchor_num)
bbox_deltas = bboxes_outputs[idx]
height, width = bbox_deltas.shape[1], bbox_deltas.shape[2]
anchors_fpn = self._anchors_fpn[s]
anchors = self.anchors_plane(height, width, int(s), anchors_fpn)
anchors = anchors.reshape((height * width * anchor_num, 4))
proposals = self._get_proposals(bbox_deltas, anchor_num, anchors)
threshold_mask = scores >= face_prob_threshold
proposals, scores = proposals[threshold_mask, :], scores[threshold_mask]
if scores.size != 0:
x_mins, y_mins, x_maxs, y_maxs = proposals.T
keep = self.nms(x_mins, y_mins, x_maxs, y_maxs, scores, 0.5)
proposals_list.extend(proposals[keep])
scores_list.extend(scores[keep])
landmarks = self._get_landmarks(landmarks_outputs[idx], anchor_num, anchors)[threshold_mask, :]
landmarks_list.extend(landmarks[keep, :])
if self._detect_masks:
mask_scores_list.extend(self._get_mask_scores(type_scores_outputs[idx],
anchor_num)[threshold_mask][keep])
detections = []
landmarks_regression = []
result = []
if len(scores_list) != 0:
scores = np.reshape(scores_list, -1)
mask_scores_list = np.reshape(mask_scores_list, -1)
x_mins, y_mins, x_maxs, y_maxs = np.array(proposals_list).T # pylint: disable=E0633
x_mins /= scale_x
x_maxs /= scale_x
y_mins /= scale_y
y_maxs /= scale_y
detections = np.array([x_mins, y_mins, x_maxs, y_maxs, scores])  # coordinates were already rescaled above
landmarks_x_coords = np.array(landmarks_list)[:, :, ::2].reshape(len(landmarks_list), -1) / scale_x
landmarks_y_coords = np.array(landmarks_list)[:, :, 1::2].reshape(len(landmarks_list), -1) / scale_y
landmarks_regression = [landmarks_x_coords, landmarks_y_coords]
result = []
for i in range(len(scores_list)):
result.append(DetectionWithLandmarks(x_mins[i], y_mins[i], x_maxs[i], y_maxs[i], scores[i], 0,
[landmarks_x_coords[i], landmarks_y_coords[i]]))
return result
def _get_proposals(self, bbox_deltas, anchor_num, anchors):
bbox_deltas = bbox_deltas.transpose((1, 2, 0))
bbox_pred_len = bbox_deltas.shape[2] // anchor_num
bbox_deltas = bbox_deltas.reshape((-1, bbox_pred_len))
proposals = self.bbox_pred(anchors, bbox_deltas)
return proposals
@staticmethod
def _get_scores(scores, anchor_num):
scores = scores[anchor_num:, :, :]
scores = scores.transpose((1, 2, 0)).reshape(-1)
return scores
@staticmethod
def _get_mask_scores(type_scores, anchor_num):
mask_scores = type_scores[anchor_num * 2:, :, :]
mask_scores = mask_scores.transpose((1, 2, 0)).reshape(-1)
return mask_scores
def _get_landmarks(self, landmark_deltas, anchor_num, anchors):
landmark_pred_len = landmark_deltas.shape[0] // anchor_num
landmark_deltas = landmark_deltas.transpose((1, 2, 0)).reshape((-1, 5, landmark_pred_len // 5))
landmark_deltas *= self.landmark_std
landmarks = self.landmark_pred(anchors, landmark_deltas)
return landmarks
@staticmethod
def bbox_pred(boxes, box_deltas):
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float64, copy=False)  # np.float alias was removed from NumPy
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
dx = box_deltas[:, 0:1]
dy = box_deltas[:, 1:2]
dw = box_deltas[:, 2:3]
dh = box_deltas[:, 3:4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes =
|
np.zeros(box_deltas.shape)
|
numpy.zeros
|
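# Note (added; not from the original file): bbox_pred above is truncated at the
# fill-in blank. The usual completion of this delta decoding converts the predicted
# centre/size back to corner coordinates, e.g.
#     pred_boxes[:, 0:1] = pred_ctr_x - 0.5 * (pred_w - 1.0)
#     pred_boxes[:, 1:2] = pred_ctr_y - 0.5 * (pred_h - 1.0)
#     pred_boxes[:, 2:3] = pred_ctr_x + 0.5 * (pred_w - 1.0)
#     pred_boxes[:, 3:4] = pred_ctr_y + 0.5 * (pred_h - 1.0)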
import os, sys
import numpy as np
from typing import Union
from pyproj import CRS
from osgeo import gdal
from HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal, qgis_enabled, found_path
if not qgis_enabled:
raise EnvironmentError('Unable to find qgis directory in {}'.format(found_path))
from HSTB.kluster.gui.backends._qt import qgis_core, qgis_gui
from HSTB.kluster import __file__ as klusterdir
from HSTB.kluster.gdal_helpers import gdal_raster_create, VectorLayer, gdal_output_file_exists, ogr_output_file_exists
from HSTB.kluster import kluster_variables
acceptedlayernames = ['hillshade', 'depth', 'density', 'vertical_uncertainty', 'horizontal_uncertainty']
invert_colormap_layernames = ['vertical_uncertainty', 'horizontal_uncertainty']
class DistanceTool(qgis_gui.QgsMapTool):
"""
Render a green line and report the distance from the start point to the end point using the WGS84 ellipsoid
curvature. Each click resets the map tool. Distance is given in meters (if the tool finds that a different unit
is being provided, it raises an exception, as that might be indicative of an issue with the ellipsoid setting).
"""
def __init__(self, canvas):
self.canvas = canvas
qgis_gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = qgis_gui.QgsRubberBand(self.canvas, True)
self.rubberBand.setColor(QtCore.Qt.darkGreen)
self.rubberBand.setFillColor(QtCore.Qt.transparent)
self.rubberBand.setWidth(4)
self.start_point = None
self.end_point = None
self.reset()
def reset(self):
"""
Clear the line
"""
self.start_point = None
self.end_point = None
self.rubberBand.reset(qgis_core.QgsWkbTypes.LineGeometry)
def canvasPressEvent(self, e):
"""
Start a new line
"""
self.start_point = self.toMapCoordinates(e.pos())
self.end_point = self.start_point
self.showLine(self.start_point, self.end_point)
def canvasReleaseEvent(self, e):
"""
Finish the line on releasing the mouse. If the start and end point are the same, it just resets. Otherwise
prints the distance in meters.
"""
l = self.line()
if l is not None:
distance = qgis_core.QgsDistanceArea()
distance.setEllipsoid('WGS84')
m = distance.measureLine(self.start_point, self.end_point)
units_enum = distance.lengthUnits()
if units_enum != 0:
raise ValueError('Something wrong with the distance units, got {} instead of 0=meters'.format(units_enum))
print('******************************************************')
print('Distance of {} meters'.format(round(m, 3)))
print('******************************************************')
self.start_point = None
else:
self.reset()
def canvasMoveEvent(self, e):
"""
Mouse movement resets and shows the new line where the end point is the current mouse position
"""
if self.start_point is None:
return
self.end_point = self.toMapCoordinates(e.pos())
self.showLine(self.start_point, self.end_point)
def showLine(self, start_point: qgis_core.QgsPoint, end_point: qgis_core.QgsPoint):
"""
Show the rubberband object from the provided start point to the end point.
Parameters
----------
start_point
QgsPoint for the start of the line
end_point
QgsPoint for the end of the line
"""
self.rubberBand.reset(qgis_core.QgsWkbTypes.LineGeometry)
if start_point.x() == end_point.x() or start_point.y() == end_point.y():
return
point1 = qgis_core.QgsPointXY(start_point.x(), start_point.y())
point2 = qgis_core.QgsPointXY(end_point.x(), end_point.y())
self.rubberBand.addPoint(point1, False)
self.rubberBand.addPoint(point2, True)
self.rubberBand.show()
def line(self):
"""
Return the linestring if the start and end points are valid
"""
if self.start_point is None or self.end_point is None:
return None
elif self.start_point.x() == self.end_point.x() or self.start_point.y() == self.end_point.y():
return None
return qgis_core.QgsLineString(self.start_point, self.end_point)
def deactivate(self):
"""
Turn the tool off, make sure to clear the rubberband as well
"""
self.reset()
qgis_gui.QgsMapTool.deactivate(self)
self.deactivated.emit()
class QueryTool(qgis_gui.QgsMapTool):
"""
Get the value for all raster layers loaded at the mouse position. We filter out vector layers and any loaded
WMS background layers. Should just get surface layers
"""
def __init__(self, parent):
self.parent = parent
qgis_gui.QgsMapTool.__init__(self, self.parent.canvas)
def canvasPressEvent(self, e):
"""
On press we print out the tooltip text to the stdout
"""
text = self._get_cursor_data(e)
print('******************************************************')
print(text)
print('******************************************************')
def canvasMoveEvent(self, e):
"""
On moving the mouse, we get the new raster information at mouse position and show a new tooltip
"""
text = self._get_cursor_data(e)
QtWidgets.QToolTip.showText(self.parent.canvas.mapToGlobal(self.parent.canvas.mouseLastXY()), text,
self.parent.canvas, QtCore.QRect(), 1000000)
def deactivate(self):
"""
Deactivate the tool
"""
qgis_gui.QgsMapTool.deactivate(self)
self.deactivated.emit()
def _get_cursor_data(self, e):
"""
Get the mouse position, transform it to the map coordinates, build the text that feeds the tooltip and the
print on mouseclick event. Only query non-WMS raster layers. WMS is background stuff, we don't care about those
values. If the raster layer is a virtual file system object (vsimem) we trim that part of the path off for display.
"""
x = e.pos().x()
y = e.pos().y()
point = self.parent.canvas.getCoordinateTransform().toMapCoordinates(x, y)
text = 'Latitude: {}, Longitude: {}'.format(round(point.y(), 7), round(point.x(), 7))
for name, layer in self.parent.project.mapLayers().items():
if layer.type() == qgis_core.QgsMapLayerType.RasterLayer:
# if 'hillshade' in layer.name():
# continue
if layer.dataProvider().name() != 'wms':
if layer.name() in self.parent.layer_manager.shown_layer_names:
try:
layer_point = self.parent.map_point_to_layer_point(layer, point)
ident = layer.dataProvider().identify(layer_point, qgis_core.QgsRaster.IdentifyFormatValue)
if ident:
lname = layer.name()
if lname[0:8] == '/vsimem/':
lname = lname[8:]
bands_under_cursor = ident.results()
band_exists = False
for ky, val in bands_under_cursor.items():
band_name, band_value = layer.bandName(ky), round(val, 3)
if not band_exists and band_name:
text += '\n\n{}'.format(lname)
band_exists = True  # only prepend the layer name once, before its first band
text += '\n{}: {}'.format(band_name, band_value)
except: # point is outside of the transform
pass
return text
class SelectTool(qgis_gui.QgsMapToolEmitPoint):
"""
Allow the user to drag select a box and this tool will emit the corner coordinates using the select Signal. We use
this in Kluster to select lines.
"""
# minlat, maxlat, minlon, maxlon in Map coordinates (WGS84 for Kluster)
select = Signal(float, float, float, float)
def __init__(self, canvas):
self.canvas = canvas
qgis_gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = qgis_gui.QgsRubberBand(self.canvas, True)
self.rubberBand.setColor(QtCore.Qt.transparent)
self.rubberBand.setFillColor(QtGui.QColor(0, 0, 255, 50))
self.start_point = None
self.end_point = None
self.reset()
def reset(self):
"""
Clear the rubberband obj and points
"""
self.start_point = None
self.end_point = None
self.isEmittingPoint = False
self.rubberBand.reset(qgis_core.QgsWkbTypes.PolygonGeometry)
def canvasPressEvent(self, e):
"""
Set the start position of the rectangle on click
"""
self.start_point = self.toMapCoordinates(e.pos())
self.end_point = self.start_point
self.isEmittingPoint = True
self.showRect(self.start_point, self.end_point)
def canvasReleaseEvent(self, e):
"""
Finish the rectangle and emit the corner coordinates in map coordinate system
"""
self.isEmittingPoint = False
r = self.rectangle()
if r is not None:
self.select.emit(r.yMinimum(), r.yMaximum(), r.xMinimum(), r.xMaximum())
self.reset()
def canvasMoveEvent(self, e):
"""
On move update the rectangle
"""
if not self.isEmittingPoint:
return
self.end_point = self.toMapCoordinates(e.pos())
self.showRect(self.start_point, self.end_point)
def showRect(self, start_point: qgis_core.QgsPoint, end_point: qgis_core.QgsPoint):
"""
Show the rubberband object from the provided start point to the end point. Clear out any existing rect.
Parameters
----------
start_point
QgsPoint for the start of the rect
end_point
QgsPoint for the end of the rect
"""
self.rubberBand.reset(qgis_core.QgsWkbTypes.PolygonGeometry)
if start_point.x() == end_point.x() or start_point.y() == end_point.y():
return
point1 = qgis_core.QgsPointXY(start_point.x(), start_point.y())
point2 = qgis_core.QgsPointXY(start_point.x(), end_point.y())
point3 = qgis_core.QgsPointXY(end_point.x(), end_point.y())
point4 = qgis_core.QgsPointXY(end_point.x(), start_point.y())
self.rubberBand.addPoint(point1, False)
self.rubberBand.addPoint(point2, False)
self.rubberBand.addPoint(point3, False)
self.rubberBand.addPoint(point4, True) # true to update canvas
self.rubberBand.show()
def rectangle(self):
"""
Return the QgsRectangle object for the drawn start/end points
"""
if self.start_point is None or self.end_point is None:
return None
elif self.start_point.x() == self.end_point.x() or self.start_point.y() == self.end_point.y():
return None
return qgis_core.QgsRectangle(self.start_point, self.end_point)
def deactivate(self):
"""
Turn off the tool
"""
qgis_gui.QgsMapTool.deactivate(self)
self.deactivated.emit()
class RectangleMapTool(qgis_gui.QgsMapToolEmitPoint):
"""
Draw a persistent black rectangle on the screen and emit the coordinates for the rect in map coordinate system.
"""
# minlat, maxlat, minlon, maxlon in Map coordinates (WGS84 for Kluster)
select = Signal(object, float)
clear_box = Signal(bool)
def __init__(self, canvas, show_direction: bool = True):
self.base_color = QtCore.Qt.black
self.canvas = canvas
qgis_gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = qgis_gui.QgsRubberBand(self.canvas, True)
self.rubberBand.setColor(self.base_color)
self.rubberBand.setFillColor(QtCore.Qt.transparent)
self.rubberBand.setWidth(3)
if show_direction:
self.direction_arrow = qgis_gui.QgsRubberBand(self.canvas, False)
self.direction_arrow.setColor(self.base_color)
self.direction_arrow.setWidth(4)
else:
self.direction_arrow = None
self.isEmittingPoint = False
self.enable_rotation = False
self.start_point = None
self.end_point = None
self.final_start_point = None
self.final_end_point = None
self.start_azimuth = 0
self.azimuth = 0
self.first_click = True
self.second_click = False
self.reset()
def reset(self):
"""
Clear the rectangle
"""
self.rubberBand.setColor(self.base_color)
self.rubberBand.setFillColor(QtCore.Qt.transparent)
if self.direction_arrow:
self.direction_arrow.setColor(self.base_color)
self.start_point = None
self.end_point = None
self.final_start_point = None
self.final_end_point = None
self.azimuth = 0
self.start_azimuth = 0
self.first_click = False
self.second_click = False
self.isEmittingPoint = False
self.enable_rotation = False
self.rubberBand.reset(qgis_core.QgsWkbTypes.PolygonGeometry)
if self.direction_arrow:
self.direction_arrow.reset(qgis_core.QgsWkbTypes.LineGeometry)
self.clear_box.emit(True)
def keyPressEvent(self, e):
        ctrl_pressed = e.key() == 16777249  # 16777249 == QtCore.Qt.Key_Control
if ctrl_pressed:
self.enable_rotation = True
def keyReleaseEvent(self, e):
        ctrl_released = e.key() == 16777249  # Ctrl key released
if ctrl_released:
self.enable_rotation = False
def return_azimuth(self, start_x, start_y, end_x, end_y):
"""
build a new azimuth in radians from the given start/end points
"""
centerx = end_x - start_x
centery = end_y - start_y
az = np.arctan2(centerx, centery)
return az
def canvasPressEvent(self, e):
"""
Lay down the start point of the rectangle and reset the end point to the start point.
"""
left_click = e.button() == 1
right_click = e.button() == 2
if left_click: # first click sets the origin of the rectangle
if not self.first_click and not self.second_click:
self.reset()
self.first_click = True
self.second_click = False
self.start_point = self.toMapCoordinates(e.pos())
self.end_point = self.start_point
self.isEmittingPoint = True
self.showRect(self.start_point, self.end_point)
elif self.first_click: # second click sets the end point and fixes the rectangle in place
self.final_start_point = self.toCanvasCoordinates(self.start_point)
self.final_end_point = e.pos()
self.first_click = False
self.second_click = True
self.isEmittingPoint = False
elif self.second_click: # third click loads
self.first_click = False
self.second_click = False
self.rubberBand.setColor(QtCore.Qt.darkYellow)
self.rubberBand.setFillColor(QtCore.Qt.transparent)
self.rubberBand.update()
if self.direction_arrow:
self.direction_arrow.setColor(QtCore.Qt.darkYellow)
self.direction_arrow.update()
poly, az = self.rectangle()
if poly is not None:
self.select.emit(poly, az)
if right_click: # clear the rectangle
self.reset()
def canvasMoveEvent(self, e):
"""
On moving the mouse cursor, the rectangle continuously updates
"""
if (not self.isEmittingPoint and not self.enable_rotation) or (not self.first_click and not self.second_click):
return
self.end_point = self.toMapCoordinates(e.pos())
self.showRect(self.start_point, self.end_point)
e.accept()
def showRect(self, start_point: qgis_core.QgsPoint, end_point: qgis_core.QgsPoint):
"""
Show the rubberband object from the provided start point to the end point. Clear out any existing rect.
Parameters
----------
start_point
QgsPoint for the start of the rect
end_point
QgsPoint for the end of the rect
"""
if start_point.x() == end_point.x() or start_point.y() == end_point.y():
return
point1 = qgis_core.QgsPointXY(start_point.x(), start_point.y())
point2 = qgis_core.QgsPointXY(start_point.x(), end_point.y())
point3 = qgis_core.QgsPointXY(end_point.x(), end_point.y())
point4 = qgis_core.QgsPointXY(end_point.x(), start_point.y())
if self.enable_rotation and self.second_click:
point1 = qgis_core.QgsPointXY(self.final_start_point.x(), self.final_start_point.y())
point2 = qgis_core.QgsPointXY(self.final_start_point.x(), self.final_end_point.y())
point3 = qgis_core.QgsPointXY(self.final_end_point.x(), self.final_end_point.y())
point4 = qgis_core.QgsPointXY(self.final_end_point.x(), self.final_start_point.y())
center_pixel = qgis_core.QgsPointXY(((point3.x() - point1.x()) / 2) + point1.x(),
((point3.y() - point1.y()) / 2) + point1.y())
arryposition = point1.y() + (point2.y() - point1.y()) / 2
arrpoint1 = qgis_core.QgsPointXY(int(point2.x()), int(arryposition))
arrpoint2 = qgis_core.QgsPointXY(int(point2.x() - 15), int(arryposition))
arrpoint3 = qgis_core.QgsPointXY(int(point2.x() - 10), int(arryposition - 5))
arrpoint4 = qgis_core.QgsPointXY(int(point2.x() - 10), int(arryposition + 5))
arrpoint5 = qgis_core.QgsPointXY(int(point2.x() - 15), int(arryposition))
az = self.return_azimuth(start_point.x(), start_point.y(), end_point.x(), end_point.y())
if not self.start_azimuth:
self.start_azimuth = az
self.azimuth = az - self.start_azimuth
cos_az =
|
np.cos(self.azimuth)
|
numpy.cos
|
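A minimal, standalone sketch of the trigonometry the completed np.cos call feeds: rotating a corner point about the rectangle's center. The helper rotate_about and its arguments are illustrative only and are not part of the tool above.

import numpy as np

def rotate_about(px, py, cx, cy, azimuth):
    """Rotate point (px, py) about center (cx, cy) by `azimuth` radians."""
    cos_az = np.cos(azimuth)
    sin_az = np.sin(azimuth)
    dx, dy = px - cx, py - cy
    return (cx + dx * cos_az - dy * sin_az,
            cy + dx * sin_az + dy * cos_az)

# Rotating (1, 0) about the origin by 90 degrees gives approximately (0, 1).
print(rotate_about(1.0, 0.0, 0.0, 0.0, np.pi / 2))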
import copy
from math import atan2, cos, hypot, radians, sin
import numpy
class Vector(object):
"""
Vector data for a :class:`FlowSolution`, also the base for
:class:`GridCoordinates`.
In Cartesian coordinates, array indexing order is x,y,z;
so an 'i-face' is a y,z surface.
In cylindrical coordinates, array indexing order is z,r,t;
so an 'i-face' is an r,t surface.
"""
def __init__(self):
self.x = None
self.y = None
self.z = None
self.r = None
self.t = None
self._ghosts = (0, 0, 0, 0, 0, 0)
def _get_ghosts(self):
return self._ghosts
def _set_ghosts(self, ghosts):
if len(ghosts) < 2*len(self.shape):
raise ValueError('ghosts must be a %d-element array'
% (2*len(self.shape)))
for i in ghosts:
if i < 0:
raise ValueError('All ghost values must be >= 0')
self._ghosts = ghosts
ghosts = property(_get_ghosts, _set_ghosts,
doc='Number of ghost/rind planes for each index direction.')
@property
def shape(self):
""" Data index limits, not including 'ghost/rind' planes. """
ijk = self.real_shape
if len(ijk) < 1:
return ()
ghosts = self._ghosts
imax = ijk[0] - (ghosts[0] + ghosts[1])
if len(ijk) < 2:
return (imax,)
jmax = ijk[1] - (ghosts[2] + ghosts[3])
if len(ijk) < 3:
return (imax, jmax)
kmax = ijk[2] - (ghosts[4] + ghosts[5])
return (imax, jmax, kmax)
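    # Illustrative note (not part of the original class): with real_shape
    # (12, 10, 8) and ghosts = (1, 1, 2, 2, 0, 0), shape strips the ghost/rind
    # planes from both ends of each index direction and returns
    # (12 - (1 + 1), 10 - (2 + 2), 8 - (0 + 0)) == (10, 6, 8).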
@property
def real_shape(self):
""" Data index limits, including any 'ghost/rind' planes. """
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
return arr.shape
return ()
def is_equivalent(self, other, name, logger, tolerance=0.):
"""
Test if self and `other` are equivalent.
other: :class:`Vector`
The vector to check against.
name: string
Name of this vector, used for reporting differences.
logger: :class:`Logger` or None
Used to log debug messages that will indicate what if anything is
not equivalent.
tolerance: float
The maximum relative difference in array values to be considered
equivalent.
"""
if not isinstance(other, Vector):
logger.debug('other is not a Vector object.')
return False
for component in ('x', 'y', 'z', 'r', 't'):
if not self._check_equivalent(other, name, component, logger,
tolerance):
return False
if other.ghosts != self.ghosts:
logger.debug('ghost cell counts are not equal: %s vs. %s.',
other.ghosts, self.ghosts)
return False
return True
def _check_equivalent(self, other, name, component, logger, tolerance):
""" Check equivalence to a component array. """
arr = getattr(self, component)
other_arr = getattr(other, component)
if arr is None:
if other_arr is not None:
logger.debug("%s has no %s component but 'other' does.", name,
component.upper())
return False
else:
if tolerance > 0.:
if not numpy.allclose(other_arr, arr, tolerance, tolerance):
logger.debug("%s %s values are not 'close'.", name,
component.upper())
return False
else:
try:
if (other_arr != arr).any():
logger.debug('%s %s values are not equal.', name,
component.upper())
return False
except Exception as exc:
logger.debug('%s %s: %r vs. %r: %s', name, component.upper(),
other_arr, arr, exc)
logger.debug('!=: %r', other_arr != arr)
return False
return True
def extract(self, imin, imax, jmin=None, jmax=None, kmin=None, kmax=None,
ghosts=None):
"""
Construct a new :class:`Vector` from data extracted from the
specified region.
imin, imax, jmin, jmax, kmin, kmax: int
Specifies the region to extract.
Negative values are relative to the size in that dimension,
so -1 refers to the last element. For 2D zones omit kmin and kmax.
For 1D zones omit jmin, jmax, kmin, and kmax.
ghosts: int[]
            Number of ghost/rind planes for the new zone.
If ``None`` the existing specification is used.
"""
ghosts = ghosts or self._ghosts
i = len(self.shape)
if i == 3:
if kmin is None or kmax is None or jmin is None or jmax is None:
raise ValueError('3D extract requires jmin, jmax, kmin, and kmax')
return self._extract_3d(imin, imax, jmin, jmax, kmin, kmax, ghosts)
elif i == 2:
if kmin is not None or kmax is not None:
raise ValueError('2D extract undefined for kmin or kmax')
if jmin is None or jmax is None:
raise ValueError('2D extract requires jmin and jmax')
return self._extract_2d(imin, imax, jmin, jmax, ghosts)
elif i == 1:
if kmin is not None or kmax is not None:
raise ValueError('1D extract undefined for jmin, jmax, kmin, or kmax')
return self._extract_1d(imin, imax, ghosts)
else:
raise RuntimeError('Vector is empty!')
def _extract_3d(self, imin, imax, jmin, jmax, kmin, kmax, new_ghosts):
""" 3D (index space) extraction. """
ghosts = self._ghosts
# Support end-relative indexing and adjust for existing ghost planes.
vec_imax, vec_jmax, vec_kmax = self.shape
if imin < 0:
imin += vec_imax
imin += ghosts[0]
if imax < 0:
imax += vec_imax
imax += ghosts[0]
if jmin < 0:
jmin += vec_jmax
jmin += ghosts[2]
if jmax < 0:
jmax += vec_jmax
jmax += ghosts[2]
if kmin < 0:
kmin += vec_kmax
kmin += ghosts[4]
if kmax < 0:
kmax += vec_kmax
kmax += ghosts[4]
# Adjust for new ghost/rind planes.
imin -= new_ghosts[0]
imax += new_ghosts[1]
jmin -= new_ghosts[2]
jmax += new_ghosts[3]
kmin -= new_ghosts[4]
kmax += new_ghosts[5]
# Check limits.
if imin < 0 or imax > vec_imax+ghosts[1] or \
jmin < 0 or jmax > vec_jmax+ghosts[3] or \
kmin < 0 or kmax > vec_kmax+ghosts[5]:
region = (imin, imax, jmin, jmax, kmin, kmax)
original = (0, vec_imax+ghosts[1], 0, vec_jmax+ghosts[3],
0, vec_kmax+ghosts[5])
raise ValueError('Extraction region %s exceeds original %s'
% (region, original))
# Extract.
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
setattr(vec, component,
arr[imin:imax+1, jmin:jmax+1, kmin:kmax+1])
return vec
def _extract_2d(self, imin, imax, jmin, jmax, new_ghosts):
""" 2D (index space) extraction. """
ghosts = self._ghosts
# Support end-relative indexing and adjust for existing ghost planes.
vec_imax, vec_jmax = self.shape
if imin < 0:
imin += vec_imax
imin += ghosts[0]
if imax < 0:
imax += vec_imax
imax += ghosts[0]
if jmin < 0:
jmin += vec_jmax
jmin += ghosts[2]
if jmax < 0:
jmax += vec_jmax
jmax += ghosts[2]
# Check limits.
if imin < 0 or imax > vec_imax+ghosts[1] or \
jmin < 0 or jmax > vec_jmax+ghosts[3]:
region = (imin, imax, jmin, jmax)
original = (0, vec_imax+ghosts[1], 0, vec_jmax+ghosts[3])
raise ValueError('Extraction region %s exceeds original %s'
% (region, original))
# Extract.
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
setattr(vec, component,
arr[imin:imax+1, jmin:jmax+1])
return vec
def _extract_1d(self, imin, imax, new_ghosts):
""" 1D (index space) extraction. """
ghosts = self._ghosts
# Support end-relative indexing and adjust for existing ghost planes.
vec_imax, = self.shape
if imin < 0:
imin += vec_imax
imin += ghosts[0]
if imax < 0:
imax += vec_imax
imax += ghosts[0]
# Check limits.
if imin < 0 or imax > vec_imax+ghosts[1]:
region = (imin, imax)
original = (0, vec_imax+ghosts[1])
raise ValueError('Extraction region %s exceeds original %s'
% (region, original))
# Extract.
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
setattr(vec, component, arr[imin:imax+1])
return vec
def extend(self, axis, delta, npoints):
"""
Construct a new :class:`Vector` by replication.
axis: 'i', 'j', or 'k'
Index axis to extend.
delta: float.
Direction. A negative value adds points before the current
zero-index of `axis`.
npoints: int > 0
Number of points to add in `axis` dimension.
"""
if not delta:
raise ValueError('delta must be non-zero')
if npoints < 1:
raise ValueError('npoints must be >= 1')
i = len(self.shape)
if i == 3:
if axis not in ('i', 'j', 'k'):
raise ValueError('axis must be i, j, or k')
return self._extend_3d(axis, delta, npoints)
elif i == 2:
if axis not in ('i', 'j'):
raise ValueError('axis must be i or j')
return self._extend_2d(axis, delta, npoints)
elif i == 1:
if axis != 'i':
raise ValueError('axis must be i')
return self._extend_1d(delta, npoints)
else:
raise RuntimeError('Vector is empty!')
def _extend_3d(self, axis, delta, npoints):
""" 3D (index space) extension. """
imax, jmax, kmax = self.real_shape
if axis == 'i':
new_shape = (imax + npoints, jmax, kmax)
indx = imax if delta > 0 else npoints
elif axis == 'j':
new_shape = (imax, jmax + npoints, kmax)
indx = jmax if delta > 0 else npoints
else:
new_shape = (imax, jmax, kmax + npoints)
indx = kmax if delta > 0 else npoints
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
new_arr = numpy.zeros(new_shape)
if axis == 'i':
if delta > 0:
new_arr[0:indx, :, :] = arr
for i in range(npoints):
new_arr[indx+i, :, :] = arr[-1, :, :]
else:
new_arr[indx:, :, :] = arr
for i in range(npoints):
new_arr[i, :, :] = arr[0, :, :]
elif axis == 'j':
if delta > 0:
new_arr[:, 0:indx, :] = arr
for j in range(npoints):
new_arr[:, indx+j, :] = arr[:, -1, :]
else:
new_arr[:, indx:, :] = arr
for j in range(npoints):
new_arr[:, j, :] = arr[:, 0, :]
else:
if delta > 0:
new_arr[:, :, 0:indx] = arr
for k in range(npoints):
new_arr[:, :, indx+k] = arr[:, :, -1]
else:
new_arr[:, :, indx:] = arr
for k in range(npoints):
new_arr[:, :, k] = arr[:, :, 0]
setattr(vec, component, new_arr)
vec.ghosts = copy.copy(self._ghosts)
return vec
def _extend_2d(self, axis, delta, npoints):
""" 2D (index space) extension. """
imax, jmax = self.real_shape
if axis == 'i':
new_shape = (imax + npoints, jmax)
indx = imax if delta > 0 else npoints
else:
new_shape = (imax, jmax + npoints)
indx = jmax if delta > 0 else npoints
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
new_arr = numpy.zeros(new_shape)
if axis == 'i':
if delta > 0:
new_arr[0:indx, :] = arr
for i in range(npoints):
new_arr[indx+i, :] = arr[-1, :]
else:
new_arr[indx:, :] = arr
for i in range(npoints):
new_arr[i, :] = arr[0, :]
else:
if delta > 0:
new_arr[:, 0:indx] = arr
for j in range(npoints):
new_arr[:, indx+j] = arr[:, -1]
else:
new_arr[:, indx:] = arr
for j in range(npoints):
new_arr[:, j] = arr[:, 0]
setattr(vec, component, new_arr)
vec.ghosts = copy.copy(self._ghosts)
return vec
def _extend_1d(self, delta, npoints):
""" 1D (index space) extension. """
imax, = self.real_shape
new_shape = (imax + npoints,)
indx = imax if delta > 0 else npoints
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
new_arr = numpy.zeros(new_shape)
if delta > 0:
new_arr[0:indx] = arr
for i in range(npoints):
new_arr[indx+i] = arr[-1]
else:
new_arr[indx:] = arr
for i in range(npoints):
new_arr[i] = arr[0]
setattr(vec, component, new_arr)
vec.ghosts = copy.copy(self._ghosts)
return vec
def flip_z(self):
""" Convert to other-handed coordinate system. """
if self.z is None:
raise AttributeError('flip_z: no Z component')
self.z *= -1.
def make_cartesian(self, grid, axis='z'):
"""
Convert to Cartesian coordinate system.
grid: :class:`GridCoordinates`
Must be in cylindrical form.
axis: string
Specifies which is the cylinder axis ('z' or 'x').
"""
if grid.shape != self.shape:
raise NotImplementedError('make_cartesian: grid shape mismatch'
' not supported')
gt_flat = grid.t.flat
r_flat = self.r.flat
t_flat = self.t.flat
if axis == 'z' or self.z is None:
self.x = self.r.copy()
self.y = self.r.copy()
x_flat = self.x.flat
y_flat = self.y.flat
for i in range(self.r.size):
gt = gt_flat[i]
sine = sin(gt)
cosine = cos(gt)
r = r_flat[i]
t = t_flat[i]
x_flat[i] = r*cosine - t*sine
y_flat[i] = r*sine + t*cosine
self.r = None
self.t = None
elif axis == 'x':
self.x = self.z
self.y = self.r.copy()
self.z = self.r.copy()
y_flat = self.y.flat
z_flat = self.z.flat
for i in range(self.r.size):
gt = gt_flat[i]
sine = sin(gt)
cosine = cos(gt)
r = r_flat[i]
t = t_flat[i]
y_flat[i] = r*cosine - t*sine
z_flat[i] = r*sine + t*cosine
self.r = None
self.t = None
else:
raise ValueError("axis must be 'z' or 'x'")
def make_cylindrical(self, grid, axis='z'):
"""
Convert to cylindrical coordinate system.
grid: :class:`GridCoordinates`
Must be in cylindrical form.
axis: string
Specifies which is the cylinder axis ('z' or 'x').
"""
if grid.shape != self.shape:
raise NotImplementedError('make_cylindrical: grid shape mismatch'
' not supported')
gt_flat = grid.t.flat
self.r = self.x.copy()
self.t = self.x.copy()
r_flat = self.r.flat
t_flat = self.t.flat
if axis == 'z' or self.z is None:
x_flat = self.x.flat
y_flat = self.y.flat
for i in range(self.x.size):
gt = gt_flat[i]
x = x_flat[i]
y = y_flat[i]
magnitude = hypot(x, y)
rel_theta = atan2(y, x) - gt
r_flat[i] = magnitude * cos(rel_theta)
t_flat[i] = magnitude * sin(rel_theta)
self.x = None
self.y = None
elif axis == 'x':
y_flat = self.y.flat
z_flat = self.z.flat
for i in range(self.y.size):
gt = gt_flat[i]
y = y_flat[i]
z = z_flat[i]
magnitude = hypot(y, z)
rel_theta = atan2(z, y) - gt
r_flat[i] = magnitude * cos(rel_theta)
t_flat[i] = magnitude * sin(rel_theta)
self.z = self.x
self.x = None
self.y = None
else:
raise ValueError("axis must be 'z' or 'x'")
def rotate_about_x(self, deg):
"""
Rotate about the X axis.
deg: float (degrees)
Amount of rotation.
"""
if self.y is None:
raise AttributeError('rotate_about_x: no Y component')
if self.z is None:
raise AttributeError('rotate_about_x: no Z component')
sine = sin(radians(deg))
cosine = cos(radians(deg))
y_new = self.y*cosine - self.z*sine
self.z = self.z*cosine + self.y*sine
self.y = y_new
def rotate_about_y(self, deg):
"""
Rotate about the Y axis.
deg: float (degrees)
Amount of rotation.
"""
if self.x is None:
raise AttributeError('rotate_about_y: no X component')
if self.z is None:
raise AttributeError('rotate_about_y: no Z component')
sine = sin(radians(deg))
cosine = cos(radians(deg))
x_new = self.x*cosine - self.z*sine
self.z = self.z*cosine + self.x*sine
self.x = x_new
def rotate_about_z(self, deg):
"""
Rotate about the Z axis.
deg: float (degrees)
Amount of rotation.
"""
if self.x is None:
raise AttributeError('rotate_about_z: no X component')
if self.y is None:
raise AttributeError('rotate_about_z: no Y component')
sine = sin(radians(deg))
cosine = cos(radians(deg))
x_new = self.x*cosine - self.y*sine
self.y = self.y*cosine + self.x*sine
self.x = x_new
def promote(self):
""" Promote from N-dimensional to N+1 dimensional index space. """
shape = self.real_shape
if len(shape) > 2:
raise RuntimeError('Vector is 3D')
elif len(shape) > 1:
imax, jmax = shape
if self.x is not None: # x,y -> x,y,z
new_arr = numpy.zeros((imax, jmax, 1))
new_arr[:, :, 0] = self.x[:, :]
self.x = new_arr
new_arr = numpy.zeros((imax, jmax, 1))
new_arr[:, :, 0] = self.y[:, :]
self.y = new_arr
if self.z is not None:
                    new_arr = numpy.zeros((imax, jmax, 1))
new_arr[:, :, 0] = self.z[:, :]
self.z = new_arr
else:
self.z = numpy.zeros((imax, jmax, 1))
else: # r,t -> z,r,t (note index order change!)
new_arr = numpy.zeros((1, imax, jmax))
new_arr[0, :, :] = self.r[:, :]
self.r = new_arr
new_arr = numpy.zeros((1, imax, jmax))
new_arr[0, :, :] = self.t[:, :]
self.t = new_arr
if self.z is not None:
new_arr = numpy.zeros((1, imax, jmax))
new_arr[0, :, :] = self.z[:, :]
self.z = new_arr
else:
self.z = numpy.zeros((1, imax, jmax))
elif len(shape) > 0:
imax = shape[0]
if self.x is not None: # x -> x,y[,z]
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.x[:]
self.x = new_arr
if self.y is not None:
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.y[:]
self.y = new_arr
if self.z is not None:
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.z[:]
self.z = new_arr
else:
self.y = numpy.zeros((imax, 1))
else: # r,t -> r,t[,z]
new_arr =
|
numpy.zeros((imax, 1))
|
numpy.zeros
|
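A small standalone sketch of what the completed numpy.zeros call does in Vector.promote: allocate an array with a trailing singleton axis and copy the 1-D data into it. The array values below are made up for illustration.

import numpy

x = numpy.array([1.0, 2.0, 3.0])   # 1-D component data, shape (3,)
imax = x.shape[0]
new_arr = numpy.zeros((imax, 1))   # promoted shape (3, 1)
new_arr[:, 0] = x[:]               # copy the values into the single column
print(new_arr.shape)               # (3, 1)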
import numpy as np
import scipy.special as special
def free_energy(m, m0, L, L0, L_inv, s, s0, c, c0, k, J):
f_new = -0.5 * ((m - m0).T @ L0 @ (m - m0) +
|
np.trace(L_inv @ L0)
|
numpy.trace
|
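A standalone sketch of the completed np.trace term; the matrices and vectors below are made up, and the snippet is only meant to show the trace-of-a-product piece of a Gaussian free-energy-style objective.

import numpy as np

m  = np.array([0.5, -0.2])
m0 = np.array([0.0,  0.0])
L0 = np.array([[2.0, 0.3], [0.3, 1.5]])                     # prior precision (illustrative)
L_inv = np.linalg.inv(np.array([[3.0, 0.1], [0.1, 2.0]]))   # posterior covariance (illustrative)

quad = (m - m0).T @ L0 @ (m - m0)   # quadratic mean term from the line above
tr   = np.trace(L_inv @ L0)         # the completed trace term
print(quad, tr)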
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import FancyArrowPatch
from matplotlib.patches import Polygon, Rectangle
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib import rc
__author__ = 'ernesto'
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preview'] = True
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
#####################################
# PARAMETERS - May be modified      #
#####################################
# percentile
gamma = 0.9
# number of samples
n = 5
# ranges of interest
# sample mean
xmin = -1
xmax = 1
# lambda
lmin = -1
lmax = 1
#####################
# END OF PARAMETERS #
#####################
ns = 400
x = np.linspace(xmin, xmax, ns)
lam = np.linspace(lmin, lmax, ns)
xx, ll = np.meshgrid(x, lam)
quad1 = (ll - xx) ** 2
delta = 1 - gamma
z_u = norm.ppf(1 - delta / 2, loc=0, scale=1)
plane = (z_u ** 2) * ll / n
phi = np.pi/4
xm = xx * np.cos(phi) + ll * np.sin(phi)
ym = -xx *
|
np.sin(phi)
|
numpy.sin
|
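A standalone, toy-sized sketch of the completed rotation: np.cos/np.sin applied to meshgrid coordinates, mirroring the xm/ym lines above.

import numpy as np

x = np.linspace(-1, 1, 3)
lam = np.linspace(-1, 1, 3)
xx, ll = np.meshgrid(x, lam)

phi = np.pi / 4
xm = xx * np.cos(phi) + ll * np.sin(phi)    # rotated abscissa
ym = -xx * np.sin(phi) + ll * np.cos(phi)   # rotated ordinate
print(xm.shape, ym.shape)                   # both (3, 3)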
"""
Agricultural Semantic Segmentation Challenge Dataset Interface
Original author: <NAME> (<EMAIL>)
Updated by <NAME> (April 2020)
"""
import copy
import logging
import os
import pprint
import shutil
import typing
import h5py
import numpy as np
import torch.utils
import torch.utils.data
import tqdm
import thelper.data
import thelper.tasks
import thelper.utils
from thelper.data.parsers import Dataset
logger = logging.getLogger(__name__)
class_names = [
"background", # optional, depending on task
"cloud_shadow",
"double_plant",
"planter_skip",
"standing_water",
"waterway",
"weed_cluster",
]
approx_weight_map = {
"background": 0.7754729398810614,
"cloud_shadow": 0.02987549383646342,
"double_plant": 0.006768273283806349,
"planter_skip": 0.0016827190442308664,
"standing_water": 0.015964306228958156,
"waterway": 0.012930148362618188,
"weed_cluster": 0.1573061193628617
}
dontcare = 255
class Hdf5AgricultureDataset(Dataset):
def __init__(
self,
hdf5_path: typing.AnyStr,
group_name: typing.AnyStr,
transforms: typing.Any = None,
use_global_normalization: bool = True,
keep_file_open: bool = False,
load_meta_keys: bool = False,
copy_to_slurm_tmpdir: bool = False,
):
super().__init__(transforms, deepcopy=False)
if copy_to_slurm_tmpdir:
assert os.path.isfile(hdf5_path), f"invalid input hdf5 path: {hdf5_path}"
slurm_tmpdir = thelper.utils.get_slurm_tmpdir()
assert slurm_tmpdir is not None, "undefined SLURM_TMPDIR env variable"
dest_hdf5_path = os.path.join(slurm_tmpdir, "agrivis.hdf5")
if not os.path.isfile(dest_hdf5_path):
shutil.copyfile(hdf5_path, dest_hdf5_path)
hdf5_path = dest_hdf5_path
logger.info(f"reading AgriVis challenge {group_name} data from: {hdf5_path}")
self.hdf5_path = hdf5_path
self.group_name = group_name
self.load_meta_keys = load_meta_keys
with h5py.File(self.hdf5_path, "r") as archive:
assert group_name in archive, \
"unexpected dataset name (should be train/val/test)"
dataset = archive[group_name]
expected_keys = ["boundaries", "features", "keys"]
if group_name != "test":
expected_keys += ["labels", "n_labelled_pixels"]
assert all([k in dataset.keys() for k in expected_keys]), \
"missing at least one of the expected dataset group keys"
assert all([len(dataset[k]) == len(dataset["keys"]) for k in expected_keys]), \
"dataset sample count mismatch across all subgroups"
if group_name != "test":
assert dataset["labels"].shape[-1] == len(class_names) - 1, \
"unexpected dataset label map count while accounting for background"
meta_iter = zip(dataset["keys"], dataset["n_labelled_pixels"])
else:
meta_iter = zip(dataset["keys"], [None] * len(dataset["keys"]))
self.samples = [{ # list pre-fill
"image": None,
"label_map": None,
"key": key,
"mask": None,
"pxcounts": pxcounts,
} for key, pxcounts in meta_iter]
logger.info(f"loaded metadata for {len(self.samples)} patches")
self.task = thelper.tasks.Segmentation(
class_names=class_names, input_key="image", label_map_key="label_map",
meta_keys=["key", "mask", "pxcounts"], dontcare=dontcare,
)
self.use_global_normalization = use_global_normalization
self.image_mean = np.asarray([
121.6028380635106,
118.52572985557143,
116.36513065674848,
108.47336023815292,
], dtype=np.float32)
self.image_stddev = np.asarray([
41.47667301013803,
41.782106439616534,
45.04215840534553,
44.53299631408866,
], dtype=np.float32)
self.hdf5_handle = h5py.File(self.hdf5_path, "r") if keep_file_open else None
# self.squished = 0
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self._getitems(idx)
assert idx < len(self.samples), "sample index is out-of-range"
if idx < 0:
idx = len(self.samples) + idx
label_map = None
if self.hdf5_handle is not None:
image = self.hdf5_handle[self.group_name]["features"][idx]
mask = self.hdf5_handle[self.group_name]["boundaries"][idx]
if self.group_name != "test":
label_map = self.hdf5_handle[self.group_name]["labels"][idx]
else:
with h5py.File(self.hdf5_path, mode="r") as archive:
image = archive[self.group_name]["features"][idx]
mask = archive[self.group_name]["boundaries"][idx]
if self.group_name != "test":
label_map = archive[self.group_name]["labels"][idx]
if self.use_global_normalization:
image = (image.astype(np.float32) - self.image_mean) / self.image_stddev
mask = mask.astype(np.int16)
if label_map is not None:
# note: we might squish some overlapping labels, but these are very rare... (<0.07%)
out_label_map = np.zeros((image.shape[0], image.shape[1]), dtype=np.int16)
for label_idx in range(1, len(class_names)):
orig_label_map_idx = label_idx - 1
curr_label_map = label_map[..., orig_label_map_idx]
# overlap = np.logical_and(out_label_map != 0, curr_label_map)
# self.squished += np.count_nonzero(overlap)
out_label_map = np.where(curr_label_map, np.int16(label_idx), out_label_map)
label_map = out_label_map
label_map = np.where(mask, label_map,
|
np.int16(dontcare)
|
numpy.int16
|
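A standalone, toy-sized sketch of the masking step the completion finishes: per-class binary label maps are collapsed into one int16 map, and pixels outside the boundary mask receive the dontcare value. All array values below are made up.

import numpy as np

dontcare = 255
label_map = np.zeros((2, 2, 2), dtype=np.uint8)    # H x W x n_classes, one-hot style
label_map[0, 0, 0] = 1                             # class 1 at pixel (0, 0)
label_map[1, 1, 1] = 1                             # class 2 at pixel (1, 1)
mask = np.array([[1, 1], [1, 0]], dtype=np.int16)  # 0 = outside the field boundary

out = np.zeros((2, 2), dtype=np.int16)
for label_idx in range(1, label_map.shape[-1] + 1):
    out = np.where(label_map[..., label_idx - 1], np.int16(label_idx), out)
out = np.where(mask, out, np.int16(dontcare))
print(out)   # pixel (0,0) -> class 1, pixel (1,1) masked to the dontcare value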
# MIT License
#
# Copyright (c) 2017 Laboratory for Computational and Statistical Learning
#
# authors: <NAME>, <NAME>
# email: <EMAIL>
# Website: http://lcsl.mit.edu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from collections import namedtuple
from .utils import check_random_state, stable_invert_root, get_progress_bar, evaluate_L_diagonal
CentersDictionary = namedtuple('CentersDictionary',
('idx', 'X', 'probs', 'lam', 'rls_oversample'))
def estimate_rls_bless(D, X, eval_L, lam_new):
"""Given a previously computed (eps, lambda)-accurate dictionary, it computes estimates
of all RLS using the estimator from :cite:`CaLaVa17`
:param CentersDictionary D: an (eps, lambda) accurate dictionary, see :ref:`bless`
:param array_like X: samples whose RLS we must approximate
:param callable eval_L: likelihood function
:param float lam_new: lambda regularization to use for the RLSs
:return: array of estimated RLS
:rtype:
array_like
"""
diag_norm = evaluate_L_diagonal(eval_L, X)
# (m x n) kernel matrix between samples in dictionary and dataset X
K_DU = eval_L(D.X, X)
# the estimator proposed in Calandriello et al. 2017 is
# diag(XX' - XX'S(SX'XS + lam*I)^(-1)SXX')/lam
# here for efficiency we collect an S inside the inverse and compute
# diag(XX' - XX'(X'X + lam*S^(-2))^(-1)XX')/lam
# note that in the second term we take care of dropping the rows/columns of X associated
# with 0 entries in S
U_DD, S_DD, _ = np.linalg.svd(eval_L(D.X, D.X) + lam_new * np.diag(D.probs))
U_DD, S_root_inv_DD = stable_invert_root(U_DD, S_DD)
E = S_root_inv_DD * U_DD.T
# compute (X'X + lam*S^(-2))^(-1/2)XX'
X_precond = E.dot(K_DU)
# the diagonal entries of XX'(X'X + lam*S^(-2))^(-1)XX' are just the squared
# ell-2 norm of the columns of (X'X + lam*S^(-2))^(-1/2)XX'
rls_estimate = (diag_norm - np.square(X_precond, out=X_precond).sum(axis=0)) / lam_new
if np.any(rls_estimate < 0.0):
raise ValueError('Some estimated RLS is negative, this should never happen.'
' Min prob: {}'.format(np.min(rls_estimate)))
return rls_estimate
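# Illustrative note (not part of the original module): for a dictionary of m
# centers and n samples, K_DU has shape (m, n), E = S^{-1/2} U^T has shape
# (m, m), X_precond = E.dot(K_DU) has shape (m, n), and the per-column squared
# ell-2 norms give the n diagonal entries subtracted from diag_norm above.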
def reduce_lambda(X_data,
eval_L,
intermediate_dict_bless,
lam_new,
rng,
rls_oversample_parameter=None):
"""Given a previously computed (eps, lambda)-accurate dictionary and a lambda' < lambda parameter,
it constructs an (eps, lambda')-accurate dictionary using approximate RLS sampling.
:param array_like X_data: dataset that we must approximate
:param callable eval_L: likelihood function
:param CentersDictionary intermediate_dict_bless: an (eps, lambda) accurate dictionary, see :ref:`bless`
:param float lam_new: lambda regularization for the new dictionary
:param np.random.RandomState rng: rng for sampling
:param float rls_oversample_parameter: Oversampling parameter to increase success probability, see :ref:`bless`
:return: An (eps, lam_new)-accurate dictionary with high probability
:rtype:
CentersDictionary
"""
n, d = X_data.shape
if rls_oversample_parameter is None:
rls_oversample_parameter = intermediate_dict_bless.rls_oversample
red_ratio = intermediate_dict_bless.lam / lam_new
if red_ratio < 1.0:
raise ValueError(str(red_ratio))
diag = np.asarray(evaluate_L_diagonal(eval_L, X_data))
# compute upper confidence bound on RLS of each sample, overestimate (oversample) by a rls_oversample factor
# to boost success probability at the expenses of a larger sample (dictionary)
ucb = np.minimum(rls_oversample_parameter * diag / (diag + lam_new), 1.)
U = np.asarray(rng.rand(n)) <= ucb
u = U.sum()
if u <= 0:
raise ValueError('No point selected during uniform sampling step, try to increase rls_oversample_bless. '
'Expected number of points: {:.3f}'.format(n * ucb.mean()))
X_U = X_data[U, :]
rls_estimate = estimate_rls_bless(intermediate_dict_bless, X_U, eval_L, lam_new)
# same as before, oversample by a rls_oversample factor
probs = np.minimum(rls_oversample_parameter * rls_estimate, ucb[U])
probs_reject = probs / ucb[U]
if
|
np.any(probs < 0.0)
|
numpy.any
|
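A standalone sketch of the guard the completion supplies, followed by the Bernoulli-style accept step that reduce_lambda performs; the probabilities below are made up.

import numpy as np

rng = np.random.RandomState(0)
probs = np.array([0.2, 0.7, 0.05, 0.9])
if np.any(probs < 0.0):
    raise ValueError('Some acceptance probability is negative.')
accepted = rng.rand(probs.size) <= probs   # keep point i with probability probs[i]
print(accepted)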
#!/usr/bin/env python
"""SAMPLER.PY - Sampler for periodic signals such as variable stars
"""
from __future__ import print_function
__authors__ = '<NAME> <<EMAIL>>'
__version__ = '20220320' # yyyymmdd
import time
import numpy as np
from dlnpyutils import utils as dln
from astropy.table import Table
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy import stats
import copy
import emcee
import corner
class VariableSampler:
"""
    Class for sampling variable star lightcurve data.
Parameters
----------
catalog : table
        Catalog of data points; must have mag, err, jd, and band columns.
template : table or function
Template information as a table with phase and mag columns or
function/method that takes phase array.
ampratios : dict, optional
Amplitude ratios. Keys should be the unique band names
        and values should be the amplitude ratios.
If this is not input, then a ratio of 1.0 is used.
minerror : float, optional
Minimum error to use. Default is 0.02.
"""
def __init__(self,catalog,template,ampratios=None,minerror=0.02):
# Create the sampling for Period (pmin to pmax) and phase offset (0-1)
self._catalog = catalog
self.data = Table(catalog).copy()
for n in self.data.colnames:
self.data[n].name = n.lower() # change columns names to lower case
self.template = Table(template).copy()
for n in self.template.colnames:
self.template[n].name = n.lower() # change columns names to lower case
# "filter" not "band" input
if 'band' not in self.data.colnames and 'filter' in self.data.colnames:
self.data['band'] = self.data['filter']
# "mjd" not "jd" input
if 'jd' not in self.data.colnames and 'mjd' in self.data.colnames:
self.data['jd'] = self.data['mjd']
# Check that the catalog and template have the columns that we need
missingcols = []
for n in ['mag','err','jd','band']:
if n not in self.data.colnames:
missingcols.append(n)
if len(missingcols)>0:
raise ValueError('Missing catalog required columns: '+', '.join(missingcols))
missingcols = []
for n in ['phase','mag']:
if n not in self.template.colnames:
missingcols.append(n)
if len(missingcols)>0:
raise ValueError('Missing template required columns: '+', '.join(missingcols))
# Make sure the data are sorted by JD
si = np.argsort(self.data['jd'])
self.data = self.data[si]
# Add weights to internal catalog
self.data['wt'] = 1/np.maximum(self.data['err'],minerror)**2
data = self.data
# Only keep bands with 2+ observations
uband = np.unique(data['band'])
badind = np.array([],int)
for i,b in enumerate(uband):
ind, = np.where(data['band']==b)
if len(ind)<2:
print('band '+str(b)+' only has '+str(len(ind))+' observations. Not using')
badind = np.hstack((badind,ind))
if len(badind)>0:
data.remove_rows(badind)
ndata = len(data)
self.data = data # the data points that are left
self.ndata = ndata
print(str(ndata)+' data points')
print('time baseline = %.2f' % (np.max(data['jd'])-np.min(data['jd'])))
# Get band index
uband = np.unique(data['band'])
nband = len(uband)
bandindex = {}
for i,b in enumerate(uband):
ind, = np.where(data['band']==b)
bandindex[b] = ind
self.bands = uband
self.nbands = nband
self._bandindex = bandindex
print(str(len(uband))+' bands = ',', '.join(np.char.array(uband).astype(str)))
# No amplitude ratios input
if ampratios is None:
ampratios = {}
for b in uband:
ampratios[b] = 1.0
self.ampratios = ampratios
# Pre-calculate some terms that are constant
totwtdict = {}
totwtydict = {}
for b in uband:
ind = bandindex[b]
totwtdict[b] = np.sum(data['wt'][ind])
totwtydict[b] = np.sum(data['wt'][ind] * data['mag'][ind])
self._totwtdict = totwtdict
self._totwtydict = totwtydict
def solve(self,period,offset,amplitude=None):
"""
Solve for a given period and offset.
Parameters
----------
period : float or array
Period as scalar float or array.
offset : float or array
Phase offset as scalar float or array.
amplitude : float or array, optional
Amplitude. If this is not input, then the best amplitude
using linear least squares is derived.
Returns
-------
amplitude : float or array
The amplitudes.
constant : float or array
The best constant offset.
lnlikelihood : float or array
The log likelihood.
Example
-------
amp,const,lnlkhood = samp.solve(period,offset)
"""
nperiod = np.array(period).size
data = self.data
template = self.template
ampratios = self.ampratios
bandindex = self._bandindex
totwtdict = self._totwtdict
totwtydict = self._totwtydict
ndata = len(data)
# Get phase and template points
phase = (data['jd'].reshape(-1,1)/period.reshape(1,-1) + offset.reshape(1,-1)) % 1
if hasattr(template, '__call__'):
tmpl = template(phase.ravel())
else:
tmpl = np.interp(phase.ravel(),template['phase'],template['mag'])
tmpl = tmpl.reshape(ndata,nperiod)
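        # Illustrative note (not part of the original method): the phase fold
        # (jd/period + offset) % 1 maps every observation time into [0, 1), and
        # np.interp then evaluates the template at those folded phases.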
# -- Find best fitting values for linear parameters ---
# Calculate amplitude
# term1 = Sum of XY
# term2 = Sum of X * Y / W
# term3 = Sum of X^2
# term4 = Sum of X * X / W
# amplitude = (term1 - term2)/(term3 - term4)
if amplitude is None:
term1,term2,term3,term4 = 0,0,0,0
if nperiod==1:
totwtxdict = {}
for b in bandindex.keys():
ind = bandindex[b]
totwtx1 = np.sum(data['wt'][ind] * tmpl[ind]*ampratios[b])
totwtxdict[b] = totwtx1
totwtx2 = np.sum(data['wt'][ind] * (tmpl[ind]*ampratios[b])**2)
totwtxy = np.sum(data['wt'][ind] * tmpl[ind]*ampratios[b] * data['mag'][ind])
term1 += totwtxy
term2 += totwtx1 * totwtydict[b] / totwtdict[b]
term3 += totwtx2
term4 += totwtx1**2 / totwtdict[b]
amplitude = (term1-term2)/(term3-term4)
else:
totwtxdict = {}
for b in bandindex.keys():
ind = bandindex[b]
totwtx1 = np.sum(data['wt'][ind].reshape(-1,1) * tmpl[ind,:]*ampratios[b],axis=0)
totwtxdict[b] = totwtx1
totwtx2 = np.sum(data['wt'][ind].reshape(-1,1) * (tmpl[ind,:]*ampratios[b])**2,axis=0)
totwtxy = np.sum(data['wt'][ind].reshape(-1,1) * tmpl[ind,:]*ampratios[b] * data['mag'][ind].reshape(-1,1),axis=0)
term1 += totwtxy
term2 += totwtx1 * totwtydict[b] / totwtdict[b]
term3 += totwtx2
term4 += totwtx1**2 / totwtdict[b]
amplitude = (term1-term2)/(term3-term4)
        else:
            if nperiod==1:
                totwtxdict = {}
                for b in bandindex.keys():
                    ind = bandindex[b]
                    totwtx1 = np.sum(data['wt'][ind] * tmpl[ind]*ampratios[b])
                    totwtxdict[b] = totwtx1
else:
totwtxdict = {}
for b in bandindex.keys():
ind = bandindex[b]
totwtx1 = np.sum(data['wt'][ind].reshape(-1,1) * tmpl[ind,:]*ampratios[b],axis=0)
totwtxdict[b] = totwtx1
# Calculate best mean magnitudes
# mean mag = (Y - amplitude * X)/W
meanmag = {}
for b in bandindex.keys():
meanmag1 = (totwtydict[b] - amplitude * totwtxdict[b])/totwtdict[b]
meanmag[b] = meanmag1
# Calculate likelihood/chisq
if nperiod==1:
model = np.zeros(ndata,float)
resid = np.zeros(ndata,float)
wtresid = np.zeros(ndata,float)
for b in bandindex.keys():
ind = bandindex[b]
model1 = tmpl[ind]*ampratios[b]*amplitude+meanmag[b]
model[ind] = model1
resid[ind] = data['mag'][ind]-model1
wtresid[ind] = resid[ind]**2 * data['wt'][ind]
            lnlikelihood = -0.5*np.sum(wtresid + np.log(2*np.pi*data['err']**2))
else:
model = np.zeros((ndata,nperiod),float)
resid = np.zeros((ndata,nperiod),float)
wtresid = np.zeros((ndata,nperiod),float)
for b in bandindex.keys():
ind = bandindex[b]
model1 = tmpl[ind,:]*ampratios[b]*amplitude.reshape(1,-1)+meanmag[b].reshape(1,-1)
model[ind,:] = model1
resid[ind,:] = data['mag'][ind].reshape(-1,1)-model1
wtresid[ind,:] = resid[ind,:]**2 * data['wt'][ind].reshape(-1,1)
lnlikelihood = -0.5*np.sum(wtresid,axis=0)
lnlikelihood += -0.5*np.sum(np.log(2*np.pi*data['err']**2))
return amplitude,meanmag,lnlikelihood
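    # Illustrative note (not part of the original class): the term1..term4 sums
    # in solve() form the weighted least-squares slope of mag against the scaled
    # template x = template*ampratio, accumulated band by band:
    #   amplitude = (sum(w*x*y) - sum(w*x)*sum(w*y)/sum(w))
    #               / (sum(w*x**2) - sum(w*x)**2/sum(w))
    # so each band keeps its own mean magnitude while sharing one amplitude.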
def copy(self):
""" Make a copy."""
return copy.deepcopy(self)
def run(self,pmin=0.1,pmax=None,offsetrange=None,minsample=128,npoints=200000,
unirefine=True,keepnegamp=False,verbose=True):
"""
Run the sampler.
Parameters
----------
pmin : float, optional
Minimum period to search in days. Default is 0.1 days.
pmax : float, optional
Maximum period to search in days. Default is 2 x time baseline.
offsetrange : list, optional
Two-element range of phase offset values to explore. Default is [0,1].
minsample : int, optional
        Minimum number of samples to return. Default is 128.
npoints : int, optional
Number of points to use per loop. Default is 200,000.
unirefine : boolean, optional
If a unimodal posterior distribution function, do a finer search
around the unimodal region. Default is True.
keepnegamp : boolean, optional
Keep negative amplitudes. Default is False.
verbose : boolean, optional
Print useful information to the screen. Default is True.
Returns
-------
samples : astropy table
The Monte Carlo samples that passed rejection sampling.
period, offset, amplitude, lnlikelihood, lnprob, meanmaxBAND.
trials: astropy table
All of the trials period and phase offset positions tried.
period, offset, amplitude, lnlikelihood, lnprob, meanmaxBAND.
best : dictionary
Dictionary of best values (in ln probability) across all of
the trials: period, offset, amplitude, meanmag, lnprob.
Example
-------
samples,trials,best = vs.run()
"""
data = self.data
ndata = self.ndata
template = self.template
uband = self.bands
nband = self.nbands
ampratios = self.ampratios
bandindex = self._bandindex
totwtdict = self._totwtdict
totwtydict = self._totwtydict
self.bestperiod = None
self.bestoffset = None
self.bestamplitude = None
self.bestmeanmag = None
self.bestlnprob = None
self.samples = None
self.trials = None
# Period range
if pmax is None:
pmax = (np.max(data['jd'])-np.min(data['jd']))*2
lgminp = np.log10(pmin)
lgmaxp = np.log10(pmax)
if verbose:
print('Pmin = %.3f' % pmin)
print('Pmax = %.3f' % pmax)
self._pmin = pmin
self._pmax = pmax
# Phase offset range
if offsetrange is not None:
offsetmin = offsetrange[0]
offsetmax = offsetrange[1]
else:
offsetmin = 0
offsetmax = 1
if offsetmin<0 or offsetmax>1:
raise ValueError('Phase offset range must be within 0 to 1')
if verbose:
print('Phase offset min = %.3f' % offsetmin)
print('Phase offset max = %.3f' % offsetmax)
# Loop until we have enough samples
nsamples = 0
samplelist = []
count = 0
dtt = [('period',float),('offset',float),('amplitude',float),('lnlikelihood',float),('lnprob',float)]
for b in uband:
dtt += [('mag'+str(b),float)]
trials = None
while (nsamples<minsample):
# Uniformly sample from log(pmin) to log(pmax)
period = np.random.rand(npoints)*(lgmaxp-lgminp)+lgminp
period = 10**period
# Uniformly sample from offsetmin to offsetmax
offset = np.random.rand(npoints)*(offsetmax-offsetmin)+offsetmin
# Solve for amplitude, constant and lnlikelihood
amplitude,meanmag,lnlikelihood = self.solve(period,offset)
# Calculate ln probability = ln prior + ln likelihood
# use flat prior, divide by area
lnprior = np.ones(npoints,float) + np.log(1/((lgmaxp-lgminp)*(offsetmax-offsetmin)))
lnprob = lnprior + lnlikelihood
# Save the information
trials1 = np.zeros(npoints,dtype=dtt)
trials1['period'] = period
trials1['offset'] = offset
trials1['amplitude'] = amplitude
for k in meanmag.keys():
trials1['mag'+str(k)] = meanmag[k]
trials1['lnlikelihood'] = lnlikelihood
trials1['lnprob'] = lnprob
if trials is None:
trials = trials1
else:
trials = np.hstack((trials,trials1))
# REJECT NEGATIVE AMPLITUDES??
# Rejection sampling
draw = np.random.rand(npoints)
if keepnegamp is False:
ind, = np.where((draw < np.exp(lnprob)) & (amplitude > 0))
else:
ind, = np.where(draw < np.exp(lnprob))
if len(ind)>0:
for i in ind:
samp = {'period':period[i],'offset':offset[i],'amplitude':amplitude[i]}
for k in meanmag.keys():
samp[k] = meanmag[k][i]
samp['lnlikelihood'] = lnlikelihood[i]
samp['lnprob'] = lnprob[i]
samplelist.append(samp)
nsamples += len(ind)
if verbose:
print(count+1,nsamples)
count += 1
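        # Illustrative note (not part of the original method): the loop above is
        # plain rejection sampling; a trial (period, offset) is kept whenever a
        # uniform draw in [0, 1) falls below exp(lnprob), so accepted trials are
        # distributed in proportion to their posterior probability.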
# Convert sample list to table
dt = [('period',float),('offset',float),('amplitude',float)]
for k in meanmag.keys():
dt += [('mag'+str(k),float)]
dt += [('lnlikelihood',float),('lnprob',float)]
samples = np.zeros(len(samplelist),dtype=dt)
for i,samp in enumerate(samplelist):
samples['period'][i] = samp['period']
samples['offset'][i] = samp['offset']
samples['amplitude'][i] = samp['amplitude']
samples['lnlikelihood'][i] = samp['lnlikelihood']
samples['lnprob'][i] = samp['lnprob']
for k in meanmag.keys():
samples['mag'+str(k)][i] = samp[k]
# Convert to astropy tables
samples = Table(samples)
trials = Table(trials)
self.samples = samples
self.trials = trials
if keepnegamp is False:
posind, = np.where(trials['amplitude']>0)
best1 = np.argmax(trials['lnprob'][posind])
best = posind[best1]
else:
best = np.argmax(trials['lnprob'])
bestperiod = trials['period'][best]
bestoffset = trials['offset'][best]
bestlnprob = trials['lnprob'][best]
bestamplitude = trials['amplitude'][best]
bestmeanmag = {}
for b in uband:
bestmeanmag[b] = trials['mag'+str(b)][best]
if verbose:
print('Best period = %.4f' % bestperiod)
print('Best offset = %.4f' % bestoffset)
print('Best amplitude = %.4f' % bestamplitude)
print('Best lnprob = %.4f' % bestlnprob)
self.bestperiod = bestperiod
self.bestoffset = bestoffset
self.bestamplitude = bestamplitude
self.bestmeanmag = bestmeanmag
self.bestlnprob = bestlnprob
ntrials = npoints*count
if verbose:
print('ntrials = ',ntrials)
# If unimodal, run higher sampling
medperiod = np.median(samples['period'])
delta = (4*medperiod**2)/(2*np.pi*(np.max(data['jd'])-np.min(data['jd'])))
deltap = medperiod**2/(2*np.pi*(np.max(data['jd'])-np.min(data['jd'])))
rmsperiod = np.sqrt(np.mean((samples['period']-medperiod)**2))
unimodal = False
if rmsperiod < delta and unirefine:
print('Unimodal PDF, finer sampling')
unimodal = True
# Fine sampling around the maximum
rmsperiod = dln.mad(samples['period'].data-medperiod,zero=True)
pmin2 = np.maximum(medperiod - 3*rmsperiod,pmin)
pmax2 = medperiod + 3*rmsperiod
medoffset = np.median(samples['offset'])
rmsoffset = dln.mad(samples['offset'].data-medoffset,zero=True)
offsetmin2 = np.maximum(medoffset-3*rmsoffset,offsetmin)
offsetmax2 = medoffset+3*rmsoffset
medamplitude = np.median(samples['amplitude'])
rmsamplitude = dln.mad(samples['amplitude'].data-medamplitude,zero=True)
ampmin2 = np.maximum(medamplitude-3*rmsamplitude,0)
ampmax2 = medamplitude+3*rmsamplitude
# Uniformly sample from min to max
period2 = np.random.rand(npoints)*(pmax2-pmin2)+pmin2
offset2 = np.random.rand(npoints)*(offsetmax2-offsetmin2)+offsetmin2
amplitude2 = np.random.rand(npoints)*(ampmax2-ampmin2)+ampmin2
# Calculate amplitude, constant and lnlikelihood
amplitude3,meanmag2,lnlikelihood2 = self.solve(period2,offset2,amplitude2)
# Calculate ln probability = ln prior + ln likelihood
# use flat prior, divide by area
lnprior2 = np.ones(npoints,float) + np.log(1/((pmax2-pmin2)*(offsetmax2-offsetmin2)*(ampmax2-ampmin2)))
lnprob2 = lnprior2 + lnlikelihood2
# Save trial information
trials0 = trials
del trials
trials = np.zeros(npoints,dtype=dtt)
trials['period'] = period2
trials['offset'] = offset2
trials['amplitude'] = amplitude2
for k in meanmag.keys():
trials['mag'+str(k)] = meanmag2[k]
trials['lnlikelihood'] = lnlikelihood2
trials['lnprob'] = lnprob2
# Rejection sampling
draw = np.random.rand(npoints)
if keepnegamp is False:
                ind, = np.where((draw < np.exp(lnprob2)) & (amplitude2 > 0))
            else:
                ind, = np.where(draw < np.exp(lnprob2))
if len(ind)>0:
# Convert sample list to table
dt = [('period',float),('offset',float),('amplitude',float)]
for k in meanmag.keys():
dt += [('mag'+str(k),float)]
dt += [('lnlikelihood',float),('lnprob',float)]
samples = np.zeros(len(ind),dtype=dt)
samples['period'] = period2[ind]
samples['offset'] = offset2[ind]
samples['amplitude'] = amplitude2[ind]
samples['lnlikelihood'] = lnlikelihood2[ind]
samples['lnprob'] = lnprob2[ind]
for k in meanmag.keys():
                    samples['mag'+str(k)] = meanmag2[k][ind]
samples = Table(samples)
trials = Table(trials)
self.samples = samples
self.trials = trials
# Get best values
best = np.argmax(trials['lnprob'])
bestperiod = trials['period'][best]
bestoffset = trials['offset'][best]
bestlnprob = trials['lnprob'][best]
bestamplitude = trials['amplitude'][best]
bestmeanmag = {}
for b in uband:
bestmeanmag[b] = trials['mag'+str(b)][best]
if verbose:
print('Best period = %.4f' % bestperiod)
print('Best offset = %.4f' % bestoffset)
print('Best amplitude = %.4f' % bestamplitude)
for b in uband:
print('Best meanmag %s = %.4f' %(str(b),bestmeanmag[b]))
print('Best lnprob = %.4f' % bestlnprob)
self.bestperiod = bestperiod
self.bestoffset = bestoffset
self.bestamplitude = bestamplitude
self.bestmeanmag = bestmeanmag
self.bestlnprob = bestlnprob
self.unimodal = unimodal
# Construct best dictionary
best = {'period':bestperiod,'phase':bestoffset,'amplitude':bestamplitude,
'meanmag':bestmeanmag,'lnprob':bestlnprob}
return samples, trials, best
def plots(self,plotbase='sampler',bins=(200,200)):
""" Make the plots."""
data = self.data
ndata = self.ndata
template = self.template
uband = self.bands
nband = self.nbands
bandindex = self._bandindex
ampratios = self.ampratios
bestperiod = self.bestperiod
bestoffset = self.bestoffset
bestamplitude = self.bestamplitude
bestmeanmag = self.bestmeanmag
bestlnprob = self.bestlnprob
samples = self.samples
trials = self.trials
# Make plots
matplotlib.use('Agg')
fig,ax = plt.subplots(2,1,constrained_layout=True)
fig.set_figheight(10)
fig.set_figwidth(10)
xr = [np.min(np.log10(trials['period'])),np.max(np.log10(trials['period']))]
# 2D density map
im,b,c,d = stats.binned_statistic_2d(trials['offset'],np.log10(trials['period']),trials['lnprob'],statistic='mean',bins=bins)
z1 = ax[0].imshow(im,aspect='auto',origin='lower',extent=(c[0],c[-1],b[0],b[-1]))
ax[0].set_xlabel('log(Period)')
ax[0].set_ylabel('Phase Offset')
ax[0].set_xlim(xr)
plt.colorbar(z1,ax=ax[0],label='Mean ln(Prob)')
# Period histogram
hist,a,b = stats.binned_statistic(np.log10(trials['period']),trials['lnprob'],statistic='mean',bins=1000)
ax[1].plot(a[0:-1],hist)
ax[1].set_xlabel('log(Period)')
ax[1].set_ylabel('Mean ln(Prob)')
ax[1].set_xlim(xr)
fig.savefig(plotbase+'_trials.png',bbox_inches='tight')
plt.close(fig)
print('Saving to '+plotbase+'_trials.png')
# Plot offset vs. period color-coded by lnprob
# plot amplitude vs. period color-coded by lnprob
fig,ax = plt.subplots(3,1,constrained_layout=True)
fig.set_figheight(10)
fig.set_figwidth(10)
xr = [np.min(samples['period']),np.max(samples['period'])]
# Plot offset vs. period color-coded by lnprob
z1 = ax[0].scatter(np.log10(samples['period']),samples['offset'],c=samples['lnprob'])
ax[0].set_xlabel('log(Period)')
ax[0].set_ylabel('Phase Offset')
ax[0].set_xlim(xr)
plt.colorbar(z1,ax=ax[0],label='ln(Prob)')
# Plot amplitude vs. period color-coded by lnprob
z2 = ax[1].scatter(np.log10(samples['period']),samples['amplitude'],c=samples['lnprob'])
ax[1].set_xlabel('log(Period)')
ax[1].set_ylabel('Amplitude')
ax[1].set_xlim(xr)
plt.colorbar(z2,ax=ax[1],label='ln(Prob)')
# Sum of lnprob
hist2,a2,b2 = stats.binned_statistic(np.log10(samples['period']),samples['lnprob'],statistic='sum',bins=50)
ax[2].plot(a2[0:-1],hist2)
ax[2].set_xlabel('log(Period)')
ax[2].set_ylabel('Sum ln(Prob)')
ax[2].set_xlim(xr)
fig.savefig(plotbase+'_samples.png',bbox_inches='tight')
plt.close(fig)
print('Saving to '+plotbase+'_samples.png')
# Plot best-fit model
# one panel per band, mag vs. phase
fig,ax = plt.subplots(nband,1)
fig.set_figheight(10)
fig.set_figwidth(10)
phase = (data['jd']/bestperiod + bestoffset) % 1
if hasattr(template, '__call__'):
tmpl = template(phase.ravel())
else:
tmpl = np.interp(phase,template['phase'],template['mag'])
for i,b in enumerate(uband):
ind = bandindex[b]
tphase = (np.linspace(0,1,100)+bestoffset) % 1
si = np.argsort(tphase)
tphase = tphase[si]
tmag = np.interp(tphase,template['phase'],template['mag'])
model = tmag*ampratios[b]*bestamplitude+bestmeanmag[b]
dd = np.hstack((data['mag'][ind],model))
yr = [np.max(dd)+0.05*dln.valrange(dd),np.min(dd)-0.30*dln.valrange(dd)]
ax[i].plot(tphase,model,c='blue',zorder=1)
ax[i].errorbar(phase[ind],data['mag'][ind],yerr=data['err'][ind],c='gray',fmt='none',zorder=2)
ax[i].scatter(phase[ind],data['mag'][ind],c='black',zorder=3)
txt = 'Band '+str(b)
if ampratios is not None:
txt += ' Amp Ratio=%.3f' % ampratios[b]
ax[i].annotate(txt,xy=(0.02,yr[1]+0.10*dln.valrange(dd)),ha='left')
ax[i].annotate('Amplitude=%.3f' % (bestamplitude*ampratios[b]),xy=(0.02,yr[1]+0.20*dln.valrange(dd)),ha='left')
ax[i].annotate('Mean Mag=%.3f' % bestmeanmag[b],xy=(0.02,yr[1]+0.30*dln.valrange(dd)),ha='left')
ax[i].set_xlabel('Phase')
ax[i].set_ylabel('Magnitude')
ax[i].set_xlim(0,1)
ax[i].set_ylim(yr)
if i==0:
ax[i].set_title('Period=%.3f Offset=%.3f Amplitude=%.3f ln(Prob)=%.3f' %
(bestperiod,bestoffset,bestamplitude,bestlnprob))
fig.savefig(plotbase+'_best.png',bbox_inches='tight')
plt.close(fig)
print('Saving to '+plotbase+'_best.png')
#--------------------------------------------------------------------------------------------------------------------
class LinearModelSampler:
"""
Class to perform sampling of periodic linear model (y=a*model(phase)+b)
Parameters
----------
data : table
Tuple with (x,y,yerr) or table with columns x, y and yerr.
model : function or table
Model function or template with x and y columns.
minerror : float, optional
Minimum error to use. Default is 0.02.
"""
def __init__(self,data,model,minerror=0.02):
# Create the sampling for Period (pmin to pmax) and phase offset (0-1)
if type(data) is tuple:
temp = np.zeros(len(data[0]),dtype=np.dtype([('x',float),('y',float),('yerr',float)]))
temp['x'] = data[0]
temp['y'] = data[1]
temp['yerr'] = data[2]
self.data = Table(temp)
else:
self.data = Table(data).copy()
for n in self.data.colnames:
self.data[n].name = n.lower() # change columns names to lower case
self.model = model
# Add weights to internal catalog
self.data['wt'] = 1/np.maximum(self.data['yerr'],minerror)**2
data = self.data
# Make sure the data are sorted by x
si = np.argsort(self.data['x'])
self.data = self.data[si]
        ndata = len(data)
        self.ndata = ndata
        print(str(ndata)+' data points')
        print('time baseline = %.2f' % (np.max(data['x'])-np.min(data['x'])))
# Pre-calculate some terms that are constant
totwt = np.sum(data['wt'])
totwty = np.sum(data['wt']*data['y'])
self._totwt = totwt
self._totwty = totwty
def solve(self,period,offset,amplitude=None):
"""
Solve for a given period and offset.
Parameters
----------
period : float or array
Period as scalar float or array.
offset : float or array
Phase offset as scalar float or array.
amplitude : float or array, optional
Amplitude. If this is not input, then the best amplitude
using linear least squares is derived.
Returns
-------
amplitude : float or array
The amplitudes.
constant : float or array
The best constant offset.
lnlikelihood : float or array
The log likelihood.
Example
-------
amp,const,lnlkhood = samp.solve(period,offset)
"""
        data = self.data
        model = self.model
        totwt = self._totwt
        totwty = self._totwty
        ndata = len(data)
        nperiod = np.array(period).size
# Calculate phase for each data point
if nperiod==1:
phase = (data['x']/period + offset) % 1
else:
phase = (data['x'].reshape(-1,1)/period.reshape(1,-1) + offset.reshape(1,-1)) % 1
# Calculate template values for this set of period and phase
if hasattr(model, '__call__'):
tmpl = model(phase)
else:
tmpl = np.interp(phase,model['x'],model['y'])
if nperiod>1:
tmpl = tmpl.reshape(ndata,nperiod)
# -- Find best fitting values for linear parameters ---
# Calculate amplitude
# term1 = Sum of XY
# term2 = Sum of X * Y / W
# term3 = Sum of X^2
# term4 = Sum of X * X / W
# amplitude = (term1 - term2)/(term3 - term4)
# The weighted sums over the template are needed both for the amplitude
# (when it is solved for) and for the constant term below
if nperiod==1:
totwtx = np.sum(data['wt'] * tmpl)
totwtx2 = np.sum(data['wt'] * tmpl**2)
totwtxy = np.sum(data['wt'] * tmpl*data['y'])
else:
totwtx = np.sum(data['wt'].reshape(-1,1) * tmpl,axis=0)
totwtx2 = np.sum(data['wt'].reshape(-1,1) * tmpl**2,axis=0)
totwtxy = np.sum(data['wt'].reshape(-1,1) * tmpl * data['y'].reshape(-1,1),axis=0)
if amplitude is None:
term1 = totwtxy
term2 = totwtx * totwty / totwt
term3 = totwtx2
term4 = totwtx**2 / totwt
amplitude = (term1-term2)/(term3-term4)
# Calculate best constant value
# constant = (Y - amplitude * X)/W
constant = (totwty-amplitude*totwtx)/totwt
# Calculate likelihood
if nperiod==1:
model1 = tmpl*amplitude+constant
resid = data['y']-model1
wtresid = resid**2 * data['wt']
lnlikelihood = -0.5*np.sum(wtresid + np.log(2*np.pi*data['yerr']**2))
else:
model1 = tmpl*amplitude.reshape(1,-1)+constant.reshape(1,-1)
resid = data['y'].reshape(-1,1)-model1
wtresid = resid**2 * data['wt'].reshape(-1,1)
lnlikelihood = -0.5*np.sum(wtresid,axis=0)
lnlikelihood += -0.5*np.sum(np.log(2*np.pi*data['yerr']**2))
return amplitude,constant,lnlikelihood
def copy(self):
""" Make a copy."""
return copy.deepcopy(self)
def run(self,pmin=0.1,pmax=None,offsetrange=None,minsample=128,npoints=200000,
unirefine=True,keepnegamp=False,verbose=True):
"""
Run the sampler.
Parameters
----------
pmin : float, optional
Minimum period to search in days. Default is 0.1 days.
pmax : float, optional
Maximum period to search in days. Default is 2 x time baseline.
offsetrange : list, optional
Two-element range of phase offset values to explore. Default is [0,1].
minsample : int, optional
Minimum number of samples to return. Default is 128.
npoints : int, optional
Number of points to use per loop. Default is 200,000.
unirefine : boolean, optional
If the posterior distribution is unimodal, do a finer search
around the mode. Default is True.
keepnegamp : boolean, optional
Keep negative amplitudes. Default is False.
verbose : boolean, optional
Print useful information to the screen. Default is True.
Returns
-------
samples : astropy table
The Monte Carlo samples that passed rejection sampling:
period, offset, amplitude, constant, lnlikelihood, lnprob.
trials : astropy table
All of the trial period and phase offset positions tried:
period, offset, amplitude, constant, lnlikelihood, lnprob.
best : dictionary
Dictionary of best values (in ln probability) across all of
the trials: period, phase, amplitude, constant, lnprob.
Example
-------
samples,trials,best = vs.run()
"""
data = self.data
ndata = self.ndata
model = self.model
totwt = self._totwt
totwty = self._totwty
self.bestperiod = None
self.bestoffset = None
self.bestamplitude = None
self.bestconstant = None
self.bestlnprob = None
self.samples = None
self.trials = None
# Period range
if pmax is None:
pmax = (np.max(data['x'])-np.min(data['x']))*2
lgminp = np.log10(pmin)
lgmaxp = np.log10(pmax)
if verbose:
print('Pmin = %.3f' % pmin)
print('Pmax = %.3f' % pmax)
self.pmin = pmin
self.pmax = pmax
# Phase offset range
if offsetrange is not None:
offsetmin = offsetrange[0]
offsetmax = offsetrange[1]
else:
offsetmin = 0
offsetmax = 1
if offsetmin<0 or offsetmax>1:
raise ValueError('Phase offset range must be within 0 to 1')
if verbose:
print('Phase offset min = %.3f' % offsetmin)
print('Phase offset max = %.3f' % offsetmax)
# Loop until we have enough samples
nsamples = 0
samplelist = []
count = 0
dtt = [('period',float),('offset',float),('amplitude',float),('constant',float),('lnlikelihood',float),('lnprob',float)]
trials = None
while (nsamples<minsample):
# Uniformly sample from log(pmin) to log(pmax)
period = np.random.rand(npoints)*(lgmaxp-lgminp)+lgminp
period = 10**period
# Uniformly sample from offsetmin to offsetmax
offset = np.random.rand(npoints)*(offsetmax-offsetmin)+offsetmin
# Solve for amplitude, constant and lnlikelihood
amplitude,constant,lnlikelihood = self.solve(period,offset)
# Calculate ln probability = ln prior + ln likelihood
# use flat prior, divide by area
lnprior = np.ones(npoints,float) + np.log(1/(1.0*(lgmaxp-lgminp)))
lnprob = lnprior + lnlikelihood
# Save the information
trials1 = np.zeros(npoints,dtype=dtt)
trials1['period'] = period
trials1['offset'] = offset
trials1['amplitude'] = amplitude
trials1['constant'] = constant
trials1['lnlikelihood'] = lnlikelihood
trials1['lnprob'] = lnprob
if trials is None:
trials = trials1
else:
trials = np.hstack((trials,trials1))
# Rejection sampling
draw = np.random.rand(npoints)
if keepnegamp is False:
ind, = np.where((draw < np.exp(lnprob)) & (amplitude > 0))
else:
ind, = np.where(draw < np.exp(lnprob))
if len(ind)>0:
for i in ind:
samp = {'period':period[i],'offset':offset[i],'amplitude':amplitude[i],'constant':constant[i]}
samp['lnlikelihood'] = lnlikelihood[i]
samp['lnprob'] = lnprob[i]
samplelist.append(samp)
nsamples += len(ind)
if verbose:
print(count+1,nsamples)
count += 1
# Convert sample list to table
dt = [('period',float),('offset',float),('amplitude',float),('constant',float)]
dt += [('lnlikelihood',float),('lnprob',float)]
samples = np.zeros(len(samplelist),dtype=dt)
for i,samp in enumerate(samplelist):
samples['period'][i] = samp['period']
samples['offset'][i] = samp['offset']
samples['amplitude'][i] = samp['amplitude']
samples['constant'][i] = samp['constant']
samples['lnlikelihood'][i] = samp['lnlikelihood']
samples['lnprob'][i] = samp['lnprob']
# Convert to astropy tables
samples = Table(samples)
trials = Table(trials)
self.samples = samples
self.trials = trials
if keepnegamp is False:
posind, = np.where(trials['amplitude']>0)
best1 = np.argmax(trials['lnprob'][posind])
best = posind[best1]
else:
best = np.argmax(trials['lnprob'])
bestperiod = trials['period'][best]
bestoffset = trials['offset'][best]
bestlnprob = trials['lnprob'][best]
bestamplitude = trials['amplitude'][best]
bestconstant = trials['constant'][best]
if verbose:
print('Best period = %.4f' % bestperiod)
print('Best offset = %.4f' % bestoffset)
print('Best amplitude = %.4f' % bestamplitude)
print('Best constant = %.4f' % bestconstant)
print('Best lnprob = %.4f' % bestlnprob)
self.bestperiod = bestperiod
self.bestoffset = bestoffset
self.bestamplitude = bestamplitude
self.bestconstant = bestconstant
self.bestlnprob = bestlnprob
ntrials = npoints*count
if verbose:
print('ntrials = ',ntrials)
# If the PDF is unimodal, do a finer search around the mode
medperiod = np.median(samples['period'])
delta = (4*medperiod**2)/(2*np.pi*(np.max(data['x'])-np.min(data['x'])))
deltap = medperiod**2/(2*np.pi*(np.max(data['x'])-np.min(data['x'])))
rmsperiod = np.sqrt(np.mean((samples['period']-medperiod)**2))
unimodal = False
if rmsperiod < delta and unirefine:
print('Unimodal PDF, finer sampling')
unimodal = True
# Fine sampling around the maximum
rmsperiod = dln.mad(samples['period'].data-medperiod,zero=True)
pmin2 = np.maximum(medperiod - 3*rmsperiod,pmin)
pmax2 = medperiod + 3*rmsperiod
medoffset = np.median(samples['offset'])
rmsoffset = dln.mad(samples['offset'].data-medoffset,zero=True)
offsetmin2 = np.maximum(medoffset-3*rmsoffset,offsetmin)
offsetmax2 = medoffset+3*rmsoffset
medamplitude = np.median(samples['amplitude'])
rmsamplitude = dln.mad(samples['amplitude'].data-medamplitude,zero=True)
ampmin2 = np.maximum(medamplitude-3*rmsamplitude,0)
ampmax2 = medamplitude+3*rmsamplitude
# Uniformly sample from min to max
period2 = np.random.rand(npoints)*(pmax2-pmin2)+pmin2
offset2 = np.random.rand(npoints)*(offsetmax2-offsetmin2)+offsetmin2
amplitude2 = np.random.rand(npoints)*(ampmax2-ampmin2)+ampmin2
# Calculate amplitude, constant and lnlikelihood
amplitude3,constant2,lnlikelihood2 = self.solve(period2,offset2,amplitude2)
# Calculate ln probability = ln prior + ln likelihood
# use flat prior, divide by area
lnprior2 = np.ones(npoints,float) + np.log(1/((pmax2-pmin2)*(offsetmax2-offsetmin2)*(ampmax2-ampmin2)))
lnprob2 = lnprior2 + lnlikelihood2
# Save trial information
trials0 = trials
del trials
trials = np.zeros(npoints,dtype=dtt)
trials['period'] = period2
trials['offset'] = offset2
trials['amplitude'] = amplitude2
trials['constant'] = constant2
trials['lnlikelihood'] = lnlikelihood2
trials['lnprob'] = lnprob2
# Rejection sampling
draw = np.random.rand(npoints)
if keepnegamp is False:
ind, = np.where((draw < np.exp(lnprob2)) & (amplitude2 > 0))
else:
ind, = np.where(draw < np.exp(lnprob2))
if len(ind)>0:
# Create table
dt = [('period',float),('offset',float),('amplitude',float),('constant',float),
('lnlikelihood',float),('lnprob',float)]
samples = np.zeros(len(ind),dtype=dt)
samples['period'] = period2[ind]
samples['offset'] = offset2[ind]
samples['amplitude'] = amplitude2[ind]
samples['constant'] = constant2[ind]
samples['lnlikelihood'] = lnlikelihood2[ind]
samples['lnprob'] = lnprob2[ind]
samples = Table(samples)
trials = Table(trials)
self.samples = samples
self.trials = trials
# Get best values
best = np.argmax(trials['lnprob'])
bestperiod = trials['period'][best]
bestoffset = trials['offset'][best]
bestamplitude = trials['amplitude'][best]
bestconstant = trials['constant'][best]
bestlnprob = trials['lnprob'][best]
if verbose:
print('Best period = %.4f' % bestperiod)
print('Best offset = %.4f' % bestoffset)
print('Best amplitude = %.4f' % bestamplitude)
print('Best constant = %.4f' % bestconstant)
print('Best lnprob = %.4f' % bestlnprob)
self.bestperiod = bestperiod
self.bestoffset = bestoffset
self.bestamplitude = bestamplitude
self.bestconstant = bestconstant
self.bestlnprob = bestlnprob
self.unimodal = unimodal
# Construct best dictionary
best = {'period':bestperiod,'phase':bestoffset,'amplitude':bestamplitude,
'constant':bestconstant,'lnprob':bestlnprob}
return samples, trials, best
def plots(self,plotbase='sampler',bins=(200,200)):
""" Make the plots."""
data = self.data
ndata = self.ndata
model = self.model
bestperiod = self.bestperiod
bestoffset = self.bestoffset
bestamplitude = self.bestamplitude
bestconstant = self.bestconstant
bestlnprob = self.bestlnprob
samples = self.samples
trials = self.trials
# Make plots
matplotlib.use('Agg')
fig,ax = plt.subplots(2,1,constrained_layout=True)
fig.set_figheight(10)
fig.set_figwidth(10)
xr = [np.min(np.log10(trials['period'])),np.max(np.log10(trials['period']))]
# 2D density map
im,b,c,d = stats.binned_statistic_2d(trials['offset'],np.log10(trials['period']),trials['lnprob'],statistic='mean',bins=bins)
z1 = ax[0].imshow(im,aspect='auto',origin='lower',extent=(c[0],c[-1],b[0],b[-1]))
ax[0].set_xlabel('log(Period)')
ax[0].set_ylabel('Phase Offset')
ax[0].set_xlim(xr)
plt.colorbar(z1,ax=ax[0],label='Mean ln(Prob)')
# Period histogram
hist,a,b = stats.binned_statistic(np.log10(trials['period']),trials['lnprob'],statistic='mean',bins=1000)
ax[1].plot(a[0:-1],hist)
ax[1].set_xlabel('log(Period)')
ax[1].set_ylabel('Mean ln(Prob)')
ax[1].set_xlim(xr)
fig.savefig(plotbase+'_trials.png',bbox_inches='tight')
plt.close(fig)
print('Saving to '+plotbase+'_trials.png')
# Plot offset vs. period color-coded by lnprob
# plot amplitude vs. period color-coded by lnprob
fig,ax = plt.subplots(3,1,constrained_layout=True)
fig.set_figheight(10)
fig.set_figwidth(10)
xr = [np.min(np.log10(samples['period'])),np.max(np.log10(samples['period']))]
# Plot offset vs. period color-coded by lnprob
z1 = ax[0].scatter(np.log10(samples['period']),samples['offset'],c=samples['lnprob'])
ax[0].set_xlabel('log(Period)')
ax[0].set_ylabel('Phase Offset')
ax[0].set_xlim(xr)
plt.colorbar(z1,ax=ax[0],label='ln(Prob)')
# Plot amplitude vs. period color-coded by lnprob
z2 = ax[1].scatter(np.log10(samples['period']),samples['amplitude'],c=samples['lnprob'])
ax[1].set_xlabel('log(Period)')
ax[1].set_ylabel('Amplitude')
ax[1].set_xlim(xr)
plt.colorbar(z2,ax=ax[1],label='ln(Prob)')
# Sum of lnprob
hist2,a2,b2 = stats.binned_statistic(np.log10(samples['period']),samples['lnprob'],statistic='sum',bins=50)
ax[2].plot(a2[0:-1],hist2)
ax[2].set_xlabel('log(Period)')
ax[2].set_ylabel('Sum ln(Prob)')
ax[2].set_xlim(xr)
fig.savefig(plotbase+'_samples.png',bbox_inches='tight')
plt.close(fig)
print('Saving to '+plotbase+'_samples.png')
# Plot best-fit model
# one panel per band, mag vs. phase
fig = plt.figure(figsize=(10,10))
phase = (data['x']/bestperiod + bestoffset) % 1
if hasattr(model, '__call__'):
tmpl = model(phase)
else:
tmpl = np.interp(phase,model['x'],model['y'])
tphase = (np.linspace(0,1,100)+bestoffset) % 1
si = np.argsort(tphase)
tphase = tphase[si]
if hasattr(model, '__call__'):
tmag = model(tphase)
else:
tmag = np.interp(tphase,model['x'],model['y'])
bestmodel = tmag*bestamplitude+bestconstant
dd = np.hstack((data['y'],bestmodel))
yr = [np.max(dd)+0.05*dln.valrange(dd),np.min(dd)-0.30*dln.valrange(dd)]
plt.plot(tphase,bestmodel,c='blue',zorder=1)
plt.errorbar(phase,data['y'],yerr=data['yerr'],c='gray',fmt='none',zorder=2)
plt.scatter(phase,data['y'],c='black',zorder=3)
plt.annotate('Mean Mag=%.3f' % np.mean(data['y']),xy=(0.02,yr[1]+0.10*dln.valrange(dd)),ha='left')
plt.xlabel('Phase')
plt.ylabel('Magnitude')
plt.xlim(0,1)
plt.ylim(yr)
plt.title('Period=%.3f Offset=%.3f Amplitude=%.3f Constant=%.3f ln(Prob)=%.3f' %
(bestperiod,bestoffset,bestamplitude,bestconstant,bestlnprob))
fig.savefig(plotbase+'_best.png',bbox_inches='tight')
plt.close(fig)
print('Saving to '+plotbase+'_best.png')
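# A minimal usage sketch (added for illustration; the synthetic data, template and
# parameter values below are hypothetical and not part of the original code).
def _linear_model_sampler_example():
    """Build a LinearModelSampler on synthetic data and run it."""
    rng = np.random.default_rng(1)
    true_period, true_offset, true_amp, true_const = 0.65, 0.3, 0.8, 15.0
    x = np.sort(rng.uniform(0, 30, 120))
    tmpl = lambda ph: np.sin(2*np.pi*ph)   # simple sinusoidal template
    yerr = np.full_like(x, 0.05)
    y = true_amp*tmpl((x/true_period + true_offset) % 1) + true_const + rng.normal(0, yerr)
    samp = LinearModelSampler((x, y, yerr), tmpl, minerror=0.02)
    samples, trials, best = samp.run(pmin=0.1, minsample=64, npoints=50000, verbose=False)
    return best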
#--------------------------------------------------------------------------------------------------------------------
class Sampler:
"""
Generic sampler of periodic signals.
args : tuple
Must at least contain (x, y, yerr). It can additionally contain other positional
arguments to be passed to log_probability().
log_probability : function
Function that calculates the ln probability given (theta, x, y, yerr). It must also
perform the marginalization over the non-linear parameters.
kwargs : dict, optional
Dictionary of keyword arguments to pass to log_probability() function.
"""
def __init__(self,args,log_probability,kwargs=None):
self._args = args
# args should be (x,y,yerr, and other additional arguments to be passed to the functions)
self._log_probability = log_probability
self._kwargs = kwargs
# kwargs is a dictionary of additional keyword arguments to be passed to log_probability()
def copy(self):
""" Make a copy."""
return copy.deepcopy(self)
def run(self,pmin=0.1,pmax=None,minsample=128,npoints=200000):
""" Run the sampling."""
x = self._args[0]
y = self._args[1]
yerr = self._args[2]
ndata = len(x)
args = self._args
kwargs = self._kwargs
model = getattr(self,'_model',None)  # the generic sampler has no template model by default
log_probability = self._log_probability
# Period range
if pmax is None:
pmax = (np.max(x)-np.min(x))*2
lgminp = np.log10(pmin)
lgmaxp = np.log10(pmax)
print('Pmin = %.3f' % pmin)
print('Pmax = %.3f' % pmax)
self._pmin = pmin
self._pmax = pmax
# Loop until we have enough samples
nsamples,count = 0,0
trials,samples = None,None
dtt = [('period',float),('offset',float),('lnprob',float)]
while (nsamples<minsample):
# Uniformly sample from log(pmin) to log(pmax)
period = np.random.rand(npoints)*(lgmaxp-lgminp)+lgminp
period = 10**period
# Uniformly sample from 0 to 1
offset =
|
np.random.rand(npoints)
|
numpy.random.rand
|
import os
import numpy as np
from torch.utils.data import Dataset
import torch
import cv2
import glob
import imgaug.augmenters as iaa
from perlin import rand_perlin_2d_np
class MVTecDRAEMTestDataset(Dataset):
def __init__(self, root_dir, resize_shape=None):
self.root_dir = root_dir
self.images = sorted(glob.glob(root_dir+"/*/*.png"))
self.resize_shape=resize_shape
def __len__(self):
return len(self.images)
def transform_image(self, image_path, mask_path):
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
if mask_path is not None:
mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
else:
mask = np.zeros((image.shape[0],image.shape[1]))
if self.resize_shape is not None:
image = cv2.resize(image, dsize=(self.resize_shape[1], self.resize_shape[0]))
mask = cv2.resize(mask, dsize=(self.resize_shape[1], self.resize_shape[0]))
image = image / 255.0
mask = mask / 255.0
image = np.array(image).reshape((image.shape[0], image.shape[1], 3)).astype(np.float32)
mask = np.array(mask).reshape((mask.shape[0], mask.shape[1], 1)).astype(np.float32)
image = np.transpose(image, (2, 0, 1))
mask = np.transpose(mask, (2, 0, 1))
return image, mask
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_path = self.images[idx]
dir_path, file_name = os.path.split(img_path)
base_dir = os.path.basename(dir_path)
if base_dir == 'good':
image, mask = self.transform_image(img_path, None)
has_anomaly = np.array([0], dtype=np.float32)
else:
mask_path = os.path.join(dir_path, '../../ground_truth/')
mask_path = os.path.join(mask_path, base_dir)
mask_file_name = file_name.split(".")[0]+"_mask.png"
mask_path = os.path.join(mask_path, mask_file_name)
image, mask = self.transform_image(img_path, mask_path)
has_anomaly = np.array([1], dtype=np.float32)
sample = {'image': image, 'has_anomaly': has_anomaly,'mask': mask, 'idx': idx}
return sample
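# A minimal usage sketch (added; the dataset path and resize shape are hypothetical):
#
#   from torch.utils.data import DataLoader
#   test_set = MVTecDRAEMTestDataset("./mvtec/bottle/test", resize_shape=[256, 256])
#   loader = DataLoader(test_set, batch_size=1, shuffle=False)
#   for sample in loader:
#       image = sample['image']              # float32, (B, 3, 256, 256), values in [0, 1]
#       mask = sample['mask']                # float32, (B, 1, 256, 256)
#       has_anomaly = sample['has_anomaly']  # 1 for defective images, 0 for 'good'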
class MVTecDRAEMTrainDataset(Dataset):
def __init__(self, root_dir, anomaly_source_path, resize_shape=None):
"""
Args:
root_dir (string): Directory with all the images.
anomaly_source_path (string): Directory with the anomaly source images.
resize_shape (tuple, optional): Output (height, width) to resize images to.
"""
self.root_dir = root_dir
self.resize_shape=resize_shape
self.image_paths = sorted(glob.glob(root_dir+"/*.png"))
self.anomaly_source_paths = sorted(glob.glob(anomaly_source_path+"/*/*.jpg"))
self.augmenters = [iaa.GammaContrast((0.5,2.0),per_channel=True),
iaa.MultiplyAndAddToBrightness(mul=(0.8,1.2),add=(-30,30)),
iaa.pillike.EnhanceSharpness(),
iaa.AddToHueAndSaturation((-50,50),per_channel=True),
iaa.Solarize(0.5, threshold=(32,128)),
iaa.Posterize(),
iaa.Invert(),
iaa.pillike.Autocontrast(),
iaa.pillike.Equalize(),
iaa.Affine(rotate=(-45, 45))
]
self.rot = iaa.Sequential([iaa.Affine(rotate=(-90, 90))])
def __len__(self):
return len(self.image_paths)
def randAugmenter(self):
aug_ind = np.random.choice(np.arange(len(self.augmenters)), 3, replace=False)
aug = iaa.Sequential([self.augmenters[aug_ind[0]],
self.augmenters[aug_ind[1]],
self.augmenters[aug_ind[2]]]
)
return aug
def augment_image(self, image, anomaly_source_path):
aug = self.randAugmenter()
perlin_scale = 6
min_perlin_scale = 0
anomaly_source_img = cv2.imread(anomaly_source_path)
anomaly_source_img = cv2.resize(anomaly_source_img, dsize=(self.resize_shape[1], self.resize_shape[0]))
anomaly_img_augmented = aug(image=anomaly_source_img)
perlin_scalex = 2 ** (torch.randint(min_perlin_scale, perlin_scale, (1,)).numpy()[0])
perlin_scaley = 2 ** (torch.randint(min_perlin_scale, perlin_scale, (1,)).numpy()[0])
perlin_noise = rand_perlin_2d_np((self.resize_shape[0], self.resize_shape[1]), (perlin_scalex, perlin_scaley))
perlin_noise = self.rot(image=perlin_noise)
threshold = 0.5
perlin_thr = np.where(perlin_noise > threshold, np.ones_like(perlin_noise), np.zeros_like(perlin_noise))
perlin_thr =
|
np.expand_dims(perlin_thr, axis=2)
|
numpy.expand_dims
|
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.model_selection import StratifiedKFold as SKFold
from sklearn.metrics import f1_score
import pickle as pkl
import numpy as np
import os
import warnings
from collections import Counter
from operators.unary import *
warnings.filterwarnings("ignore")
exclude_dataset = ['fbis_wc']
class LFE(object): # Learning Feature Engineering for Classification, IJCAI 2017
def __init__(self, lower=-10, upper=10, num_bins=200, theta=0.01, gamma=0.8): # gamma not mentioned in the work
'''
:param lower: lower bound
:param upper: upper bound
:param num_bins: number of bins
:param theta: threshold for deciding whether a sample is positive
:param gamma: threshold for deciding whether to recommend the best transformation. If prediction > threshold, recommend!
'''
self.lower = lower
self.upper = upper
self.num_bins = num_bins
self.theta = theta
self.gamma = gamma
self.name_prefix = "lower_" + str(lower) + "_upper_" + str(upper) + "_bins_" + str(num_bins) + "_theta_" + str(
theta)
def generate_samples(self, x, y, dataset_name, save_dir='lfe/data'): # One-vs-rest
'''
Given a dataset, generate training samples for LFE
:param x: features
:param y: labels
:param dataset_name: dataset name
:return: QSA meta-features, <transformation, label list> dictionary like {'log':[0,1,0],'sigmoid':[1,1,1]}
'''
if not os.path.exists(save_dir):
raise ValueError("Directory %s not existed!" % save_dir)
qsa_save_path = os.path.join(save_dir, "qsa_" + dataset_name)
label_save_path = os.path.join(save_dir, "label_" + dataset_name)
x = np.array(x)
y = np.array(y)
label_dict = {i: [] for i in unary_collection}
num_features = x.shape[1]
y_classes = list(set(y))
qsa_x = []
for feature_index in range(num_features):
if len(y_classes) > 2:
for label in y_classes:
y_ = []
for i in y:
y_.append(1 if i == label else 0)
y_ = np.array(y_)
qsa_x.append(self.generate_qsa(x[:, feature_index], y_))
result_dict = self.valid_sample(x, y_, feature_index)
for op in unary_collection:
label_dict[op].append(result_dict[op])
else:
qsa_x.append(self.generate_qsa(x[:, feature_index], y))
result_dict = self.valid_sample(x, y, feature_index)
for op in unary_collection:
label_dict[op].append(result_dict[op])
for key in label_dict:
label_dict[key] = np.array(label_dict[key])
qsa_x = np.array(qsa_x)
with open(qsa_save_path, 'wb') as f:
pkl.dump(qsa_x, f)
with open(label_save_path, 'wb') as f:
pkl.dump(label_dict, f)
return qsa_x, label_dict
def generate_qsa(self, x, y): # Default one-vs-rest
'''
Convert a column into Quantile Sketch Array
:param x: a column
:param y: binary labels
:return: Quantile Sketch Array
'''
scaler = MinMaxScaler(feature_range=(self.lower, self.upper))
qsa = []
for i in [0, 1]:
x_ = [x[index] for index in range(len(x)) if y[index] == i]
x_ = np.array(x_)
x_ = np.reshape(x_, (len(x_), 1))
x_ = scaler.fit_transform(x_)
x_ = np.reshape(x_, (len(x_)))
x_ -= self.lower
bin_range = (self.upper - self.lower) / self.num_bins
bucketized_col = np.zeros((self.num_bins,))
for element in x_:
index = int(element / bin_range)
if index == self.num_bins:
index = self.num_bins - 1
bucketized_col[index] += 1
qsa.extend(bucketized_col / len(x_))
return np.array(qsa)
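# Worked toy example (added for illustration): for a column x = [0.1, 0.2, 0.9, 1.0]
# with binary labels y = [0, 0, 1, 1], generate_qsa min-max scales the class-0 values
# and the class-1 values separately to [lower, upper], histograms each into num_bins
# bins, normalizes by the class size, and concatenates the two histograms:
#
#   lfe = LFE(num_bins=200)
#   qsa = lfe.generate_qsa(np.array([0.1, 0.2, 0.9, 1.0]), np.array([0, 0, 1, 1]))
#   qsa.shape   # -> (400,), i.e. 2 * num_bins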
def fit(self, train_ops, data_dir='lfe/data', save_dir='lfe'):
'''
:param train_ops: list for train_ops
:param data_dir: directory for training data
:param save_dir: directory to save models
:return:
'''
if not os.path.exists(save_dir):
os.mkdir(save_dir)
train_x, train_y = self.load_training_data(data_dir)
for train_op in train_ops:
save_path = "lfe_" + self.name_prefix + "_" + train_op
save_path = os.path.join(save_dir, save_path)
# All unary transformations currently share the same MLP configuration
if train_op in ('log', 'sqrt', 'square', 'freq', 'round', 'tanh', 'sigmoid', 'isoreg', 'zscore', 'norm'):
clf = MLP(hidden_layer_sizes=(500,), max_iter=3000, verbose=1, n_iter_no_change=20, tol=1e-5)
else:
raise ValueError("Unexpected operation %s" % train_op)
clf.fit(train_x, train_y[train_op])
from sklearn.metrics import accuracy_score
print(accuracy_score(clf.predict(train_x), train_y[train_op]))
with open(save_path, 'wb') as f:
pkl.dump(clf, f)
def predict(self, pred_op, x, save_dir='lfe'):
'''
:param pred_op: name of a unary operation, as shown below
:param x: Quantile Sketch Array
:param save_dir:
:return: predictions, indicating the expected performance of each transformation
'''
save_path = "lfe_" + self.name_prefix + "_" + pred_op
save_path = os.path.join(save_dir, save_path)
with open(save_path, 'rb') as f:
clf = pkl.load(f)
pred = clf.predict_proba(x)
return [element[1] for element in pred]
def choose(self, x, y, save_dir='lfe'):
'''
Choose transformations for features
:param x: features
:param y: labels
:param save_dir:
:return: Operator if prediction > gamma, else None
'''
transformation = []
x = np.array(x)
num_features = x.shape[1]
qsa_features = [self.generate_qsa(x[:, i], y) for i in range(num_features)]
qsa_features = np.array(qsa_features)
pred_dict = {}
for pred_op in unary_collection:
pred_dict[pred_op] = self.predict(pred_op, qsa_features, save_dir)
for i in range(num_features):
max_performance = -1
best_op = ''
for pred_op in unary_collection:
pred = pred_dict[pred_op][i]
if pred > max_performance:
max_performance = pred
best_op = pred_op
if max_performance > self.gamma:
tran = best_op
else:
tran = None
transformation.append(tran)
return transformation
def valid_sample(self, x, y, t_id):
'''
Determine whether the t-th feature in features is a positive training sample
:param x: original features
:param y: ground truth label
:param t_id: index of feature to be transformed
:param threshold: threshold of improvement of newly constructed feature
:return: dictionary, like {'log':1, 'sigmoid':0} 1 for positive and 0 for not positive
'''
x = np.array(x)
y = np.array(y)
kfold = SKFold(n_splits=10)
results_org = []
results_new = {op: [] for op in unary_collection}
for train_index, test_index in kfold.split(x, y):
# Original feature
rfc_org = RFC()
rfc_org.fit(x[train_index, t_id:t_id + 1], y[train_index])
pred_org = rfc_org.predict(x[test_index, t_id:t_id + 1])
results_org.append(f1_score(y[test_index], pred_org))
# Constructed feature
for op in unary_collection:
operator = op_dict[op]
rfc_new = RFC()
new_feature = operator.operate(x[train_index, t_id])
new_feature = np.reshape(new_feature, (len(new_feature), 1))
rfc_new.fit(new_feature, y[train_index])
# print(op,Counter(list(x[test_index, t_id])))
new_feature = operator.operate(x[test_index, t_id])
# print(op,Counter(list(new_feature)))
new_feature = np.reshape(new_feature, (len(new_feature), 1))
pred_new = rfc_new.predict(new_feature)
results_new[op].append(f1_score(y[test_index], pred_new))
result_org = np.mean(results_org)
result_dict = {}
for key in results_new:
result_new = np.mean(results_new[key])
if result_new >= result_org * (1 + self.theta):
result_dict[key] = 1
else:
result_dict[key] = 0
return result_dict
def load_training_data(self, data_dir='lfe/data'):
data = {}
for root, _, files in os.walk(data_dir):
for file in files:
path = os.path.join(root, file)
dataset = '_'.join(file.split('_')[1:])
if dataset in exclude_dataset:
continue
with open(path, 'rb') as f:
if file.split('_')[0] == 'qsa':
qsa = pkl.load(f)
if dataset not in data:
data[dataset] = {'qsa': qsa}
else:
data[dataset]['qsa'] = qsa
elif file.split('_')[0] == 'label':
label_dict = pkl.load(f)
if dataset not in data:
data[dataset] = {'label': label_dict}
else:
data[dataset]['label'] = label_dict
train_x = []
train_y = {op: [] for op in unary_collection}
for key in data:
train_x.extend(data[key]['qsa'])
for op in unary_collection:
train_y[op].extend(data[key]['label'][op])
for op in unary_collection:
train_y[op] = np.array(train_y[op])
train_x = np.array(train_x)
return train_x, train_y
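# A minimal end-to-end sketch (added; 'my_dataset', x_train/y_train, x_new/y_new and
# the directory layout are hypothetical, and unary_collection is assumed to hold the
# supported transformation names):
#
#   lfe = LFE()
#   # 1) build QSA meta-features and labels from one or more training datasets
#   lfe.generate_samples(x_train, y_train, 'my_dataset', save_dir='lfe/data')
#   # 2) train one MLP per unary transformation
#   lfe.fit(train_ops=list(unary_collection), data_dir='lfe/data', save_dir='lfe')
#   # 3) recommend a transformation (or None) for each feature of a new dataset
#   recommended = lfe.choose(x_new, y_new, save_dir='lfe')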
def oversample(x, y, sample_size=0.2):
num_samples = len(x)
num_oversamples = int(sample_size * num_samples)
print(y)
true_inx = [i for i in range(num_samples) if y[i] == 1]
oversample_idx =
|
np.random.choice(true_inx, num_oversamples)
|
numpy.random.choice
|
import os
import sys
on_kaggle_server = os.path.exists("/kaggle")
if on_kaggle_server:
os.system("pip install ../input/mmdetection-v280/src/mmdet-2.8.0/mmdet-2.8.0/")
os.system(
"pip install ../input/mmdetection-v280/src/mmpycocotools-12.0.3/mmpycocotools-12.0.3/"
)
os.system(
"pip install ../input/stratified-with-multi-class/iterative-stratification-master"
)
os.system("pip install ../input/hpapytorchzoo/hpa-pytorch-zoo")
os.system("pip install ../input/hpa-cell-segmentation/HPA-Cell-Segmentation")
os.system("pip install ../input/pretrainedmodels/pretrainedmodels-0.7.4")
os.system("pip install ../input/pytorch-timm/timm-0.3.2-py3-none-any.whl")
os.system("pip install ../input/efficientnet-pytorch/efficientnet_pytorch-0.6.3")
os.system("pip install ../input/qubvel-segmentation")
sys.path.insert(0, "../input/hpa-ws-repo/kaggle-hpa-single-cell-image-classification-main")
import argparse
import base64
import copy
import csv
import functools
import gc
import glob
import zlib
from pathlib import Path
from typing import Dict, List, Optional, Text, Tuple, Union
import cv2
import hpacellseg.cellsegmentator as cellsegmentator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torchvision
import yaml
from hpacellseg.utils import label_cell, label_nuclei
from pycocotools import _mask as coco_mask
from tqdm import tqdm
from src.dataset.datamodule import HpaDatamodule
from src.modeling.pl_model import (
LitModel,
get_cam_dir,
get_cam_pred_path,
load_trained_pl_model,
)
data_dir = "../input/hpa-single-cell-image-classification"
batch_size = 4
input_size = 2048
label_find_size = (512, 512)
PRED_THRESH = 0.01
NEGA_CLASS = 18
# hpa cellsegmentator tool weights
NUC_MODEL = "../input/hpa-cell-seg-weights/hpa-cell-seg-weights/nuclei-model.pth"
CELL_MODEL = "../input/hpa-cell-seg-weights/hpa-cell-seg-weights/cell-model.pth"
cell_area_thresh = 0.2 * 0.5
cell_h_area_thresh = 0.08 * 0.5
nuc_area_thresh = 0.2
nuc_h_area_thresh = 0.08
green_ch_thresh = 1.0
use_same_thresh = True
cam_thresh = 1.0e-6
high_cam_thresh = 0.75
min_mask_ratio = 0.01
default_bkg_score = 0.75
tta_mode = "scale" # "skip", "flip", "scale", "split"
scales = [1.2, 1.4]
def get_dm_default_args() -> dict:
dm_args = {
"val_fold": 0,
"aug_mode": 0,
"num_inchannels": 4,
"round_nb": 0,
"sub_label_dir": None,
}
return dm_args
def load_ckpt_paths(ckpt_paths: List[Tuple[str, float, str]]) -> List[dict]:
models = []
for i, ckpt_ in enumerate(ckpt_paths):
if isinstance(ckpt_, tuple):
how_, w, path = ckpt_
else:
path = ckpt_
w = 1.0 / len(ckpt_paths)
how_ = "both"
if path.find(".ckpt") > -1:
model, args_hparams = load_trained_pl_model(LitModel, path)
model.cuda()
model.eval()
is_cuda = True
else:
print("use cached cam for inference:", path)
assert os.path.isdir(path)
psuedo_path = os.path.dirname(path) + "/checkponts/last.ckpt"
model, args_hparams = load_trained_pl_model(
LitModel, psuedo_path, only_load_yaml=True
)
is_cuda = False
models.append(
{
"path": path,
"model": model,
"hparams": args_hparams,
"how_join": how_,
"weight": w,
"is_cuda": is_cuda,
}
)
return models
def create_cell_masks(im: np.ndarray, segmentator) -> tuple:
"""
im: (batch, H, W, 4) rgby image (channels last)
"""
# For nuclei List[np.ndarray(H, W)] blue
nuc_input = [rgby_image[..., 2] for rgby_image in im]
nuc_segmentations = segmentator.pred_nuclei(nuc_input)
# For full cells
# List[List[np.ndarray(H, W)] r, List[np.ndarray(H, W)] y, List[np.ndarray(H, W)] b]
cell_input = [[rgby_image[..., j] for rgby_image in im] for j in [0, 3, 2]]
cell_segmentations = segmentator.pred_cells(cell_input)
batch_n_masks = []
batch_c_masks = []
# post-processing
for i, pred in enumerate(cell_segmentations):
nuclei_mask, cell_mask = label_cell(nuc_segmentations[i], cell_segmentations[i])
batch_n_masks.append(nuclei_mask)
batch_c_masks.append(cell_mask)
return batch_n_masks, batch_c_masks
def cache_cell_masks(
input_ids: List[str],
batch_n_masks: List[np.ndarray],
batch_c_masks: List[np.ndarray],
save_dir: Path = Path("../input/hpa-mask"),
make_resize_data: bool = False,
im: Optional[np.ndarray] = None,
image_size: int = 768,
):
cell_dir = save_dir / "hpa_cell_mask"
nucl_dir = save_dir / "hpa_nuclei_mask"
cell_dir.mkdir(parents=True, exist_ok=True)
nucl_dir.mkdir(parents=True, exist_ok=True)
for i, input_id in enumerate(input_ids):
np.savez_compressed(str(nucl_dir / f"{input_id}.npz"), batch_n_masks[i])
np.savez_compressed(str(cell_dir / f"{input_id}.npz"), batch_c_masks[i])
if make_resize_data and (im is not None):
resize_dir = save_dir / "hpa_resize_ext"
resize_dir.mkdir(exist_ok=True)
for i, input_id in enumerate(input_ids):
save_name = str(resize_dir / input_id)
red = im[i, :, :, 0]
red = cv2.resize(red, (image_size, image_size))
cv2.imwrite(f"{save_name}_red.png", red)
green = im[i, :, :, 1]
green = cv2.resize(green, (image_size, image_size))
cv2.imwrite(f"{save_name}_green.png", green)
blue = im[i, :, :, 2]
blue = cv2.resize(blue, (image_size, image_size))
cv2.imwrite(f"{save_name}_blue.png", blue)
yellow = im[i, :, :, 3]
yellow = cv2.resize(yellow, (image_size, image_size))
cv2.imwrite(f"{save_name}_yellow.png", yellow)
return
def get_cell_masks(
data: dict, im: np.ndarray, segmentator: Optional[torch.nn.Module], stage="test"
):
if stage == "test":
batch_n_masks, batch_c_masks = create_cell_masks(im, segmentator)
elif stage == "gen_pseudo":
if np.all(data["is_load"].numpy()):
batch_n_masks, batch_c_masks = data["nucl_mask"], data["cell_mask"]
batch_c_masks = [
mask.squeeze()
for mask in np.split(batch_c_masks.numpy(), batch_c_masks.shape[0])
]
batch_n_masks = [
mask.squeeze()
for mask in np.split(batch_n_masks.numpy(), batch_n_masks.shape[0])
]
else:
batch_n_masks, batch_c_masks = create_cell_masks(im, segmentator)
cache_cell_masks(
input_ids=data["input_id"],
batch_n_masks=batch_n_masks,
batch_c_masks=batch_c_masks,
make_resize_data=True,
im=im,
)
else:
raise NotImplementedError
return batch_n_masks, batch_c_masks
def flip_tta(
model: LitModel,
data: dict,
batch_idx: int,
cam: torch.Tensor,
pred: torch.Tensor,
):
transforms = [
torchvision.transforms.functional.hflip,
torchvision.transforms.functional.vflip,
]
inverts = [
torchvision.transforms.functional.hflip,
torchvision.transforms.functional.vflip,
]
for trans_, invert_ in zip(transforms, inverts):
tta_data = {"image": trans_(data["image"])}
cam_f, pred_f = model.test_step(tta_data, batch_idx, save_npy=False)
cam += invert_(cam_f)
pred += pred_f
pred *= 1.0 / (len(transforms) + 1)
cam *= 1.0 / (len(transforms) + 1)
return cam, pred
def check_tta_size(
infer_size: int, scales: List[float], tta_mode: str
) -> Tuple[List[float], str]:
if tta_mode == "skip":
return scales, tta_mode
if infer_size >= 1024:
return [], "flip"
if infer_size >= 768:
if len(scales) > 0:
return scales[:2], tta_mode
return scales, tta_mode
def infer(
model: LitModel,
data: dict,
batch_idx: int,
args_hparams: dict,
infer_size: int = 512,
tta_mode: str = "flip",
scales: List[float] = [1.2],
scale_with_flip: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
# normal infer
orig_img = data["image"][:, : args_hparams["num_inchannels"]]
infer_data = {
"image": F.interpolate(
orig_img, infer_size, mode="bilinear", align_corners=False
)
}
cam, pred = model.test_step(infer_data, batch_idx, save_npy=False)
scales, tta_mode = check_tta_size(
infer_size=infer_size, scales=scales, tta_mode=tta_mode
)
if tta_mode == "skip":
pass
elif tta_mode == "flip":
cam, pred = flip_tta(
model=model, batch_idx=batch_idx, data=infer_data, cam=cam, pred=pred
)
elif tta_mode == "scale":
tta_sizes = [int(infer_size * scale) for scale in scales]
if scale_with_flip:
cam, pred = flip_tta(
model=model, batch_idx=batch_idx, data=infer_data, cam=cam, pred=pred
)
cam_preds = [cam]
for tta_size in tta_sizes:
tta_data = {
"image": F.interpolate(
orig_img, tta_size, mode="bilinear", align_corners=False
)
}
cam_s, pred_s = model.test_step(tta_data, batch_idx, save_npy=False)
if scale_with_flip:
cam_s, pred_s = flip_tta(
model=model,
batch_idx=batch_idx,
data=tta_data,
cam=cam_s,
pred=pred_s,
)
cam_preds.append(cam_s)
pred += pred_s
cam_size = np.max([cam_.shape[-1] for cam_ in cam_preds])
for i, cam_ in enumerate(cam_preds):
cam_preds[i] = F.interpolate(
cam_, cam_size, mode="bilinear", align_corners=False
)
pred *= 1.0 / (len(tta_sizes) + 1)
# cam *= 1.0 / (len(tta_sizes) + 1)
cam = torch.mean(torch.stack(cam_preds), dim=0)
elif tta_mode == "split":
pass
return cam, pred
def get_class_mask(
data: dict,
batch_idx: int,
args_hparams: dict,
model: LitModel,
infer_size: int = 512,
pred_thresh: float = 0.5,
label_find_size: Optional[tuple] = (512, 512),
stage: str = "test",
mode: str = "cam",
tta_mode: str = "flip",
scales: List[float] = [1.2],
) -> Tuple[torch.Tensor, torch.Tensor]:
# adjust input shape for cam
tta_mode = "flip" if (mode == "segm") & (tta_mode == "scale") else tta_mode
cam, pred = infer(
model=model,
data=data,
args_hparams=args_hparams,
batch_idx=batch_idx,
infer_size=infer_size,
tta_mode=tta_mode,
scales=scales,
)
if stage == "test":
if mode == "segm":
pred = cam.reshape(cam.shape[0], cam.shape[1], -1)
pred = torch.where(pred[:, :-1] > pred_thresh, 1.0, 0.0).max(dim=-1)[0]
elif stage == "gen_pseudo":
pred = data["target"].cuda()
if mode == "cam":
cam_pred = model.convert_cam_to_mask(
cam.clone(), pred >= pred_thresh, orig_img_size=label_find_size
)
elif mode == "segm":
# remove bkg class
cam_pred = cam[:, :-1]
cam_pred = F.interpolate(
cam_pred, label_find_size, mode="bilinear", align_corners=False
)
else:
raise NotImplementedError
return cam_pred, pred
def process_ensemble(
results: List[Dict[str, torch.Tensor]],
how_join: np.ndarray,
weights: Optional[np.ndarray] = None,
eps: float = 1.0e-4,
) -> Tuple[np.ndarray, np.ndarray]:
pred = torch.zeros_like(results[0]["pred"])
cam_pred = torch.zeros_like(results[0]["cam_pred"])
if weights is not None:
assert abs(np.sum(weights) - 1.0) < eps, f"weight = {np.sum(weights)}, {weights}"
assert weights.shape[0] == len(results)
else:
weights = np.ones((len(results),), dtype=np.float32) / len(results)
assert how_join.shape[0] == len(results)
unused_w_pred = 0.0
unused_w_cam = 0.0
for i, res in enumerate(results):
w = weights[i]
how_ = how_join[i]
if how_ == "both":
cam_pred += w * res["cam_pred"]
pred += w * res["pred"]
elif how_ == "cam":
cam_pred += w * res["cam_pred"]
unused_w_pred += w
elif how_ == "pred":
pred += w * res["pred"]
unused_w_cam += w
else:
raise NotImplementedError
cam_pred *= 1.0 / (1.0 - unused_w_cam)
pred *= 1.0 / (1.0 - unused_w_pred)
return cam_pred.cpu().numpy(), pred.cpu().numpy()
def vis_masks(im, batch_n_masks, batch_c_masks, on_kaggle_server=False, ind: int = 0):
# mt :red
# er : yellow
# nu : blue
# images = [mt, er, nu]
fig, ax = plt.subplots(1, 3, figsize=(20, 20))
for i in range(3):
microtubule = im[i][..., 0]
endoplasmicrec = im[i][..., 3]
nuclei = im[i][..., 2]
mask = batch_c_masks[i]
img = np.dstack((microtubule, endoplasmicrec, nuclei))
ax[i].imshow(img)
ax[i].imshow(mask, alpha=0.7)
ax[i].axis("off")
if on_kaggle_server:
plt.savefig(f"./hpa_cell_{ind}.png")
else:
plt.show()
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(20, 20))
for i in range(3):
microtubule = im[i][..., 0]
endoplasmicrec = im[i][..., 3]
nuclei = im[i][..., 2]
mask = batch_n_masks[i]
img = np.dstack((microtubule, endoplasmicrec, nuclei))
ax[i].imshow(img)
ax[i].imshow(mask, alpha=0.7)
ax[i].axis("off")
if on_kaggle_server:
plt.savefig(f"./hpa_nuc_{ind}.png")
else:
plt.show()
plt.close()
def encode_binary_mask(mask: np.ndarray) -> Text:
"""Converts a binary mask into OID challenge encoding ascii text."""
# check input mask --
if mask.dtype != np.bool:
raise ValueError(
"encode_binary_mask expects a binary mask, received dtype == %s"
% mask.dtype
)
mask = np.squeeze(mask)
if len(mask.shape) != 2:
raise ValueError(
"encode_binary_mask expects a 2d mask, received shape == %s" % mask.shape
)
# convert input mask to expected COCO API input --
mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1)
mask_to_encode = mask_to_encode.astype(np.uint8)
mask_to_encode = np.asfortranarray(mask_to_encode)
# RLE encode mask --
encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]
# compress and base64 encoding --
binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
base64_str = base64.b64encode(binary_str)
return base64_str.decode()
def decode_ascii_mask(
base64_str: Text, w_size: int = 2048, h_size: int = 2048, is_numpy: bool = False
) -> dict:
# check input mask --
if type(base64_str) != str:
raise ValueError(
"decode_ascii_mask, expects a str, received dtype == %s" % type(base64_str)
)
base64_str = base64_str.encode()
# base64 decoding and decompress
binary_str = base64.b64decode(base64_str)
encoded_mask = zlib.decompress(binary_str)
# RLE decode mask --
rle = [{"size": [h_size, w_size], "counts": encoded_mask}]
mask_to_encode = coco_mask.decode(rle)
if is_numpy:
mask_to_encode = np.ascontiguousarray(mask_to_encode)
else:
mask_to_encode = np.empty(0)
return {"mask": mask_to_encode, "rle": rle[0]}
def calc_conf(
target_cam: np.ndarray, pred: np.ndarray, cam_rate: np.ndarray, how="max"
) -> float:
if how == "max":
cnf = pred * np.max(target_cam)
elif how == "mean":
cnf = pred * np.sum(target_cam) / np.sum(target_cam > 0)
elif how == "cam_rate":
cnf = pred * np.max(target_cam) * cam_rate
elif how == "cam_rate_mean":
cnf = pred * np.sum(target_cam) / np.sum(target_cam > 0) * cam_rate
return cnf
def calc_cam_rate_cond(
target_cam: np.ndarray,
target_mask: np.ndarray,
target_nuclei: np.ndarray,
mask_area: int,
nuclei_area: int,
cell_area_thresh: float = 0.2,
cell_h_area_thresh: float = 0.08,
nuc_area_thresh: float = 0.2,
nuc_h_area_thresh: float = 0.08,
high_cam_thresh: float = 0.75,
min_mask_ratio: float = 0.01,
use_same_thresh: bool = True,
):
cam_rate = np.sum(target_cam > 0) / mask_area
cam_h_rate = np.sum(target_cam >= high_cam_thresh) / mask_area
nuc_cam_rate_cond = False
if nuclei_area >= (target_cam.shape[1] * min_mask_ratio) ** 2:
if use_same_thresh:
cam_rate = max(
cam_rate,
np.sum((target_cam * target_nuclei) > 0) / nuclei_area,
)
cam_h_rate = max(
cam_h_rate,
np.sum((target_cam * target_nuclei) >= high_cam_thresh) / nuclei_area,
)
else:
nuc_cam_rate = np.sum((target_cam * target_nuclei) > 0) / nuclei_area
nuc_cam_h_rate = (
np.sum((target_cam * target_nuclei) >= high_cam_thresh) / nuclei_area
)
nuc_cam_rate_cond = (nuc_cam_rate >= nuc_area_thresh) or (
nuc_cam_h_rate >= nuc_h_area_thresh
)
cam_rate_cond = (
(cam_rate >= cell_area_thresh)
or (cam_h_rate >= cell_h_area_thresh)
or nuc_cam_rate_cond
)
return cam_rate_cond, cam_rate
def reduce_label_size(
target_mask: np.ndarray,
target_nuclei: np.ndarray,
green_ch: Optional[np.ndarray],
image_size: tuple,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# target_mask = cv2.resize(target_mask.astype(np.uint8), image_size).astype(np.bool)
target_mask = cv2.resize(
target_mask.astype(np.uint8), image_size, interpolation=cv2.INTER_NEAREST
)
target_nuclei = cv2.resize(
target_nuclei.astype(np.uint8), image_size, interpolation=cv2.INTER_NEAREST
).astype(np.bool)
if green_ch is not None:
green_ch = cv2.resize(green_ch.astype(np.uint8), image_size)
else:
green_ch = np.empty(0)
return target_mask, target_nuclei, green_ch
def check_bkg_score(
pred_ins: List[str],
is_bkg: bool,
low_green: bool,
class_ids: List[int],
cnfs: List[float],
):
if is_bkg or low_green:
pass
elif NEGA_CLASS in class_ids:
max_pred = np.argmax(cnfs)
max_class = class_ids[max_pred]
if max_class == NEGA_CLASS:
# use only bkg class
pred_ins = [pred_ins[-1]]
else:
# remove bkg class
pred_ins = pred_ins[:-1]
return pred_ins
def find_cell_labels(
rle: Text,
cam_pred: np.ndarray,
target_mask: np.ndarray,
target_nuclei: np.ndarray,
pred: np.ndarray,
green_ch: np.ndarray = np.empty(0),
green_ch_thresh: float = 2.0,
skip_bkg_check: bool = True,
pred_thresh: float = 0.5,
cell_area_thresh: float = 0.2,
cell_h_area_thresh: float = 0.08,
nuc_area_thresh: float = 0.2,
nuc_h_area_thresh: float = 0.08,
use_same_thresh: bool = True,
high_cam_thresh: float = 0.75,
min_mask_ratio: float = 0.01,
conf_how: str = "max",
default_bkg_score: float = 0.8,
) -> List[str]:
# class prediction
assert np.all(cam_pred.shape[0:1] == pred.shape)
assert np.all(cam_pred.shape[1:3] == target_mask.shape[1:3])
assert np.all(cam_pred.shape[1:3] == target_nuclei.shape[1:3])
assert np.all(cam_pred.shape[1:3] == green_ch.shape[1:3])
assert green_ch.dtype == np.uint8
pred_ins: List[str] = []
mask_area = np.sum(target_mask)
nuclei_area = np.sum(target_nuclei)
if mask_area <= (cam_pred.shape[1] * min_mask_ratio) ** 2:
return pred_ins
bkg_score = []
target_cam = cam_pred * target_mask
target_green = green_ch * target_mask
low_green = target_green.sum() / mask_area < green_ch_thresh
is_bkg = (np.argmax(pred) == NEGA_CLASS) and (pred[NEGA_CLASS] > 0)
if skip_bkg_check:
is_bkg = False
class_ids = []
cnfs = []
for class_id in np.where(pred >= pred_thresh)[0]:
cam_rate_cond, cam_rate = calc_cam_rate_cond(
target_cam=target_cam[class_id],
target_mask=target_mask[0],
target_nuclei=target_nuclei[0],
mask_area=mask_area,
nuclei_area=nuclei_area,
cell_area_thresh=cell_area_thresh,
cell_h_area_thresh=cell_h_area_thresh,
nuc_area_thresh=nuc_area_thresh,
nuc_h_area_thresh=nuc_h_area_thresh,
high_cam_thresh=high_cam_thresh,
min_mask_ratio=min_mask_ratio,
use_same_thresh=use_same_thresh,
)
if cam_rate_cond and (not is_bkg) and (not low_green):
cnf = calc_conf(
target_cam=target_cam[class_id],
pred=pred[class_id],
cam_rate=cam_rate,
how=conf_how,
)
pred_ins.append(f"{class_id} {cnf} {rle}")
cnfs.append(cnf)
class_ids.append(class_id)
else:
bkg_score.append(1.0 - cam_rate)
if not skip_bkg_check:
pred_ins = check_bkg_score(
pred_ins=pred_ins,
is_bkg=is_bkg,
low_green=low_green,
class_ids=class_ids,
cnfs=cnfs,
)
if len(pred_ins) == 0:
class_id = NEGA_CLASS
cnf = default_bkg_score
if not np.all(bkg_score == 0) and (not is_bkg):
nweight = (
np.median(bkg_score) - (1.0 - cell_area_thresh)
) / cell_area_thresh
nweight = max(1.0e-6, nweight)
cnf *= nweight
pred_ins.append(f"{class_id} {cnf} {rle}")
return pred_ins
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run infer for hpa ws",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--yaml_path",
default="../input/hpa-ws-repo/kaggle-hpa-single-cell-image-classification-main/src/config/kaggle_submission.yaml",
type=str,
help="run config path",
)
parser.add_argument(
"--sub_path",
type=str,
default="./submission.csv",
help="path for resutl csv",
)
parser.add_argument(
"--num_workers",
default="4",
type=int,
help="number of cpus for DataLoader",
)
parser.add_argument(
"--para_num",
default=None,
type=int,
help="number of parallel processings at psuedo label generation",
)
parser.add_argument(
"--para_ind",
default="0",
type=int,
help=" parallel run index",
)
args = parser.parse_args()
sub_path = args.sub_path
print("use submission name:", args.sub_path)
with open(args.yaml_path) as f:
configs = yaml.load(f)
stage = configs["stage"]
conf_how = configs["conf_how"]
mask_dir = configs["mask_dir"]
skip_bkg_check = configs["skip_bkg_check"]
use_ext_data = configs["use_ext_data"]
ext_data_mode = configs["ext_data_mode"]
is_debug = configs["is_debug"]
ckpt_paths = configs["ckpt_paths"]
df = pd.read_csv(os.path.join(data_dir, "sample_submission.csv"))
if len(df) == 559:
debug_num = 5
else:
is_debug = False
datamodule = HpaDatamodule(
data_dir=data_dir,
batch_size=batch_size,
is_debug=is_debug,
input_size=input_size,
mask_dir=mask_dir,
num_workers=args.num_workers,
use_ext_data=use_ext_data,
ext_data_mode=ext_data_mode,
para_num=args.para_num,
para_ind=args.para_ind,
**get_dm_default_args(),
)
datamodule.prepare_data()
datamodule.setup(stage=stage)
test_dataloader = datamodule.test_dataloader()
segmentator = cellsegmentator.CellSegmentator(
NUC_MODEL,
CELL_MODEL,
scale_factor=0.25,
device="cuda",
padding=True,
multi_channel_model=True,
)
models = load_ckpt_paths(ckpt_paths=ckpt_paths)
gc.collect()
with torch.no_grad():
pred_rows = [["ID", "ImageWidth", "ImageHeight", "PredictionString"]]
for i, data in tqdm(enumerate(test_dataloader), total=len(test_dataloader)):
if is_debug:
if i >= debug_num:
break
im = data["image"].numpy().transpose(0, 2, 3, 1)
im = im * np.array(datamodule.img_std) + np.array(datamodule.img_mean)
im = (im * 255).astype(np.uint8)
green_batch = im[..., 1]
batch_n_masks, batch_c_masks = get_cell_masks(
data=data, im=im, segmentator=segmentator, stage=stage
)
if is_debug:
# pass
# Visualizing the segmentation masks we just predicted above
vis_masks(
im,
batch_n_masks,
batch_c_masks,
on_kaggle_server=on_kaggle_server,
ind=i,
)
is_any_cuda = np.any([model["is_cuda"] for model in models])
if is_any_cuda:
data["image"] = data["image"].cuda()
results = []
weights = np.zeros((len(models),), dtype=np.float32)
how_join = np.zeros((len(models),), dtype=np.object)
for mi, model_dict in enumerate(models):  # mi: model index (avoid shadowing the batch index i)
mode = (
"segm"
if model_dict["hparams"]["segm_label_dir"] is not None
else "cam"
)
if isinstance(model_dict["model"], LitModel):
cam_pred, pred = get_class_mask(
data,
batch_idx=i,
args_hparams=model_dict["hparams"],
model=model_dict["model"],
infer_size=model_dict["hparams"]["input_size"],
pred_thresh=PRED_THRESH,
label_find_size=label_find_size,
stage=stage,
mode=mode,
tta_mode=tta_mode,
scales=scales,
)
else:
cam_dir = model_dict["path"]
cam_preds = []
preds = []
for input_id in data["input_id"]:
cam_path, pred_path = get_cam_pred_path(cam_dir, input_id)
cam_preds.append(np.load(str(cam_path)))
preds.append(np.load(str(pred_path)))
cam_pred = torch.from_numpy(np.array(cam_preds))
pred = torch.from_numpy(np.array(preds))
if is_any_cuda:
cam_pred = cam_pred.cuda()
pred = pred.cuda()
cam_pred = F.interpolate(
cam_pred, label_find_size, mode="bilinear", align_corners=False
)
results.append({"cam_pred": cam_pred, "pred": pred})
weights[i] = model_dict["weight"]
how_join[i] = model_dict["how_join"]
cam_pred, pred = process_ensemble(
results=results, how_join=how_join, weights=weights
)
if is_debug:
num_ = 4
inputs = F.interpolate(
torch.Tensor(im.transpose(0, 3, 1, 2)), label_find_size
)
inputs = inputs.numpy().transpose(0, 2, 3, 1)[..., :3].astype(np.uint8)
_ = plt.figure(figsize=(10 * num_, 10))
out_pred = LitModel.overlay_cam_on_input(
inputs=inputs,
cam_mask=cam_pred,
targets=pred,
batch_num=cam_pred.shape[0],
stack_axis=1,
threshold=PRED_THRESH,
)
inputs = np.concatenate(inputs, axis=1)
out = np.concatenate((inputs, out_pred), axis=0)
plt.imshow(out)
plt.axis("off")
if on_kaggle_server:
plt.savefig(f"./cam_{i}.png")
else:
plt.show()
plt.close()
w_size_batch = data["w_size"].numpy()
h_size_batch = data["h_size"].numpy()
cam_pred =
|
np.where(cam_pred <= cam_thresh, 0.0, cam_pred)
|
numpy.where
|
#!/usr/bin/env python
# coding=utf-8
import logging
import warnings
from collections import namedtuple
from numbers import Real
from typing import Sequence, Tuple, Union, Dict
import numpy as np
from pygc import great_distance
from ioos_qc.utils import (
isnan,
isfixedlength
)
L = logging.getLogger(__name__) # noqa
class QartodFlags(object):
"""Primary flags for QARTOD."""
GOOD = 1
UNKNOWN = 2
SUSPECT = 3
FAIL = 4
MISSING = 9
FLAGS = QartodFlags # Default name for all check modules
N = Real
span = namedtuple('Span', 'minv maxv')
# Convert dates to datetime64 and leave datetimes alone. This also normalizes
# everything to nanosecond precision
def mapdates(dates):
if hasattr(dates, 'dtype') and np.issubdtype(dates.dtype, np.datetime64):
return dates.astype('datetime64[ns]')
else:
return np.array(dates, dtype='datetime64[ns]')
def qartod_compare(vectors : Sequence[Sequence[N]]
) -> np.ma.MaskedArray:
"""Aggregates an array of flags by precedence into a single array.
Args:
vectors: An array of uniform length arrays representing individual flags
Returns:
A masked array of aggregated flag data.
"""
shapes = [v.shape[0] for v in vectors]
# Assert that all of the vectors are the same size.
assert all([s == shapes[0] for s in shapes])
assert all([v.ndim == 1 for v in vectors])
result = np.ma.empty(shapes[0])
result.fill(QartodFlags.MISSING)
priorities = [
QartodFlags.MISSING,
QartodFlags.UNKNOWN,
QartodFlags.GOOD,
QartodFlags.SUSPECT,
QartodFlags.FAIL
]
# For each of the priorities in order, set the resultant array to that flag
# wherever it appears in any of the vectors (later priorities take precedence).
for p in priorities:
for v in vectors:
idx = np.where(v == p)[0]
result[idx] = p
return result
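# Worked example (added for illustration), with GOOD=1, SUSPECT=3, FAIL=4, MISSING=9:
#
#   v1 = np.ma.array([1, 1, 3, 9])
#   v2 = np.ma.array([1, 4, 1, 1])
#   qartod_compare([v1, v2])   # -> [1, 4, 3, 1]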
def location_test(lon : Sequence[N],
lat : Sequence[N],
bbox : Tuple[N, N, N, N] = (-180, -90, 180, 90),
range_max : N = None
) -> np.ma.core.MaskedArray:
"""Checks that a location is within reasonable bounds.
Checks that longitude and latitude are within reasonable bounds defaulting
to lon = [-180, 180] and lat = [-90, 90]. Optionally, check for a maximum
range parameter in great circle distance defaulting to meters which can
also use a unit from the quantities library. Missing and masked data is
flagged as UNKNOWN.
Args:
lon: Longitudes as a numeric numpy array or a list of numbers.
lat: Latitudes as a numeric numpy array or a list of numbers.
bbox: A length 4 tuple expressed in (minx, miny, maxx, maxy) [optional].
range_max: Maximum allowed range expressed in geodesic curve distance (meters).
Returns:
A masked array of flag values equal in size to that of the input.
"""
bboxnt = namedtuple('BBOX', 'minx miny maxx maxy')
if bbox is not None:
assert isfixedlength(bbox, 4)
bbox = bboxnt(*bbox)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
lat = np.ma.masked_invalid(np.array(lat).astype(np.floating))
lon = np.ma.masked_invalid(np.array(lon).astype(np.floating))
if lon.shape != lat.shape:
raise ValueError(
'Lon ({0.shape}) and lat ({1.shape}) are different shapes'.format(
lon, lat
)
)
# Save original shape
original_shape = lon.shape
lon = lon.flatten()
lat = lat.flatten()
# Start with everything as passing (1)
flag_arr = np.ma.ones(lon.size, dtype='uint8')
# If either lon or lat are masked we just set the flag to MISSING
mloc = lon.mask & lat.mask
flag_arr[mloc] = QartodFlags.MISSING
# If there is only one masked value fail the location test
mismatch = lon.mask != lat.mask
flag_arr[mismatch] = QartodFlags.FAIL
if range_max is not None and lon.size > 1:
# Calculating the great_distance between each point
# Flag suspect any distance over range_max
d = np.ma.zeros(lon.size, dtype=np.float64)
d[1:] = great_distance(
start_latitude=lat[:-1],
end_latitude=lat[1:],
start_longitude=lon[:-1],
end_longitude=lon[1:]
)['distance']
flag_arr[d > range_max] = QartodFlags.SUSPECT
# Ignore warnings when comparing NaN values even though they are masked
# https://github.com/numpy/numpy/blob/master/doc/release/1.8.0-notes.rst#runtime-warnings-when-comparing-nan-numbers
with np.errstate(invalid='ignore'):
flag_arr[(lon < bbox.minx) | (lat < bbox.miny) |
(lon > bbox.maxx) | (lat > bbox.maxy)] = QartodFlags.FAIL
return flag_arr.reshape(original_shape)
def gross_range_test(inp : Sequence[N],
fail_span : Tuple[N, N],
suspect_span : Tuple[N, N] = None
) -> np.ma.core.MaskedArray:
"""Checks that values are within reasonable range bounds.
Given a 2-tuple of minimum/maximum values, flag data outside of the given
range as FAIL data. Optionally also flag data which falls outside of a user
defined range as SUSPECT. Missing and masked data is flagged as UNKNOWN.
Args:
inp: Input data as a numeric numpy array or a list of numbers.
fail_span: 2-tuple range which to flag outside data as FAIL.
suspect_span: 2-tuple range which to flag outside data as SUSPECT. [optional]
Returns:
A masked array of flag values equal in size to that of the input.
"""
assert isfixedlength(fail_span, 2)
sspan = span(*sorted(fail_span))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))
# Save original shape
original_shape = inp.shape
inp = inp.flatten()
# Start with everything as passing (1)
flag_arr = np.ma.ones(inp.size, dtype='uint8')
# If the value is masked set the flag to MISSING
flag_arr[inp.mask] = QartodFlags.MISSING
if suspect_span is not None:
assert isfixedlength(suspect_span, 2)
uspan = span(*sorted(suspect_span))
if uspan.minv < sspan.minv or uspan.maxv > sspan.maxv:
raise ValueError('User span range may not exceed sensor span')
# Flag suspect outside of user span
with np.errstate(invalid='ignore'):
flag_arr[(inp < uspan.minv) | (inp > uspan.maxv)] = QartodFlags.SUSPECT
# Flag suspect outside of sensor span
with np.errstate(invalid='ignore'):
flag_arr[(inp < sspan.minv) | (inp > sspan.maxv)] = QartodFlags.FAIL
return flag_arr.reshape(original_shape)
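# Worked example (added for illustration):
#
#   gross_range_test([5, 50, 1.5, np.nan], fail_span=(1, 40), suspect_span=(2, 30))
#   # -> [1, 4, 3, 9]   (GOOD, FAIL, SUSPECT, MISSING)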
class ClimatologyConfig(object):
mem = namedtuple('window', [
'tspan',
'vspan',
'zspan'
])
def __init__(self, members=None):
members = members or []
self._members = members
@property
def members(self):
return self._members
def values(self, tind, zind=None):
span = (None, None)
for m in self._members:
# If we are between times
if tind > m.tspan.minv and tind <= m.tspan.maxv:
if not isnan(zind) and not isnan(m.zspan):
# If we are between depths
if zind > m.zspan.minv and zind <= m.zspan.maxv:
span = m.vspan
elif isnan(zind) and isnan(m.zspan):
span = m.vspan
return span
def add(self,
tspan : Tuple[N, N],
vspan : Tuple[N, N],
zspan : Tuple[N, N] = None) -> None:
assert isfixedlength(tspan, 2)
tspan = mapdates(tspan)
tspan = span(*sorted(tspan))
assert isfixedlength(vspan, 2)
vspan = span(*sorted(vspan))
if zspan is not None:
assert isfixedlength(zspan, 2)
zspan = span(*sorted(zspan))
self._members.append(
self.mem(
tspan,
vspan,
zspan
)
)
def climatology_test(config : Union[ClimatologyConfig, Sequence[Dict[str, Tuple]]],
inp : Sequence[N],
tinp : Sequence[N],
zinp : Sequence[N],
) -> np.ma.core.MaskedArray:
"""Checks that values are within reasonable range bounds and flags as SUSPECT.
Data for which no ClimatologyConfig member exists is marked as MISSING.
Args:
config: A ClimatologyConfig object or a list of dicts containing tuples
that can be used to create a ClimatologyConfig object. Dict should be composed of
keywords 'tspan' and 'vspan' as well as an optional 'zspan'
inp: Input data as a numeric numpy array or a list of numbers.
tinp: Time data as a numpy array of dtype `datetime64`.
zinp: Z (depth) data as a numeric numpy array or a list of numbers.
Returns:
A masked array of flag values equal in size to that of the input.
"""
# Create a ClimatologyConfig object if one was not passed in
if not isinstance(config, ClimatologyConfig):
c = ClimatologyConfig()
for climate_config_dict in config:
c.add(**climate_config_dict)
config = c
tinp = mapdates(tinp)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))
zinp = np.ma.masked_invalid(np.array(zinp).astype(np.floating))
# Save original shape
original_shape = inp.shape
tinp = tinp.flatten()
inp = inp.flatten()
zinp = zinp.flatten()
# Start with everything as passing (1)
flag_arr = np.ma.ones(inp.size, dtype='uint8')
# If the value is masked set the flag to MISSING
flag_arr[inp.mask] = QartodFlags.MISSING
for i, (tind, ind, zind) in enumerate(zip(tinp, inp, zinp)):
minv, maxv = config.values(tind, zind)
if minv is None or maxv is None:
flag_arr[i] = QartodFlags.MISSING
else:
# Flag suspect outside of climatology span
with np.errstate(invalid='ignore'):
if ind < minv or ind > maxv:
flag_arr[i] = QartodFlags.SUSPECT
return flag_arr.reshape(original_shape)
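# Illustrative usage sketch (not part of the original module): the config may be
# passed as a list of dicts and is converted to a ClimatologyConfig internally.
#
#     config = [{
#         'tspan': (np.datetime64('2020-01-01'), np.datetime64('2020-02-01')),
#         'vspan': (10, 20),
#         'zspan': (0, 100),
#     }]
#     climatology_test(config=config,
#                      inp=[12, 35],
#                      tinp=[np.datetime64('2020-01-15')] * 2,
#                      zinp=[5, 5])
#     # -> first value lies inside vspan (GOOD), second lies outside (SUSPECT)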
def spike_test(inp : Sequence[N],
suspect_threshold: N,
fail_threshold: N
) -> np.ma.core.MaskedArray:
"""Check for spikes by checking neighboring data against thresholds
Determine if there is a spike at data point n-1 by subtracting
the midpoint of n and n-2 and taking the absolute value of this
quantity, and checking if it exceeds a low or high threshold.
Values which do not exceed either threshold are flagged GOOD,
values which exceed the low threshold are flagged SUSPECT,
and values which exceed the high threshold are flagged FAIL.
Missing and masked data is flagged as UNKNOWN.
Args:
inp: Input data as a numeric numpy array or a list of numbers.
suspect_threshold: The SUSPECT threshold value, in observation units.
fail_threshold: The FAIL threshold value, in observation units.
Returns:
A masked array of flag values equal in size to that of the input.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))
# Save original shape
original_shape = inp.shape
inp = inp.flatten()
# Calculate the average of n-2 and n
ref =
|
np.zeros(inp.size, dtype=np.float64)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 19:11:52 2017
@author: mariapanteli
"""
import pytest
import os
import numpy as np
import pandas as pd
import scripts.load_features as load_features
feat_loader = load_features.FeatureLoader(win2sec=8)
TEST_METADATA_FILE = os.path.join(os.path.dirname(__file__), 'data', 'metadata.csv')
TEST_MELODIA_FILE = os.path.join(os.path.dirname(__file__), 'data', 'melodia_mel_1_2_1.csv')
def test_get_music_idx_from_bounds():
bounds = np.array([['0', '10.5', 'm']])
sr = feat_loader.framessr2
music_bounds = feat_loader.get_music_idx_from_bounds(bounds, sr=sr)
# upper bound minus half window size
#half_win_sec = 4.0 # assume 8-second window
win_sec = 8
music_bounds_true = np.arange(np.round(sr * (np.float(bounds[-1, 1]) - win_sec)), dtype=int)
assert np.array_equal(music_bounds, music_bounds_true)
def test_get_music_idx_from_bounds_short_segment():
# segments shorter than the window size are not processed
bounds = np.array([['0', '7.9', 'm']])
sr = feat_loader.framessr2
music_bounds = feat_loader.get_music_idx_from_bounds(bounds, sr=sr)
music_bounds_true = np.array([])
assert np.array_equal(music_bounds, music_bounds_true)
def test_get_music_idx_from_bounds_single_frame():
bounds = np.array([['0', '8.1', 'm']])
sr = feat_loader.framessr2
music_bounds = feat_loader.get_music_idx_from_bounds(bounds, sr=sr)
music_bounds_true = np.array([0])
assert np.array_equal(music_bounds, music_bounds_true)
def test_get_music_idx_from_bounds_mix_segments():
bounds = np.array([['0', '10.5', 'm'],
['10.5', '3.0', 's'],
['13.5', '5.0', 'm']])
sr = feat_loader.framessr2
music_bounds = feat_loader.get_music_idx_from_bounds(bounds, sr=sr)
#half_win_sec = 4.0 # assume 8-second window
win_sec = 8.0 # assume 8-second window
music_bounds_true = np.concatenate([np.arange(np.round(sr * (10.5 - win_sec)), dtype=int),
np.arange(np.round(sr * 13.5),
np.round(sr * (18.5 - win_sec)), dtype=int)])
assert np.array_equal(music_bounds, music_bounds_true)
def test_get_music_idx_from_bounds_overlap_segments():
bounds = np.array([['0', '10.5', 'm'],
['9.5', '3.0', 's'],
['11.5', '5.0', 'm']])
sr = feat_loader.framessr2
music_bounds = feat_loader.get_music_idx_from_bounds(bounds, sr=sr)
half_win_sec = 4.0 # assume 8-second window
win_sec = 8.0 # assume 8-second window
music_bounds_true = np.concatenate([np.arange(
|
np.round(sr * (10.5 - win_sec))
|
numpy.round
|
import string
from atexit import register
from copy import deepcopy
from operator import itemgetter
import random
import numpy as np
from psonic import *
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
from pymongo.errors import WriteError
from Naked.toolshed.shell import muterun_rb
from itertools import chain
from instrument_reference import *
# TODO: Optional: Change the code from random.choice to random.choices with probabilities - for granular selection.
# TODO: Continual: improve design of the optimization problem in run_GA_on(), and improve performance as it
# increases in size.
# TODO: Optional: Change the code to store instrument instructions of all lengths to db and get instructions of a
# specific length.
# TODO: Optional: Change the code to work with genres in the database and in GA.
# TODO: Optional: Change code so that user midi files are training data, train GA and store populations into midi
# file_name_collection, then make new instrument instructions by...
def main():
"""Prereq: sonic pi must be running to play sounds."""
# Set for additional console messages
debug = True
# Instantiate GA
ga = GA(debug=debug)
# Register stop function (stop music) to run upon program termination.
register(stop)
user_input = ''
while user_input != 'exit':
user_input = input("Enter command for GA: ")
if user_input == 'play':
# get song instructions in text form from GA
# command python sonic run(song_instructions)
if len(ga.song_instruments) > 0:
pi_code = ga.get_song_sonicpi_code()
run(pi_code)
print('Now playing')
if debug:
print("{} instrument(s) playing.".format(len(ga.song_instruments)))
else:
print("Error: Something needs to be on the music stack to get sonic pi code, run add_instrument.")
elif user_input == 'stop':
# command python sonic stop()
stop()
print('Stopped')
# elif user_input == 'upload_song': possible addition - if song is really good
# store song in the db (for use by user, or public top songs chart).
elif user_input == 'add_instrument':
ga.push_instrument()
stop()
pi_code = ga.get_song_sonicpi_code()
print('Added {}. {} instrument(s) playing.'.format(ga.song_instruments[-1][2], len(ga.song_instruments)))
# if in dev mode
if debug:
print("Song code (sonic pi):\n{}".format(pi_code))
run(pi_code)
elif user_input == 'reset':
print('Reset')
# reset ga variable
ga = GA(debug=debug)
# command python sonic stop()
stop()
elif user_input == 'rate':
print('Rating...')
if len(ga.song_instruments) > 0:
rating = input("Please enter a rating for newest instrument: ")
if rating.isdigit() and 1 <= int(rating) <= 10:
ga.rate_instrument(int(rating))
code = ga.get_song_sonicpi_code()
if debug:
print("Song code (sonic pi):\n{}".format(code))
run(code)
else:
print('Error: Rating entered is not a number between 1 to 10.')
else:
print('Error: There are no instruments to rate. Run add_instrument.')
elif user_input == 'apply_ga':
print('Applying GA')
# Stop music
stop()
# Run ga.
try:
ga.run_ga()
pi_code = ga.get_song_sonicpi_code()
run(pi_code)
if debug:
print("Song code (sonic pi):\n{}".format(pi_code))
except IndexError:
pass
elif user_input == 'toggle_debug':
debug = not debug
ga.debug = debug
print('Debugging is {}'.format("Enabled" if debug else "Disabled"))
elif user_input == 'set_reference':
"""Sets the GA's instrument_reference and instrument_rating, from a user defined midi file
and rating. The midi file is processed to conform to the data structure using in the GA."""
print("Setting reference from MIDI file.")
args = input("Please enter path to midi_file followed by a Temp folder name to store results:")
try:
rating = int(input("Please enter a rating, from 1-10, of this song: "))
try:
# Run midi2spi program to convert midi to txt tracks with notes and store it under
# SECOND_ARG/track_XX.txt
_ = muterun_rb('midi2spi.rb', args)
args = args.split()
# load ticks since last note, note duration, and note pitch data from first track
# into np.array
data = np.genfromtxt("./{}/Track_02.txt".format(args[1]),
delimiter=',',
usecols=(2, 4, 5),
skip_header=11,
dtype=float)
if debug:
print("Data shape on load: {}".format(data.shape))
# try to get an estimated bpm from header that was generated file.
bpm = 120
try:
with open('./{}/header.txt'.format(args[1])) as f:
for line in f:
line = line.split()
if line[0] == 'BPM:':
bpm = int(line[1]) * 6
break
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
if debug:
print("BPM: {}".format(bpm))
# Set the sleep vals
data[:, 0] = np.round(data[:, 0] / bpm, decimals=3)
data[-1, 0] = 1
# Set the duration vals
data[:, 1] = np.round(data[:, 1] / bpm, decimals=3)
# Now put data into our format
head = ['live_loop :ref do\n', 'use_synth :', 'piano', '\n', 'with_fx :', 'none',
' do\n']
data = [['sleep ', vals[0], '\n',
'play ', vals[2], ', amp: ', 1, ', pan: ', 0, ', release: ', 0.5, ', attack: ', 0.03,
', sustain: ', vals[1], ', decay: ', 0.5, '\n',
] for vals in data]
data = list(chain.from_iterable(data))
end = ['end\nend\n']
result = head + data + end
if debug:
# concat function
if len(result) > 0:
text = ''
text = text + ''.join(str(i) for i in result)
print("Final output (Sonic PI):\n{}".format(text))
# set the reference instrument in GA
ga.instrument_reference = result
ga.instrument_rating = rating
ga.instrument_reference_name = args[1]
except FileNotFoundError as fnf:
print("Error: Midi txt file not found. {}".format(fnf.strerror))
except ValueError:
print("Error: Please enter an integer.")
elif user_input != 'exit':
print("""Usage is: GA [options]
play - Play current GA instruments
stop - Stop playing GA instruments
add_instrument - Generate a new instrument and add to instrument stack
reset - Reset the GA, GA instruments, and GA reference instrument.
rate - Rate the top most instrument
set_reference - Set reference midi file and rating to train GA
apply_GA - Apply Genetic Algorithm to instrument at the top of the instrument stack
toggle_debug - Toggle additional console output messages
exit - Exit program\n""")
print('Program exit.')
class GA:
def __init__(self, population_size=50, # Modulates search space size.
instrument_size=10, # Amount of sleep and play note pairs included in instrument track.
generations=10, # Max number of generations to compute before a solution is found.
crossover_rate=0.7, # Crossover probability [0-1)
mutation_rate=0.1, # Mutation probability [0-1)
tol=0.1, # Threshold difference between ref and GA'd instrument scores that counts as a solution.
use_db=True, # Specify whether db connection is enabled.
debug=False # Specify for additional console printouts
):
# Population size: Too large: long epoch time, restricting acceptable generation times.
# Too small: not good coverage of search space.
# Mutation rate: Too high: risk of individuals jumping over a solution they were close to.
# Too small: individuals getting stuck in local minimums.
"""Instantiate GA object and member variables to specified values"""
self.instrument_population = [] # Population array: [ { 'rating': 5, 'instructions': [1,...] }, .... ]
self.new_instrument_population = [] # New Population array: [ { 'rating': 5, 'instructions': [1,...] }, .... ]
self.song_instruments = [] # array of array of instrument instructions
self.song_instruments_ratings = [] # array of ratings from user that correspond to song_instruments.
self.use_db = use_db
self.population_size = population_size
self.instrument_size = instrument_size
self.generations = generations # Max generations to iterate over population before tol has been found
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
self.curr_down_vote_count = 0 # Used to keep count how many downvotes for a instrument a person has made.
self.tol = tol
self.debug = debug
self.instrument_reference = instrument_reference
self.instrument_rating = instrument_rating
self.instrument_reference_name = instrument_reference_name
self.scores_ = [] # Array that stores score values at every generation when run_GA_on() is called.
"""PARAMETERS"""
self.synths = ['prophet', 'saw', 'dpulse', 'cnoise', 'subpulse',
'piano', 'chiplead', 'dull_bell', 'pretty_bell',
'hoover', 'pluck', 'tech_saws']
self.synths_probs = np.array([0.01, 0.04, 0.01, 0.01, 0.02, 0.53, 0.03, 0.05, 0.05, 0.05, 0.1, 0.1])
self.sleep_vals = [np.arange(0, 2, 0.03), np.arange(0, 2, 0.02)]
self.play_vals = range(20, 95)
self.amp_vals = [1]
self.pan_vals = [0]
self.release_vals = [0.2, 0.3, 0.4, 0.5]
self.attack_vals = [0, 0.02, 0.04, 0.06]
self.sustain_vals = np.arange(0, 0.5, 0.1)
self.decay_vals = [0.05]
self.fx_vals = ["none"]
# sample_vals = [...] optional
if use_db:
# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string
# client = MongoClient(port=27017)
client = MongoClient("mongodb://admin1:12345@ds<EMAIL>.mlab.com:25016/music_db")
self.db = client.music_db
# The collections are indexed by '_id' key that is automatically inserted on upload.
# '_id' is equal to the hash of srt(instrument_instructions)
# Duplicate instruments are prevented from upload for this reason.
"""Instrument Data Structure: Header: indices 0 - 6, where indices 2 and 5 are synth and fx values respectively.
DATA: indices 7 - (instrument_size*18 - 1), where:
play is followed by 7 floats,
sleep is followed by 1 float which can take on the values:
[0.0, 0.25, 0.33, 0.5, 0.66, 0.75, 1.0, 1.25],
The sleep and play values alternate between each other.
I believe this data structure is better than an object because we can access it easier for crossover and
mutation functions, and we can assume its structure. But this is open to ideas."""
"""Class Methods"""
def push_instrument(self):
"""Generates and pushes an instrument into the instrument stack, uses GA and DB if db is enabled."""
if self.debug:
print("Pushing new instrument")
# Append to the stack a GA'd instrument
inst_dict = self.run_ga_on()
if self.debug:
print("GA ran and picked best instrument with a rating of {}".format(inst_dict['rating']))
self.song_instruments.append(inst_dict['instructions'])
def pop_instrument(self):
"""Pop top instrument from the instrument stack"""
if len(self.song_instruments) > 0:
self.song_instruments.pop(-1)
else:
print("No instruments to pop from the stack")
def run_ga(self):
"""Run genetic algorithm on the top item in song_instruments
Precondition: There has to be an instrument in the song stack and it has to have been rated.
Postcondition: The instrument at top of the stack will be applied GA"""
try:
inst_dict = self.run_ga_on(self.song_instruments[-1], self.song_instruments_ratings[-1])
if self.debug:
print("GA ran and picked best instrument with a rating of {}".format(inst_dict['rating']))
self.song_instruments[-1] = inst_dict['instructions']
except IndexError:
print("Error: Something needs to be on the music stack and have been rated to apply GA. " +
"Run push_instrument.")
raise
def get_song_sonicpi_code(self, start=0):
"""Returns formatted sonic pi code from song_instruments instructions. Optionally, start from
a specified index from song_instruments where startIndex < len(song_instruments) """
if len(self.song_instruments) > 0:
text = ''
for instrument in self.song_instruments[start:]:
text = text + ''.join(str(i) for i in instrument)
return text
else:
print("Error: Something needs to be on the music stack to get sonic pi code, run push_instrument.")
def rate_instrument(self, rating):
"""Rates the instrument at the top of the instrument stack, on a scale of 1 to 10, saves to db if enabled,
then generates a GA instrument from db or a random one and pushes to the song stack.
Precondition: self has instrument in stack and rating is 1 <= rating <= 10. Connection to db if enabled.
Post-condition: instrument in top of stack will have a rating (fitness)"""
# assert that ga has instrument and 1 <= rating <= 10
if len(self.song_instruments) > 0 and (1 <= rating <= 10):
# append rating to song_instruments_ratings
self.song_instruments_ratings.append(rating)
# Record user's input to db
if self.use_db:
self.upload_instrument(self.song_instruments[-1], rating=rating)
# perform an update
if rating >= 5:
# save topmost instrument to db.
self.curr_down_vote_count = 0
# generate (random or from Ga from db) a new instrument and push it onto the song stack.
self.push_instrument()
else:
# increment number of downvotes
self.curr_down_vote_count += 1
# if down votes is too much, replace instrument, else mutate it.
if self.curr_down_vote_count > 3:
# delete top instrument and put another one in place
self.song_instruments.pop()
if self.debug:
print("Popped top instrument")
self.push_instrument()
else:
self.mutate(self.song_instruments[-1], probability=0.5)
if self.debug:
print('Mutated instrument.')
else:
print('Error in rating: either this GA does not have an instrument or rating is not from 1 to 10.')
"""HELPER FUNCTIONS"""
def generate_instrument_population(self, instrument, size):
"""Return an array of dicts of instruments, containing rating & instructions, of len=size from db or randomly
generated. The list will be sorted in descending order of ratings."""
# Returning result
result = []
# Initialize a sample reference rating and instrument to score random instruments against.
ref_inst = self.instrument_reference[:7+self.instrument_size*18] + ['end\nend\n']
ref_inst[2] = instrument
ref_rating = self.instrument_rating
if self.use_db:
# get up-to size amount of instrument from db and put into result. Items are dictionaries,
# sorted by descending ratings.
result = self.load_instruments(instrument, size)
if result is None:
result = []
# if len(result) != 0:
# # Optional
# # pick the first most element as reference for use during scoring as it might be better for scoring
# # than the hardcoded reference that was assigned above.
# if result[0]['rating'] >= ref_rating:
# ref_rating = result[0]['rating']
# ref_inst = result[0]['instructions']
# generate new instruments if necessary and add to result.
for i in range(len(result), size):
rand_inst = self.generate_instrument(instrument, self.instrument_size)
result.append({'rating': self.score(rand_inst, ref_inst, ref_rating), 'instructions': rand_inst})
result = sorted(result, key=itemgetter('rating'), reverse=True)
return result
def generate_instrument(self, instrument, size):
"""Returns an array of instrument instructions of data len=size and type=instrument."""
result = []
# randomly set sleep intervals to multiples of 0.25 or 0.33
sleep_val = random.choice(self.sleep_vals)
# Append Live loop, use_synth, with_fx HEADER
result.append('live_loop :{} do\n'.format(''.join(random.choices(string.ascii_uppercase, k=5))))
result.append('use_synth :')
result.append(instrument)
result.append('\n')
result.append('with_fx :')
result.append(random.choice(self.fx_vals))
result.append(' do\n')
# Append play and sleep DATA
for i in range(0, size):
result.append('sleep ') # index 7, 25,... (7 + (18 * (size-1)) where size > 0)
result.append(random.choice(sleep_val)) # index 8, 26,... (8 + (18 * (size-1)) where size > 0)
result.append('\n') # index 9, 27,... (9 + (18 * (size-1)) where size > 0)
result.append('play ') # index 10, (10 + (18 * (size-1)) where size > 0)
result.append(random.choice(self.play_vals)) # index 11, (11 + (18 * (size-1)) where size > 0)
result.append(', amp: ') # index 12, (12 + (18 * (size-1)) where size > 0)
result.append(random.choice(self.amp_vals)) # index 13, (13 + (18 * (size-1)) where size > 0)
result.append(', pan: ') # index 14, (14 + (18 * (size-1)) where size > 0)
result.append(random.choice(self.pan_vals)) # index 15, (15 + (18 * (size-1)) where size > 0)
result.append(', release: ') # index 16, (16 + (18 * (size-1)) where size > 0)
result.append(random.choice(self.release_vals)) # index 17, (17 + (18 * (size-1)) where size > 0)
result.append(', attack: ') # index 18, (18 + (18 * (size-1)) where size > 0)
result.append(random.choice(self.attack_vals)) # index 19, (19 + (18 * (size-1)) where size > 0)
result.append(', sustain: ') # index 20, (20 + (18 * (size-1)) where size > 0)
result.append(random.choice(self.sustain_vals)) # index 21, (21 + (18 * (size-1)) where size > 0)
result.append(', decay: ') # index 22, (22 + (18 * (size-1)) where size > 0)
result.append(random.choice(self.decay_vals)) # index 23, (23 + (18 * (size-1)) where size > 0)
result.append('\n') # index 24, (24 + (18 * (size-1)) where size > 0)
result.append('end\nend\n') # index 25, 43,... (25 + (18 * (size-1)) where size > 0)
return result
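# Illustrative layout note (not in the original source): for size=10 the list
# returned above contains 7 header items (indices 0-6, synth name at index 2,
# fx name at index 5), 18 items per sleep/play pair (indices 7-186) and one
# closing 'end\nend\n' footer, i.e. len(result) == 7 + 18 * 10 + 1 == 188.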
def run_ga_on(self, ref_inst=None, ref_rating=None):
"""Runs Genetic Algorithm on the reference instrument (ref_inst) or on a sample of picked
from instrument population and returns a new, GA'd, version of it. Best picks from the population
will be uploaded to db. Instrument population is either randomly generated or loaded from db.
Precondition: instrument_instructions and prev_population must be of the same type of instrument
and of same lengths.
Post condition: A best pick GA'd instrument will be returned. self.instrument_population will be updated
with best instruments after GA and will uploaded to db. self.new_instrument_population will be empty."""
# Print init
print("Running GA")
# Reset scores_ to store scores for this run.
self.scores_ = []
# Handle the case when generating a new GA instrument or if we are applying GA to existing instrument
if ref_inst is None:
# pick a synth (synth list is not exhaustive and changeable)
instrument = np.random.choice(self.synths, p=self.synths_probs)
# Load population from database (if enabled) and automatically generate new chromosomes if necessary.
self.instrument_population = self.generate_instrument_population(instrument,
self.population_size)
ref_rating = self.instrument_rating
ref_inst = self.instrument_reference
if self.debug:
print("Reference name: {}, Reference Rating: {}".format(self.instrument_reference_name, ref_rating))
else:
# Load population from database (if enabled) and automatically generate new chromosomes if necessary.
# In this case we already have a ref_inst and ref_rating so it is not necessary to set one.
self.instrument_population = self.generate_instrument_population(ref_inst[2],
self.population_size)
if self.debug:
print("Reference name: {}, Reference Rating: {}".format(ref_inst[2], ref_rating))
i = 0
found_solution = False
while i < self.generations and not found_solution:
# increment gen count
i += 1
# assert new population array is empty
self.new_instrument_population.clear()
for _ in range(len(self.instrument_population)):
# Select two instruments
inst_1 = self.select_member(self.instrument_population)
inst_2 = self.select_member(self.instrument_population)
inst_1_copy = deepcopy(inst_1)
inst_2_copy = deepcopy(inst_2)
# Crossover and mutate
self.crossover(inst_1['instructions'], inst_2['instructions'], self.crossover_rate)
self.mutate(inst_1['instructions'], self.mutation_rate)
self.mutate(inst_2['instructions'], self.mutation_rate)
# Check whether a change was made to inst_1 or inst_2; if not, skip this iteration. We do not have
# to calculate scores, check for a solution, or add inst_1 or inst_2 to the new_instrument_population
# array if they are unchanged copies of members already in the instrument population. This helps minimize
# duplicate instruments appearing in the instrument population. Note that duplicates can still appear in an
# edge case: an instrument is crossed over or mutated and added to the population, and is later crossed over
# and mutated in a way that sets it back to its original form and added back to the population.
if inst_1 == inst_1_copy or inst_2 == inst_2_copy:
continue
# Re-score the instruments. Here we optimize (try to find a solution) to likeness of the ref_inst. In
# the case we have an item in the stack we want to apply GA, ref_inst=item_in_the_stack. In the case
# where we just want to randomly generate an instrument from the database or randomly generated
# population, ref_inst = highest rated instrument from population. Note that randomly generated
# instruments from self.generate_instrument_population() are pre-scored against a hard coded reference
# for offline purposes and initial database creation. The reference and inst_1/2 must be same length.
# We can later modify this behavior.
inst_1['rating'] = self.score(inst_1['instructions'], ref_inst, ref_rating)
inst_2['rating'] = self.score(inst_2['instructions'], ref_inst, ref_rating)
# if offspring rating is within self.tol of ref_rating we have found a solution.
if abs(inst_1['rating']-ref_rating) <= self.tol:
if self.debug:
print("Found a solution of rating={}, within tol={} of reference instrument.".format(
inst_1['rating'], self.tol))
self.new_instrument_population.append(inst_1)
found_solution = True
break
if abs(inst_2['rating']-ref_rating) <= self.tol:
if self.debug:
print("Found a solution of rating={}, within tol={} of reference instrument.".format(
inst_2['rating'], self.tol))
self.new_instrument_population.append(inst_2)
found_solution = True
break
# Add to the new pool
self.new_instrument_population.append(inst_1)
self.new_instrument_population.append(inst_2)
# Add the new population to the old population
self.instrument_population.extend(self.new_instrument_population)
# Only keep the best of the last 3 generations if instrument_population becomes too large to compute.
if len(self.instrument_population) >= self.population_size * 4:
# sort the population by descending rating
self.instrument_population = sorted(self.instrument_population, key=itemgetter('rating'), reverse=True)
# Prune the instrument_population to prevent it from becoming too large.
self.instrument_population = self.instrument_population[:self.population_size]
# Store the training scores for every generation
score = max(item['rating'] for item in self.instrument_population)
self.scores_.append(score)
# Print the current generation training score.
if self.debug:
print("Generation: {} Score: {}".format(i, score))
# At this point, either no solution was found and max number of generations was reached, so choose best pick, or
# a good enough solution was found so choose it.
# sort the population by descending rating
self.instrument_population = sorted(self.instrument_population, key=itemgetter('rating'), reverse=True)
# Prune the instrument_population to prevent it from becoming too large and upload the best to db.
self.instrument_population = self.instrument_population[:self.population_size]
if self.use_db:
self.upload_instruments(self.instrument_population)
# New population not needed, release memory.
self.new_instrument_population.clear()
return self.instrument_population[0]
def select_member(self, population):
"""Selects a {rating, instrument} dict from population based on roulette wheel selection
and returns a copy of it.
Precondition: population is an array of rating, instrument dicts
Postcondition: a {rating, instrument} dict from the population is returned."""
maximum = sum([c['rating'] for c in population])
pick = random.uniform(0, maximum)
current = 0
for chromosome in np.random.choice(population, size=len(population), replace=False):
current += chromosome['rating']
if current > pick:
return deepcopy(chromosome)
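# Illustrative example (not in the original source): with population ratings
# [1, 2, 3, 4], the roulette wheel above selects each member with probability
# proportional to its rating, i.e. 0.1, 0.2, 0.3 and 0.4 respectively.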
def score(self, instrument, reference, ref_rating):
"""This function scores an instrument based on a known reference instrument and rating. It returns a score
corresponding to how well the instrument is related to the reference. This is very theoretical. Alternate
idea for scoring is to have a point scoring system based on known likes and dislikes in music theory.
Precondition: both instruments must be of the same type and size.
Post-condition: a number (int or float) representing how well the instrument matches the reference
is returned."""
# list of base indices of where the sleep and play data lie
base_indices = np.array([8, 11, 13, 15, 17, 19, 21, 23])
# Create an array of indices from base indices for specific instrument
indices = np.mgrid[0:self.instrument_size*18:18, 0:8][0]
indices = np.ravel(indices + base_indices)
# get the sleep and play data from instrument and reference
inst_data = np.array(itemgetter(*indices)(instrument))
ref_data = np.array(itemgetter(*indices)(reference))
# calculate instrument and reference difference
differences = np.abs(inst_data - ref_data)
sum_of_diffs = np.sum(differences)
# return an estimated rating based on differences
if sum_of_diffs == 0:
return ref_rating
else:
# 90 is max ins vs ref difference for instrument of size 1. This line might result in floats.
return ref_rating - (ref_rating * sum_of_diffs/(83.96*self.instrument_size))
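# Worked example (illustrative, not in the original source): with ref_rating=8,
# instrument_size=10 and a summed sleep/play difference of 100, the estimate is
# 8 - 8 * 100 / (83.96 * 10) ~= 7.05; identical instruments return ref_rating.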
def crossover(self, instrument1, instrument2, probability):
"""Crossover: Select a point in instrument1,
swap contents of each array from that point on.
Side Affects instrument1, instrument2"""
make_change = np.random.random() < probability
if make_change:
# select random index in instrument data (not header)
crossover_index = instrument1.index(random.choice(instrument1[7:]))
swap = instrument1[crossover_index:]
# swap
instrument1[crossover_index:] = instrument2[crossover_index:]
instrument2[crossover_index:] = swap
def mutate(self, instrument, probability):
"""Rolls a dice for random mutation of instructions in array. Note: The items that can be mutated will
not include the header. This function has side effects."""
# list of base indices of where the sleep data lie
indices =
|
np.arange(8, self.instrument_size * 18, 18)
|
numpy.arange
|
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
# from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
# import keras
import keras.backend as KB
# import keras.layers as KL
# import keras.initializers as KI
import keras.engine as KE
# import keras.models as KM
sys.path.append('..')
import mrcnn.utils as utils
import pprint
############################################################
##
############################################################
def get_layer_output(model, model_input,output_layer, training_flag = True):
_my_input = model_input
for name,inp in zip(model.input_names, model_input):
print(' Input Name: ({:24}) \t Input shape: {}'.format(name, inp.shape))
_mrcnn_class = KB.function(model.input , model.output)
# [model.keras_model.layers[output_layer].output])
output = _mrcnn_class(_my_input)
for name,out in zip (model.output_names,output):
print(' Output Name: ({:24}) \t Output shape: {}'.format(name, out.shape))
return output
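# Illustrative note (not in the original source): build_predictions() below
# invokes this as get_layer_output(self.model, input, 229, 1.0). The
# output_layer and training_flag arguments are currently unused by the body
# above, which simply runs the full model and echoes input/output shapes.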
class PCTensor():
"""
Subsamples proposals and generates target box refinement, class_ids, and masks for each.
Inputs:
-------
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns:
-------
Target ROIs and corresponding class IDs, bounding box shifts, and masks.
tensor : [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
stacked: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,(dy, dx, log(dh), log(dw), class_id)]
Class-specific bbox refinements.
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
Masks cropped to bbox boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, model, outputs= None):
# super(DetectionTargetLayer, self).__init__(**kwargs)
# super().__init__(**kwargs)
self.config = model.config
self.model = model.keras_model
self.mdl_outputs = outputs
def build_gaussian_np(self):
from scipy.stats import multivariate_normal
pp = pprint.PrettyPrinter(indent=2, width=100)
img_h, img_w = self.config.IMAGE_SHAPE[:2]
num_images = self.config.BATCH_SIZE
num_classes = self.config.NUM_CLASSES
num_rois = self.config.TRAIN_ROIS_PER_IMAGE
# print(bbox.shape)
X = np.arange(0, img_w, 1)
Y = np.arange(0, img_h, 1)
X, Y = np.meshgrid(X, Y)
pos = np.empty((num_rois,) + X.shape + (2,)) # concatenate with X's shape to make (num_rois, X.rows, X.cols, 2)
print(pos.shape)
pos[:,:,:,0] = X;
pos[:,:,:,1] = Y;
# Build the covariance matrix
pp1 = np.full((32), 12.0)
pp2 = np.full((32), 19.0)
cov = np.stack((pp1,pp2),axis=-1)
k_sess = KB.get_session()
# prt = self.pred_stacked
Zout = np.zeros((num_images, num_classes, img_w, img_h))
print(' COVARIANCE SHAPE:',cov.shape)
# print('PRT SHAPES:', prt[0].shape, prt[1].shape)
for img in range(num_images):
ps = self.pred_stacked[img] # .eval(session = k_sess) # .eval(session=k_sess)
print('shape of ps', ps.shape)
print(ps)
for cls in range(num_classes):
cls_idxs = np.argwhere(ps[:,6] == cls).squeeze()
# ps = _ps[cls_idxs,:]
print('cls:',cls,' ',cls_idxs)
width = ps[:,5] - ps[:,3]
height = ps[:,4] - ps[:,2]
cx = ps[:,3] + ( width / 2.0)
cy = ps[:,2] + ( height / 2.0)
means = np.stack((cx,cy),axis = -1)
print(type)
print(ps.shape, type(ps),width.shape, height.shape, cx.shape, cy.shape, type(means),means.shape)
rv = list( map(multivariate_normal, means, cov))
pdf = list( map(lambda x,y: x.pdf(y) , rv, pos))
pdf_arr = np.asarray(pdf)
print(pdf_arr.shape)
pdf_sum = np.sum(pdf_arr[[cls_idxs]],axis=0)
Zout[img,cls] += pdf_sum
return Zout
def get_pred_stacked(self):
'''
return all bboxes for images in a list, one ndarray per image
'''
pred_stacked = []
for img in range(self.config.BATCH_SIZE):
_substack = np.empty((0,8),dtype=np.float32)
for cls in range(self.config.NUM_CLASSES):
# if self.pred_cls_cnt[img, cls] > 0:
# _substack.append( self.pred_tensor[img, cls, 0:self.pred_cls_cnt[img, cls]] )
_substack = np.vstack((_substack, self.pred_tensor[img, cls, 0:self.pred_cls_cnt[img, cls]] ))
pred_stacked.append(np.asarray(_substack))
# self.pred_stacked.append(tf.concat(_substacked , 0))
print('get stacked: pred_stacekd shape:',len(pred_stacked), pred_stacked[0].shape)
return pred_stacked
def build_predictions(self, input = None):
self.build_gt(input)
# // pass model to TensorBuilder
num_images = self.config.BATCH_SIZE
num_classes = self.config.NUM_CLASSES
num_rois = self.config.TRAIN_ROIS_PER_IMAGE
num_max_gt = self.config.DETECTION_MAX_INSTANCES
num_cols = 8
h, w = self.config.IMAGE_SHAPE[:2]
class_idx = self.model.output_names.index('mrcnn_class')
bbox_idx = self.model.output_names.index('mrcnn_bbox')
outroi_idx = self.model.output_names.index('output_rois')
if self.mdl_outputs is None:
_mdl_outputs = get_layer_output(self.model, input , 229, 1.0)
self.mrcnn_class = _mdl_outputs[class_idx]
self.mrcnn_bbox = _mdl_outputs[bbox_idx]
self.output_rois = _mdl_outputs[outroi_idx] * np.array([h,w,h,w])
# print('mrcnn_class idx: {} mrcnn_bbox idx : {} output_rois idx : {}'.format(class_idx, bbox_idx,outroi_idx))
# print(' mrcnn_bbox : \n',self.mrcnn_bbox[0,0,:,:])
# mdl_outputs[outroi_idx] returns the normalized coordinates, we multiply by h,w to get true coordinates
_pred_arr = np.zeros((num_images, num_classes, num_rois, num_cols )) # img_in_batch, 4, 32, 8
_pred_tensor = np.zeros_like(_pred_arr)
self.pred_stacked = []
self.pred_cls_cnt= np.zeros((num_images, num_classes), dtype='int16')
# print('mrcnn_class shape : ', type(self.mrcnn_class), 'mrcnn_bbox.shape : ', type(self.mrcnn_bbox),\
# 'output_rois.shape : ', self.output_rois.shape, 'pred_tensor shape : ', _pred_tensor.shape )
# print(self.output_rois)
#---------------------------------------------------------------------------
# use the argmax of each row to determine the dominating (predicted) class
#---------------------------------------------------------------------------
_pred_class = np.argmax(self.mrcnn_class[:,:,:],axis=2).astype('int16') # (32,)
# print('mrcnn_class is: \n',self.mrcnn_class)
# print('_pred_class is: \n',_pred_class.shape,'\n',_pred_class)
for img in range(num_images):
_substacked = []
for cls in range(num_classes) :
_class_idxs = np.argwhere( _pred_class[img,:] == cls )
self.pred_cls_cnt[img,cls] = _class_idxs.shape[0]
# print('img/cls is: ' , img,'/',cls, '_class_idxs: ' , _class_idxs)
for j , c_idx in enumerate(_class_idxs):
_pred_arr[img, cls, j, 0] = j
_pred_arr[img, cls, j, 1] = np.max(self.mrcnn_class[img, c_idx ]) # probability
_pred_arr[img, cls, j,2:6] = self.output_rois[img,c_idx] # roi coordinates
_pred_arr[img, cls, j, 6] = cls # class_id
_pred_arr[img, cls, j, 7] = c_idx # index from mrcnn_class array (temp for verification)
# sort each class in descending prediction order
order = _pred_arr[img,cls,:,1].argsort()
_pred_arr[img, cls,:,1:] = _pred_arr[img, cls, order[::-1] ,1:] #[img, cls,::-1]
# _pred_tensor[img, cls,:,0] = _pred_arr[img, cls,:,0]
# for cls in range(0,num_classes):
# if self.pred_cls_cnt[img, cls] > 0:
# _substacked.append( _pred_arr[img, cls, 0:self.pred_cls_cnt[img, cls]] )
# self.pred_stacked.append(np.concatenate(_substacked,0))
# self.pred_tensor = tf.convert_to_tensor(_pred_arr)
self.pred_tensor = _pred_arr
self.pred_stacked = self.get_pred_stacked()
# print('pred_tensor type, shape :', type(self.pred_tensor), self.pred_tensor.shape)
# for img in range(num_images):
# print(self.pred_tensor[img].eval(session=KB.get_session()))
# print(self.pred_tensor[img])
# print('img ', img, ' substacked')
# print(self.pred_stacked[img].eval(session=KB.get_session()))
return
def build_gt(self, input):
num_images = self.config.BATCH_SIZE
num_classes = self.config.NUM_CLASSES
num_max_gt = self.config.DETECTION_MAX_INSTANCES
num_cols = 8
gtcls_idx = self.model.input_names.index('input_gt_class_ids')
gtbox_idx = self.model.input_names.index('input_gt_boxes')
gtmsk_idx = self.model.input_names.index('input_gt_masks')
gt_classes = input[gtcls_idx]
gt_bboxes = input[gtbox_idx]
_pred_arr = np.zeros((num_images, num_classes, num_max_gt, num_cols )) # img_in_batch, 4, 32, 8
self.gt_tensor = np.zeros_like(_pred_arr)
self.gt_cls_cnt = np.zeros((num_images, num_classes), dtype='int16')
# gt_masks = sample_x[gtmsk_idx][0,:,:,nz_idx]
# gt_indexes = np.arange(gt_classes.shape[0],dtype='int16')
# gt_probs = np.ones(gt_classes.shape[0])
# print('gt_classes.shape :',gt_classes.shape, 'gt_boxes.shape :',gt_bboxes.shape)
for img in range(num_images):
for cls in range(num_classes) :
_class_idxs = np.argwhere( gt_classes[img, :] == cls)
# print('k is: ' , k, '_class_idxs: ' , _class_idxs)
self.gt_cls_cnt[img, cls] = _class_idxs.shape[0]
for j , c_idx in enumerate(_class_idxs):
self.gt_tensor[img, cls, j, 0] = j
self.gt_tensor[img, cls, j, 1] = 1.0 # probability
self.gt_tensor[img, cls, j, 2:6] = gt_bboxes[img,c_idx,:] # roi coordinates
self.gt_tensor[img, cls, j, 6] = cls # class_id
self.gt_tensor[img, cls, j, 7] = c_idx # index from mrcnn_class array (temp for verification)
self.gt_stacked = []
for img in range(num_images):
_substacked = np.empty((0,8))
for cls in range(0,num_classes):
if self.gt_cls_cnt[img, cls] > 0:
_substacked = np.vstack((_substacked, self.gt_tensor[img , cls, 0:self.gt_cls_cnt[img, cls]] ))
self.gt_stacked.append( _substacked )
# print('gt_tensor : (idx, class, prob, y1, x1, y2, x2)', self.gt_tensor.shape, '\n')
# for i in range(len(self.gt_stacked)):
# print(self.gt_stacked[i].shape)
# print(self.gt_stacked[i])
def get_gt_stacked(self):
'''
return all bboxes for images in a list, one ndarray per image
'''
self.gt_stacked = []
for img in range(self.config.BATCH_SIZE):
_substacked = np.empty((0,8))
for cls in range(self.config.NUM_CLASSES):
if self.gt_cls_cnt[img, cls] > 0:
_substacked = np.vstack((_substacked, self.gt_tensor[img , cls, 0:self.gt_cls_cnt[img, cls]] ))
self.gt_stacked.append( _substacked )
def __repr__(self):
return ' I\'m in repr ...!'
def __str__(self):
return ' I\'m in __str__'
from scipy.stats import multivariate_normal
import numpy as np
def bbox_gaussian( bbox, Zin ):
"""
Receive a bounding box and generate a Gaussian distribution centered on the bounding box, with a
covariance matrix based on the width and height of the bounding box.
Inputs :
--------
bbox : (index, class_prob, y1, x1, y2, x2, class_id, old_idx)
bbox : (index, class_id, class_prob, cx, cy, width, height)
Returns:
--------
bbox_g grid mesh [image_height, image width] covering the distribution
"""
img_w, img_h = Zin.shape
width = bbox[5] - bbox[3]
height = bbox[4] - bbox[2]
cx = bbox[3] + ( width / 2.0)
cy = bbox[2] + ( height / 2.0)
# cx, cy, width, height = bbox[3:]
print('center is ({:4f},{:4f}) width: {:4f} height: {:4f} '.format(cx, cy, width, height))
# srtd_cpb_2 = np.column_stack((srtd_cpb[:, 0:2], cx,cy, width, height ))
X = np.arange(0, img_w, 1)
Y =
|
np.arange(0, img_h, 1)
|
numpy.arange
|
"""Generate brainweb data with two simulated temporal arteries
Usage:
generate_brainweb_vasculitis [--help | options]
Options:
-i <path>, --out_im=<path> output image filename prefix [default: im]
--save-labels save label images for all non-zero structures and a total background
--brainweb-cache=<path> filename prefix for saving brainweb data [default: brainweb_labels]
--voxel-size=<val> string specifying the output voxel size (mMR | MR | brainweb) [default: mMR]
--iIL=<val> inner intensity (left) [default: 1]
--iIR=<val> inner intensity (right) [default: 2]
--oIL=<val> outer intensity (left) [default: 5]
--oIR=<val> outer intensity (right) [default: 8]
--iRL=<val> inner radius (left) [default: 3]
--iRR=<val> inner radius (right) [default: 3]
--oRL=<val> outer radius (left) [default: 5]
--oRR=<val> outer radius (right) [default: 5]
--lL=<val> vessel length (left) [default: 40]
--lR=<val> vessel length (right) [default: 40]
--cL=<val> centre (left) [default: -80]
--cR=<val> centre (right) [default: 80]
"""
# CCP SyneRBI Synergistic Image Reconstruction Framework (SIRF)
# Copyright 2020 University College London.
#
# author <NAME>
# author <NAME>
# This is software developed for the Collaborative Computational
# Project in Synergistic Image Reconstruction for Biomedical Imaging
# (http://www.ccpsynerbi.ac.uk/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#import MINC
import brainweb
import numpy as np
from tqdm.auto import tqdm
import sirf.STIR as pet
import sirf.Reg as reg
#from sirf.Utilities import examples_data_path
from docopt import docopt
import os
import nibabel
__version__ = '0.3.0'
args = docopt(__doc__, version=__version__)
print(args)
# Parse input arguments
out_prefix = args['--out_im']
save_labels = args['--save-labels']
brainweb_label_prefix = args['--brainweb-cache']
outres=args['--voxel-size']
iIL = float(args['--iIL'])
iIR = float(args['--iIR'])
oIL = float(args['--oIL'])
oIR = float(args['--oIR'])
iRL = float(args['--iRL'])
iRR = float(args['--iRR'])
oRL = float(args['--oRL'])
oRR = float(args['--oRR'])
lL = float(args['--lL'])
lR = float(args['--lR'])
cL = float(args['--cL'])
cR = float(args['--cR'])
def get_brainweb_labels():
"""Get brainweb image."""
fname, url = sorted(brainweb.utils.LINKS.items())[0]
brainweb.get_file(fname, url, ".")
data = brainweb.load_file(fname)
return data
def get_brainweb_labels_as_pet():
data=get_brainweb_labels()
res=getattr(brainweb.Res,'brainweb')
new_shape=(data.shape[0],512,512)
padLR, padR = divmod((np.array(new_shape) - data.shape), 2)
data = np.pad(data, [(p, p + r) for (p, r)
in zip(padLR.astype(int), padR.astype(int))],
mode="constant")
#data = np.flip(data, 0)
return get_as_pet_im(data,res)
def get_brainweb_image(outres=outres, PetClass=brainweb.FDG, save_labels=False):
"""Get brainweb image. (no longer used)"""
fname, url = sorted(brainweb.utils.LINKS.items())[0]
brainweb.get_file(fname, url, ".")
#data = brainweb.load_file(fname)
brainweb.seed(1337)
#for f in tqdm([fname], desc="ground truths", unit="subject"):
vol = brainweb.get_mmr_fromfile(
fname, petNoise=0, petSigma=0, outres=outres, PetClass=PetClass)
if save_labels:
labels = brainweb.get_label_probabilities(fname, outres=outres)
non_zero_labels = PetClass.attrs
all_labels = PetClass.all_labels
non_zero_indices = list(all_labels.index(l) for l in non_zero_labels)
# keep only non-zero ones
labels = labels[non_zero_indices, :, :, :]
return (vol['PET'], vol['res'], labels, non_zero_labels)
else:
return (vol['PET'], vol['res'])
def crop_and_orient(im, res):
"""Crop and orient image."""
# original code for the mMR voxel sizes
# im = im[:, 105:105+127, 108:108+127]
mMR_res = np.array((2.0312, 2.0863, 2.0863))
org_min=np.array((0, 105, 108))
org_max=org_min+127
new_min = np.int32(np.round(org_min*mMR_res/res))
new_max = np.int32(np.round(org_max*mMR_res/res))
im = im[new_min[0]:new_max[0], new_min[1]:new_max[1], new_min[2]:new_max[2]]
im = np.flip(im, 0)
return im
def get_as_pet_im(arr, res):
"""Get as PET image."""
im = pet.ImageData()
im.initialise(arr.shape, tuple(res))
im.fill(arr)
return im
def save_nii(im, fname):
"""Save as nii."""
reg.ImageData(im).write(fname)
def get_cylinder_in_im(im_in, length, radius, origin, intensity, tm=None, num_samples=3):
"""Get an image containing a cylinder."""
cylinder = pet.EllipticCylinder()
cylinder.set_length(length)
cylinder.set_radii([radius, radius])
geo = im_in.get_geometrical_info()
centre = (np.array(geo.get_offset()) +
(np.array(geo.get_size())-1)*np.array(geo.get_spacing())/2.)
# warning: CURRENTLY NEED TO REVERSE
cylinder.set_origin(tuple(np.array(origin) + centre[::-1]))
im = im_in.clone()
im.fill(0)
im.add_shape(cylinder, intensity, num_samples)
if tm:
# resample
res = reg.NiftyResample()
res.set_reference_image(im)
res.set_floating_image(im)
res.add_transformation(tm)
res.set_interpolation_type_to_cubic_spline()
im = res.forward(im)
return im
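# Illustrative sketch (hypothetical; the actual vessel composition happens later
# in this script and may differ). A temporal artery can be modelled as an outer
# and an inner cylinder at the same centre, e.g. for the left vessel:
#
#     outer_L = get_cylinder_in_im(template_im, length=lL, radius=oRL,
#                                  origin=(0, 0, cL), intensity=oIL)
#     inner_L = get_cylinder_in_im(template_im, length=lL, radius=iRL,
#                                  origin=(0, 0, cL), intensity=iIL)
#
# where `template_im` and the origin component ordering are assumptions, not
# taken from the original code.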
def weighted_add(out, values, weights):
"""set out to out + sum(weights*values) """
for (w,v) in zip (weights, values):
out += w*v
def zoom_image(im, new_voxel_size):
"""
returns an image with new voxel sizes
It uses the 'preserve_values' option of sirf.STIR.ImageData.zoom_image (appropriate for probabilistic labels)
This uses internal STIR knowledge such that the zoomed image still has the same STIR offset as the input.
This is only important once using the image for forward projection etc
"""
geo=im.get_geometrical_info()
# warning: need to revert these at present
voxel_size = np.array(geo.get_spacing()[::-1])
size = np.array(geo.get_size()[::-1])
zooms = voxel_size / new_voxel_size
new_size = np.array(
|
np.ceil(size * zooms)
|
numpy.ceil
|
import functools
import numpy as np
from source import plot_data as pd
def normalize_2d_vectors(x, y):
xy = np.array([x, y])
xy_len = np.linalg.norm(xy, axis=0, keepdims=True)
xy_normalized = xy / xy_len
return xy_normalized
def normalize_3d_vectors(x, y, z):
xyz = np.array([x, y, z])
xyz_len = np.linalg.norm(xyz, axis=0, keepdims=True)
xyz_normalized = xyz / xyz_len
return xyz_normalized
def convert_coordinate_system_3d(x, y, z):
"""
Switch right-hand to left-hand coordinate system and vice versa.
:param x: float scalar or numpy array
:param y: float scalar or numpy array
:param z: float scalar or numpy array
:return:
"""
return x, -z, y
def convert_coordinate_system_2d(x, z):
"""
Switch 2D (top-down) right-hand to left-hand coordinate system and vice versa.
:param x: float scalar or numpy array
:param z: float scalar or numpy array
:return:
"""
return x, -z
def get_min_middle_max(x):
x_min = x.min()
x_max = x.max()
x_middle = (x_max + x_min) * 0.5
return x_min, x_middle, x_max
def differences(x, fix_negative_dt=False):
x_prev = np.concatenate((np.array([x[0]]), x[:-1]))
x_diff = x - x_prev
# prevent negative times due to next lap
if fix_negative_dt:
x_diff[x_diff < 0.0] = np.finfo(x_diff.dtype).eps
return x_diff
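# Example (illustrative): differences(np.array([1.0, 3.0, 6.0, 5.0])) returns
# [0., 2., 3., -1.]; with fix_negative_dt=True the trailing -1.0 is replaced by
# a tiny positive epsilon so later divisions stay finite.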
def derive_nan(x, time_steps):
time_diff = differences(time_steps)
x_diff = differences(x)
x_derived = x_diff / time_diff
return x_derived
def derive_no_nan(x, time_steps):
"""
Differentiate x with respect to time_steps while preventing NaN/Inf by clamping non-positive time deltas.
:param x:
:param time_steps:
:return:
"""
time_diff = differences(time_steps)
time_diff[time_diff < 0.0] = np.finfo(time_diff.dtype).eps # lap time is reset to 0.0 on a new lap, of course
time_diff[time_diff == 0.0] = np.finfo(time_diff.dtype).eps # same lap time should be filtered out but who knows
x_diff = differences(x)
x_derived = x_diff / time_diff
return x_derived
def get_forward_dir_2d(plot_data: pd.PlotData):
pxy_normalized = normalize_2d_vectors(plot_data.roll_x, plot_data.roll_y)
return pxy_normalized
def get_forward_dir_3d(plot_data: pd.PlotData):
pxyz_normalized = normalize_3d_vectors(plot_data.roll_x, plot_data.roll_y, plot_data.roll_z)
return pxyz_normalized
def get_sideward_dir_3d(plot_data: pd.PlotData):
pxy_normalized = normalize_3d_vectors(plot_data.pitch_x, plot_data.pitch_y, plot_data.pitch_z)
return pxy_normalized
def get_forward_vel_2d(plot_data: pd.PlotData):
vxy_normalized = normalize_2d_vectors(plot_data.vel_x, plot_data.vel_y)
return vxy_normalized
def get_drift_angle_deg(plot_data: pd.PlotData):
pxy_normalized = get_forward_dir_2d(plot_data)
vxy_normalized = get_forward_vel_2d(plot_data)
# dot(forward_dir, velocity_dir) = cos(drift angle)
drift = (pxy_normalized * vxy_normalized).sum(axis=0)
drift_angle = np.arccos(drift)
drift_angle_deg = np.rad2deg(drift_angle)
return drift_angle_deg
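# Worked example (illustrative): if the normalized forward direction is (1, 0)
# and the normalized velocity direction is (cos 30deg, sin 30deg) ~= (0.866, 0.5),
# the dot product is ~0.866 and arccos gives a drift angle of ~30 degrees.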
def get_energy(plot_data: pd.PlotData):
mass = 1000.0 # kg, doesn't really matter because we want only the relative changes in energy
gravity = 9.81 # m/s^2
velocity = plot_data.speed_ms
height = plot_data.pos_y
height_relative = height - np.min(height)
kinetic_energy = 0.5 * mass * np.square(velocity)
potential_energy = mass * gravity * height_relative
# TODO: add rotational energy
# TODO: add rotational energy of wheels
# TODO: add heat energy in brakes? lol
energy = kinetic_energy + potential_energy
return energy, kinetic_energy, potential_energy
def get_gear_shift_mask(plot_data: pd.PlotData, shift_time_ms=100.0):
# exclude times ~0.1 sec around gear shifts and gears < 1
# assuming 1 UDP sample is 10 ms (delay=1 in Dirt Rally)
time_steps = plot_data.run_time
gear_changes = derive_no_nan(plot_data.gear, time_steps=time_steps)
gear_changes[gear_changes != 0.0] = 1.0 # 1.0 if the gear changed, 0.0 otherwise
box_filter_length = int(round(shift_time_ms / 2.0 / 10.0))
box_filter = np.array([1.0] * box_filter_length)
close_to_gear_changes = np.convolve(gear_changes, box_filter, mode='same') > 0.0
return close_to_gear_changes
def get_optimal_rpm(plot_data: pd.PlotData):
# the first gear is rather unreliable because the wheels usually spin freely at the start
# median makes it more robust
full_acceleration_mask = get_full_acceleration_mask(plot_data=plot_data)
if not np.any(full_acceleration_mask):
return None, None, None
# energy, kinetic_energy, potential_energy = get_energy(plot_data=plot_data)
optimal_y_per_gear = []
optimal_x_per_gear = []
optimal_rpm_range_min_per_gear = []
optimal_rpm_range_max_per_gear = []
data_gear = plot_data.gear
range_gears = list(set(data_gear))
range_gears.sort()
range_gears = [g for g in range_gears if g > 0.0]
for g in range_gears:
current_gear = plot_data.gear == g
not_close_to_gear_changes = np.logical_not(get_gear_shift_mask(plot_data=plot_data, shift_time_ms=100.0))
full_in_current_gear = np.logical_and(not_close_to_gear_changes, current_gear)
interesting = np.logical_and(full_in_current_gear, full_acceleration_mask)
# acc_gear = plot_data.g_force_lon[interesting] # optimal RPM prediction is noisy with acceleration
vel_gear = plot_data.speed_ms[interesting]
# acc_gear = kinetic_energy[interesting]
rpm_gear = plot_data.rpm[interesting]
rpm_min = np.min(rpm_gear)
rpm_max = np.max(rpm_gear)
try:
# poly_coefficients = np.polyfit(rpm_gear, acc_gear, 3)
poly_coefficients = np.polyfit(rpm_gear, vel_gear, 3)
poly =
|
np.poly1d(poly_coefficients)
|
numpy.poly1d
|
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: skills-taxonomy-v2
# language: python
# name: skills-taxonomy-v2
# ---
# %%
import logging
from collections import Counter
import re
import itertools
import spacy
import pytextrank
from argparse import ArgumentParser
import yaml
import pandas as pd
from tqdm import tqdm
import boto3
import pytextrank
import numpy as np
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.util import ngrams # function for making ngrams
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from gensim.models.phrases import Phrases, Phraser, ENGLISH_CONNECTOR_WORDS
from sklearn.metrics.pairwise import cosine_similarity
from skills_taxonomy_v2.pipeline.sentence_classifier.sentence_classifier import (
BertVectorizer,
)
from skills_taxonomy_v2.getters.s3_data import load_s3_data, save_to_s3
from skills_taxonomy_v2 import BUCKET_NAME
from pattern.text.en import singularize
from collections import OrderedDict
import random
from collections import defaultdict
import time
logger = logging.getLogger(__name__)
nltk.download("wordnet")
# %% [markdown]
# # 0. Set up
# ## 0.1 load functions
# %%
def replace_ngrams(sentence, ngram_words):
for word_list in ngram_words:
sentence = sentence.replace(" ".join(word_list), "-".join(word_list))
return sentence
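# Example (illustrative): replace_ngrams("machine learning engineer",
# [("machine", "learning")]) returns "machine-learning engineer".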
def get_top_tf_idf_words(clusters_vect, feature_names, top_n=2):
"""
From https://stackoverflow.com/questions/34232190/scikit-learn-tfidfvectorizer-how-to-get-top-n-terms-with-highest-tf-idf-score
"""
sorted_nzs = np.argsort(clusters_vect.data)[: -(top_n + 1) : -1]
return feature_names[clusters_vect.indices[sorted_nzs]]
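# Illustrative usage sketch (not part of the original notebook): `clusters_vect`
# is one row of a fitted TF-IDF matrix and `feature_names` comes from the same
# vectorizer (method name depends on the sklearn version), e.g.
#
#     vec = TfidfVectorizer()
#     tfidf = vec.fit_transform(cluster_texts)  # cluster_texts is assumed
#     get_top_tf_idf_words(tfidf[0], np.array(vec.get_feature_names_out()), top_n=5)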
def clean_cluster_descriptions(sentences_data): # remove job based stop words!
"""
For each cluster normalise the texts for getting descriptions from
- lemmatize
- lower case
- remove duplicates
- n-grams
Input:
sentences_data (DataFrame): The sentences in each cluster
with "description" and "Cluster number" columns
Output:
cluster_descriptions (dict): Cluster number : list of cleaned
sentences in this cluster
"""
# Init the Wordnet Lemmatizer
lemmatizer = WordNetLemmatizer()
# How many times an n-gram has to occur for its occurrences
# to be converted to a single dash-separated word
num_times_ngrams_thresh = 3
work_stopwords = [
"essential",
"requirement",
"degree",
"responsibility",
"duties",
"responsibilities",
"experienced",
"previous",
"andor",
"minimum",
"years",
]
sentences_data["description"] = sentences_data["description"].apply(
lambda x: re.sub(r"\s+", " ", x)
)
cluster_descriptions = {}
for cluster_num, cluster_group in sentences_data.groupby("Cluster number"):
cluster_docs = cluster_group["description"].tolist()
cluster_docs_cleaned = []
for doc in cluster_docs:
# Remove capitals, but not when it's an acronym
no_work_stopwords = [w for w in doc.split(" ") if w not in work_stopwords]
acronyms = re.findall("[A-Z]{2,}", doc)
# Lemmatize
lemmatized_output = [
lemmatizer.lemmatize(w)
if w in acronyms
else lemmatizer.lemmatize(w.lower())
for w in doc.split(" ")
]
# singularise
singularised_output = [singularize(w) for w in doc.split(" ")]
# remove work stopwords
cluster_docs_cleaned.append(" ".join(no_work_stopwords).strip())
# Remove duplicates
cluster_docs_cleaned = list(set(cluster_docs_cleaned))
# Find the ngrams for this cluster
all_cluster_docs = " ".join(cluster_docs_cleaned).split(" ")
esBigrams = ngrams(all_cluster_docs, 3)
ngram_words = [
words
for words, count in Counter(esBigrams).most_common()
if count >= num_times_ngrams_thresh
]
cluster_docs_clean = [
replace_ngrams(sentence, ngram_words) for sentence in cluster_docs_cleaned
]
cluster_descriptions[cluster_num] = cluster_docs_clean
return cluster_descriptions
def get_clean_ngrams(sentence_skills, ngram, min_count, threshold):
"""
Using the sentences data where each sentence has been clustered into skills,
find a list of all cleaned n-grams
"""
# Clean sentences
cluster_descriptions = clean_cluster_descriptions(sentence_skills)
# get cluster texts
cluster_texts = [" ".join(sentences) for sentences in cluster_descriptions.values()]
# tokenise skills
tokenised_skills = [word_tokenize(skill) for skill in cluster_texts]
# generate ngrams
t = 1
while t < ngram:
phrases = Phrases(
tokenised_skills,
min_count=min_count,
threshold=threshold,
scoring="npmi",
connector_words=ENGLISH_CONNECTOR_WORDS,
)
ngram_phraser = Phraser(phrases)
tokenised_skills = ngram_phraser[tokenised_skills]
t += 1
# clean up ngrams
clean_ngrams = [
[skill.replace("_", " ").replace("-", " ") for skill in skills]
for skills in list(tokenised_skills)
]
clean_ngrams = list(
set(
[
skill
for skill in list(itertools.chain(*clean_ngrams))
if len(skill.split(" ")) > 1
]
)
)
# get rid of duplicate terms in ngrams
clean_ngrams = [
" ".join(OrderedDict((w, w) for w in ngrm.split()).keys())
for ngrm in clean_ngrams
]
# only return ngrams that are more than 1 word long
return [
clean for clean in clean_ngrams if len(clean.split(" ")) > 1
], cluster_descriptions
def get_skill_info(
sentence_skills, sentence_embs, num_top_sent=2, ngram=4, min_count=1, threshold=0.15
):
"""
Output: skills_data (dict), for each skill number:
'Skills name' : join the closest ngram to the centroid
'Examples': Join the num_top_sent closest original sentences to the centroid
'Skills name embed': embedding of closest ngram to the centroid or shortest description embedding
'Texts': All the cleaned sentences for this cluster
"""
start_time = time.time()
bert_vectorizer = BertVectorizer(
bert_model_name="sentence-transformers/paraphrase-MiniLM-L6-v2",
multi_process=True,
)
bert_vectorizer.fit()
skills_data = {}
for cluster_num, cluster_data in tqdm(sentence_skills.groupby("Cluster number")):
# There may be the same sentence repeated
cluster_data.drop_duplicates(["sentence id"], inplace=True)
cluster_text = cluster_data["original sentence"].tolist()
cluster_coords = cluster_data[["reduced_points x", "reduced_points y"]].values
cluster_embeds = [
np.array(sentence_embs[str(sent_id)]).astype("float32")
for sent_id in cluster_data["sentence id"].values.tolist()
if str(sent_id) in sentence_embs
]
# Get sent similarities to centre
sent_similarities = cosine_similarity(
np.mean(cluster_coords, axis=0).reshape(1, -1), cluster_coords
)
# generate candidate ngrams per sentence cluster and embed
candidate_ngrams, cluster_descriptions = get_clean_ngrams(
cluster_data, ngram, min_count, threshold
)
if (
len(candidate_ngrams) > 1
): # if there is more than one candidate ngram, the skill cluster is labelled with the ngram whose embedding is closest to the cluster mean embedding
candidate_ngrams_embeds = bert_vectorizer.transform(candidate_ngrams)
# calculate similarities between ngrams per cluster and cluster mean
ngram_similarities = cosine_similarity(
np.mean(cluster_embeds, axis=0).reshape(1, -1), candidate_ngrams_embeds
)
closest_ngram = candidate_ngrams[
int(ngram_similarities.argsort()[0][::-1].tolist()[0:1][0])
]
skills_data[cluster_num] = {
"Skills name": closest_ngram,
"Name method": "phrases_embedding",
"Skills name embed": candidate_ngrams_embeds[
int(ngram_similarities.argsort()[0][::-1].tolist()[0:1][0])
],
"Examples": " ".join(
[
cluster_text[i]
for i in sent_similarities.argsort()[0][::-1].tolist()[
0:num_top_sent
]
]
),
"Texts": cluster_descriptions[cluster_num],
}
else:
print(
"no candidate ngrams"
) # if no candidate ngrams are generated, skill name is smallest skill description
skills_data[cluster_num] = {
"Skills name": min(cluster_descriptions[cluster_num], key=len),
"Name method": "minimum description",
"Skills name embed": bert_vectorizer.transform(
[min(cluster_descriptions[cluster_num], key=len)]
)[0],
"Examples": " ".join(
[
cluster_text[i]
for i in sent_similarities.argsort()[0][::-1].tolist()[
0:num_top_sent
]
]
),
"Texts": cluster_descriptions[cluster_num],
}
print("--- %s seconds ---" % (time.time() - start_time))
return skills_data
# %% [markdown]
# ## 0.2. load data
# %%
s3 = boto3.resource("s3")
# Load data
sentence_skills_path = (
"outputs/skills_extraction/extracted_skills/2021.08.31_sentences_data.json"
)
embedding_sample_path = "outputs/skills_extraction/extracted_skills/2021.08.31_sentence_id_2_embedding_dict.json.gz"
sentence_skills = load_s3_data(s3, BUCKET_NAME, sentence_skills_path)
sentence_skills = pd.DataFrame(sentence_skills)
sentence_skills = sentence_skills[sentence_skills["Cluster number"] != -1]
sentence_embs = load_s3_data(s3, BUCKET_NAME, embedding_sample_path)
# %% [markdown]
# ## 0.3. get sample based on random clusters
# %%
# random sample
k = 100
random_clusters = random.sample(
list(set(sentence_skills["Cluster number"].tolist())), k
)
sentence_skills_sample = sentence_skills[
sentence_skills["Cluster number"].isin(random_clusters)
]
# %% [markdown]
# ## 0.4. run updated get_skill_info on sample of k clusters
# * updated text cleaning to a) get rid of job-specific language, b) singularise terms, c) get rid of duplicate-term phrases, e.g. 'day day' (see the short sketch in the cell below)
# * updated get_skill_info to generate phrases _per_ skill cluster and to assign the minimum-length text description if fewer than two phrases per cluster are generated
# * lowered the Phrases algorithm threshold parameters to generate more phrases per cluster
# * return a dictionary incl. the closest ngram embedding for cluster merging and the candidate_ngram list per cluster, for updating the skill name of each merged skill cluster
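# %%
# A minimal sketch of the cleaning steps listed above, run on toy strings only
# (the phrases below are made up, not pipeline data): singularise terms and
# collapse duplicated words such as 'day day' inside a candidate phrase.
toy_phrases = ["communication skills skills", "day day management", "drivers licences"]
singularised = [" ".join(singularize(w) for w in phrase.split(" ")) for phrase in toy_phrases]
deduplicated = [
    " ".join(OrderedDict((w, w) for w in phrase.split(" ")).keys()) for phrase in singularised
]
print(deduplicated)  # expected to give something like ['communication skill', 'day management', 'driver licence']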
# %%
named_skills = get_skill_info(sentence_skills_sample, sentence_embs)
# %% [markdown]
# # 1. Skill clusters EDA
# %%
# how large are skill clusters?
cluster_counts = (
sentence_skills_sample.groupby("Cluster number")
.count()
.sort_values("description", ascending=False)
)
cluster_counts[cluster_counts["description"] > 10]
cluster_counts[cluster_counts["description"] <= 5]
cluster_counts["description"].plot.hist(
bins=15, range=[5, 50]
) # vast majority of clusters are quite small!
# %% [markdown]
# # 2. Experiment w/ summarisation methods
# %% [markdown]
# ## 2.0 PyTextRank
# %%
all_texts = " ".join(sentence_skills[:10]["description"].tolist())
en_nlp = spacy.load("en_core_web_sm")
en_nlp.add_pipe("textrank")
doc = en_nlp(all_texts)
candidate_phrases = list(
set(
[
phrase.text.strip()
for phrase in doc._.phrases
if 1 < len(phrase.text.split(" ")) < 4
]
)
)
# %% [markdown]
# ## 2.1 Noun Chunks
# %%
nlp = spacy.load("en_core_web_sm")
doc = nlp(all_texts)
noun_chunks = []
for chunk in doc.noun_chunks:
noun_chunks.append(chunk.text)
candidate_chunks = [
noun.strip() for noun in noun_chunks if 2 <= len(noun.strip().split(" ")) <= 4
]
print(candidate_chunks)
# %% [markdown]
# ## 3.0 PyTextRank w embeddings, verb chunking experiments
# %%
def get_clean_ngrams_pytextrank(sentence_skills, min_ngram=2, max_ngram=3):
# Clean sentences
nlp = spacy.load("en_core_web_sm")
nlp.add_pipe("textrank")
cluster_descriptions = clean_cluster_descriptions(sentence_skills)
# get cluster texts
cluster_texts = [" ".join(sentences) for sentences in cluster_descriptions.values()]
candidate_chunks = []
for cluster_text in cluster_texts:
doc = nlp(cluster_text)
# get rid of double terms
chunks = [phrase.text for phrase in doc._.phrases]
clean_chunks = [
" ".join(OrderedDict((w, w) for w in ngrm.split(" "))) for ngrm in chunks
]
candidate_chunks.append(
[
chunk
for chunk in clean_chunks
if min_ngram <= len(chunk.split(" ")) <= max_ngram
]
)
return list(itertools.chain(*candidate_chunks)), cluster_descriptions
# %%
def get_clean_ngrams_chunks(sentence_skills):
nlp = spacy.load("en_core_web_sm")
patterns = [
"VERB NOUN",
"NOUN VERB",
"VERB ADJ NOUN",
"VERB NOUN NOUN",
] # experiment with this!
re_patterns = [" ".join([r"(\w+)_!" + pos for pos in p.split()]) for p in patterns]
# Clean sentences
cluster_descriptions = clean_cluster_descriptions(sentence_skills)
# get cluster texts
cluster_texts = [" ".join(sentences) for sentences in cluster_descriptions.values()]
candidate_chunks = []
for cluster_text in cluster_texts:
doc = nlp(cluster_text)
text_pos = " ".join([token.text + "_!" + token.pos_ for token in doc])
candidate_chunk = [
[" ".join(result) for result in re.findall(pattern, text_pos)]
for i, pattern in enumerate(re_patterns)
]
candidate_chunks.append(candidate_chunk)
return (
list(
set(itertools.chain(*list(itertools.chain.from_iterable(candidate_chunks))))
),
cluster_descriptions,
)
# %%
# get skill info w/ pytextrank embeddings
start_time = time.time()
bert_vectorizer = BertVectorizer(
bert_model_name="sentence-transformers/paraphrase-MiniLM-L6-v2",
multi_process=True,
)
bert_vectorizer.fit()
skills_data = {}
for cluster_num, cluster_data in tqdm(sentence_skills_sample.groupby("Cluster number")):
# There may be the same sentence repeated
cluster_data.drop_duplicates(["sentence id"], inplace=True)
cluster_text = cluster_data["original sentence"].tolist()
cluster_coords = cluster_data[["reduced_points x", "reduced_points y"]].values
cluster_embeds = [
np.array(sentence_embs[str(sent_id)]).astype("float32")
for sent_id in cluster_data["sentence id"].values.tolist()
if str(sent_id) in sentence_embs
]
# Get sent similarities to centre
sent_similarities = cosine_similarity(
np.mean(cluster_coords, axis=0).reshape(1, -1), cluster_coords
)
# ----GENERATE CANDIDATE NGRAMS BASED ON PYTEXTRANK----#
candidate_ngrams, cluster_descriptions = get_clean_ngrams_pytextrank(
cluster_data, min_ngram=2, max_ngram=3
)
if (
len(candidate_ngrams) > 1
): # if there is more than one candidate ngram, the skill cluster is labelled with the ngram whose embedding is closest to the cluster mean embedding
candidate_ngrams_embeds = bert_vectorizer.transform(candidate_ngrams)
# calculate similarities between ngrams per cluster and cluster mean
ngram_similarities = cosine_similarity(
np.mean(cluster_embeds, axis=0).reshape(1, -1), candidate_ngrams_embeds
)
closest_ngram = candidate_ngrams[
int(ngram_similarities.argsort()[0][::-1].tolist()[0:1][0])
]
skills_data[cluster_num] = {
"Skills name": closest_ngram,
"Name method": "pytextrank_embedding",
"Skills name embed": candidate_ngrams_embeds[
int(ngram_similarities.argsort()[0][::-1].tolist()[0:1][0])
],
"Examples": " ".join(
[
cluster_text[i]
for i in sent_similarities.argsort()[0][::-1].tolist()[0:2]
]
),
"Texts": cluster_descriptions[cluster_num],
}
else:
print(
"no candidate ngrams"
) # if no candidate ngrams are generated, skill name is smallest skill description
skills_data[cluster_num] = {
"Skills name": min(cluster_descriptions[cluster_num], key=len),
"Name method": "minimum description",
"Skills name embed": bert_vectorizer.transform(
[min(cluster_descriptions[cluster_num], key=len)]
)[0],
"Examples": " ".join(
[
cluster_text[i]
for i in sent_similarities.argsort()[0][::-1].tolist()[0:2]
]
),
"Texts": cluster_descriptions[cluster_num],
}
print("--- %s seconds ---" % (time.time() - start_time))
# %%
# get skill info w/ verb chunking
start_time = time.time()
skills_data = {}
for cluster_num, cluster_data in tqdm(sentence_skills_sample.groupby("Cluster number")):
# There may be the same sentence repeated
cluster_data.drop_duplicates(["sentence id"], inplace=True)
cluster_text = cluster_data["original sentence"].tolist()
cluster_coords = cluster_data[["reduced_points x", "reduced_points y"]].values
cluster_embeds = [
np.array(sentence_embs[str(sent_id)]).astype("float32")
for sent_id in cluster_data["sentence id"].values.tolist()
if str(sent_id) in sentence_embs
]
# Get sent similarities to centre
sent_similarities = cosine_similarity(
np.mean(cluster_coords, axis=0).reshape(1, -1), cluster_coords
)
# ----GENERATE CANDIDATE NGRAMS BASED ON VERB CHUNKING----#
candidate_ngrams, cluster_descriptions = get_clean_ngrams_chunks(cluster_data)
if (
len(candidate_ngrams) > 1
): # if there is more than one candidate ngram, the skill cluster is labelled with the ngram whose embedding is closest to the cluster mean embedding
candidate_ngrams_embeds = bert_vectorizer.transform(candidate_ngrams)
# calculate similarities between ngrams per cluster and cluster mean
ngram_similarities = cosine_similarity(
np.mean(cluster_embeds, axis=0).reshape(1, -1), candidate_ngrams_embeds
)
closest_ngram = candidate_ngrams[
int(ngram_similarities.argsort()[0][::-1].tolist()[0:1][0])
]
skills_data[cluster_num] = {
"Skills name": closest_ngram,
"Name method": "verb_chunks_embedding",
"Skills name embed": candidate_ngrams_embeds[
int(ngram_similarities.argsort()[0][::-1].tolist()[0:1][0])
],
"Examples": " ".join(
[
cluster_text[i]
for i in sent_similarities.argsort()[0][::-1].tolist()[0:2]
]
),
"Texts": cluster_descriptions[cluster_num],
}
else:
print(
"no candidate ngrams"
) # if no candidate ngrams are generated, skill name is smallest skill description
skills_data[cluster_num] = {
"Skills name": min(cluster_descriptions[cluster_num], key=len),
"Name method": "minimum description",
"Skills name embed": bert_vectorizer.transform(
[min(cluster_descriptions[cluster_num], key=len)]
)[0],
"Examples": " ".join(
[
cluster_text[i]
for i in sent_similarities.argsort()[0][::-1].tolist()[0:2]
]
),
"Texts": cluster_descriptions[cluster_num],
}
print("--- %s seconds ---" % (time.time() - start_time))
# %% [markdown]
# # 3. Merge based on named skills proximity AND centroid proximity
# %%
def merge_skill_clusters(
named_skills,
sentence_skills,
skill_name_sim_threshold=0.9,
centroid_threshold=0.9,
):
skill_name_sims = cosine_similarity(
np.array(
[
skill_data["Skills name embed"]
for skill_clust, skill_data in named_skills.items()
]
)
)
duplicate_skills = []
for sims in skill_name_sims:
sims_indexes = np.where(sims > skill_name_sim_threshold)  # api: numpy.where
# -*- coding: utf-8 -*-
"""
readreflex.py
firstly intended as a library to read and export ReflexW-Data-Formats
Reads .PAR and .DAT formats written by that program
https://www.sandmeier-geo.de/reflexw.html
"""
import pandas
import struct
import numpy as np
import h5py
import csv
import matplotlib.pyplot as plt
#for reading seismic formats:
from obspy import read as obread
from scipy.signal import resample
import copy
import segyio
#specific scipy packages for heritage to filters:
from scipy.signal import butter, lfilter, spectrogram, welch,windows,hilbert
from librosa.core import reassigned_spectrogram
from librosa.core import reassigned_spectrogram as ifgram
from tqdm import tqdm
## radargram reader-class
class radargram():
#initialization to set up overtake of fileformats
#for later bookkeeping, unused yet
def __init__(self,fileformat=None):
#this is unfinished and not used
if fileformat is not None and fileformat.lower() in {"**r", "**t"}:
self.fileformat = "**R"
else:
print('No file format was given, assuming .par')
# get some functions for copying itself correctly
def copy(self):
return copy.copy(self)
def deepcopy(self):
return copy.deepcopy(self)
# Unfortunately there are some undocumented changes in the byteorders somewhere along the upgrade-path
# so there are different byte-positions of information that are not really clear
def read_header_file_v8(self,filepath):
''' reads an OLD .**R file of the reflexw format and returns a dictionary of the chosen parameters
It doesn't matter whether you input the .**R file or the .**T file, but **T would be preferred to keep consistency
Keyword arguments:
filepath -- str path to the .DAT file
'''
print("reading file: " + filepath)
#check if we are dealing with raw .at and .par files
stringlist= filepath.split('.')
if stringlist[-1]=="DAT":
parfilefull=stringlist[0]+'.'+"PAR"
else:
# get the file ending number, which according to the documentation can only be 2 digits plus a trailing T instead of the raw ".dat"
procnumber=stringlist[-1][-3:-1]
parfile=stringlist[-2]
parfilefull=parfile+'.'+procnumber+'R'
#parfilefull=parfile+'.PAR'
with open(parfilefull, "rb") as f:
# Read the whole file at once
data = f.read()
#print(data)
samplenumber=int.from_bytes(data[420:422],byteorder='little')
tracenumber=int.from_bytes(data[452:456],byteorder='little')
formatcode=int.from_bytes(data[436:440],byteorder='little')
traceincrement=struct.unpack('f', data[460:464])
#print(traceincrement)
timeincrement=struct.unpack('f', data[464:468])
#print(timeincrement)
timebegin=struct.unpack('f', data[472:476])
#print(timebegin)
#span a vector of timesteps
timevec=np.arange(0,samplenumber*timeincrement[0],timeincrement[0])
#description of the header goes in here
description= '''Samplenumber: Number of samples in a trace;
tracenumber: Number of Traces in the Radargram,
formatcode: 2 - 16 bit small integer 3 - 32 bit float
traceincrement: distance increment
timeincrement: sampling time 1/f
timebegin: Set from processing, time when material in radargram begins
timevec: Vector of timesteps
description: this text'''
header={"samplenumber":samplenumber, "tracenumber":tracenumber,"formatcode":formatcode,"traceincrement":traceincrement[0],
"timeincrement":timeincrement[0],"timebegin":timebegin[0],"time":timevec,"description": description}
self.header=header
def read_header_file_v9(self,filepath):
''' reads a .**T file of the reflexw format and returns a dictionary of the chosen parameters
Parameters:
filepath -- path to the file(str)
'''
print("reading file: " + filepath)
## NEEDS UNIX/WINDOWS PATH HANDLING
stringlist= filepath.split('.')
#check if we are dealing with raw .at and .par files
if stringlist[-1]=="DAT":
#check if relative path:
if stringlist[0]=='':
parfilefull='.'+stringlist[1]+'.'+"PAR"
else:
parfilefull=stringlist[0]+'.'+"PAR"
else:
# get the file ending number, which according to the documentation can only be 2 digits plus a trailing T instead of the raw ".dat"
procnumber=stringlist[-1][-3:-1]
parfile=stringlist[-2]
parfilefull=parfile+'.'+procnumber+'R'
#parfilefull=parfile+'.PAR'
with open(parfilefull, "rb") as f:
# Read the whole file at once
data = f.read()
# Reading the header data is unfortunately quite inconvenient: the documentation suggests that the lengths of the parameter strings
# are fixed, but upon inspection of the bytes it turns out this is not true for all of them. While a few fields DO have the
# suggested 20-byte length, others do not.
# Parameters are therefore read here to the best of my ability, but are not guaranteed to deliver consistent or even usable results.
# For example, the first 400 bytes are supposed to be an array of 20 strings of 20 bytes each, yet the positioning within that array is off,
# or worse: filled with random uninitialized memory at the time of program execution.
# Much sanitization probably still needs to be done here.
#translate dist dimension into numbers
distdict={'METER':1,'CM':1E-2,'MM':1E-3}
distdimension=data[253:258].decode()
distdimension_float=distdict[distdimension]
#translate time dimension into numbers
timedict={'ns':1E-9,'ms':1E-3,'s':1}
timedimension= data[343:345].decode()
timedimension_float=timedict[timedimension]
samplenumber=int.from_bytes(data[420:424],byteorder='little')
tracenumber=int.from_bytes(data[424:428],byteorder='little')
#unfortunately there is an undocumented occurrence where, when the trace count is >64000, this value is set to 64000 and
#the REAL count is written at bytes 484:488
if tracenumber==64000:
print('Tracenumber appears to be 64000 \n')
print("This could be due to numerical issues with the file formats. ")
print("Try other data position, possible correct value? Y/N")
answer=input()
if answer=='y':
tracenumber=int.from_bytes(data[484:488],byteorder='little',signed=True)
formatcode=int.from_bytes(data[452:456],byteorder='little')
#the formatcode sometimes isn't set correctly in the file, so catch that error here:
if formatcode==1:
print('Error. The formatcode of reflex read as 1, however it can only be 2(new int)\n or 3(new float). Please enter:\n')
newcode=int(input('Enter an integer 2 or 3:'))
formatcode=newcode
#these all change because v9 uses double
traceincrement=struct.unpack('d', data[492:500])
print('Trace increment',traceincrement[0])
timeincrement=struct.unpack('d', data[500:508])
print('Time increment: ',timeincrement)
timebegin=struct.unpack('d', data[516:524])[0]
print('Time start: ',timebegin)
x_start=struct.unpack('d', data[548:556])[0]
print("X-coord Start:", x_start)
y_start=struct.unpack('d', data[556:564])[0]
print("Y-coord Start:", y_start)
z_start=struct.unpack('d', data[564:572])[0]
x_end=struct.unpack('d', data[572:580])[0]
print("X-coord End:", x_end)
y_end=struct.unpack('d', data[580:588])[0]
print("Y-coord End:", y_end)
xoffset=np.abs(x_end-x_start)
timevec=np.arange(0,samplenumber*timeincrement[0],timeincrement[0])
description= '''Samplenumber: Number of samples in a trace;
tracenumber: Number of Traces in the Radargram,
formatcode: 2 - 16 bit small integer 3 - 32 bit float
traceincrement: distance increment
timeincrement: sampling time 1/f
timebegin: Set from processing, time when material in radargram begins
timevec: Vector of timesteps
xoffset: X-Profile offset, assumed to be only dimension for now
description: this text'''
header={"samplenumber":samplenumber, "tracenumber":tracenumber,"formatcode":formatcode,"traceincrement":traceincrement[0],
"timeincrement":timeincrement[0],"timebegin":timebegin,"timedimension":timedimension,"time":timevec,"xoffset":xoffset,"description": description}
self.header=header
def read_data_file(self,filepath,version=8):
'''
reads the whole binary file as a bytes object, reads the header file and
converts the byte data to an array.
version=8: old formats
version=9: new formats'''
with open(filepath, "rb") as f:
# Read the whole file at once
datdata = f.read()
#print(data)
self.bytedata =datdata
if version==9:
self.read_header_file_v9(filepath=filepath)
else:
self.read_header_file_v8(filepath=filepath)
self.__convert_to_array()
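# --- Illustrative usage sketch (an addition; the file names below are hypothetical) ---
#   rg = radargram()
#   rg.read_data_file("PROFILE.DAT", version=8)   # raw .DAT with its .PAR header
#   rg.read_data_file("PROFILE.01T", version=9)   # processed file with a .01R header
#   print(rg.header["tracenumber"], rg.header["samplenumber"])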
def __readtrace_newformat(self,byteobject):
# reads a trace in the given byteobject
# byteobject should be of Formatcode 3 (32bit floating point int)
self._TraceNo=int.from_bytes(byteobject[0:4],byteorder='little')
self.NoOfSamples=int.from_bytes(byteobject[4:8],byteorder='little')
#error catching in case the header is broken, which for some reason happens super often
if self.NoOfSamples==0 or self.NoOfSamples>1024:
self.NoOfSamples=self.header['samplenumber']
tracedata=np.empty(self.NoOfSamples)
#header takes 158 bytes, always! (at least it should)
if self.header["formatcode"]==3:
bytelength=4
for j,i in enumerate(np.arange(158,158+self.NoOfSamples*bytelength,bytelength)):
tracedata[j]=struct.unpack('f', byteobject[i:i+bytelength])[0]
#print(tracedata[j])
return tracedata
else:
bytelength=2
for j,i in enumerate(np.arange(156,156+self.NoOfSamples*bytelength,bytelength)):
tracedata[j]=struct.unpack('h', byteobject[i:i+bytelength])[0]
#tracedata[j]=struct.unpack('h', byteobject[i:i+bytelength])
#print(tracedata[j])
return tracedata
def __convert_to_array(self):
if self.header["formatcode"]==3:
_bytetracesize=158+self.header['samplenumber']*4
else:
_bytetracesize=156+self.header['samplenumber']*2
self.traces = np.empty([self.header["tracenumber"], self.header["samplenumber"]])  # api: numpy.empty
#coding: utf-8
import numpy as np
def LIFsimulation(V_rest=-65, V_thre=-40, V_reset=-70, V_fire=20, C=2e-4, R=7.5e4, I=5e-4, dt=0.1, T=100):
"""Leaky Integrate-and-Fire(LIF) model.
I-(V-Vrest)/Rm = Cm*(dV/dt)
"""
Vm = V_rest
Time = np.arange(0,T+dt,dt)
V = np.zeros_like(Time)
for i in range(len(V)):
V[i] = Vm
# dV/dt=(IR-(Vm-Vrest))/RC
dVdt = (I*R-(Vm-V_rest))/(R*C)
V_plus_dV = Vm+dt*dVdt
Vm = V_reset if Vm>V_thre else V_fire if V_plus_dV > V_thre else V_plus_dV
return Time,V
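# --- Illustrative usage sketch (an addition to the original file) ---
# Run the LIF model with its default constant drive and count how many samples
# sit at the firing value; the count is only a rough sanity check, not a reference number.
_t_demo, _v_demo = LIFsimulation()
_n_spikes_demo = int(np.sum(_v_demo == 20))  # 20 is the default V_fire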
def LITSpikeFrequency(Is, **params):
freqs=[]
for I in Is:
params["I"]=I
Time,V = LIFsimulation(**params)
first, second = np.where(V==params.get("V_fire", 20))[0][:2]
span = (second-first)*params.get("dt", 0.1)*1e-3
freqs.append(1/span)
return np.asarray(freqs)
def HHsimulation(V_rest=-65, V_thre=-55, V_reset=-70, V_fire=20,
gl=3.0e1, gK=3.6e3, gNa=1.2e4,
Vl=-54.402, VK=-77.0, VNa=50.0,
Cm=1e2, I=0, dt=0.1, T=100):
"""Hodgkin-Huxley model. (`hh_psc_alpha`)
I = Cm*(dVm/dt) + gl*(Vm-Vl) + gK*n^4*(Vm-VK) + gNa*m^3*h*(Vm-VNa)
"""
alpha_n = lambda Vm: (1e-2*(Vm+55)) / (1-np.exp(-(Vm+55)/10))
beta_n = lambda Vm: 0.125*np.exp(-(Vm+65)/80)
alpha_m = lambda Vm: (0.1*(Vm+40)) / (1-np.exp(-(Vm+40)/10))
beta_m = lambda Vm: 4*np.exp(-(Vm+65)/18)
alpha_h = lambda Vm: 7e-2*np.exp(-(Vm+65)/20)
beta_h = lambda Vm: 1 / (1+np.exp(-(Vm+35)/10))
# Initialization
Vm = V_rest
Time = np.arange(0,T+dt,dt)
V = np.zeros_like(Time)
n = alpha_n(Vm) / (alpha_n(Vm)+beta_n(Vm))
m = alpha_m(Vm) / (alpha_m(Vm)+beta_m(Vm))
h = alpha_h(Vm) / (alpha_h(Vm)+beta_h(Vm))
for i,t in enumerate(Time):
V[i] = Vm
dVdt = (I-gl*(Vm-Vl)-gK*(n**4)*(Vm-VK)-gNa*(m**3)*h*(Vm-VNa))/Cm
V_plus_dV = Vm+dt*dVdt
n += (alpha_n(Vm)*(1-n) - beta_n(Vm)*n)*dt
m += (alpha_m(Vm)*(1-m) - beta_m(Vm)*m)*dt
h += (alpha_h(Vm)*(1-h) - beta_h(Vm)*h)*dt
Vm = V_reset if Vm>V_thre else V_fire if V_plus_dV > V_thre else V_plus_dV
return Time,V
def STDPsimulation(pre, post, dt=0.1, T=100):
Time = np.arange(0,T+dt,dt)
X = np.zeros_like(Time); Y = np.zeros_like(Time)
Pre_Neuron = np.zeros_like(Time); Post_Neuron = np.zeros_like(Time)  # api: numpy.zeros_like
import numpy as np
import pymaster as nmt
import matplotlib.pyplot as plt
import os
# This script describes the functionality of the flat-sky version of pymaster
# We start by defining the flat-sky field
Lx = 72. * np.pi/180
Ly = 48. * np.pi/180
Nx = 602
Ny = 410
# Nx=301; Ny=205;
# Let's now create a mask:
mask = np.ones(Nx * Ny)
xarr = np.ones(Ny)[:, None] * np.arange(Nx)[None, :] * Lx/Nx
yarr = np.ones(Nx)[None, :] * np.arange(Ny)[:, None] * Ly/Ny
def dig_hole(x, y, r):
rad = (np.sqrt((xarr-x)**2+(yarr-y)**2)).flatten()
return np.where(rad < r)[0]
mask[dig_hole(0.3 * Lx, 0.6 * Ly, 0.05 * np.sqrt(Lx * Ly))] = 0.
mask[dig_hole(0.7 * Lx, 0.12 * Ly, 0.07 * np.sqrt(Lx * Ly))] = 0.
mask[dig_hole(0.7 * Lx, 0.8 * Ly, 0.03 * np.sqrt(Lx * Ly))] = 0.
mask[np.where(xarr.flatten() < Lx / 16.)] = 0
mask[np.where(xarr.flatten() > 15 * Lx / 16.)] = 0
mask[np.where(yarr.flatten() < Ly / 16.)] = 0
mask[np.where(yarr.flatten() > 15 * Ly / 16.)] = 0
mask = mask.reshape([Ny, Nx])
mask = nmt.mask_apodization_flat(mask, Lx, Ly, aposize=2., apotype="C1")
plt.figure()
plt.imshow(mask, interpolation='nearest', origin='lower')
plt.colorbar()
# Binning scheme
l0_bins = np.arange(Nx/8) * 8 * np.pi/Lx
lf_bins = (np.arange(Nx/8)+1) * 8 * np.pi/Lx
b = nmt.NmtBinFlat(l0_bins, lf_bins)
ells_uncoupled = b.get_effective_ells()
# Let's create a fictitious theoretical power spectrum to generate
# Gaussian simulations:
larr = np.arange(3000.)
clarr = ((larr+50.)/300.)**(-1.1)+0.5
# This function will generate random fields
def get_sample_field():
mpt = nmt.synfast_flat(Nx, Ny, Lx, Ly, np.array([clarr]), [0])[0]
return nmt.NmtFieldFlat(Lx, Ly, mask, [mpt])
# Convenience function from sample_workspaces.py for flat-sky fields
def compute_master(f_a, f_b, wsp):
cl_coupled = nmt.compute_coupled_cell_flat(f_a, f_b, b)
cl_decoupled = wsp.decouple_cell(cl_coupled)
return cl_decoupled
# Let's generate one particular sample and its power spectrum
print("Field")
f0 = get_sample_field()
plt.figure()
plt.imshow(f0.get_maps()[0] * mask, interpolation='nearest', origin='lower')
plt.colorbar()
print("Workspace")
w = nmt.NmtWorkspaceFlat()
if not os.path.isfile("w_flat_covar.fits"):
w.compute_coupling_matrix(f0, f0, b)
w.write_to("w_flat_covar.fits")
w.read_from("w_flat_covar.fits")
cl_0 = compute_master(f0, f0, w)[0]
# Let's now compute the Gaussian estimate of the covariance!
print("Covariance")
# First we generate a NmtCovarianceWorkspaceFlat object to precompute
# and store the necessary coupling coefficients
cw = nmt.NmtCovarianceWorkspaceFlat()
if not os.path.isfile("cw_flat.fits"):
# This is the time-consuming operation
cw.compute_coupling_coefficients(f0, f0, b)
cw.write_to("cw_flat.fits")
cw.read_from("cw_flat.fits")
covar = nmt.gaussian_covariance_flat(cw, 0, 0, 0, 0, larr,
[clarr], [clarr],
[clarr], [clarr], w)
# Let's now compute the sample covariance
print("Sample covariance")
nsamp = 1000
covar_sample = np.zeros([len(cl_0), len(cl_0)])
mean_sample = np.zeros(len(cl_0))
for i in np.arange(nsamp):
print(i)
f = get_sample_field()
cl = compute_master(f, f, w)[0]
covar_sample += cl[None, :] * cl[:, None]
mean_sample += cl
mean_sample /= nsamp
covar_sample = covar_sample/nsamp-mean_sample[None, :] * mean_sample[:, None]
# Let's plot them:
plt.figure()
plt.plot(ells_uncoupled[0:], np.fabs(np.diag(covar, k=0)),
'r-', label='0-th diag., theory')
plt.plot(ells_uncoupled[0:], np.fabs(np.diag(covar_sample, k=0)),
'b-', label='0-th diag., %d sims' % nsamp)
plt.plot(ells_uncoupled[1:], np.fabs(np.diag(covar, k=1)), 'r--',
label='1-st diag., theory')
plt.plot(ells_uncoupled[1:], np.fabs(np.diag(covar_sample, k=1)),
'b--', label='1-st diag., %d sims' % nsamp)
plt.xlabel('$\\ell$', fontsize=16)
plt.ylabel('${\\rm diag}({\\rm Cov})$', fontsize=16)
plt.legend(loc='upper right', frameon=False)
plt.loglog()
plt.savefig("diags.png", bbox_inches='tight')
plt.figure()
plt.title("Correlation matrix residuals")
dcorr = (covar - covar_sample) / np.sqrt(np.diag(covar)[:, None] * np.diag(covar)[None, :])  # api: numpy.diag
"""Hypothesis strategies for numpy arrays, and tools for the generated data
Strategies
----------
complex_numbers
Similar to `hypothesis.strategies.complex_numbers`, but it takes the same
options as `hypothesis.strategies.floats`.
real_numbers
Wrapper of `hypothesis.strategies.floats` with different default options.
integers
Wrapper of `hypothesis.strategies.integers` to ignore irrelevant options.
numeric_dtypes
Generate `dtype` and `elements` values for `hypothesis.extra.numpy.arrays`
signature_shapes
Generate `shape` values for `hypothesis.extra.numpy.arrays` that broadcast
with a given signature.
broadcastable
Generate a tuple of arrays of the same generated `dtype` with generated
`shape`s that broadcast with a given signature.
constant
Generate a single array with all elements equal, with a generated `dtype`
and a `shape` that broadcasts with a given signature.
matrices_b
Generate broadcasting matrices (ndim >= 2) with float entries.
matrices_c
Generate core-only matrices (ndim == 2) with float entries.
vectors
Generate vectors (ndim == 1) with float entries.
Functions
---------
core_only
Remove non-core dimensions from arrays for functions that do not broadcast.
For `hypothesis.assume`:
non_singular
Check if each matrix in an array has finite nonzero determinant.
all_non_singular
Check if every matrix in some arrays has finite nonzero determinant.
full_rank
Check if each matrix in an array has the maximum rank given its shape.
all_full_rank
Check if every matrix in some arrays has the maximum rank given its shape.
wide
Check if a matrix has more columns than rows.
tall
Check if a matrix has more rows than columns.
nonsquare
Check if a matrix has different numbers of rows and columns.
"""
import collections.abc
from numbers import Number
from typing import Sequence, Tuple, Union, Optional
import hypothesis.extra.numpy as hyn
import hypothesis.strategies as st
import numpy as np
__all__ = [
'complex_numbers',
'real_numbers',
'integers',
'numeric_dtypes',
'signature_shapes',
'broadcastable',
'constant',
'matrices_b',
'matrices_c',
'vectors',
'core_only',
'non_singular',
'all_non_singular',
'full_rank',
'all_full_rank',
'wide',
'tall',
'nonsquare',
]
Shape = Tuple[int, ...]
CodeStrategy = Union[None, str, Sequence[str], st.SearchStrategy[str]]
# =============================================================================
# Strategies for Hypothesis generated test examples
# =============================================================================
_DEFAULT_INTS = {'min_value': -1e10, 'max_value': 1e10}
_DEFAULT_REALS = {'allow_infinity': False, 'allow_nan': False, 'width': 64,
'exclude_min': False, 'exclude_max': False, **_DEFAULT_INTS}
_DEFAULT_SHAPE = {'min_dims': 0, 'max_dims': 3, 'min_side': 1, 'max_side': 5,
'base_shape': ()}
def _extract_kwds(kwds: dict, **defaults) -> dict:
"""Take keys in defaults and pop from kwds, return as a dict"""
extracted = {}
for key, value in defaults.items():
extracted[key] = kwds.pop(key, value)
return extracted
def integers(**kwds) -> st.SearchStrategy[int]:
    """Strategy to generate integers in a specified range
This is a wrapper for `hypothesis.strategies.integers` that has different
defaults and ignores irrelevant keywords, instead of raising an exception.
See Also
--------
`hypothesis.strategies.integers`
"""
return st.integers(**_extract_kwds(kwds, **_DEFAULT_INTS))
def real_numbers(**kwds) -> st.SearchStrategy[float]:
"""Strategy to generate real numbers of specified width
This is a wrapper for `hypothesis.strategies.floats` that has different
defaults and ignores irrelevant keywords, instead of raising an exception.
See Also
--------
`hypothesis.strategies.floats`
"""
return st.floats(**_extract_kwds(kwds, **_DEFAULT_REALS))
def complex_numbers(**kwds) -> st.SearchStrategy[complex]:
"""Strategy to generate complex numbers of specified width
Takes any keyword arguments for `hypothesis.strategies.floats`
Returns
-------
complex_strategy : st.SearchStrategy[complex]
Strategy for complex numbers that applies float options to real and
imaginary parts.
See Also
--------
`hypothesis.strategies.complex_numbers`
"""
if 'width' in kwds:
kwds['width'] //= 2
return st.builds(complex, st.floats(**kwds), st.floats(**kwds))
_DTYPES = {
'f': (np.float32, real_numbers),
'd': (np.float64, real_numbers),
'F': (np.complex64, complex_numbers),
'D': (np.complex128, complex_numbers),
'i': (np.int32, integers)
}
@st.composite
def numeric_dtypes(draw: st.DataObject, code_st: CodeStrategy = None,
**kwds) -> Tuple[np.dtype, Number]:
"""Strategy to generate dtypes codes
Parameters
----------
code_st : None|str|Sequence[str]|SearchStrategy[str], optional
Strategy for dtype-code of numbers: a choice, or a list to choose from,
or `None` to choose from {'f','d','F','D'} or a custom strategy.
By default: `None`.
Also takes any keyword arguments for `hypothesis.strategies.floats`.
Returns
-------
dtype_strategy : np.dtype
Strategy for dtypes that are recognised by BLAS/LAPACK.
elements_strategy : Number
Strategy for numbers of that dtype.
See Also
--------
`hypothesis.extra.numpy.arrays`
"""
if code_st is None:
code_st = st.sampled_from(['f', 'd', 'F', 'D'])
elif isinstance(code_st, str):
code_st = st.just(code_st)
elif isinstance(code_st, collections.abc.Sequence):
code_st = st.sampled_from(code_st)
code = draw(code_st)
dtype, element_st = _DTYPES[code]
kwds['width'] = dtype().itemsize * 8
return dtype, element_st(**kwds)
@st.composite
def signature_shapes(draw: st.DataObject, signature: str,
**kwds) -> Tuple[Shape, ...]:
"""Create a hypothesis strategy for a tuple of shapes with the signature
Parameters
----------
signature : str
Signature of array core dimension, without the return
Also takes any keyword arguments (excluding `num_shapes`) for
`hypothesis.extra.numpy.mutually_broadcastable_shapes`.
Returns
-------
shape_strategy : Tuple[Tuple[int, ...], ...]
strategy to produce a tuple of tuples of ints that broadcast with the
given core dimension signature.
See Also
--------
`hypothesis.extra.numpy.arrays`
"""
opts = _extract_kwds(kwds, **_DEFAULT_SHAPE)
opts['signature'] = signature + '->()'
return draw(hyn.mutually_broadcastable_shapes(**opts)).input_shapes
@st.composite
def _arrays_args(draw: st.DataObject, signature: str, code_st: CodeStrategy,
kwds: Optional[dict] = None
) -> Tuple[np.dtype, Tuple[Shape, ...], Number]:
"""Generate inputs for hyn.arrays strategy
"""
kwds = {} if kwds is None else kwds
num_opts = _extract_kwds(kwds, **_DEFAULT_REALS)
shape_opts = _extract_kwds(kwds, **_DEFAULT_SHAPE)
if kwds:
raise ValueError(f"Unknown keywords: {list(kwds)}")
dtype, elements = draw(numeric_dtypes(code_st, **num_opts))
shapes = draw(signature_shapes(signature, **shape_opts))
return dtype, shapes, elements
@st.composite
def broadcastable(draw: st.DataObject,
signature: str,
code_st: CodeStrategy = None,
**kwds) -> Tuple[np.ndarray, ...]:
"""Create a hypothesis strategy for a tuple of arrays with the signature
Parameters
----------
signature : str
Signature of array core dimension, without the return
code_st : None|str|Sequence[str]|SearchStrategy[str], optional
Strategy for dtype code of numbers: a choice, or a list to choose from,
or `None` to choose from {'f','d','F','D'} or a custom strategy.
By default: `None`.
Also takes any keyword arguments for `hypothesis.strategies.floats` or
`hypothesis.extra.numpy.mutually_broadcastable_shapes` except `num_shapes`.
Returns
-------
strategy : Tuple[np.ndarray, ...]
Strategy to produce a tuple of arrays that broadcast with the given
core dimension signature.
See Also
--------
`hypothesis.extra.numpy.arrays`
"""
dtype, shapes, elements = draw(_arrays_args(signature, code_st, kwds))
kwds.update(dtype=dtype, elements=elements, fill=st.nothing())
result = tuple(draw(hyn.arrays(shape=shape, **kwds)) for shape in shapes)
return result[0] if len(result) == 1 else result
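# --- Illustrative usage sketch (an addition, not part of the original module) ---
# Shows how `broadcastable` can drive a hypothesis test for a matmul-like gufunc;
# the '(n,m),(m,p)' signature and the test body below are assumptions for illustration.
from hypothesis import given as _given_for_example


@_given_for_example(broadcastable('(n,m),(m,p)', 'd'))
def _example_matmul_shapes(arrays):
    """Sketch: the core dims of a @ b should be (n, p) for any drawn pair."""
    a, b = arrays
    assert (a @ b).shape[-2:] == (a.shape[-2], b.shape[-1])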
@st.composite
def constant(draw: st.DataObject,
signature: str,
code_st: CodeStrategy = None,
**kwds) -> np.ndarray:
"""Create a hypothesis strategy for a constant array with the signature
Parameters
----------
signature : str
Signature of array core dimension, without the return
code_st : None|str|Sequence[str]|SearchStrategy[str], optional
Strategy for dtype code of numbers: a choice, or a list to choose from,
or `None` to choose from {'f','d','F','D'} or a custom strategy.
By default: `None`.
Also takes any keyword arguments for `hypothesis.strategies.floats` or
`hypothesis.extra.numpy.mutually_broadcastable_shapes` except `num_shapes`.
Returns
-------
strategy : np.ndarray
Strategy to produce an array that broadcasts with the given core
dimension signature, with a constant value of that dtype.
See Also
--------
`hypothesis.extra.numpy.arrays`
"""
dtype, shapes, elements = draw(_arrays_args(signature, code_st, kwds))
fill = draw(elements)
return np.full(shapes[0], fill, dtype)  # api: numpy.full
#!/usr/bin/env python3
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
# MIT liscense
"""
The elliptical Gaussian filter class
Parameters
----------
scale_x,scale_y: float
Scale of the filter; x and y are the two orthogonal directions
sigma_x,sigma_y: float
Widths (sigma) of the filter in the x and y directions
radius_x,radius_y: float
The FHTM radii of the two dimensional filter
psf: np.ndarray
The two dimensional matrix of the filter
Functions
---------
get_radius: calculate the FHTM radii
get_filter: generate the filter
"""
# Modules
import numpy as np
# import astropy
# custom designed
# Definition of the class
class EGFilter:
# __init__
def __init__(self,scale = (8,8),sigma = (1,2),angle = 0):
self.scale_x,self.scale_y = scale
self.sigma_x,self.sigma_y = sigma
self.angle = angle
self._get_radius()
def _get_radius(self):
"""
Calculate the radii
"""
self.radius_x = np.sqrt(2*np.log(10))*self.sigma_x
self.radius_y = np.sqrt(2*np.log(10))*self.sigma_y  # api: numpy.log
# ---------------------------------------------------------- #
# ------------------ OzDES_Calculation.py ------------------ #
# --------- https://github.com/jhoormann/RMCodeDump -------- #
# ---------------------------------------------------------- #
# This is a dump of all the functions I have collated for #
# the OzDES RM program. This includes functions defined in #
# OzDES_calibSpec/getPhoto/makeLC plus some others. #
# Unless otherwise noted this code was written by #
# <NAME>. #
# ---------------------------------------------------------- #
from astropy.io import fits
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.integrate import fixed_quad
import OzDES_Plotting as ozplot
from scipy.interpolate import InterpolatedUnivariateSpline
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
import sys
# -------------------------------------------------- #
# Modified from a function originally provided by #
# <NAME> #
# -------------------------------------------------- #
# ----------------- SpectrumCoadd ------------------ #
# -------------------------------------------------- #
# Read in calibrated spectral data assuming data is #
# in the format provided by OzDES_calibSpec after #
# coadding. #
# -------------------------------------------------- #
class SpectrumCoadd(object):
# Spectrum class for latest version of the OzDES pipeline
def __init__(self, filepath=None):
assert filepath != None, "No file name is specified."
self.filepath = filepath
try:
self.data = fits.open(filepath)
except IOError:
print("Error: file {0} could not be found".format(filepath))
exit()
data = fits.open(filepath)
self.combined = data[0]
self.combinedVariance = data[1]
self._wavelength = None
self._flux = None
self._variance = None
self._fluxCoadd = None
self._varianceCoadd = None
self._dates = None
self._runs = None
self.numEpochs = int((np.size(data) - 3) / 3)
self.redshift = self.combined.header['z']
self.RA = self.combined.header['RA']
self.DEC = self.combined.header['DEC']
self.field = self.combined.header['FIELD']
@property
def wavelength(self):
"""Define wavelength solution."""
if getattr(self, '_wavelength', None) is None:
crpix = self.combined.header[
'crpix1'] - 1.0 # Central pixel value. The -1.0 is needed as Python is ZERO indexed
crval = self.combined.header['crval1'] # central wavelength value
self.cdelt = self.combined.header['cdelt1'] # Wavelength interval between subsequent pixels
n_pix = self.combined.header["NAXIS1"]
wave = ((np.arange(n_pix) - crpix) * self.cdelt) + crval
self._wavelength = wave
return self._wavelength
@property
def flux(self):
if getattr(self, '_flux', None) is None:
self._flux = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._flux[:, i] = self.data[i * 3 + 3].data
return self._flux
@property
def variance(self):
if getattr(self, '_variance', None) is None:
self._variance = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._variance[:, i] = self.data[i * 3 + 4].data
return self._variance
@property
def fluxCoadd(self):
if getattr(self, '_fluxCoadd', None) is None:
self._fluxCoadd = np.zeros(5000, dtype=float)
self._fluxCoadd[:] = self.data[0].data
return self._fluxCoadd
@property
def varianceCoadd(self):
if getattr(self, '_varianceCoadd', None) is None:
self._varianceCoadd = np.zeros(5000, dtype=float)
self._varianceCoadd[:] = self.data[1].data
return self._varianceCoadd
@property
def dates(self):
if getattr(self, '_dates', None) is None:
self._dates = np.zeros(self.numEpochs, dtype=float)
for i in range(self.numEpochs):
self._dates[i] = self.data[i * 3 + 3].header[
'AVGDATE'] # this give the average Modified Julian Date (UTC) that observation was taken
return self._dates
@property
def runs(self):
if getattr(self, '_runs', None) is None:
self._runs = np.zeros(self.numEpochs, dtype=float)
for i in range(self.numEpochs):
self._runs[i] = self.data[i * 3 + 3].header['RUN'] # this give the run number of the observation
return self._runs
# -------------------------------------------------- #
# ------------------- magToFlux -------------------- #
# -------------------------------------------------- #
# Reads in magnitude, error, and pivot wavelength #
# and converts to f_lambda in units of ergs/s/cm^2/A #
# -------------------------------------------------- #
def magToFlux(mag, err, pivot):
flux = (3*pow(10,18)/pow(pivot,2))*pow(10, -(2./5.)*(mag + 48.6))
flux_err = abs(flux*(-2./5.)*2.30259*err)
return flux, flux_err
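# Illustrative usage sketch (an addition to this collection): a made-up
# measurement of mag = 20 +/- 0.1 at a pivot wavelength of 4720 A gives
# f_lambda of order a few times 1e-17 erg/s/cm^2/A.
_demo_flux, _demo_flux_err = magToFlux(20.0, 0.1, 4720.)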
# -------------------------------------------------- #
# ------------------- outputLC --------------------- #
# -------------------------------------------------- #
# Creates an output file with date, flux, error #
# columns as is expected by lag recovery tools #
# Javelin and PyCCF. #
# -------------------------------------------------- #
def outputLC(date, flux, error, name, loc, obj_name):
length = len(date)
outname = loc + obj_name + "_" + name + ".txt"
output = open(outname, 'w')
for i in range(length):
if np.isnan(flux[i]) == False:
output.write("%s %s %s \n" % (date[i], flux[i], error[i]))
else:
# Sometimes the flux ends up as nan, this is generally because the SNR is so bad/the emission line so
# small that the continuum subtraction makes the line negative. These are not saved in the data file
# but a warning is outputted so you can have a look at what the problem is.
print("-------\n Houston, we have a problem! " + obj_name + " Night " + str(i) + "\n-------\n ")
output.close()
return
# -------------------------------------------------- #
# ---------------- convertPhotoLC -------------------#
# -------------------------------------------------- #
# Converts photometric light curves from magnitudes #
# to flux and saves the light curves separately for #
# each band. #
# -------------------------------------------------- #
def convertPhotoLC(photoName, source, bandName, bandPivot, scale, makeFig, outLoc):
# Read in the photometric data
photo = pd.read_table(photoName, delim_whitespace=True)
if makeFig == True:
# Define figure and axis for light curves of all bands
fig_photo, ax_photo = ozplot.plot_share_x(len(bandName), source, "Date (MJD)", bandName)
# Make a light curve for each band
for b in range(len(bandName)):
# Create an array for observations of a specified band and sort observations by date
band_data = photo[photo['BAND'] == bandName[b]].sort_values('MJD')
# Find date, mag, and magerr array for the specified band
ph_date = np.array(band_data['MJD'])
ph_mag = np.array(band_data['MAG'])
ph_magerr = np.array(band_data['MAGERR'])
# Loop over each epoch and convert magnitude to flux
ph_flux = np.zeros(len(ph_date))
ph_fluxerr = np.zeros(len(ph_date))
for e in range(len(ph_date)):
ph_flux[e], ph_fluxerr[e] = magToFlux(ph_mag[e], ph_magerr[e], bandPivot[b])
# Scale the fluxes before they are saved; if you are concerned about remembering the scale factor, perhaps
# include it in the output file name.
ph_flux = ph_flux / scale
ph_fluxerr = ph_fluxerr / scale
# Save the data as a light curve with filename outLoc + source + _ + bandName[b] + .txt
outputLC(ph_date, ph_flux, ph_fluxerr, bandName[b], outLoc, source)
if makeFig == True:
# plot the light curve on the subplot defined above.
ax_photo[b].errorbar(ph_date, ph_flux, yerr=ph_fluxerr, fmt='o', color='black')
# Once all the light curves are plotted save the figure as outLoc + source + "_photo.png"
if makeFig == True:
fig_photo.savefig(outLoc + source + "_photo.png")
return
# -------------------------------------------------- #
# ------------------ findLines ----------------------#
# -------------------------------------------------- #
# Determines which emission lines are present in the #
# spectrum. Returns an array of booleans where True #
# means the emission line is present. #
# -------------------------------------------------- #
def findLines(wavelength, z, lineName, contWinBSMin, contWinBSMax):
# decide which emission lines are available in the spectrum
availLines = np.zeros(len(lineName)).astype(bool)
for l in range(len(lineName)):
# for a line to be in the spectrum you need to include the continuum subtraction windows as well. This can
# be limiting but as we need continuum subtracted spectra it is necessary.
minWave = min(contWinBSMin[lineName[l]])
maxWave = max(contWinBSMax[lineName[l]])
if minWave * (1 + z) > wavelength[0] and maxWave * (1 + z) < wavelength[-1]:
availLines[l] = True
return availLines
# -------------------------------------------------- #
# -------------------- findBin ----------------------#
# -------------------------------------------------- #
# Finds the bin of the given vector (wavelength) #
# where the specified quantity (line) is located. #
# -------------------------------------------------- #
def findBin(line, wavelength):
bin = 0
for i in range(len(wavelength)-1):
if line >= wavelength[i] and line <= wavelength[i+1]:
bin = i
i = len(wavelength)
if line > wavelength[-1]:
bin = len(wavelength)-1
i = len(wavelength)
return bin
# -------------------------------------------------- #
# ---------------- interpolateVals ------------------#
# -------------------------------------------------- #
# Interpolates a linear line between two points and #
# propagates the uncertainty. #
# -------------------------------------------------- #
def interpolateVals(x, y, s, val):
# uncertainty is variance
interp = y[0] + (val - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
interp_var = s[0] + (s[0] + s[1]) * ((val - x[0]) / (x[1] - x[0])) ** 2.
return interp, interp_var
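# Worked check of the formulas above (an addition, not original code):
# interpolating halfway between (x, y, var) = (0, 0, 1) and (1, 2, 1) gives
# interp = 0 + 0.5*(2 - 0)/(1 - 0) = 1.0 and interp_var = 1 + (1 + 1)*0.5**2 = 1.5,
# i.e. interpolateVals([0., 1.], [0., 2.], [1., 1.], 0.5) -> (1.0, 1.5)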
# -------------------------------------------------- #
# ------------------ meanUncert ---------------------#
# -------------------------------------------------- #
# Finds the uncertainty corresponding to the mean #
# of a set of numbers. #
# -------------------------------------------------- #
def meanUncert(variance):
length = len(variance)
var = 0
num = 0
for i in range(length):
if np.isnan(variance[i]) == False:
var = var + variance[i]
num += 1
sigma2 = (var / (num ** 2))
return sigma2
# -------------------------------------------------- #
# ---------------- cont_fit_reject ------------------#
# -------------------------------------------------- #
# Interpolate a linear line through the mean of the #
# continuum subtraction windows to represent the #
# continuum and subtract this line. Modifies the #
# given flux/variance vectors. #
# -------------------------------------------------- #
def cont_fit_reject(wavelength, fluxes, variances, minWin, maxWin):
# Define the wavelength range for the continuum model, between the mean of both windows
wave = np.array([np.nanmean(minWin), np.nanmean(maxWin)])
nBins = len(wavelength)
# Determine how many epochs there are to continuum subtract
number = int(fluxes.size / nBins)
for epoch in range(number):
if number == 1:
flux = fluxes
variance = variances
else:
flux = fluxes[:, epoch]
variance = variances[:, epoch]
# Calculate the average flux at each extreme of the wave vector (ie mean of the continuum subtraction window)
fvals = np.array([np.nanmean(flux[findBin(minWin[0], wavelength):findBin(minWin[1], wavelength)]),
np.nanmean(flux[findBin(maxWin[0], wavelength):findBin(maxWin[1], wavelength)])])
# Calculate the average uncertainty at each extreme of the wave vector
svals = np.array([meanUncert(variance[findBin(minWin[0], wavelength):findBin(minWin[1], wavelength)]),
meanUncert(variance[findBin(maxWin[0], wavelength):findBin(maxWin[1], wavelength)])])
cont = np.zeros(nBins)
contVar = np.zeros(nBins)
# Find the interpolated linear continuum model
for i in range(nBins):
cont[i], contVar[i] = interpolateVals(wave, fvals, svals, wavelength[i])
# Subtract the continuum from the flux and add the error of the model in quadrature with the spectral error
flux -= cont
variance += contVar
return
# -------------------------------------------------- #
# The next three functions are modified from code #
# provided by <NAME> #
# -------------------------------------------------- #
# ------------------ filterCurve ------------------- #
# -------------------------------------------------- #
# creates a class to hold the transmission function #
# for each band. #
# -------------------------------------------------- #
class filterCurve:
"""A filter"""
def __init__(self):
self.wave = np.array([], 'float')
self.trans = np.array([], 'float')
return
def read(self, file):
# DES filter curves express the wavelengths in nm
if 'DES' in file:
factor = 10.
else:
factor = 1.
file = open(file, 'r')
for line in file.readlines():
if line[0] != '#':
entries = line.split()
self.wave = np.append(self.wave, float(entries[0]))
self.trans = np.append(self.trans, float(entries[1]))
file.close()
# We use Angstroms for the wavelength in the filter transmission file
self.wave = self.wave * factor
return
# -------------------------------------------------- #
# ---------------- readFilterCurve ----------------- #
# -------------------------------------------------- #
# Reads in the filter curves and stores it as the #
# filter curve class. #
# -------------------------------------------------- #
def readFilterCurves(bands, filters):
filterCurves = {}
for f in bands:
filterCurves[f] = filterCurve()
filterCurves[f].read(filters[f])
return filterCurves
# -------------------------------------------------- #
# ----------------- computeABmag ------------------- #
# -------------------------------------------------- #
# computes the AB magnitude for given transmission #
# functions and spectrum (f_lambda). Returns the #
# magnitude and variance. #
# -------------------------------------------------- #
def computeABmag(trans_flux, trans_wave, tmp_wave, tmp_flux, tmp_var):
# Takes and returns variance
# trans_ : transmission function data
# tmp_ : spectral data
# trans/tmp not necessarily defined over the same wavelength range
# first determine the wavelength range over which both are defined
minV = min(trans_wave)
if minV < min(tmp_wave):
minV = min(tmp_wave)
maxV = max(trans_wave)
if maxV > max(tmp_wave):
maxV = max(tmp_wave)
interp_wave = []
tmp_flux2 = []
tmp_var2 = []
# Make new vectors for the flux just using that range (assuming spectral binning)
for i in range(len(tmp_wave)):
if minV < tmp_wave[i] < maxV:
interp_wave.append(tmp_wave[i])
tmp_flux2.append(tmp_flux[i])
tmp_var2.append(tmp_var[i])
# interpolate the transmission function onto this range
# the transmission function is interpolated as it is generally much smoother than the spectral data
trans_flux2 = interp1d(trans_wave, trans_flux)(interp_wave)
# And now calculate the magnitude and uncertainty
c = 2.99792458e18  # speed of light, Angstrom/s
Num = np.nansum(tmp_flux2 * trans_flux2 * interp_wave)
Num_var = np.nansum(tmp_var2 * (trans_flux2 * interp_wave) ** 2)
Den = np.nansum(trans_flux2 / interp_wave)
with np.errstate(divide='raise'):
try:
magAB = -2.5 * np.log10(Num / Den / c) - 48.60
magABvar = 1.17882 * Num_var / (Num ** 2)
except FloatingPointError:
magAB = 99.
magABvar = 99.
return magAB, magABvar
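# Illustrative usage sketch (an addition): the AB magnitude of a flat f_lambda
# spectrum seen through a boxcar "filter"; the wavelength grid and flux level
# below are made-up numbers, not survey data.
_demo_wave = np.arange(4000., 5000., 1.)
_demo_mag, _demo_mag_var = computeABmag(np.ones_like(_demo_wave), _demo_wave, _demo_wave,
                                        1e-17 * np.ones_like(_demo_wave),
                                        1e-36 * np.ones_like(_demo_wave))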
# --------------------------------------------------- #
# --------------- uncertainty_cont ------------------ #
# --------------------------------------------------- #
# This function finds the uncertainty in line flux #
# and width measurements. For line flux you can #
# input a range of potential continuum windows and #
# it will randomly pick regions to use for continuum #
# subtraction. You can also input a region over which #
# to randomly choose the integration window. These #
# all also include flux randomization in order to #
# consider the effect of the variance spectrum. #
# You can also look at the effect flux randomization #
# has on the line width measurements FWHM and #
# velocity dispersion. You can also specify to look #
# at the RMS spectrum (flag='rms') for the line width #
# measurements, the default is to look at the provided#
# spectrum as is. The error is calculated through #
# bootstrap resampling using strapNum iterations. #
# The standard deviation of the calculated quantity #
# is then the associated error. #
# --------------------------------------------------- #
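# Illustrative call sketch (an addition; every value below is a placeholder,
# not a recommended setting): bootstrap the continuum windows for a line that
# spans wavelength bins [lineMinBin, lineMaxBin] of the spectrum:
#   uncertainty_cont(wave, flux, variance, strapNum=100, z=0.5,
#                    line=[lineMinBin, lineMaxBin], pivotLC=4700.,
#                    winLimMin=[4400., 4500.], winLimMax=[5100., 5200.],
#                    winsize=20., scale=1e-17, calc='cont')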
def uncertainty_cont(wavelength, flux, variance, strapNum, z, line, pivotLC, winLimMin, winLimMax, winsize, scale,
calc='cont', flag='mean', res=0):
# calc = cont -> continuum subtraction
# calc = win -> integration window
# calc = fwhm -> FWHM line width: can specify flag=rms
# calc = sigma -> line velocity dispersion: can specify flag=rms
# Performs bootstrap resampling in the range of potentially clean continuum to determine
# uncertainties on the flux measurement
# Continuum window in Angstroms - will be scaled according to redshift
# Winsize means the continuum subtraction windows are all the same size, just the locations shift
winsize = winsize/(1+z)
lineMin = line[0]
lineMax = line[1]
# Option for potentially clean continuum region pass in bootstrap
# Calculate the width of the bootstrapping region on each side of the line
lowW = (winLimMin[1]-winLimMin[0])/(1+z)
highW = (winLimMax[1]-winLimMax[0])/(1+z)
    # Check edge conditions: if the bootstrapping region extends beyond the wavelength coverage of the
    # spectrograph, use the spectrograph bounds as the edges
if winLimMin[0] < wavelength[0]:
winLimMin[0] = wavelength[0]
winLimMin[1] = (winLimMin[0] / (1 + z) + lowW) * (1 + z)
if winLimMin[1] > wavelength[line[0]]:
winLimMin[1] = wavelength[line[0]]
if winLimMax[1] > wavelength[-1]:
winLimMax[1] = wavelength[-1]
winLimMax[0] = (winLimMax[1] / (1 + z) - highW) * (1 + z)
if winLimMax[0] < wavelength[line[1]]:
winLimMax[0] = wavelength[line[1]]
# Wavelengths to choose in each window in steps of 0.5A
winMinVect = np.arange(winLimMin[0], winLimMin[1] - (winsize - 0.5) * (1 + z), 0.5 * (1 + z))
winMaxVect = np.arange(winLimMax[0], winLimMax[1] - (winsize - 0.5) * (1 + z), 0.5 * (1 + z))
# Array of random continuum window starting points
randVectMin = len(winMinVect) * np.random.rand(strapNum)
randVectMin = randVectMin.astype(int)
randVectMax = len(winMaxVect) * np.random.rand(strapNum)
randVectMax = randVectMax.astype(int)
# An array of values obtained through bootstrapping to determine uncertainties
vals = np.zeros(strapNum)
for i in range(strapNum):
if calc == 'win':
# subtracts from standard continuum but changes integration window, in this case feed in potential
# integration windows instead of bootstrapping regions
lineMinNew = findBin(winMinVect[randVectMin[i]], wavelength)
lineMaxNew = findBin(winMaxVect[randVectMax[i]], wavelength)
# Performs flux resampling to account for variance spectrum. Flux values shifted by Gaussian scaled by
# variance
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
# Continuum Subtract this new vector
cont_fit_reject(wavelength, fluxC, varC, winLimMin, winLimMax)
# Calculate the flux
lc_mag, lc_mag_err = computeABmag(np.ones(len(wavelength[lineMinNew:lineMaxNew])),
wavelength[lineMinNew:lineMaxNew], wavelength[lineMinNew:lineMaxNew],
fluxC[lineMinNew:lineMaxNew]*scale, varC[lineMinNew:lineMaxNew]*
pow(scale,2))
vals[i], lc_mag_err = magToFlux(lc_mag, lc_mag_err**0.5, pivotLC)
if calc == "cont":
# changes cont region
minWin = [winMinVect[randVectMin[i]], winMinVect[randVectMin[i]] + winsize * (1 + z)]
maxWin = [winMaxVect[randVectMax[i]], winMaxVect[randVectMax[i]] + winsize * (1 + z)]
# Performs flux resampling to account for variance spectrum. Flux values shifted by Gaussian scaled by
# variance
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
# Continuum Subtract this new vector
cont_fit_reject(wavelength, fluxC, varC, minWin, maxWin)
# Calculate the flux
lc_mag, lc_mag_err = computeABmag(np.ones(len(wavelength[lineMin:lineMax])),wavelength[lineMin:lineMax],
wavelength[lineMin:lineMax], fluxC[lineMin:lineMax]*scale,
varC[lineMin:lineMax]*pow(scale, 2))
vals[i], lc_mag_err = magToFlux(lc_mag, lc_mag_err**0.5, pivotLC)
if calc == "fwhm":
# Determine uncertainty in FWHM line measurement
# do flux randomization and continuum subtraction
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
cont_fit_reject(wavelength, fluxC, varC, winLimMin, winLimMax)
if flag == 'rms':
# first calculate the RMS spectrum if requested
fluxC, varC = rmsSpec(fluxC, varC)
vals[i] = fwhm(wavelength[lineMin:lineMax], fluxC[lineMin:lineMax], res)
if calc == "sigma":
# Determine uncertainty in velocity dispersion measurement
# do flux randomization and continuum subtraction
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
cont_fit_reject(wavelength, fluxC, varC, winLimMin, winLimMax)
if flag == 'rms':
# first calculate the RMS spectrum if requested
fluxC, varC = rmsSpec(fluxC, varC)
vals[i] = lineVar(wavelength[lineMin:lineMax], fluxC[lineMin:lineMax], res)
stddev_bs = np.nanstd(vals)
return stddev_bs
# --------------------------------------------------- #
# ----------------------- fwhm ---------------------- #
# --------------------------------------------------- #
# Takes an input spectrum and calculate the FWHM of #
# the provided emission line. It will search over #
# the entire provided wavelength window so just #
# include the relevant region of the spectrum. #
# --------------------------------------------------- #
def fwhm(wave, flux, res):
# First I am smoothing the spectrum
exponential_smooth(flux)
# Find the half maximum
peak = max(flux)
valley = min(flux)
peakLoc = wave[np.where(flux == peak)[0][0]]
peakLocB = findBin(peakLoc, wave)
hm = (peak-valley) / 2 + valley
leftUp = wave[0]
leftDown = wave[peakLocB]
rightUp = wave[-1]
rightDown = wave[peakLocB]
# First search for the half max to the left of the line
for i in range(peakLocB):
# First search going up the line
if flux[i] < hm < flux[i+1]:
leftUp = (wave[i] + wave[i+1])/2
# Then going down the line
if flux[peakLocB-i-1] < hm < flux[peakLocB-i]:
leftDown = (wave[peakLocB-i-1] + wave[peakLocB-i])/2
# Then take the average which will account for any double peaks/noise in the spectrum
left = (leftUp + leftDown)/2
# And now to the right
maxSize = len(wave) - 1
for i in range(maxSize - peakLocB):
# Go up
if flux[peakLocB + i + 1] < hm < flux[peakLocB + i]:
rightDown = (wave[peakLocB + i] + wave[peakLocB + i + 1])/2
# Go down
if flux[maxSize-i] < hm < flux[maxSize-i-1]:
rightUp = (wave[maxSize-i] + wave[maxSize-i-1])/2
right = (rightUp + rightDown)/2
# Now calculate the velocity
# km/s
c = 299792.458
widthObs = (right-left)
widthT = pow(widthObs**2 - res**2,0.5)/2
zLeft = -widthT/peakLoc
zRight = widthT/peakLoc
zComb = (1+zRight)/(1+zLeft)-1
vel = c*((1+zComb)**2-1)/((1+zComb)**2+1)
return vel
# --------------------------------------------------- #
# ---------------------- lineVar -------------------- #
# --------------------------------------------------- #
# Takes an input spectrum and calculate the velocity #
# dispersion of the emission line. It will search #
# over the entire provided wavelength window so just #
# include the relevant region of the spectrum. #
# --------------------------------------------------- #
def lineVar(wave, flux, res):
length = len(wave)
peak = max(flux)
peakLoc = wave[np.where(flux == peak)[0][0]]
# Calculate velocity dispersion following equation written in Peterson 2004, the three following constants
# correspond to the main terms in that equation.
Pdl = 0
lPdl = 0
l2Pdl = 0
for i in range(length):
Pdl += flux[i]
lPdl += flux[i] * wave[i]
l2Pdl += flux[i] * pow(wave[i], 2)
lambda0 = lPdl / Pdl
lambda2 = l2Pdl / Pdl
lambda02 = pow(lambda0, 2)
linevar = lambda2 - lambda02
sigma = linevar ** 0.5
c = 299792.458
sigmaT = pow(sigma**2 - res**2, 0.5)
left = peakLoc - sigmaT / 2
right = peakLoc + sigmaT / 2
zLeft = (left - peakLoc) / peakLoc
zRight = (right - peakLoc) / peakLoc
#redshift from lambda_l to lambda_r
zComb = (1 + zRight) / (1 + zLeft) - 1
vel = c * ((1 + zComb) ** 2 - 1) / ((1 + zComb) ** 2 + 1)
return vel
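# --------------------------------------------------- #
# Usage sketch (made-up values): fwhm and lineVar on a #
# synthetic Gaussian emission line with res = 0 A.     #
# Copies are passed because fwhm smooths in place;     #
# assumes the module's findBin helper is available.    #
# --------------------------------------------------- #
def _example_line_widths():
    wave = np.arange(4700.0, 5000.0, 1.0)                  # Angstroms
    flux = np.exp(-0.5 * ((wave - 4861.0) / 15.0) ** 2)    # Gaussian line, sigma = 15 A
    v_fwhm = fwhm(wave, np.copy(flux), res=0.0)            # km/s
    v_sigma = lineVar(wave, np.copy(flux), res=0.0)        # km/s
    return v_fwhm, v_sigma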
# --------------------------------------------------- #
# --------------- exponential_smooth ---------------- #
# --------------------------------------------------- #
# Function to apply an exponential smoothing kernel #
# to the data. Written by <NAME>. #
# --------------------------------------------------- #
def exponential_smooth(fluxes):
number = int(fluxes.size/fluxes.shape[0])
search_pixels = 5
decay = 0.9
window = np.arange(-search_pixels, search_pixels + 1)
weights = decay ** np.abs(window)
weights /= np.sum(weights)
if (number == 1):
flux = fluxes[:]
flux[:] = np.convolve(flux, weights, mode='same')
else:
for epoch in range(fluxes.shape[1]):
flux = fluxes[:, epoch]
flux[:] = np.convolve(flux, weights, mode='same')
# --------------------------------------------------- #
# -------------------- meanSpec --------------------- #
# --------------------------------------------------- #
# Calculates the mean of multiple spectra as well as #
# the corresponding variance spectrum. #
# --------------------------------------------------- #
def meanSpec(flux, variance):
length = len(flux[:,0])
meanFlux = np.zeros(length)
meanVar = np.zeros(length)
for i in range(length):
meanFlux[i] = np.nanmean(flux[i,:])
meanVar[i] = np.nanmean(variance[i,:])
return meanFlux, meanVar
# --------------------------------------------------- #
# -------------------- rmsSpec ---------------------- #
# --------------------------------------------------- #
# Calculates the RMS of the inputted spectra. Will #
# expect fluxes in [wavelength, epoch] format. An #
# exponential smoothing function is applied to the #
# data as a first and last step to mitigate some of #
# the noise. #
# --------------------------------------------------- #
def rmsSpec(flux, variance):
# smooth the input spectra
exponential_smooth(flux)
length = len(flux[:, 0])
epochs = len(flux[0, :])
# Calculate the RMS spectrum, variance propagated through but not used later
mean, meanVar = meanSpec(flux, variance)
rms = np.zeros(length)
rmsVar = np.zeros(length)
rmsVar2 = np.zeros(length)
for b in range(length):
for e in range(epochs):
rms[b] += (flux[b, e] - mean[b]) ** 2
rmsVar2[b] += 4 * rms[b] * (variance[b, e] + meanVar[b])
rms[b] = (rms[b] / (epochs - 1)) ** 0.5
rmsVar2[b] = rmsVar2[b] / ((epochs - 1) ** 2)
rmsVar[b] = rmsVar2[b] * (0.5 / rms[b]) ** 2
# smooth the final RMS spectrum
exponential_smooth(rms)
return rms, rmsVar
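# --------------------------------------------------- #
# Usage sketch (made-up values): meanSpec and rmsSpec  #
# on a small random [wavelength, epoch] flux array.    #
# --------------------------------------------------- #
def _example_rmsSpec():
    rng = np.random.default_rng(0)
    flux = 1.0 + 0.1 * rng.standard_normal((200, 4))    # [wavelength, epoch]
    variance = np.full((200, 4), 0.01)
    meanFlux, meanVar = meanSpec(flux, variance)
    rms, rmsVar = rmsSpec(flux, variance)
    return meanFlux, rms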
# -------------------------------------------------- #
# -------------------- lineLC ---------------------- #
# -------------------------------------------------- #
# Create emission line light curves by integrating #
# the emission lines after local continuum #
# subtraction. The uncertainties due to the variance#
# of the spectrum and the continuum subtraction is #
# performed through bootstrap resampling. This is #
# done for every emission line from the provided list#
# that is present in the spectrum. #
# -------------------------------------------------- #
def lineLC(dates, lineName, availLines, lineInt, contWinMin, contWinMax, contWinBSMin, contWinBSMax, wavelength,
origFluxes, origVariances, fluxCoadd, numEpochs, scale, z, strapNum, outLoc, source, makeFig, makeFigEpoch):
if makeFig == True:
# Define figure and axis for light curves of all available emission lines
lineAxis = [lineName[i] for i in range(len(lineName)) if availLines[i] == True]
fig_spec, ax_spec = ozplot.plot_share_x(len(lineAxis), source, "Date (MJD)", lineAxis)
for l in range(len(lineName)):
if availLines[l] == True:
line = lineName[l]
            # Copy the flux/variance vectors so you keep a version without continuum subtraction to use for other lines
fluxes = np.copy(origFluxes)
variances = np.copy(origVariances)
# define some variables for line/continuum windows in observed frame
contMin = np.array(contWinMin[line]) * (1 + z)
contMax = np.array(contWinMax[line]) * (1 + z)
contMinBS = np.array(contWinBSMin[line]) * (1 + z)
contMaxBS = np.array(contWinBSMax[line]) * (1 + z)
# similar for the line integration window but I want the wavelength bin number, not just the wavelength
lineMin = findBin(lineInt[line][0] * (1 + z), wavelength)
lineMax = findBin(lineInt[line][1] * (1 + z), wavelength)
# Perform the continuum subtraction
cont_fit_reject(wavelength, fluxes, variances, contMin, contMax)
lc_mag = np.zeros(numEpochs)
lc_mag_sigma = np.zeros(numEpochs)
lc_flux = np.zeros(numEpochs)
lc_flux_sigma = np.zeros(numEpochs)
total_error = np.zeros(numEpochs)
# Calculate the pivot wavelength associated with each line window
pivotLC = pow(np.nansum(wavelength[lineMin:lineMax]) / np.nansum(1 / wavelength[lineMin:lineMax]), 0.5)
# Calculate magnitudes and fluxes for each line
for epoch in range(numEpochs):
# first calculate magnitudes, save these if you want to compare this instead of fluxes
# Here the transmission function is 1 for all wavelengths within the integration window.
lc_mag[epoch], lc_mag_sigma[epoch] = computeABmag(np.ones(len(wavelength[lineMin:lineMax])),
wavelength[lineMin:lineMax],
wavelength[lineMin:lineMax],
fluxes[lineMin:lineMax, epoch] * scale,
variances[lineMin:lineMax, epoch] * pow(scale, 2))
# Now convert to flux, this is what is saved. Note: all fluxes here are actually flux densities
                # This uncertainty only considers the variance spectrum; the remaining terms are folded in at the next step
lc_flux[epoch], lc_flux_sigma[epoch] = magToFlux(lc_mag[epoch], lc_mag_sigma[epoch] ** 0.5, pivotLC)
total_error[epoch] = uncertainty_cont(wavelength, origFluxes[:, epoch], origVariances[:, epoch],
strapNum, z, [lineMin, lineMax], pivotLC, contMinBS,
contMaxBS, contMin[1] - contMin[0], scale)
if makeFigEpoch == True:
# Save figures showing spectrum before/after continuum subtraction for each epoch and line
fig_epoch, ax_epoch = ozplot.plot_share_x(2, source + " epoch " + str(epoch), "Wavelength ($\AA$)",
["Before", " After"], [wavelength[0], wavelength[-1]])
for p in range(2):
ax_epoch[p].axvspan(contMinBS[0], contMinBS[1], color='mediumblue', alpha=0.3)
ax_epoch[p].axvspan(contMaxBS[0], contMaxBS[1], color='mediumblue', alpha=0.3)
ax_epoch[p].axvspan(contMin[0], contMin[1], color='mediumblue', alpha=0.5)
ax_epoch[p].axvspan(contMax[0], contMax[1], color='mediumblue', alpha=0.5)
ax_epoch[p].axvspan(wavelength[lineMin], wavelength[lineMax], color='forestgreen', alpha=0.3)
ax_epoch[0].plot(wavelength, origFluxes[:, epoch], color='black')
ax_epoch[1].plot(wavelength, fluxes[:, epoch], color='black')
fig_epoch.savefig(outLoc + source + "_" + lineName[l] + "_epoch_" + str(epoch) + ".png")
plt.close(fig_epoch)
# Scale the line fluxes as with the photometry
lc_flux = lc_flux / scale
total_error = total_error / scale
# Save the data as a light curve with filename outLoc + source + _ + line + .txt
outputLC(dates, lc_flux, total_error, line, outLoc, source)
if makeFig == True:
# plot the light curve on the subplot defined above. First get the index for the axis associated with
# the line being analyzed.
lbin = lineAxis.index(line)
ax_spec[lbin].errorbar(dates, lc_flux, yerr=total_error, fmt='o', color='black')
# make a plot to show the continuum subtraction regions on the coadded spectrum
fig_coadd, ax_coadd = ozplot.plot_share_x(1, source, "Wavelength ($\AA$)", ["Total Coadded Flux (" +
str(scale) +
" erg/s/cm$^2$/$\AA$)"],
[wavelength[0], wavelength[-1]])
ax_coadd[0].axvspan(contMinBS[0], contMinBS[1], color='mediumblue', alpha=0.3)
ax_coadd[0].axvspan(contMaxBS[0], contMaxBS[1], color='mediumblue', alpha=0.3)
ax_coadd[0].axvspan(contMin[0], contMin[1], color='mediumblue', alpha=0.5)
ax_coadd[0].axvspan(contMax[0], contMax[1], color='mediumblue', alpha=0.5)
ax_coadd[0].axvspan(wavelength[lineMin], wavelength[lineMax], color='forestgreen', alpha=0.3)
ax_coadd[0].plot(wavelength, fluxCoadd, color='black')
fig_coadd.savefig(outLoc + source + "_" + lineName[l] + "_coadd.png")
plt.close(fig_coadd)
# Once all the light curves are plotted save the figure as outLoc + source + "_spec.png"
if makeFig == True:
fig_spec.savefig(outLoc + source + "_spec.png")
return
# -------------------------------------------------- #
# ------------------ makePhotoLC --------------------#
# -------------------------------------------------- #
# Makes light curves by applying photometric filters #
# to a series of spectral data. The data is saved #
# as fluxes. #
# -------------------------------------------------- #
def makePhotoLC(dates, bandName, bandPivot, filters, wavelength, origFluxes, origVariances, numEpochs, scale, outLoc,
source, makeFig):
filterCurves = readFilterCurves(bandName, filters)
if makeFig == True:
# Define figure and axis for light curves of all available emission lines
fig_phot, ax_phot = ozplot.plot_share_x(len(bandName), source, "Date (MJD)", bandName)
for b in range(len(bandName)):
mags = np.zeros(numEpochs)
mags_var = np.zeros(numEpochs)
flux = np.zeros(numEpochs)
flux_err = np.zeros(numEpochs)
for e in range(numEpochs):
# Calculate the magntiude given the transmission function provided
mags[e], mags_var[e] = computeABmag(filterCurves[bandName[b]].trans, filterCurves[bandName[b]].wave,
wavelength, origFluxes[:, e] * scale,
origVariances[:, e] * pow(scale, 2))
# Then convert to fluxes
flux[e], flux_err[e] = magToFlux(mags[e], mags_var[e] ** 0.5, bandPivot[b])
# Scale the fluxes
flux = flux / scale
flux_err = flux_err / scale
# Save the data as a light curve with filename outLoc + source + _ + calc_bandName + .txt
outputLC(dates, flux, flux_err, 'calc_' + bandName[b], outLoc, source)
if makeFig == True:
# plot the light curve on the subplot defined above.
ax_phot[b].errorbar(dates, flux, yerr=flux_err, fmt='o', color='black')
# Once all the light curves are plotted save the figure as outLoc + source + "_makePhot.png"
if makeFig == True:
fig_phot.savefig(outLoc + source + "_makePhot.png")
return
# -------------------------------------------------- #
# ------------------- calcWidth ---------------------#
# -------------------------------------------------- #
# Calculates emission line width (FWHM and velocity #
# dispersion) using the mean and RMS spectra. If #
# possible calculates the BH mass using the R-L #
# relationship. The data is saved to a text file. #
# -------------------------------------------------- #
def calcWidth(wavelength, lineName, lineLoc, availLines, lineInt, lumLoc, contWinMin, contWinMax, contWinBSMin,
contWinBSMax, origFluxes, origVariances, origFluxCoadd, origVarCoadd, z, strapNum, scale, outLoc, source,
makeFig, calcBH):
# open a file to save the data to - outLoc + source + _vel.txt
out = open(outLoc + source + "_vel_and_mass.txt", 'w')
# Type (Mean/RMS), Measure (FWHM, Vel Disp)
out.write("Line Type Measure Vel Vel_Err Mass Lag Lag_Err_Min Lag_Err_Max Mass_Err_Min, Mass_Err_Max\n")
# Convert wavelength vector to rest frame
wave = wavelength/(1+z)
for l in range(len(lineName)):
if availLines[l] == True:
line = lineName[l]
# If calcBH == True estimate BH mass from the R-L relationship. Here I will calculate the lag. If you want
# to use the measured lag feed that in here. If the luminosity needed isn't in the spectroscopic window
# I will just give nan for the black hole mass. The luminosity is determined from the coadded flux
if calcBH == True:
lum, lumerr = luminosity(wavelength, origFluxCoadd, origVarCoadd, z, lumLoc[l]*(1+z), strapNum, scale)
if np.isnan(lum) == True:
lag = np.nan
lag_err_min = np.nan
lag_err_max = np.nan
elif line == 'CIV':
lag, lag_err_max, lag_err_min = RL_CIV(lum, lumerr)
elif line == 'MgII':
lag, lag_err_max, lag_err_min = RL_MgII(lum, lumerr)
elif line == 'Hbeta':
lag, lag_err_max, lag_err_min = RL_Hbeta(lum, lumerr)
else:
lag = np.nan
lag_err_min = np.nan
lag_err_max = np.nan
# Calculate the resolution of the spectrograph at the specified wavelength
res = findRes(lineLoc[l], z)
# define some variables for line/continuum windows in rest frame
contMin = np.array(contWinMin[line])
contMax = np.array(contWinMax[line])
contMinBS = np.array(contWinBSMin[line])
contMaxBS = np.array(contWinBSMax[line])
# similar for the line integration window but I want the wavelength bin number, not just the wavelength
lineMin = findBin(lineInt[line][0], wave)
lineMax = findBin(lineInt[line][1], wave)
fluxes = np.copy(origFluxes)
variances = np.copy(origVariances)
fluxCoadd = np.copy(origFluxCoadd)
varCoadd = np.copy(origVarCoadd)
# Perform the continuum subtraction on epochs and coadd
cont_fit_reject(wave, fluxes, variances, contMin, contMax)
cont_fit_reject(wave, fluxCoadd, varCoadd, contMin, contMax)
# First look at the mean spectrum, let's smooth it
# FWHM
vel_mean_fwhm = fwhm(wave[lineMin:lineMax], fluxCoadd[lineMin:lineMax], res)
err_mean_fwhm = uncertainty_cont(wave, origFluxCoadd, origVarCoadd, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='fwhm',
flag='mean', res=res)
# Sigma
vel_mean_sigma = lineVar(wave[lineMin:lineMax], fluxCoadd[lineMin:lineMax], res)
err_mean_sigma = uncertainty_cont(wave, origFluxCoadd, origVarCoadd, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='sigma',
flag='mean', res=res)
# Now look at the RMS spectrum
rms, rms_var = rmsSpec(fluxes, variances)
vel_rms_fwhm = fwhm(wave[lineMin:lineMax], rms[lineMin:lineMax], res)
err_rms_fwhm = uncertainty_cont(wave, origFluxes, origVariances, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='fwhm',
flag='rms', res=res)
# Sigma
            vel_rms_sigma = lineVar(wave[lineMin:lineMax], rms[lineMin:lineMax], res)
err_rms_sigma = uncertainty_cont(wave, origFluxes, origVariances, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='sigma',
flag='rms', res=res)
if calcBH == True and np.isnan(lag) == False:
# Calculate BH mass for all 4 line measurements
mass_mean_fwhm, mass_min_mean_fwhm, mass_max_mean_fwhm = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_mean_fwhm, err_mean_fwhm)
mass_mean_sigma, mass_min_mean_sigma, mass_max_mean_sigma = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_mean_sigma, err_mean_sigma)
mass_rms_fwhm, mass_min_rms_fwhm, mass_max_rms_fwhm = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_rms_fwhm, err_rms_fwhm)
mass_rms_sigma, mass_min_rms_sigma, mass_max_rms_sigma = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_rms_sigma, err_rms_sigma)
else:
mass_mean_fwhm, mass_min_mean_fwhm, mass_max_mean_fwhm = np.nan, np.nan, np.nan
mass_mean_sigma, mass_min_mean_sigma, mass_max_mean_sigma = np.nan, np.nan, np.nan
mass_rms_fwhm, mass_min_rms_fwhm, mass_max_rms_fwhm = np.nan, np.nan, np.nan
mass_rms_sigma, mass_min_rms_sigma, mass_max_rms_sigma = np.nan, np.nan, np.nan
out.write(line + " MEAN FWHM %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_mean_fwhm, err_mean_fwhm, lag,
lag_err_min, lag_err_max,
mass_mean_fwhm, mass_min_mean_fwhm,
mass_max_mean_fwhm))
out.write(line + " MEAN Sigma %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_mean_sigma, err_mean_sigma, lag,
lag_err_min, lag_err_max,
mass_mean_sigma, mass_min_mean_sigma,
mass_max_mean_sigma))
out.write(line + " RMS FWHM %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_rms_fwhm, err_rms_fwhm, lag,
lag_err_min, lag_err_max, mass_rms_fwhm,
mass_min_rms_fwhm, mass_max_rms_fwhm))
out.write(line + " RMS Sigma %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_rms_sigma, err_rms_sigma,
lag,lag_err_min, lag_err_max,
mass_rms_sigma, mass_min_rms_sigma,
mass_max_rms_sigma))
if makeFig == True:
# Define figure and axis for mean and rms spectrum
fig_width, ax_width = ozplot.plot_share_x(2, source, "Wavelength ($\AA$)", ["Mean Flux", "RMS Flux"],
[contMin[1], contMax[0]])
ax_width[0].plot(wave, fluxCoadd, color='black')
ax_width[0].axvline(wave[lineMin], color='forestgreen')
ax_width[0].axvline(wave[lineMax], color='forestgreen')
ax_width[1].plot(wave, rms, color='black')
ax_width[1].axvline(wave[lineMin], color='forestgreen')
ax_width[1].axvline(wave[lineMax], color='forestgreen')
fig_width.savefig(outLoc + source + "_" + line + "_width.png")
plt.close(fig_width)
out.close()
return
# -------------------------------------------------- #
# -------------------- findRes ----------------------#
# -------------------------------------------------- #
# The line width measurements are dependent on the #
# resolution of the spectrograph. The OzDES spectra #
# are made up of two arms of AAOmega with different #
# resolutions. This function will find the #
# resolution at the emission line in question. You #
# will need to modify this if you are using a #
# different spectrograph. Input rest frame emission #
# line wavelength and convert. #
# -------------------------------------------------- #
def findRes(line, z):
    # Use OzDES data - the blue/red arms are spliced at 5700 A; arm resolutions are given below
splice = 5700
resO = [1600, 1490] #blue/red arm of spectrograph resolution
obsLine = line*(1+z)
if obsLine < splice:
dL = obsLine/resO[0]
else:
dL = obsLine/resO[1]
return dL
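# -------------------------------------------------- #
# Usage sketch: resolution element at rest-frame     #
# H-beta (4861 A) for a source at z = 0.5; the       #
# observed line lands on the red arm.                #
# -------------------------------------------------- #
def _example_findRes():
    return findRes(4861.0, 0.5)    # ~4.9 A (7291.5 / 1490)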
# --------------------------------------------------- #
# ---------------- comoving_distance ---------------- #
# --------------------------------------------------- #
# Function to calculate the comoving distance at a #
# given redshift. Written by <NAME>. #
# --------------------------------------------------- #
def comoving_distance(z):
# returns the comoving distance in Mpc
# c in km/s
c = 299792.458
# H0 in km/s/Mpc
H0 = 70.0
f_E = lambda x: 1.0 / np.sqrt(0.3 * (1 + x) ** 3 + 0.7)
d_C = c / H0 * fixed_quad(f_E, 0.0, z, n=500)[0]
return d_C
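# --------------------------------------------------- #
# Usage sketch: comoving distance at z = 0.5 for the   #
# hard-coded flat cosmology (H0 = 70, Omega_m = 0.3);  #
# the result is roughly 1.9 Gpc.                       #
# --------------------------------------------------- #
def _example_comoving_distance():
    return comoving_distance(0.5)    # Mpc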
# --------------------------------------------------- #
# ------------------- luminosity -------------------- #
# --------------------------------------------------- #
# Calculates the lambda L_lambda luminosity for the #
# specified wavelength and gives uncertainty via #
# bootstrapping. If the luminosity is not present in #
# the spectrum return nan. #
# --------------------------------------------------- #
def luminosity(wavelength, flux, variance, z, lum, strapNum, scale):
# Check if the luminosity windows used (lum +/- 10 A in observed frame) are present in the spectrum. If not return
# nan for the luminosity
if wavelength[0] < lum - 10 and lum + 10 < wavelength[-1]:
lumBin = findBin(lum, wavelength)
# calculate the mean flux around the specified luminosity
fluxV = np.nanmean(flux[lumBin-2:lumBin+2]) * scale
# calculate the range of fluxes based on bootstrapping
flux_std = Lum_uncertainty(wavelength, flux, variance, lum, strapNum, scale)
# scale by luminosity - we want lambda L_lambda
fluxV = fluxV*lum
flux_std = flux_std*lum
# flux should be in erg/s/cm^2 the above statement gets rid of the angstroms
d_C = comoving_distance(z)
d_L = (1.0 + z) * d_C
# convert d_L from Mpc to cm
d_L *= 3.0857E24
        # scale factor used for uncertainty propagation
scalefact = 4. * np.pi * d_L ** 2
L = fluxV * scalefact
L_std = flux_std * scalefact
# calculate log Luminosity and error
lgL = np.log10(L)
err = lgL- np.log10(L-L_std)
else:
lgL = np.nan
err = np.nan
return lgL, err
# --------------------------------------------------- #
# ---------------- Lum_uncertainty ------------------ #
# --------------------------------------------------- #
# Calculates the uncertainty due to flux resampling #
# and shifting luminosity window. #
# --------------------------------------------------- #
def Lum_uncertainty(wavelength, flux, variance, lum, strapNum, scale):
    # Performs bootstrap resampling of the flux within a window that shifts around the luminosity
    # wavelength (10 Angstroms on either side) to determine the uncertainty
nBins = len(wavelength)
winLim = [findBin(lum-10, wavelength), findBin(lum+10, wavelength)]
# vector of wavelengths within winLim spaced by 1 Angstrom
winVect = np.arange(winLim[0], winLim[1]+1, 1)
# Array of random continuum window starting points
randVect = len(winVect)*np.random.rand(strapNum)
randVect = randVect.astype(int)
fluxes = np.zeros(strapNum)
# For each iteration do flux resampling and calculate the line flux and shift window slightly
for i in range(strapNum):
varC = np.copy(variance)
fluxC = np.zeros(nBins)
for w in range(nBins):
err = varC[w] ** 0.5
fluxC[w] = np.random.normal(flux[w], err)
fluxes[i] = np.nanmean(fluxC[winVect[randVect[i]] - 2:winVect[randVect[i]] + 2]) * scale
return np.nanstd(fluxes)
# --------------------------------------------------- #
# -------------------- RL_CIV ----------------------- #
# --------------------------------------------------- #
# Radius Luminosity using CIV line and L1350 from #
# Hoormann et al 2019. L and L_std are log_10. #
# --------------------------------------------------- #
def RL_CIV(L, L_std):
# From Hoormann et al 2019 using L1350
lag = pow(10, 0.81 + 0.47 * (L - 44))
lag_err_p = abs(pow(10, (0.81 + 0.09) + (0.47 + 0.03) * ((L + L_std) - 44)) - lag)
lag_err_m = abs(pow(10, (0.81 - 0.09) + (0.47 - 0.03) * ((L - L_std) - 44)) - lag)
return lag, lag_err_p, lag_err_m
# --------------------------------------------------- #
# -------------------- RL_MgII ---------------------- #
# --------------------------------------------------- #
# Radius Luminosity using MgII line and L3000 from #
# Trakhenbrot & Netzer 2012 best fit BCES method. #
# L and L_std are log_10. #
# --------------------------------------------------- #
def RL_MgII(L, L_std):
lag = pow(10, 1.34 + 0.615 * (L - 44))
lag_err_p = abs(pow(10, (1.34 + 0.019) + (0.615 + 0.014) * ((L + L_std) - 44)) - lag)
lag_err_m = abs(pow(10, (1.34 - 0.019) + (0.615 - 0.014) * ((L - L_std) - 44)) - lag)
return lag, lag_err_p, lag_err_m
# --------------------------------------------------- #
# -------------------- RL_Hbeta --------------------- #
# --------------------------------------------------- #
# Radius Luminosity using Hbeta line and L5100 from #
# Bentz et al 2013. L and L_std are log_10. #
# --------------------------------------------------- #
def RL_Hbeta(L, L_std):
lag = pow(10, 1.527 + 0.533 * (L - 44))
lag_err_p = abs(pow(10, (1.527 + 0.031) + (0.533 + 0.035) * ((L + L_std) - 44)) - lag)
lag_err_m = abs(pow(10, (1.527 - 0.031) + (0.533 - 0.033) * ((L - L_std) - 44)) - lag)
return lag, lag_err_p, lag_err_m
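# --------------------------------------------------- #
# Usage sketch (made-up values): lags in days from the #
# three R-L relations for log10(lambda L_lambda) =     #
# 44.5 with a 0.1 dex uncertainty (CIV lag ~ 11 days). #
# --------------------------------------------------- #
def _example_RL():
    L, L_std = 44.5, 0.1
    return RL_CIV(L, L_std), RL_MgII(L, L_std), RL_Hbeta(L, L_std)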
# --------------------------------------------------- #
# ------------------ blackHoleMass ------------------ #
# --------------------------------------------------- #
# Given a lag and velocity calculate the black hole #
# mass. Given in units of 10^9 Solar Masses. #
# --------------------------------------------------- #
def blackHoleMass(lag, lErrMin, lErrMax, velocity, vErr):
    # convert everything to SI units (G, c, Msun, seconds, m/s)
G = 6.67*10**-11
c = 2.998*10**8
Msun = 1.989*10**30
lag = lag*86400
lErrMin = lErrMin*86400
lErrMax = lErrMax*86400
velocity = velocity*1000
vErr = vErr*1000
# Define f factor
f = 4.47
ferr = 1.25 #Woo et al 2014
# Calculate Mass
mass = f*(pow(velocity, 2)*c*lag/G)/Msun/10**9
sigmaMin = mass*pow((ferr/f)**2 + (2*vErr/velocity)**2 + (lErrMin/lag)**2 ,0.5)
sigmaMax = mass*pow((ferr/f)**2 + (2*vErr/velocity)**2 + (lErrMax/lag)**2 ,0.5)
return mass, sigmaMin, sigmaMax
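# --------------------------------------------------- #
# Usage sketch (made-up values): a 100 day lag with    #
# +/- 10 day uncertainty and a 3000 +/- 300 km/s line  #
# width gives roughly 0.8 (in units of 10^9 Msun).     #
# --------------------------------------------------- #
def _example_blackHoleMass():
    return blackHoleMass(100.0, 10.0, 10.0, 3000.0, 300.0)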
# -------------------------------------------------- #
# Modified from a function originally provided by #
# <NAME> #
# -------------------------------------------------- #
# ------------------ Spectrumv18 ------------------- #
# -------------------------------------------------- #
# Read in spectral data assuming the format from v18 #
# of the OzDES reduction pipeline. Modify if your #
# input data is stored differently #
# -------------------------------------------------- #
class Spectrumv18(object):
def __init__(self, filepath=None):
assert filepath is not None
self.filepath = filepath
try:
self.data = fits.open(filepath)
except IOError:
print("Error: file {0} could not be found".format(filepath))
exit()
data = fits.open(filepath)
self.combinedFlux = data[0]
self.combinedVariance = data[1]
self.combinedPixels = data[2]
self.numEpochs = int((np.size(data) - 3) / 3)
self.field = self.data[3].header['SOURCEF'][19:21]
self.cdelt1 = self.combinedFlux.header['cdelt1'] # Wavelength interval between subsequent pixels
self.crpix1 = self.combinedFlux.header['crpix1']
self.crval1 = self.combinedFlux.header['crval1']
self.n_pix = self.combinedFlux.header['NAXIS1']
self.RA = self.combinedFlux.header['RA']
self.DEC = self.combinedFlux.header['DEC']
self.fluxCoadd = self.combinedFlux.data
self.varianceCoadd = self.combinedVariance.data
self.badpixCoadd = self.combinedPixels.data
self._wavelength = None
self._flux = None
self._variance = None
self._badpix = None
self._dates = None
self._run = None
self._ext = None
self._qc = None
self._exposed = None
@property
def wavelength(self):
"""Define wavelength solution."""
if getattr(self, '_wavelength', None) is None:
wave = ((np.arange(self.n_pix) - self.crpix1) * self.cdelt1) + self.crval1
self._wavelength = wave
return self._wavelength
@property
def flux(self):
if getattr(self, '_flux', None) is None:
self._flux = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._flux[:, i] = self.data[i * 3 + 3].data
return self._flux
@property
def variance(self):
if getattr(self, '_variance', None) is None:
self._variance = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._variance[:, i] = self.data[i * 3 + 4].data
return self._variance
@property
def badpix(self):
if getattr(self, '_badpix', None) is None:
self._badpix = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._badpix[:, i] = self.data[i * 3 + 5].data
return self._badpix
@property
def dates(self):
if getattr(self, '_dates', None) is None:
self._dates = np.zeros(self.numEpochs, dtype=float)
for i in range(self.numEpochs):
self._dates[i] = round(self.data[i * 3 + 3].header['UTMJD'],3)
# this give Modified Julian Date (UTC) that observation was taken
return self._dates
@property
def ext(self):
if getattr(self, '_ext', None) is None:
self._ext = []
for i in range(self.numEpochs):
self._ext.append(i * 3 + 3) # gives the extension in original fits file
return self._ext
@property
def run(self):
if getattr(self, '_run', None) is None:
self._run = []
for i in range(self.numEpochs):
source = self.data[i * 3 + 3].header['SOURCEF']
self._run.append(int(source[3:6])) # this gives the run number of the observation
return self._run
@property
def qc(self):
if getattr(self, '_qc', None) is None:
self._qc = []
for i in range(self.numEpochs):
self._qc.append(self.data[i * 3 + 3].header['QC'])
# this tell you if there were any problems with the spectra that need to be masked out
return self._qc
@property
def exposed(self):
if getattr(self, '_exposed', None) is None:
self._exposed = []
for i in range(self.numEpochs):
self._exposed.append(self.data[i * 3 + 3].header['EXPOSED'])
# this will give you the exposure time of each observation
return self._exposed
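# -------------------------------------------------- #
# Usage sketch with a hypothetical v18-format OzDES  #
# fits filename; the attributes returned are the     #
# ones used later by calibSpec.                      #
# -------------------------------------------------- #
def _example_Spectrumv18():
    spec = Spectrumv18("SVA1_COADD_example.fits")    # hypothetical filename
    return spec.wavelength, spec.flux[:, 0], spec.dates, spec.qc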
# -------------------------------------------------- #
# ------------------- calibSpec -------------------- #
# -------------------------------------------------- #
# This function does the bulk of the work. It will #
# 1) determine extensions which can be calibrated #
# 2) calculate the scale factors #
# 3) calculate the warping function #
# 4) output new fits file with scaled spectra #
# -------------------------------------------------- #
def calibSpec(obj_name, spectra, photo, spectraName, photoName, outBase, bands, filters, centers, plotFlag, coaddFlag,
redshift):
# Assumes scaling given is of the form
# gScale = scaling[0,:] gError = scaling[3,:]
# rScale = scaling[1,:] rError = scaling[4,:]
# iScale = scaling[2,:] iError = scaling[5,:]
# inCoaddWeather = scaling[6,:]
# inCoaddPhoto = scaling[7,:]
# gMag = scaling[8,:] gMagErr = scaling[9,:]
# rMag = scaling[10,:] rMagErr = scaling[11,:]
# iMag = scaling[12,:] iMagErr = scaling[13,:]
# First we decide which extensions are worth scaling
extensions, noPhotometry, badQC = prevent_Excess(spectra, photo, bands)
# Then we calculate the scale factors
nevermind, scaling = scaling_Matrix(spectra, extensions, badQC, noPhotometry, photo, bands, filters)
# Remove last minute trouble makers
extensions = [e for e in extensions if e not in nevermind]
badQC = badQC + nevermind
# And finally warp the data
for s in extensions:
# scale the spectra
if plotFlag != False:
plotName = plotFlag + obj_name + "_" + str(s)
else:
plotName = False
spectra.flux[:, s], spectra.variance[:, s] = warp_spectra(scaling[0:3, s], scaling[3:6, s], spectra.flux[:, s],
spectra.variance[:, s], spectra.wavelength, centers,
plotName)
if coaddFlag == False:
create_output_single(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName,
outBase, redshift)
elif coaddFlag in ['Run', 'Date']:
coadd_output(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName, outBase,
plotFlag, coaddFlag, redshift)
else:
print("What do you want me to do with this data? Please specify output type.")
return
# -------------------------------------------------- #
# ---------------- prevent_Excess ------------------ #
# -------------------------------------------------- #
# This function removes extensions from the list to #
# calibrate because of insufficient photometric data #
# or bad quality flags #
# -------------------------------------------------- #
def prevent_Excess(spectra, photo, bands):
    # First, find the date range over which photometry exists on both sides of each spectroscopic observation.
    # For each band find the latest (earliest) photometric date, then take the minimum (maximum) of those
    # values across bands. This is needed because we linearly interpolate between photometric data points on
    # either side of the spectroscopic observation to estimate the magnitudes at that epoch.
maxPhot = np.zeros(3)
for e in range(len(photo['Date'][:])):
if photo['Band'][e] == bands[0]:
if photo['Date'][e] > maxPhot[0]:
maxPhot[0] = photo['Date'][e]
if photo['Band'][e] == bands[1]:
if photo['Date'][e] > maxPhot[1]:
maxPhot[1] = photo['Date'][e]
if photo['Band'][e] == bands[2]:
if photo['Date'][e] > maxPhot[2]:
maxPhot[2] = photo['Date'][e]
photLim = min(maxPhot)
minPhot = np.array([100000, 100000, 100000])
for e in range(len(photo['Date'][:])):
if photo['Band'][e] == bands[0]:
if photo['Date'][e] < minPhot[0]:
minPhot[0] = photo['Date'][e]
if photo['Band'][e] == bands[1]:
if photo['Date'][e] < minPhot[1]:
minPhot[1] = photo['Date'][e]
if photo['Band'][e] == bands[2]:
if photo['Date'][e] < minPhot[2]:
minPhot[2] = photo['Date'][e]
photLimMin = max(minPhot)
noPhotometry = []
badQC = []
allowedQC = ['ok', 'backup']
for s in range(spectra.numEpochs):
# Remove data with insufficient photometry
if spectra.dates[s] > photLim:
noPhotometry.append(s)
if spectra.dates[s] < photLimMin:
noPhotometry.append(s)
# Only allow spectra with quality flags 'ok' and 'backup'
if spectra.qc[s] not in allowedQC:
badQC.append(s)
extensions = []
# Make a list of extensions which need to be analyzed
for s in range(spectra.numEpochs):
if s not in noPhotometry and s not in badQC:
extensions.append(s)
return extensions, noPhotometry, badQC
# -------------------------------------------------- #
# ---------------- scaling_Matrix ------------------ #
# -------------------------------------------------- #
# finds the nearest photometry and interpolates mags #
# to find values at the time of the spectroscopic #
# observations. Calculates the mag that would be #
# observed from the spectra and calculates the scale #
# factor to bring them into agreement. Saves the #
# data in the scaling matrix. #
# -------------------------------------------------- #
def scaling_Matrix(spectra, extensions, badQC, noPhotometry, photo, bands, filters):
# scale factors for each extension saved in the following form
# gScale = scaling[0,:] gError = scaling[3,:]
# rScale = scaling[1,:] rError = scaling[4,:]
# iScale = scaling[2,:] iError = scaling[5,:]
# inCoaddWeather = scaling[6,:]
# inCoaddPhoto = scaling[7,:]
# gMag = scaling[8,:] gMagError = scaling[9,:] (interpolated from neighbouring observations)
# rMag = scaling[10,:] rMagError = scaling[11,:]
# iMag = scaling[12,:] iMagError = scaling[13,:]
scaling = np.zeros((14, spectra.numEpochs))
# Judge goodness of spectra
for e in range(spectra.numEpochs):
if e in badQC:
scaling[6, e] = False
else:
scaling[6, e] = True
if e in noPhotometry:
scaling[7, e] = False
else:
scaling[7, e] = True
ozdesPhoto = np.zeros((3, spectra.numEpochs))
desPhoto = np.zeros((3, spectra.numEpochs))
ozdesPhotoU = np.zeros((3, spectra.numEpochs))
desPhotoU = np.zeros((3, spectra.numEpochs))
filterCurves = readFilterCurves(bands, filters)
nevermind = []
for e in extensions:
# Find OzDES photometry
ozdesPhoto[0, e], ozdesPhotoU[0, e] = computeABmag(filterCurves[bands[0]].trans, filterCurves[bands[0]].wave,
spectra.wavelength, spectra.flux[:, e],
spectra.variance[:, e])
ozdesPhoto[1, e], ozdesPhotoU[1, e] = computeABmag(filterCurves[bands[1]].trans, filterCurves[bands[1]].wave,
spectra.wavelength, spectra.flux[:, e],
spectra.variance[:, e])
ozdesPhoto[2, e], ozdesPhotoU[2, e] = computeABmag(filterCurves[bands[2]].trans, filterCurves[bands[2]].wave,
spectra.wavelength, spectra.flux[:, e],
spectra.variance[:, e])
        # Sometimes the total flux in the band goes to zero, which creates issues further down the line and
        # is most noticeable when the calculated magnitude is nan. This can happen because the data are very
        # noisy, or because of the occasional negative spectrum, a known artifact that is more common in early
        # OzDES runs. Observations that are not already cut by their quality flag start getting ignored here;
        # the extensions ignored are eventually saved alongside the badQC extensions.
if np.isnan(ozdesPhoto[:, e]).any() == True:
nevermind.append(e)
# Find DES photometry
desPhoto[:, e], desPhotoU[:, e] = des_photo(photo, spectra.dates[e], bands)
scaling[8, e] = desPhoto[0, e]
scaling[10, e] = desPhoto[1, e]
scaling[12, e] = desPhoto[2, e]
scaling[9, e] = desPhotoU[0, e]
scaling[11, e] = desPhotoU[1, e]
scaling[13, e] = desPhotoU[2, e]
# Find Scale Factor
scaling[0, e], scaling[3, e] = scale_factors(desPhoto[0, e] - ozdesPhoto[0, e],
desPhotoU[0, e] + ozdesPhotoU[0, e])
scaling[1, e], scaling[4, e] = scale_factors(desPhoto[1, e] - ozdesPhoto[1, e],
desPhotoU[1, e] + ozdesPhotoU[1, e])
scaling[2, e], scaling[5, e] = scale_factors(desPhoto[2, e] - ozdesPhoto[2, e],
desPhotoU[2, e] + ozdesPhotoU[2, e])
return nevermind, scaling
# -------------------------------------------------- #
# --------------- interpolatePhot ----------------- #
# -------------------------------------------------- #
# Performs linear interpolation and propagates the #
# uncertainty to return you a variance. #
# -------------------------------------------------- #
def interpolatePhot(x, y, s, val):
# takes sigma returns variance
# x - x data points (list)
# y - y data points (list)
# s - sigma on y data points (list)
# val - x value to interpolate to (number)
mag = y[0] + (val - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
err = s[0] ** 2 + (s[0] ** 2 + s[1] ** 2) * ((val - x[0]) / (x[1] - x[0])) ** 2
return mag, err
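# -------------------------------------------------- #
# Usage sketch (made-up photometry): interpolate a   #
# magnitude between two nights bracketing a          #
# spectroscopic epoch; returns (mag, variance).      #
# -------------------------------------------------- #
def _example_interpolatePhot():
    x = [57000.0, 57010.0]    # MJD of the two photometric epochs
    y = [20.00, 20.20]        # magnitudes
    s = [0.02, 0.03]          # magnitude uncertainties (sigma)
    return interpolatePhot(x, y, s, 57004.0)    # mag ~ 20.08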
# -------------------------------------------------- #
# ------------------ des_photo -------------------- #
# -------------------------------------------------- #
# Finds nearest photometry on both sides of spectral #
# observations and interpolates to find value at the #
# time of the spectral observation #
# -------------------------------------------------- #
def des_photo(photo, spectral_mjd, bands):
"""Takes in an mjd from the spectra, looks through a light curve file to find the nearest photometric epochs and
performs linear interpolation to get estimate at date, return the photo mags."""
# Assumes dates are in chronological order!!!
for l in range(len(photo['Date']) - 1):
if photo['Band'][l] == bands[0] and photo['Date'][l] < spectral_mjd < photo['Date'][l + 1]:
g_date_v = np.array([photo['Date'][l], photo['Date'][l + 1]])
g_mag_v = np.array([photo['Mag'][l], photo['Mag'][l + 1]])
g_err_v = np.array([photo['Mag_err'][l], photo['Mag_err'][l + 1]])
if photo['Band'][l] == bands[1] and photo['Date'][l] < spectral_mjd < photo['Date'][l + 1]:
r_date_v = np.array([photo['Date'][l], photo['Date'][l + 1]])
r_mag_v = np.array([photo['Mag'][l], photo['Mag'][l + 1]])
r_err_v = np.array([photo['Mag_err'][l], photo['Mag_err'][l + 1]])
if photo['Band'][l] == bands[2] and photo['Date'][l] < spectral_mjd < photo['Date'][l + 1]:
i_date_v = np.array([photo['Date'][l], photo['Date'][l + 1]])
i_mag_v = np.array([photo['Mag'][l], photo['Mag'][l + 1]])
i_err_v = np.array([photo['Mag_err'][l], photo['Mag_err'][l + 1]])
g_mag, g_mag_err = interpolatePhot(g_date_v, g_mag_v, g_err_v, spectral_mjd)
r_mag, r_mag_err = interpolatePhot(r_date_v, r_mag_v, r_err_v, spectral_mjd)
i_mag, i_mag_err = interpolatePhot(i_date_v, i_mag_v, i_err_v, spectral_mjd)
return [g_mag, r_mag, i_mag], [g_mag_err, r_mag_err, i_mag_err]
# -------------------------------------------------- #
# ---------------- scale_factors ------------------ #
# -------------------------------------------------- #
# Calculates the scale factor and variance needed to #
# change spectroscopically derived magnitude to the #
# observed photometry. #
# -------------------------------------------------- #
def scale_factors(mag_diff, mag_diff_var):
# takes and returns variance
flux_ratio = np.power(10., 0.4 * mag_diff) # f_synthetic/f_photometry
scale_factor = (1. / flux_ratio)
scale_factor_sigma = mag_diff_var * (scale_factor * 0.4 * 2.3) ** 2 # ln(10) ~ 2.3
return scale_factor, scale_factor_sigma
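# -------------------------------------------------- #
# Usage sketch (made-up values): scale factor when   #
# the synthetic magnitude is 0.5 mag brighter than   #
# the photometry, with a small variance on the       #
# magnitude difference.                              #
# -------------------------------------------------- #
def _example_scale_factors():
    return scale_factors(0.5, 0.02 ** 2 + 0.03 ** 2)    # scale factor ~ 0.63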
# -------------------------------------------------- #
# ----------------- warp_spectra ------------------ #
# -------------------------------------------------- #
# Fits polynomial to scale factors and estimates #
# associated uncertainties with gaussian processes. #
# If the plotFlag variable is not False it will save #
# some diagnostic plots. #
# -------------------------------------------------- #
def warp_spectra(scaling, scaleErr, flux, variance, wavelength, centers, plotFlag):
    # associate scale factors with the band centers and fit a quadratic (k=2) spline to form the scale function.
scale = InterpolatedUnivariateSpline(centers, scaling, k=2)
fluxScale = flux * scale(wavelength)
# add in Gaussian process to estimate uncertainties, /10**-17 because it gets a bit panicky if you use small numbers
stddev = (scaleErr ** 0.5) / 10 ** -17
scale_v = scaling / 10 ** -17
kernel = kernels.RBF(length_scale=300, length_scale_bounds=(.01, 2000.0))
gp = GaussianProcessRegressor(kernel=kernel, alpha=stddev**2)
xprime = np.atleast_2d(centers).T
yprime = np.atleast_2d(scale_v).T
gp.fit(xprime, yprime)
xplot_prime = np.atleast_2d(wavelength).T
y_pred, sigma = gp.predict(xplot_prime, return_std=True)
y_pred = y_pred[:,0]
sigModel = (sigma/y_pred)*scale(wavelength)
# now scale the original variance and combine with scale factor uncertainty
varScale = variance * pow(scale(wavelength), 2) + sigModel ** 2
if plotFlag != False:
figa, ax1a, ax2a = ozplot.makeFigDouble(plotFlag, "Wavelength ($\AA$)", "f$_\lambda$ (arbitrary units)",
"f$_\lambda$ (10$^{-17}$ erg/s/cm$^2$/$\AA$)", [wavelength[0], wavelength[-1]])
ax1a.plot(wavelength, flux, color='black', label="Before Calibration")
ax1a.legend(loc=1, frameon=False, prop={'size': 20})
ax2a.plot(wavelength, fluxScale / 10 ** -17, color='black', label="After Calibration")
ax2a.legend(loc=1, frameon=False, prop={'size': 20})
plt.savefig(plotFlag + "_beforeAfter.png")
plt.close(figa)
figb, ax1b, ax2b = ozplot.makeFigDouble(plotFlag, "Wavelength ($\AA$)", "f$_\lambda$ (10$^{-17}$ erg/s/cm$^2$/$\AA$)",
"% Uncertainty", [wavelength[0], wavelength[-1]])
ax1b.plot(wavelength, fluxScale / 10 ** -17, color='black')
ax2b.plot(wavelength, 100*abs(pow(varScale, 0.5)/fluxScale), color='black', linestyle='-', label='Total')
ax2b.plot(wavelength, 100*abs(sigModel/fluxScale), color='blue', linestyle='-.', label='Warping')
ax2b.legend(loc=1, frameon=False, prop={'size': 20})
ax2b.set_ylim([0, 50])
plt.savefig(plotFlag + "_uncertainty.png")
plt.close(figb)
figc, axc = ozplot.makeFigSingle(plotFlag, "Wavelength ($\AA$)", "Scale Factor (10$^{-17}$ erg/s/cm$^2$/$\AA$/counts)")
axc.plot(wavelength, scale(wavelength)/10**-17, color='black')
axc.errorbar(centers, scaling/10**-17, yerr=stddev, fmt='s', color='mediumblue')
plt.savefig(plotFlag + "_scalefactors.png")
plt.close(figc)
return fluxScale, varScale
# -------------------------------------------------- #
# ------------ create_output_single --------------- #
# -------------------------------------------------- #
# Outputs the warped spectra to a new fits file. #
# -------------------------------------------------- #
def create_output_single(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName, outBase,
redshift):
outName = outBase + obj_name + "_scaled.fits"
print("Saving Data to " + outName)
hdulist = fits.HDUList(fits.PrimaryHDU())
noPhotometryExt = []
if len(noPhotometry) > 0:
for i in range(len(noPhotometry)):
noPhotometryExt.append(spectra.ext[noPhotometry[i]])
badQCExt = []
if len(badQC) > 0:
for i in range(len(badQC)):
badQCExt.append(spectra.ext[badQC[i]])
index = 0
# Create an HDU for each night
for i in extensions:
header = fits.Header()
header['SOURCE'] = obj_name
header['RA'] = spectra.RA
header['DEC'] = spectra.DEC
header['FIELD'] = spectra.field
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
header['EPOCHS'] = len(extensions)
header['z'] = redshift[0]
# save the names of the input data and the extensions ignored
header['SFILE'] = spectraName
header['PFILE'] = photoName
header['NOPHOTO'] = ','.join(map(str, noPhotometryExt))
header['BADQC'] = ','.join(map(str, badQCExt))
# save the original spectrum's extension number and some other details
header["EXT"] = spectra.ext[i]
header["UTMJD"] = spectra.dates[i]
header["EXPOSE"] = spectra.exposed[i]
header["QC"] = spectra.qc[i]
# save scale factors/uncertainties
header["SCALEG"] = scaling[0, i]
header["ERRORG"] = scaling[3, i]
header["SCALER"] = scaling[1, i]
header["ERRORR"] = scaling[4, i]
header["SCALEI"] = scaling[2, i]
header["ERRORI"] = scaling[5, i]
# save photometry/uncertainties used to calculate scale factors
header["MAGG"] = scaling[8, i]
header["MAGUG"] = scaling[9, i]
header["MAGR"] = scaling[10, i]
header["MAGUR"] = scaling[11, i]
header["MAGI"] = scaling[12, i]
header["MAGUI"] = scaling[13, i]
if index == 0:
hdulist[0].header['SOURCE'] = obj_name
hdulist[0].header['RA'] = spectra.RA
hdulist[0].header['DEC'] = spectra.DEC
hdulist[0].header['CRPIX1'] = spectra.crpix1
hdulist[0].header['CRVAL1'] = spectra.crval1
hdulist[0].header['CDELT1'] = spectra.cdelt1
hdulist[0].header['CTYPE1'] = 'wavelength'
hdulist[0].header['CUNIT1'] = 'angstrom'
hdulist[0].header['EPOCHS'] = len(extensions)
# save the names of the input data and the extensions ignored
hdulist[0].header['SFILE'] = spectraName
hdulist[0].header['PFILE'] = photoName
hdulist[0].header['NOPHOTO'] = ','.join(map(str, noPhotometryExt))
hdulist[0].header['BADQC'] = ','.join(map(str, badQCExt))
# save the original spectrum's extension number and some other details
hdulist[0].header["EXT"] = spectra.ext[i]
hdulist[0].header["UTMJD"] = spectra.dates[i]
hdulist[0].header["EXPOSE"] = spectra.exposed[i]
hdulist[0].header["QC"] = spectra.qc[i]
# save scale factors/uncertainties
hdulist[0].header["SCALEG"] = scaling[0, i]
hdulist[0].header["ERRORG"] = scaling[3, i]
hdulist[0].header["SCALER"] = scaling[1, i]
hdulist[0].header["ERRORR"] = scaling[4, i]
hdulist[0].header["SCALEI"] = scaling[2, i]
hdulist[0].header["ERRORI"] = scaling[5, i]
# save photometry/uncertainties used to calculate scale factors
hdulist[0].header["MAGG"] = scaling[8, i]
hdulist[0].header["MAGUG"] = scaling[9, i]
hdulist[0].header["MAGR"] = scaling[10, i]
hdulist[0].header["MAGUR"] = scaling[11, i]
hdulist[0].header["MAGI"] = scaling[12, i]
hdulist[0].header["MAGUI"] = scaling[13, i]
hdulist[0].data = spectra.flux[:, i]
hdulist.append(fits.ImageHDU(data=spectra.variance[:, i], header=header))
hdulist.append(fits.ImageHDU(data=spectra.badpix[:, i], header=header))
index = 2
else:
hdulist.append(fits.ImageHDU(data=spectra.flux[:, i], header=header))
hdulist.append(fits.ImageHDU(data=spectra.variance[:, i], header=header))
hdulist.append(fits.ImageHDU(data=spectra.badpix[:, i], header=header))
hdulist.writeto(outName, overwrite=True)
hdulist.close()
return
# -------------------------------------------------- #
# ------------- create_output_coadd --------------- #
# -------------------------------------------------- #
# Outputs the warped and coadded spectra to a new #
# fits file. #
# -------------------------------------------------- #
def create_output_coadd(obj_name, runList, fluxArray, varianceArray, badpixArray, extensions, scaling, spectra, redshift
,badQC, noPhotometry, spectraName, photoName, outBase, coaddFlag):
outName = outBase + obj_name + "_scaled_" + coaddFlag + ".fits"
hdulist = fits.HDUList(fits.PrimaryHDU())
noPhotometryExt = []
if len(noPhotometry) > 0:
for i in range(len(noPhotometry)):
noPhotometryExt.append(spectra.ext[noPhotometry[i]])
badQCExt = []
if len(badQC) > 0:
for i in range(len(badQC)):
badQCExt.append(spectra.ext[badQC[i]])
#print("Output Filename: %s \n" % (outName))
# First save the total coadded spectrum for the source to the primary extension
hdulist[0].data = fluxArray[:, 0]
hdulist[0].header['CRPIX1'] = spectra.crpix1
hdulist[0].header['CRVAL1'] = spectra.crval1
hdulist[0].header['CDELT1'] = spectra.cdelt1
hdulist[0].header['CTYPE1'] = 'wavelength'
hdulist[0].header['CUNIT1'] = 'angstrom'
hdulist[0].header['SOURCE'] = obj_name
hdulist[0].header['RA'] = spectra.RA
hdulist[0].header['DEC'] = spectra.DEC
hdulist[0].header['FIELD'] = spectra.field
hdulist[0].header['OBSNUM'] = len(runList)
hdulist[0].header['z'] = redshift[0]
hdulist[0].header['SFILE'] = spectraName
hdulist[0].header['PFILE'] = photoName
hdulist[0].header['METHOD'] = coaddFlag
hdulist[0].header['NOPHOTO'] = ','.join(map(str, noPhotometryExt))
hdulist[0].header['BADQC'] = ','.join(map(str, badQCExt))
# First extension is the total coadded variance
header = fits.Header()
header['EXTNAME'] = 'VARIANCE'
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
hdulist.append(fits.ImageHDU(data=varianceArray[:, 0], header=header))
# Second Extension is the total bad pixel map
header = fits.Header()
header['EXTNAME'] = 'BadPix'
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
hdulist.append(fits.ImageHDU(data=badpixArray[:, 0], header=header))
# Create an HDU for each night
index1 = 1
for k in runList:
index = 0
date = 0
header = fits.Header()
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
header['RUN'] = k
for i in extensions:
here = False
if coaddFlag == 'Run':
if spectra.run[i] == k:
here = True
if coaddFlag == 'Date':
if int(spectra.dates[i]) == k:
here = True
if here == True:
head0 = "EXT" + str(index)
header[head0] = spectra.ext[i]
head1 = "UTMJD" + str(index)
header[head1] = spectra.dates[i]
date += spectra.dates[i]
head2 = "EXPOSE" + str(index)
header[head2] = spectra.exposed[i]
head3 = "QC" + str(index)
header[head3] = spectra.qc[i]
head4 = "SCALEG" + str(index)
header[head4] = scaling[0, i]
head5 = "ERRORG" + str(index)
header[head5] = scaling[3, i]
head6 = "SCALER" + str(index)
header[head6] = scaling[1, i]
head7 = "ERRORR" + str(index)
header[head7] = scaling[4, i]
head8 = "SCALEI" + str(index)
header[head8] = scaling[2, i]
head9 = "ERRORI" + str(index)
header[head9] = scaling[5, i]
head10 = "MAGG" + str(index)
header[head10] = scaling[8, i]
head11 = "MAGUG" + str(index)
header[head11] = scaling[9, i]
head12 = "MAGR" + str(index)
header[head12] = scaling[10, i]
head13 = "MAGUR" + str(index)
header[head13] = scaling[11, i]
head14 = "MAGI" + str(index)
header[head14] = scaling[12, i]
head15 = "MAGUI" + str(index)
header[head15] = scaling[13, i]
index += 1
if date > 0:
header['OBSNUM'] = index
header['AVGDATE'] = date / index
hdu_flux = fits.ImageHDU(data=fluxArray[:, index1], header=header)
hdu_fluxvar = fits.ImageHDU(data=varianceArray[:, index1], header=header)
hdu_badpix = fits.ImageHDU(data=badpixArray[:, index1], header=header)
hdulist.append(hdu_flux)
hdulist.append(hdu_fluxvar)
hdulist.append(hdu_badpix)
index1 += 1
hdulist.writeto(outName, overwrite=True)
hdulist.close()
return
# -------------------------------------------------- #
# ----------------- coadd_output ------------------ #
# -------------------------------------------------- #
# Coadds the observations based on run or night. #
# -------------------------------------------------- #
def coadd_output(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName, outBase, plotFlag,
coaddFlag, redshift):
# Get a list of items (dates/runs) over which all observations will be coadded
coaddOver = []
for e in extensions:
        # OzDES runs 7,8 were close together in time and run 8 had bad weather, so there were observations of only
        # 1 field - coadd with run 7 to get better signal to noise
if spectra.run[e] == 8:
spectra.run[e] = 7
if coaddFlag == 'Run':
if spectra.run[e] not in coaddOver:
coaddOver.append(spectra.run[e])
if coaddFlag == 'Date':
if int(spectra.dates[e]) not in coaddOver:
coaddOver.append(int(spectra.dates[e]))
coaddFlux = np.zeros((5000, len(coaddOver) + 1))
coaddVar = np.zeros((5000, len(coaddOver) + 1))
coaddBadPix = np.zeros((5000, len(coaddOver) + 1))
speclistC = [] # For total coadd of observation
index = 1
for c in coaddOver:
speclist = []
for e in extensions:
opt = ''
if coaddFlag == 'Run':
opt = spectra.run[e]
if coaddFlag == 'Date':
opt = int(spectra.dates[e])
if opt == c:
speclist.append(SingleSpec(obj_name, spectra.wavelength, spectra.flux[:,e], spectra.variance[:,e],
spectra.badpix[:,e]))
speclistC.append(SingleSpec(obj_name, spectra.wavelength, spectra.flux[:,e], spectra.variance[:,e],
spectra.badpix[:,e]))
if len(speclist) > 1:
runCoadd = outlier_reject_and_coadd(obj_name, speclist)
coaddFlux[:, index] = runCoadd.flux
coaddVar[:, index] = runCoadd.fluxvar
coaddBadPix[:,index] = runCoadd.isbad.astype('uint8')
if len(speclist) == 1:
coaddFlux[:, index] = speclist[0].flux
coaddVar[:, index] = speclist[0].fluxvar
coaddBadPix[:, index] = speclist[0].isbad.astype('uint8')
index += 1
if len(speclistC) > 1:
allCoadd = outlier_reject_and_coadd(obj_name, speclistC)
coaddFlux[:, 0] = allCoadd.flux
coaddVar[:, 0] = allCoadd.fluxvar
coaddBadPix[:, 0] = allCoadd.isbad.astype('uint8')
if len(speclistC) == 1:
coaddFlux[:, 0] = speclistC[0].flux
coaddVar[:, 0] = speclistC[0].fluxvar
coaddBadPix[:, 0] = speclistC[0].isbad.astype('uint8')
mark_as_bad(coaddFlux, coaddVar)
create_output_coadd(obj_name, coaddOver, coaddFlux, coaddVar, coaddBadPix, extensions, scaling, spectra, redshift,
badQC, noPhotometry, spectraName, photoName, outBase, coaddFlag)
return
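# ----------------------------------------------------------------------------
# Hedged illustration only: outlier_reject_and_coadd is not shown in this
# excerpt, so the sketch below merely illustrates the general idea of an
# inverse-variance-weighted coadd with simple sigma clipping. The real OzDES
# routine may differ; _sketch_ivar_coadd is a hypothetical name.
import numpy as np

def _sketch_ivar_coadd(fluxes, variances, clip_sigma=5.0):
    """Coadd spectra of shape (nbins, nepochs), masking bins that deviate
    from the per-bin median by more than clip_sigma local standard deviations."""
    fluxes = np.asarray(fluxes, dtype=float)
    variances = np.asarray(variances, dtype=float)
    median = np.nanmedian(fluxes, axis=1, keepdims=True)
    scatter = np.nanstd(fluxes, axis=1, keepdims=True) + 1e-12
    bad = (np.abs(fluxes - median) > clip_sigma * scatter) | ~np.isfinite(fluxes)
    with np.errstate(divide="ignore", invalid="ignore"):
        weights = np.where(bad | ~np.isfinite(variances) | (variances <= 0),
                           0.0, 1.0 / variances)
    wsum = np.maximum(weights.sum(axis=1), 1e-12)
    coadd_flux = (weights * np.where(weights > 0, fluxes, 0.0)).sum(axis=1) / wsum
    coadd_var = 1.0 / wsum
    return coadd_flux, coadd_var
# ----------------------------------------------------------------------------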
# -------------------------------------------------- #
# Modified from code originally provided by #
# <NAME> #
# -------------------------------------------------- #
# ------------------ mark_as_bad ------------------- #
# -------------------------------------------------- #
# Occasionally you get some big spikes in the data  #
# that you do not want messing with your magnitude  #
# calculations. Remove these by flagging single     #
# bins whose flux or variance is more than 4.5      #
# times the local average and setting them to NaN.  #
# NaNs will be interpolated over. The threshold     #
# should balance removing extreme outliers against  #
# clipping ordinary noise. (A compact sketch of     #
# this step follows this code excerpt.)             #
# -------------------------------------------------- #
def mark_as_bad(fluxes, variances):
number = int(fluxes.size/fluxes.shape[0])
for epoch in range(number):
if number == 1:
flux = fluxes[:]
variance = variances[:]
else:
flux = fluxes[:, epoch]
variance = variances[:, epoch]
nBins = len(flux)
# define the local average in flux and variance to compare outliers to
for i in range(nBins):
if i < 50:
avg = np.nanmean(variance[0:99])
avgf = np.nanmean(flux[0:99])
elif i > nBins - 50:
avg = np.nanmean(variance[i-50:nBins-1])
avgf = np.nanmean(flux[i-50:nBins-1])
else:
avg = np.nanmean(variance[i-50:i+50])
avgf =
|
np.nanmean(flux[i-50:i+50])
|
numpy.nanmean
|
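# ----------------------------------------------------------------------------
# Hedged sketch of the spike-masking idea described in the mark_as_bad header
# above (the original function is truncated at this point of the excerpt): each
# bin is compared with a running mean over roughly 100 neighbouring bins and is
# set to NaN when its flux or variance exceeds 4.5x that local average.
# _sketch_mark_as_bad is a hypothetical name, not the pipeline routine.
import numpy as np

def _sketch_mark_as_bad(flux, variance, factor=4.5, half_window=50):
    flux = np.array(flux, dtype=float)
    variance = np.array(variance, dtype=float)
    n = len(flux)
    for i in range(n):
        lo, hi = max(0, i - half_window), min(n, i + half_window)
        local_flux = np.nanmean(flux[lo:hi])
        local_var = np.nanmean(variance[lo:hi])
        if flux[i] > factor * local_flux or variance[i] > factor * local_var:
            flux[i] = np.nan
            variance[i] = np.nan
    return flux, variance
# ----------------------------------------------------------------------------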
import os
import copy
import math
import pickle
import logging
import numpy as np
import networkx as nx
import scipy.sparse as sp
from copy import deepcopy
from datetime import datetime
from matplotlib import pyplot as plt
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, average_precision_score
from dgl.data.citation_graph import CoraGraphDataset, CiteseerGraphDataset, PubmedGraphDataset
from ogb.linkproppred import Evaluator, PygLinkPropPredDataset
from ogb.linkproppred import DglLinkPropPredDataset
def eval_ep_batched(logits, labels, n_pos):
# roc-auc and ap
roc_auc = roc_auc_score(labels, logits)
ap_score = average_precision_score(labels, logits)
results = {'auc': roc_auc,
'ap': ap_score}
# hits@K
evaluator = Evaluator(name='ogbl-ddi')
for K in [20, 50, 100]:
evaluator.K = K
hits = evaluator.eval({
'y_pred_pos': logits[:n_pos],
'y_pred_neg': logits[n_pos:],
})[f'hits@{K}']
results[f'hits@{K}'] = hits
return results
def eval_ep(A_pred, edges, edges_false):
preds = A_pred[edges.T]
preds_neg = A_pred[edges_false.T]
logits = np.hstack([preds, preds_neg])
labels = np.hstack([np.ones(preds.size(0)), np.zeros(preds_neg.size(0))])
# roc-auc and ap
roc_auc = roc_auc_score(labels, logits)
ap_score = average_precision_score(labels, logits)
results = {'auc': roc_auc,
'ap': ap_score}
# hits@K
evaluator = Evaluator(name='ogbl-ddi')
for K in [20, 50, 100]:
evaluator.K = K
hits = evaluator.eval({
'y_pred_pos': preds,
'y_pred_neg': preds_neg,
})[f'hits@{K}']
results[f'hits@{K}'] = hits
return results
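# ----------------------------------------------------------------------------
# Hedged usage sketch for the evaluation helpers above: score a synthetic batch
# of positive and negative pairs and feed it to eval_ep_batched. It relies only
# on what this module already imports (numpy, sklearn metrics, the ogbl-ddi
# Evaluator); _sketch_eval_usage is a hypothetical name.
def _sketch_eval_usage(n_pos=100, n_neg=400, seed=0):
    rng = np.random.default_rng(seed)
    pos_scores = rng.uniform(0.5, 1.0, n_pos)   # higher scores for true edges
    neg_scores = rng.uniform(0.0, 0.7, n_neg)   # lower scores for non-edges
    logits = np.hstack([pos_scores, neg_scores])
    labels = np.hstack([np.ones(n_pos), np.zeros(n_neg)])
    return eval_ep_batched(logits, labels, n_pos)  # dict with auc, ap, hits@K
# ----------------------------------------------------------------------------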
def normalize_sp(adj_matrix):
# normalize adj by D^{-1/2}AD^{-1/2} for scipy sparse matrix input
degrees = np.array(adj_matrix.sum(1))
deg_inv_sqrt = np.power(degrees, -0.5).flatten()
deg_inv_sqrt[~np.isfinite(deg_inv_sqrt)] = 0.0  # zero-degree nodes give inf; zero them out before building the diagonal
degree_mat_inv_sqrt = sp.diags(deg_inv_sqrt)
adj_norm = degree_mat_inv_sqrt @ adj_matrix @ degree_mat_inv_sqrt
return adj_norm
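# ----------------------------------------------------------------------------
# Hedged usage sketch for normalize_sp on a tiny 3-node path graph: the
# symmetric normalization D^{-1/2} A D^{-1/2} turns each edge weight into
# 1/sqrt(d_i * d_j), so the off-diagonal entries below come out near 0.707.
def _sketch_normalize_demo():
    a = sp.csr_matrix(np.array([[0., 1., 0.],
                                [1., 0., 1.],
                                [0., 1., 0.]]))
    a_norm = normalize_sp(a)
    dense = a_norm.toarray() if sp.issparse(a_norm) else np.asarray(a_norm)
    print(np.round(dense, 3))
# ----------------------------------------------------------------------------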
def load_data(args, logger):
path = args.datapath
ds = args.dataset
if ds.startswith('ogbl'):
dataset = DglLinkPropPredDataset(name=ds, root=args.datapath)
graph = dataset[0]
adj_train = graph.adjacency_matrix(scipy_fmt='csr')
g = nx.from_scipy_sparse_matrix(adj_train)
print('density',nx.density(g))
print('edges:', len(g.edges()) )
print('nodes:', len(g.nodes()) )
adj_train.setdiag(1)
if 'feat' in graph.ndata:
features = graph.ndata['feat']
dim_feat = features.shape[-1]
else:
# construct one-hot degree features
degrees = torch.LongTensor(adj_train.sum(0) - 1)
indices = torch.cat((torch.arange(adj_train.shape[0]).unsqueeze(0), degrees), dim=0)
features = torch.sparse.FloatTensor(indices, torch.ones(adj_train.shape[0])).to_dense().numpy()
features = torch.Tensor(features)
# using adj_train as adj_label as training loss is only calculated with train_pairs (excluding val/test edges and no_edges)
adj_label = copy.deepcopy(adj_train)
# load given train/val/test edges and no_edges
split_edge = dataset.get_edge_split()
val_split, test_split = split_edge["valid"], split_edge["test"]
val_edges, val_edges_false = val_split['edge'].numpy(), val_split['edge_neg'].numpy()
test_edges, test_edges_false = test_split['edge'].numpy(), test_split['edge_neg'].numpy()
# get training node pairs (edges and no-edges)
if os.path.exists(f'{path}{ds}_trainpairs.pkl'):
train_pairs = pickle.load(open(f'{path}{ds}_trainpairs.pkl', 'rb'))
else:
train_mask = np.ones(adj_train.shape)
for edges_tmp in [val_edges, val_edges_false, test_edges, test_edges_false]:
train_mask[edges_tmp.T[0], edges_tmp.T[1]] = 0
train_mask[edges_tmp.T[1], edges_tmp.T[0]] = 0
train_pairs = np.asarray(sp.triu(train_mask, 1).nonzero()).T
pickle.dump(train_pairs, open(f'{path}{ds}_trainpairs.pkl', 'wb'))
else:
if args.dataset in ['cora', 'citeseer', 'pubmed']:
# adj matrix (with self-loop): sp.csr_matrix
adj_label = pickle.load(open(f'{path}{ds}_adj.pkl', 'rb'))
# node features: sp.lil_matrix
features = pickle.load(open(f'{path}{ds}_feat.pkl', 'rb'))
if isinstance(features, sp.lil.lil_matrix):
features = features.toarray()
features = torch.FloatTensor(features)
dim_feat = features.shape[-1]
elif args.dataset == 'facebook':
filename = f'data/{args.dataset}.txt'
g = nx.read_edgelist(filename,create_using=nx.Graph(), nodetype = int, data=(("weight", float),))
adj_label = nx.adjacency_matrix(g, nodelist = sorted(g.nodes()))
adj_label = (adj_label > 0).astype('int') # to binary
#load tvt_edges
tvt_edges_file = f'{args.datapath}{args.dataset}_tvtEdges_val{args.val_frac}test{args.test_frac}.pkl'
adj_train, train_pairs, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj_label, args.val_frac, args.test_frac, tvt_edges_file, logger)
if args.dataset == 'facebook':
degrees = np.array(adj_train.sum(axis=1)).flatten().astype('int')
dim_feat = degrees.max()
one_hot_feat = np.eye(dim_feat)[degrees - 1]
one_hot_feat = one_hot_feat.reshape((adj_train.shape[0], dim_feat))
features = torch.FloatTensor(one_hot_feat)
dim_feat = features.shape[-1]
# print('dim feature, ', dim_feat)
return adj_label, features, dim_feat, adj_train, train_pairs, val_edges, val_edges_false, test_edges, test_edges_false
def mask_test_edges(adj_orig, val_frac, test_frac, filename, logger):
# NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
if os.path.exists(filename):
adj_train, train_pairs, val_edges, val_edges_false, test_edges, test_edges_false = pickle.load(open(filename, 'rb'))
logger.info(f'loaded cached val and test edges with fracs of {val_frac} and {test_frac}')
return adj_train, train_pairs, val_edges, val_edges_false, test_edges, test_edges_false
# Remove diagonal elements
adj = deepcopy(adj_orig)
# set diag as all zero
adj.setdiag(0)
adj.eliminate_zeros()
# Check that diag is zero:
assert np.diag(adj.todense()).sum() == 0
adj_triu = sp.triu(adj, 1)
# adj_tuple = sparse_to_tuple(adj_triu)
# edges = adj_tuple[0]
edges = sparse_to_tuple(adj_triu)[0]
edges_all = sparse_to_tuple(adj)[0]
num_test = int(np.floor(edges.shape[0] * test_frac))
num_val = int(np.floor(edges.shape[0] * val_frac))
all_edge_idx = list(range(edges.shape[0]))
np.random.shuffle(all_edge_idx)
val_edge_idx = all_edge_idx[:num_val]
test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
test_edges = edges[test_edge_idx]
val_edges = edges[val_edge_idx]
train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)
noedge_mask = np.ones(adj.shape) - adj_orig
noedges = np.asarray(sp.triu(noedge_mask, 1).nonzero()).T
all_edge_idx = list(range(noedges.shape[0]))
np.random.shuffle(all_edge_idx)
val_edge_idx = all_edge_idx[:num_val]
test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
test_edges_false = noedges[test_edge_idx]
val_edges_false = noedges[val_edge_idx]
# the commented-out block below for sampling no-edges was replaced by the vectorized lines above
"""
test_edges_false = []
while len(test_edges_false) < len(test_edges):
idx_i = np.random.randint(0, adj.shape[0])
idx_j = np.random.randint(0, adj.shape[0])
if idx_i == idx_j:
continue
if ismember([idx_i, idx_j], edges_all):
continue
if test_edges_false:
if ismember([idx_j, idx_i], np.array(test_edges_false)):
continue
if ismember([idx_i, idx_j], np.array(test_edges_false)):
continue
test_edges_false.append([idx_i, idx_j])
test_edges_false = np.asarray(test_edges_false).astype("int32")
val_edges_false = []
while len(val_edges_false) < len(val_edges):
idx_i = np.random.randint(0, adj.shape[0])
idx_j = np.random.randint(0, adj.shape[0])
if idx_i == idx_j:
continue
if ismember([idx_i, idx_j], train_edges):
continue
if ismember([idx_j, idx_i], train_edges):
continue
if ismember([idx_i, idx_j], val_edges):
continue
if ismember([idx_j, idx_i], val_edges):
continue
if val_edges_false:
if ismember([idx_j, idx_i], np.array(val_edges_false)):
continue
if ismember([idx_i, idx_j], np.array(val_edges_false)):
continue
val_edges_false.append([idx_i, idx_j])
val_edges_false = np.asarray(val_edges_false).astype("int32")
"""
def ismember(a, b, tol=5):
rows_close = np.all(
|
np.round(a - b[:, None], tol)
|
numpy.round
|
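# ----------------------------------------------------------------------------
# Hedged sketch: the ismember helper above is truncated at this point of the
# excerpt. In the widely used GAE/VGAE preprocessing it typically continues as
# below, broadcasting one candidate pair against an (n, 2) edge array; treat
# this as an assumption about the missing lines, not the verbatim original.
import numpy as np

def _sketch_ismember(a, b, tol=5):
    rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
    return np.any(rows_close)

# e.g. _sketch_ismember(np.array([0, 3]), np.array([[1, 2], [0, 3]])) -> True
# ----------------------------------------------------------------------------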
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
from scipy.io import loadmat
import pandas as pd
from ...model import File
from ...utils import first
bold_filedict = {"datatype": "func", "suffix": "bold"}
def parse_tsv_condition_file(filepath):
conditions = []
onsets = []
durations = []
dtype = {
"subject_id": str,
"session_id": str,
"participant_id": str,
"trial_type": str,
}
data = pd.read_csv(filepath, sep="\t", na_values="n/a", dtype=dtype)
groupby = data.groupby(by="trial_type")
conditions.extend(groupby.groups.keys())
onset_dict = groupby["onset"].apply(list).to_dict()
duration_dict = groupby["duration"].apply(list).to_dict()
onsets.extend(onset_dict[c] for c in conditions)
durations.extend(duration_dict[c] for c in conditions)
return conditions, onsets, durations
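# ----------------------------------------------------------------------------
# Hedged usage sketch for parse_tsv_condition_file: write a minimal BIDS-style
# events table to a temporary file and parse it. The columns mirror what the
# parser expects (onset, duration, trial_type); the file path is hypothetical.
import pandas as pd

def _sketch_parse_tsv_demo(tmp_path="/tmp/_sketch_events.tsv"):
    demo = pd.DataFrame({
        "onset": [0.0, 10.0, 20.0],
        "duration": [5.0, 5.0, 5.0],
        "trial_type": ["rest", "task", "rest"],
    })
    demo.to_csv(tmp_path, sep="\t", index=False, na_rep="n/a")
    conditions, onsets, durations = parse_tsv_condition_file(tmp_path)
    return conditions, onsets, durations  # e.g. ['rest', 'task'], [[0.0, 20.0], [10.0]], ...
# ----------------------------------------------------------------------------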
def parse_mat_condition_file(filepath):
def extract(x):
"""
Extract single value from n-dimensional array
"""
if isinstance(x, np.ndarray):
return extract(x[0])
return x
conditions = []
onsets = []
durations = []
try:
data = loadmat(filepath)
except NotImplementedError:
# MATLAB v7.3 files are HDF5-based and would need h5py; not handled here
raise
assert data is not None
mnames = np.squeeze(data["names"])
mdurations = np.squeeze(data["durations"])
monsets = np.squeeze(data["onsets"])
for i, name in enumerate(mnames):
condition = extract(name)
ionsets = np.ravel(monsets[i])
idurations =
|
np.ravel(mdurations[i])
|
numpy.ravel
|
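# ----------------------------------------------------------------------------
# Hedged sketch: parse_mat_condition_file is truncated at this point of the
# excerpt. The snippet below only illustrates why extract() and np.ravel are
# needed: scipy.io.loadmat wraps SPM-style condition names and onsets in nested
# object and 2-D arrays that must be unwrapped element by element.
import numpy as np

def _sketch_unwrap_mat_cell():
    name_cell = np.empty((1, 1), dtype=object)
    name_cell[0, 0] = np.array(["task"])
    onsets_cell = np.empty((1, 1), dtype=object)
    onsets_cell[0, 0] = np.array([[0.0, 10.0, 20.0]])
    name = name_cell[0, 0][0]              # -> 'task'
    onsets = np.ravel(onsets_cell[0, 0])   # -> array([ 0., 10., 20.])
    return name, onsets
# ----------------------------------------------------------------------------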
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import glob
import matplotlib.pyplot as plt
import clr
import sys
import os
from System.IO import *
from System import String
from System.Threading import AutoResetEvent
from System.Collections.Generic import List
# from fit_code2 import ration  # ration() is called below (gdr = ration(...)); re-enable this import when fit_code2 is available
# Add needed dll references
sys.path.append(os.environ['LIGHTFIELD_ROOT'])
sys.path.append(os.environ['LIGHTFIELD_ROOT']+"\\AddInViews")
clr.AddReference('PrincetonInstruments.LightFieldViewV5')
clr.AddReference('PrincetonInstruments.LightField.AutomationV5')
clr.AddReference('PrincetonInstruments.LightFieldAddInSupportServices')
# PI imports
from PrincetonInstruments.LightField.Automation import Automation
from PrincetonInstruments.LightField.AddIns import CameraSettings
from PrincetonInstruments.LightField.AddIns import ExperimentSettings
from PrincetonInstruments.LightField.AddIns import DeviceType
from PrincetonInstruments.LightField.AddIns import SpectrometerSettings
import time
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# %pylab inline
import pandas as pd # python data manipulation and analysis library
import numpy as np # Library with large collection of high-level mathematical functions to operate on arrays
import matplotlib.pyplot as plt #python plotting library
import peakutils #baselining library
from scipy.optimize import curve_fit
import os,glob
# Library with operating system dependent functionality. Example: Reading data from files on the computer
import csv
# from pathlib import *
import mplcursors
from sklearn import preprocessing
from lmfit import Parameters, minimize
from scipy import stats
def capture_photo(begin,exp_no,line,iii):
global device_found
global experiment
global save_file
global file_manager
if begin=="start":
def save_file(filename):
# Set the base file name
experiment.SetValue(
ExperimentSettings.FileNameGenerationBaseFileName,
Path.GetFileName(filename))
# Option to Increment, set to false will not increment
experiment.SetValue(
ExperimentSettings.FileNameGenerationAttachIncrement,
True)
# Option to add date
experiment.SetValue(
ExperimentSettings.FileNameGenerationAttachDate,
True)
# Option to add time
experiment.SetValue(
ExperimentSettings.FileNameGenerationAttachTime,
True)
def device_found():
# Find connected device
for device in experiment.ExperimentDevices:
if (device.Type == DeviceType.Camera):
return True
# If connected device is not a camera inform the user
print("Camera not found. Please add a camera and try again.")
return False
# Create the LightField Application (true for visible)
# The 2nd parameter forces LF to load with no experiment
auto = Automation(True, List[String]())
application= auto.LightFieldApplication
experiment = auto.LightFieldApplication.Experiment
file_manager=application.FileManager
if begin=='adjust':
def set_value(setting, value):
# Check for existence before setting
# gain, adc rate, or adc quality
if experiment.Exists(setting):
experiment.SetValue(setting, value)
def experiment_completed(sender, event_args):
# print("Experiment Completed")
# Sets the state of the event to signaled,
# allowing one or more waiting threads to proceed.
acquireCompleted.Set()
from flipper import mirror
mirror('on')
# Check for device and inform user if one is needed
if (device_found()==True):
experiment.ExperimentCompleted += experiment_completed
# Check this location for saved spe after running
#print("Please Enter the Exposure Time:\n")
#x=int(input())
set_value(CameraSettings.ShutterTimingExposureTime,1000)
#print("Please Enter the Number of Frames")
#y=int(input())
n=3
experiment.SetValue(ExperimentSettings.AcquisitionFramesToStore,n)
for k in range(1,2):
if k==1:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,578.1351880026082)
elif k==2:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,621.2340604418703)
_file_name = "case"
# Pass location of saved file
save_file(_file_name)
# Acquire image
experiment.Acquire()
time.sleep(5)
#directory="C:\\Users\\labuser\\Desktop\\data\\Raman\\Vivek\\2019-10-08"
directory="C:\\Users\\UWAdmin\\Desktop\\AIM-Lab-Automation-master\\AIM-Lab-Automation-master\\spes"
if( os.path.exists(directory)):
# print("\nFound the .spe file...")
print(" ")
# Returns all .spe files
files = glob.glob(directory +'/*.spe')
# Returns recently acquired .spe file
last_image_acquired = max(files, key=os.path.getctime)
try:
# Open file
file_name = file_manager.OpenFile(last_image_acquired, FileAccess.Read)
# Access image
file=file_name
imageData = file.GetFrame(0,0)
#here is a problem 11-18-2019
intensity_frame=np.zeros((n,1340))
# Get image data
buffer = imageData.GetData()
#buffer=imageData.GetDataBuffer()
# Print first 10 pixel intensities
for i in range(0,n):
imageData=file.GetFrame(0,i)
buffer=imageData.GetData()
for pixel in range(0,1340):
intensity_frame[i][pixel]=buffer[pixel]
file_name.Dispose()
except IOError:
print ("Error: can not find file or read data")
else:
# print(".spe file not found...")
print(" ")
# print(String.Format("{0} {1}",
# "Image saved to",
# experiment.GetValue(
# ExperimentSettings.
# FileNameGenerationDirectory)))
mirror('off')
wl= experiment.SystemColumnCalibration
wavelength=np.zeros((1,1340))
for i in range(1340):wavelength[0,i]=wl[i]
#print(intensity_frame)
intensity=np.zeros((1,1340))
for i in range(1340):
x=0
for j in range(n):
x=x+intensity_frame[j][i]
x=x/n
intensity[0,i]=x
check_intensity=x
w=[]
inten=[]
for x in range(1340):
wavelength[0,x]=1e7*(1/532 - 1/wavelength[0,x])
w.append(wavelength[0,x])
inten.append(intensity[0,x])
import csv
ix=np.max(inten)
return ix
if begin=="bg":
def set_value(setting, value):
# Check for existence before setting
# gain, adc rate, or adc quality
if experiment.Exists(setting):
experiment.SetValue(setting, value)
def experiment_completed(sender, event_args):
# print("Experiment Completed")
# Sets the state of the event to signaled,
# allowing one or more waiting threads to proceed.
acquireCompleted.Set()
# Check for device and inform user if one is needed
if (device_found()==True):
experiment.ExperimentCompleted += experiment_completed
# Check this location for saved spe after running
#print("Please Enter the Exposure Time:\n")
#x=int(input())
set_value(CameraSettings.ShutterTimingExposureTime,3000)
#print("Please Enter the Number of Frames")
#y=int(input())
experiment.SetValue(ExperimentSettings.AcquisitionFramesToStore,10)
for k in range(1,3):
if k==1:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,578.1351880026082)
elif k==2:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,621.2340604418703)
_file_name = "case"
# Pass location of saved file
save_file(_file_name)
# Acquire image
experiment.Acquire()
time.sleep(35)
#directory="C:\\Users\\labuser\\Desktop\\data\\Raman\\Vivek\\2019-10-08"
directory="C:\\Users\\UWAdmin\\Desktop\\AIM-Lab-Automation-master\\AIM-Lab-Automation-master\\spes"
if( os.path.exists(directory)):
# print("\nFound the .spe file...")
print(" ")
# Returns all .spe files
files = glob.glob(directory +'/*.spe')
# Returns recently acquired .spe file
last_image_acquired = max(files, key=os.path.getctime)
try:
# Open file
file_name = file_manager.OpenFile(
last_image_acquired, FileAccess.Read)
# Access image
file=file_name
imageData = file.GetFrame(0,0)
#here is a problem 11-18-2019
n=5
intensity_frame=np.zeros((n,1340))
# Get image data
buffer = imageData.GetData()
#buffer=imageData.GetDataBuffer()
# Print first 10 pixel intensities
for i in range(0,n):
imageData=file.GetFrame(0,i)
buffer=imageData.GetData()
for pixel in range(0,1340):
intensity_frame[i][pixel]=buffer[pixel]
file_name.Dispose()
except IOError:
print ("Error: can not find file or read data")
else:
# print(".spe file not found...")
print(" ")
# print(String.Format("{0} {1}",
# "Image saved to",
# experiment.GetValue(
# ExperimentSettings.
# FileNameGenerationDirectory)))
wl= experiment.SystemColumnCalibration
wavelength=np.zeros((1,1340))
for i in range(1340):wavelength[0,i]=wl[i]
#print(intensity_frame)
intensity=np.zeros((1,1340))
for i in range(1340):
x=0
for j in range(n):
x=x+intensity_frame[j][i]
x=x/n
intensity[0,i]=x
check_intensity=x
w=[]
inten=[]
for x in range(1340):
wavelength[0,x]=1e7*(1/532 - 1/wavelength[0,x])
w.append(wavelength[0,x])
inten.append(intensity[0,x])
import csv
m="background"+str(k)+"D.csv"
with open(m, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(["W", "I"])
writer.writerows(zip(w,inten))
if begin=="on":
def set_value(setting, value):
# Check for existence before setting
# gain, adc rate, or adc quality
if experiment.Exists(setting):
experiment.SetValue(setting, value)
def experiment_completed(sender, event_args):
# print("Experiment Completed")
# Sets the state of the event to signaled,
# allowing one or more waiting threads to proceed.
acquireCompleted.Set()
# Check for device and inform user if one is needed
if (device_found()==True):
experiment.ExperimentCompleted += experiment_completed
# Check this location for saved spe after running
#print("Please Enter the Exposure Time:\n")
#x=int(input())
set_value(CameraSettings.ShutterTimingExposureTime,3000)
#print("Please Enter the Number of Frames")
#y=int(input())
experiment.SetValue(ExperimentSettings.AcquisitionFramesToStore,10)
for k in range(1,3):
if k==1:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,578.1351880026082)
elif k==2:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,621.2340604418703)
_file_name = "case"
# Pass location of saved file
save_file(_file_name)
# Acquire image
experiment.Acquire()
time.sleep(35)
#directory="C:\\Users\\labuser\\Desktop\\data\\Raman\\Vivek\\2019-10-08"
directory="C:\\Users\\UWAdmin\\Desktop\\AIM-Lab-Automation-master\\AIM-Lab-Automation-master\\spes"
if( os.path.exists(directory)):
# print("\nFound the .spe file...")
print(" ")
# Returns all .spe files
files = glob.glob(directory +'/*.spe')
# Returns recently acquired .spe file
last_image_acquired = max(files, key=os.path.getctime)
try:
# Open file
file_name = file_manager.OpenFile(
last_image_acquired, FileAccess.Read)
# Access image
file=file_name
imageData = file.GetFrame(0,0)
#here is a problem 11-18-2019
n=10
intensity_frame=np.zeros((n,1340))
# Get image data
buffer = imageData.GetData()
#buffer=imageData.GetDataBuffer()
# Print first 10 pixel intensities
for i in range(0,n):
imageData=file.GetFrame(0,i)
buffer=imageData.GetData()
for pixel in range(0,1340):
intensity_frame[i][pixel]=buffer[pixel]
file_name.Dispose()
except IOError:
print ("Error: can not find file or read data")
else:
# print(".spe file not found...")
print(" ")
wl= experiment.SystemColumnCalibration
wavelength=np.zeros((1,1340))
for i in range(1340):wavelength[0,i]=wl[i]
#print(intensity_frame)
intensity=np.zeros((1,1340))
for i in range(1340):
x=0
for j in range(n):
x=x+intensity_frame[j][i]
x=x/n
intensity[0,i]=x
check_intensity=x
w=[]
inten=[]
for x in range(1340):
wavelength[0,x]=1e7*(1/532 - 1/wavelength[0,x])
w.append(wavelength[0,x])
inten.append(intensity[0,x])
import csv
m="line "+ str(line)+" Point "+str(exp_no)+" iteration "+str(iii)+" foreground"+str(k)+"D.csv"
with open(m, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(["W", "I"])
writer.writerows(zip(w,inten))
if check_intensity>=40e3:
print("experiment: ",exp_no, ":Patterning not done")
#,twoGD,twoD,G,WD,WG
elif check_intensity<40e3:
gdr=ration(1,2,exp_no,line,iii)
return gdr
if begin=="first":
def set_value(setting, value):
# Check for existence before setting
# gain, adc rate, or adc quality
if experiment.Exists(setting):
experiment.SetValue(setting, value)
def experiment_completed(sender, event_args):
acquireCompleted.Set()
# Check for device and inform user if one is needed
if (device_found()==True):
experiment.ExperimentCompleted += experiment_completed
# Check this location for saved spe after running
#print("Please Enter the Exposure Time:\n")
#x=int(input())
set_value(CameraSettings.ShutterTimingExposureTime,3000)
#print("Please Enter the Number of Frames")
experiment.SetValue(ExperimentSettings.AcquisitionFramesToStore,5)
for k in range(1,3):
if k==1:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,578.1351880026082)
elif k==2:
experiment.SetValue(SpectrometerSettings.GratingCenterWavelength,621.2340604418703)
_file_name = "case"
# Pass location of saved file
save_file(_file_name)
# Acquire image
experiment.Acquire()
time.sleep(25)
#directory="C:\\Users\\labuser\\Desktop\\data\\Raman\\Vivek\\2019-10-08"
directory="C:\\Users\\UWAdmin\\Desktop\\AIM-Lab-Automation-master\\AIM-Lab-Automation-master\\spes"
if( os.path.exists(directory)):
# print("\nFound the .spe file...")
print(" ")
# Returns all .spe files
files = glob.glob(directory +'/*.spe')
# Returns recently acquired .spe file
last_image_acquired = max(files, key=os.path.getctime)
try:
# Open file
file_name = file_manager.OpenFile(
last_image_acquired, FileAccess.Read)
# Access image
file=file_name
imageData = file.GetFrame(0,0)
#here is a problem 11-18-2019
n=5
intensity_frame=np.zeros((n,1340))
# Get image data
buffer = imageData.GetData()
#buffer=imageData.GetDataBuffer()
# Print first 10 pixel intensities
for i in range(0,n):
imageData=file.GetFrame(0,i)
buffer=imageData.GetData()
for pixel in range(0,1340):
intensity_frame[i][pixel]=buffer[pixel]
file_name.Dispose()
except IOError:
print ("Error: can not find file or read data")
else:
# print(".spe file not found...")
print(" ")
wl= experiment.SystemColumnCalibration
wavelength=np.zeros((1,1340))
for i in range(1340):wavelength[0,i]=wl[i]
#print(intensity_frame)
intensity=
|
np.zeros((1,1340))
|
numpy.zeros
|
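# ----------------------------------------------------------------------------
# Hedged sketch of the two post-processing steps that capture_photo repeats for
# every acquisition above: average the per-frame intensities and convert the
# calibrated wavelengths (nm) into Raman shift (cm^-1) for the 532 nm laser via
# 1e7 * (1/532 - 1/lambda). The array shapes mirror the 1340-pixel detector
# used above; _sketch_frames_to_spectrum is a hypothetical name.
import numpy as np

def _sketch_frames_to_spectrum(intensity_frames, wavelengths_nm, laser_nm=532.0):
    """intensity_frames: (n_frames, n_pixels); wavelengths_nm: (n_pixels,)."""
    mean_intensity = np.asarray(intensity_frames, dtype=float).mean(axis=0)
    raman_shift = 1e7 * (1.0 / laser_nm - 1.0 / np.asarray(wavelengths_nm, dtype=float))
    return raman_shift, mean_intensity

# e.g. shift, inten = _sketch_frames_to_spectrum(np.random.rand(3, 1340),
#                                                np.linspace(545.0, 610.0, 1340))
# ----------------------------------------------------------------------------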