prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59)
---|---|---|
"""
Phase Contrast Cardiac MRI Segmentation
Prepare MRIs for training a CNN model. Given an input directory of numpy image tensors
containing phase contrast cardiac MRIs:
- Generate candidate valve segmentations
- Rank candidates in terms of the most likely atrial valve
- Write segmentation masks to numpy files
- Export 32x32, 48x48 cropped images
@author jason-fries [at] stanford [dot] edu
"""
from __future__ import print_function
import os
import re
import sys
import time
import glob
import logging
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.measure import label, regionprops
from skimage import filters, segmentation, img_as_float, img_as_ubyte
from skimage.segmentation import clear_border
from skimage.morphology import closing, square, disk, dilation, erosion
from skimage.restoration import denoise_wavelet, denoise_nl_means
from skimage.transform import rescale
from skimage.filters import threshold_local
from scipy.ndimage.filters import uniform_filter
from utils import *
logger = logging.getLogger(__name__)
def get_centroid(x, y, weights=None):
"""
Compute average of provided points. Optionally weight points (doesn't usually matter).
:param x:
:param y:
:param weights:
:return:
"""
x_mu = np.average(x, weights=weights).astype(int)
y_mu = np.average(y, weights=weights).astype(int)
return [x_mu, y_mu]
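# Illustrative only: a minimal, hypothetical check of get_centroid on a toy
# 3-pixel region, with intensity weights pulling the centroid toward the
# brightest pixel. The _demo_* helper is not part of the original pipeline.
def _demo_get_centroid():
    xs = np.array([10, 12, 14])
    ys = np.array([20, 20, 26])
    w = np.array([1.0, 1.0, 2.0])
    # weighted averages: x -> 12, y -> 23 (truncated to int by get_centroid)
    return get_centroid(xs, ys, weights=w)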
def score_segmentations(img, labeled, weighted_centroid=True, min_threshold=2, max_threshold=1000):
"""
    Compute a pixel mask for each labeled segment and calculate its centroid.
    Discard masks with more than max_threshold pixels or fewer than min_threshold.
:param img:
:param labeled:
:param weighted_centroid:
:param min_threshold:
:param max_threshold:
:return:
"""
segments = []
for s_id in range(max(labeled.flatten()) + 1):
# get coordinates of this segment
y, x = np.where(labeled == s_id)
# pixel weights
w = img[labeled == s_id]
num_pixels = len(w.flatten())
if num_pixels >= max_threshold or num_pixels <= min_threshold:
continue
segments.append([np.sum(w), s_id, num_pixels, get_centroid(x, y, weights=w)])
# rank candidates
return rank_valve_cands(sorted(segments, reverse=1))
def rank_valve_cands(segments):
"""
Heuristic for selecting probable atrial valve. Take top 2 weighted segments and
check their spatial orientation. Basic idea is that the atrial valve is *usually*
the largest, highest intensity region located in the lower left region of the MRI image.
2/14/2018 Spot check of 194 examples: 192/194 correct
:param segments:
:return:
"""
assert len(segments) > 0
if len(segments) == 1:
return segments[0:1]
# select top 2 candidates
a = segments[0]
b = segments[1]
    c = segments[2:] if len(segments) > 2 else []
# segments.append([np.sum(w), s_id, num_pixels, get_centroid(x, y, weights=w)])
a_x, a_y = a[-1]
b_x, b_y = b[-1]
a_w = a[0]
b_w = b[0]
# when there is a large disparity between weighted areas, use the largest area
if b_w < 0.50 * a_w:
return segments
# check spatial position of 1st ranked segment vs. 2nd ranked
if (a_x >= b_x and a_y <= b_y) or (a_x <= b_x and a_y <= b_y):
target = [b, a] + c
else:
target = segments
return target
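# Illustrative only: a hypothetical sanity check of the ranking heuristic using two
# synthetic records in the [sum_weight, seg_id, num_pixels, [cx, cy]] layout built
# by score_segmentations. Comparable weights trigger the spatial check above.
def _demo_rank_valve_cands():
    a = [120.0, 1, 40, [60, 80]]   # top-weighted segment, lower region of the image
    b = [110.0, 2, 35, [10, 10]]   # runner-up, upper-left region
    return rank_valve_cands([a, b])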
def get_segmentation_masks(labeled, segments):
"""
n x height x width
1...n segmentation masks
    Each layer is a single region, ranked by likelihood of being the atrial valve
Last layer is the inverse mask (i.e., all non-valve areas)
:param X:
:return:
"""
masks = []
for seg in segments:
_, seg_id, _, _ = seg
mask = np.copy(labeled)
mask[mask != seg_id] = 0
mask[mask == seg_id] = 1
masks.append(mask)
mask = np.copy(labeled)
mask[mask == 0] = 100
mask[mask != 100] = 0
mask[mask == 100] = 1
masks.append(mask)
return np.array(masks, dtype=np.float32)
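# Illustrative only: a hypothetical call to get_segmentation_masks with a tiny
# labeled image and two ranked segments; the result stacks one binary mask per
# segment plus the inverse (non-valve) mask as the last layer.
def _demo_get_segmentation_masks():
    labeled = np.array([[0, 1, 1],
                        [0, 2, 0],
                        [0, 0, 0]])
    segments = [[5.0, 1, 2, [1, 0]], [2.0, 2, 1, [1, 1]]]
    masks = get_segmentation_masks(labeled, segments)
    return masks.shape  # (3, 3, 3): 2 region masks + 1 inverse mask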
def get_segmentation_masks_v2(labeled, segments):
"""
Array of masks, each with a unique int id, 1...n
    Each "layer" is a single region, ranked by likelihood of being the atrial valve 1..n
0 is the inverse mask (i.e., all non-valve areas)
:param X:
:return:
"""
    mask = np.zeros(labeled.shape)
    for i, seg in enumerate(segments):
        _, seg_id, _, _ = seg
        # write each ranked region into the single mask; 0 stays the non-valve background
        mask[np.where(labeled == seg_id)] = i + 1
return mask
def crop(img, bbox):
"""
Crop image. Accepts frame data (frames X height X width) or a single 2D image
:param x:
:param bbox:
:return:
"""
assert len(img.shape) >= 2
if len(img.shape) == 3:
return img[...,bbox[0]:bbox[1],bbox[2]:bbox[3]]
else:
return img[bbox[0]:bbox[1], bbox[2]:bbox[3]]
def get_crop_region(x, y, dim=48):
"""
Get bounding box centered on the centroid of the point set x,y.
:param max_dim:
:return:
"""
width = max(x) - min(x)
height = max(y) - min(y)
    x_pad = (dim - width) // 2
    y_pad = (dim - height) // 2
# add pixels as needed
x_slack = 0
y_slack = 0
if (2 * x_pad) + width != dim:
x_slack = dim - ((2 * x_pad) + width)
if (2 * y_pad) + height != dim:
y_slack = dim - ((2 * y_pad) + height)
return [min(x) - x_pad - x_slack, max(x) + x_pad, min(y) - y_pad - y_slack, max(y) + y_pad]
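# Illustrative only: a hypothetical example tying get_crop_region and crop together.
# A dim x dim bounding box is built around a small point cloud, then used to crop a
# synthetic frame stack of the same layout (frames x height x width).
def _demo_crop_region(dim=48):
    frames = np.random.rand(30, 208, 208)
    px = np.array([100, 104, 110])
    py = np.array([90, 95, 101])
    bbox = get_crop_region(px, py, dim)
    return crop(frames, bbox).shape  # expected: (30, dim, dim)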
def localize_aortic_valve(img, pooling="std", outfpath=None, debug=False):
"""
Use a set of heuristics to find the region of the aortic valve.
:return:
"""
# compute pooled pixel intensities
X = np.std(img, axis=0) if pooling == "std" else np.max(img, axis=0)
labeled = segment(X, upscale=1.0, denoise=False)
# rank segment candidates (most likely atrial valve)
segments = score_segmentations(X, labeled)
masks = get_segmentation_masks(labeled, segments)
# debug: save segmentations as a PNG
if debug:
target = segments[0]
cx, cy = target[-1]
plt.figure(figsize=(6, 6))
plt.imshow(labeled, cmap='tab10')
plt.scatter(x=cx, y=cy, c='r', s=20)
plt.savefig(outfpath)
plt.close()
return masks
def segment(X, upscale=1.0, denoise=False):
"""
:param X:
:param upscale:
:param denoise:
:return:
"""
if upscale > 1.0:
X = rescale(X, upscale)
if denoise:
X = denoise_wavelet(X)
thresh = filters.threshold_otsu(X)
bw = closing(X > thresh, square(3))
cleared = clear_border(bw)
cleared = rescale(cleared, 1.0 / upscale)
return label(cleared)
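# Illustrative only: a hypothetical smoke test for segment() on a synthetic
# "pooled" image of the kind produced in localize_aortic_valve: a single bright
# square away from the border should come back as one labeled region.
def _demo_segment():
    X = np.zeros((64, 64))
    X[20:26, 12:18] = 1.0
    labeled = segment(X, upscale=1.0, denoise=False)
    return labeled.max()  # expected: 1 connected region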
def export_segment(pid, fpath, fpath2, fpath3, outfpath, outfpath2, outfpath3,
dim, pooling="none", mask_type="none", fmt="npy", debug=True):
"""
Given an MRI numpy image of dim: frames X height X width,
generate a segmentation mask for valve candidates.
Segmentation code based on sample from
http://douglasduhaime.com/posts/simple-image-segmentation-with-scikit-image.html
:param fpath:
:param outfpath:
:param dim: crop dimensions
:param fmt: (frames|max_pool|std_pool|video) image format options
:param mask_type: (None|hard|soft) DEFAULT: None
:param debug:
:return:
"""
# 1: LOAD/PREPROCESS IMAGE
img = np.load(fpath)
if len(img.shape) != 3:
        raise ValueError('Expected a 3D array (frames x height x width), got shape {}'.format(img.shape))
# compute pixel intensity SD percentiles
X = np.std(img, axis=0)
# 2: SEGMENTATION
labeled = segment(X, upscale=1.0, denoise=False)
# rank segment candidates (most likely atrial valve)
segments = score_segmentations(X, labeled)
target = segments[0]
cx, cy = target[-1]
# debug: save segmentations as a PNG
if debug:
plt.figure(figsize=(6, 6))
plt.imshow(labeled, cmap='tab10')
plt.scatter(x=cx, y=cy, c='r', s=20)
plt.savefig(outfpath)
plt.close()
# save all valve masks (index 0 is the most likely atrial valve)
masks = get_segmentation_masks(labeled, segments)
# debug: dump each image mask as a PNG
if debug:
for m in range(masks.shape[0]):
plt.figure(figsize=(6, 6))
plt.imshow(masks[m], cmap='tab10')
plt.savefig(outfpath + "_{}".format(m))
plt.close()
# get segment mask points, compute bounding box, and crop original image
px, py = np.where(masks[0] == 1)
print("Patient X :", px)
print("Patient Y :", py)
bbox = get_crop_region(px, py, dim)
print("Bbox :", bbox)
print("X Center :", (bbox[1] + bbox[0])/2)
print("Y Center :", (bbox[3] + bbox[2])/2)
c_img = crop(img, bbox)
# Load Other Series Images and crop based on bbox
img2 = np.load(fpath2)
img3 = np.load(fpath3)
c_img2 = crop(img2, bbox)
c_img3 = crop(img3, bbox)
# mask data: by default, don't mask anything
mask = np.ones((bbox[1] - bbox[0], bbox[3] - bbox[2]), dtype=np.float32)
if mask_type in ["soft", "hard"]:
msk = np.copy(masks[0])
exp_msk = dilation(msk)
exp_msk = crop(exp_msk, bbox)
mask = filters.gaussian(exp_msk, sigma=1.01) if mask_type == "soft" else exp_msk
# 3: EXPORT IMAGE DATA
#img_path = "{}_{}x{}".format(outfpath, dim, dim)
img_path = "{}".format(outfpath)
img_path = "{}_{}pool".format(img_path, pooling) if pooling != "none" else img_path
img_path = "{}_{}".format(img_path, mask_type) if mask_type != "none" else img_path
img_path2 = "{}".format(outfpath2)
img_path2 = "{}_{}pool".format(img_path2, pooling) if pooling != "none" else img_path2
img_path2 = "{}_{}".format(img_path2, mask_type) if mask_type != "none" else img_path2
img_path3 = "{}".format(outfpath3)
img_path3 = "{}_{}pool".format(img_path3, pooling) if pooling != "none" else img_path3
img_path3 = "{}_{}".format(img_path3, mask_type) if mask_type != "none" else img_path3
# pool data
if pooling in ["max", "std", "z_add"]:
if pooling == "max":
c_img = | np.max(c_img, axis=0) | numpy.max |
#!/usr/bin/env python
""" Aggregates features per partner, for further retrieval from HDFS. """
import argparse
import datetime
import glob
import os
import sys
import shutil
import time
import numpy as np
import multiprocessing
from shutil import copyfileobj
from pid import PidFile
#import bz2
import gzip
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
__author__ = "<NAME>"
__copyright__ = ""
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
def _log(msg):
dt_string = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
filename = os.path.basename(__file__)
msg_string = '%s\t[%s]\t%s' % (dt_string, filename, msg)
print(msg_string)
    with open('/home/ubuntu/image-search/_%s.log' % filename, 'a') as fp:
        fp.write('%s\n' % msg_string)
    # log error messages (but ignore HDFS warnings)
    if msg.find('ERROR') != -1 and msg.find('WARN retry.RetryInvocationHandler') == -1:
        with open('/home/ubuntu/image-search/_%s.error.log' % os.path.basename(__file__), 'a') as fp:
            fp.write('%s\n' % msg_string)
def _take_lock():
filename = '/home/ubuntu/image-search/.akela.lock'
assert (not os.path.isfile(filename))
with open(filename, 'w') as fp:
fp.write('0')
def _is_locked():
filename = '/home/ubuntu/image-search/.akela.lock'
return os.path.isfile(filename)
def _release_lock():
filename = '/home/ubuntu/image-search/.akela.lock'
assert (os.path.isfile(filename))
os.remove(filename)
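# Illustrative only: a hypothetical usage sketch for the lock helpers above, so two
# cron-launched instances do not aggregate features at the same time. It assumes the
# hard-coded /home/ubuntu/image-search directory used by this script exists.
def _demo_locking():
    if _is_locked():
        _log('another instance is running, exiting')
        return
    _take_lock()
    try:
        pass  # ... per-partner aggregation would run here ...
    finally:
        _release_lock()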
def compress_files(files):
for c in files:
a = c[0]
b = c[1]
_log('compressing file %s --> %s...' % (a, b))
        with open(a, 'rb') as input:
            with gzip.open(b, 'wb') as output:
                output.write(input.read())
# remove uncompressed file
os.remove(a)
def list_partners(files):
partner_ids = []
for file in files:
_log('processing file %s' % file)
with open(file, 'r') as fp:
for line in fp.readlines():
d = line.strip().split(' ')
partner_id = int(d[1])
partner_ids.append(partner_id)
return list(set(partner_ids))
# to_temp is used to dump output in a directory that is not watched by the gateway
#
def split_per_partners(dt_string, partner_ids, feature_filenames, do_stats, to_temp):
if to_temp:
success_file = '/opt/tmp.output-per-partner/.success'
else:
success_file = '/opt/output-per-partner/.success'
# remove success file
if os.path.isfile(success_file):
os.remove(success_file)
# get timestamp
# dt_string = '%d' % int(time.time())
if to_temp:
output_dir = '/opt/tmp.output-per-partner/%s' % dt_string
else:
output_dir = '/opt/output-per-partner/%s' % dt_string
# remove all output files
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
# open files
fps = {}
for partner_id in partner_ids:
filename = '%s/cnn_features_partner_%d.txt' % (output_dir, partner_id)
fps[partner_id] = open(filename, 'w')
_log('opened file for partner id %d' % partner_id)
if do_stats:
# average non-zero values
nbins = 1000
nonzero_values = []
feat_hist = np.zeros(4096) # histogram of non-zero indices
feat_val_hist = np.zeros(nbins) # histogram of vector values
min_val = 1E8
max_val = -1E8
# for debugging - set to -1 to disable
max_files_to_process = -1
n_greater_int16 = 0
n_greater_int16_count = 0
# splitting data
for (file, filecount) in zip(feature_filenames, range(len(feature_filenames))):
_log('processing file %s (%d out of %d)' %
(file, filecount+1, len(feature_filenames)))
with open(file, 'r') as op:
for line in op.readlines():
d = line.strip().split(' ')
# the first integer is used by bagheera and can be discarded
partner_id = int(d[1])
internal_id = int(d[2])
if partner_id not in fps:
_log('***ERROR*** partnerid %d not listed.' % partner_id)
assert (partner_id in fps)
# fps[partner_id].write(line)
vals = [float(y) for y in d[3:]]
feat = np.array(vals)
if np.any(np.isnan(feat)):
_log('*** ERROR *** nan values encountered in file %s' % file)
continue
nzf = np.where(feat > .00000001)[0]
if nzf.size > 0:
n_greater_int16 += 1.0 * \
np.sum(feat[nzf] > 65536.0/1000000) / nzf.size
n_greater_int16_count += 1
# write data to file (if not doing stats)
if not do_stats:
fps[partner_id].write('%d %d ' % (partner_id, internal_id))
for k in vals:
fps[partner_id].write('%d ' % int(round(k*1000000)))
fps[partner_id].write('\n')
if do_stats:
# feature vector
nonzero_values.append(100.0*nzf.size/feat.size)
feat_hist[nzf] += 1
for k in np.around(feat[nzf]*nbins).astype(int):
feat_val_hist[k] += 1
#feat_val_hist[np.around(feat[nzf]*nbins).astype(int)] += 1
min_val = min([min_val, np.amin(feat[nzf])])
max_val = max([max_val, | np.amax(feat[nzf]) | numpy.amax |
from lib.exporter.csv import CSVExporter as csvex
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from scipy import stats
from scipy.optimize import curve_fit
import argparse
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import statistics
import sys
# Example:
# PYTHONPATH=../path/to/lib/ python data_analysis.py *tsv
def data_summary_is_ok(data, pointings=None, time_slots=None, different_seeds=None):
if len(data) != pointings * time_slots:
logging.warning("Data summary length is {} and should be {} (pointings x time_slots)".format(len(data), pointings*time_slots))
return False
# check array data
for k in data:
for sk in data[k]:
if type(data[k][sk]) != type([]):
continue
if len(data[k][sk]) == different_seeds:
continue
logging.warning("not enough data for '{}'".format(k))
logging.warning(" key '{}' has {} values and should be {}".format(sk, len(data[k][sk]), different_seeds))
return False
return True
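# Illustrative only: a hypothetical call to data_summary_is_ok with a minimal,
# hand-built summary (1 pointing x 1 time slot, 2 seeds); the dict shape is an
# assumption for demonstration, not real analysis output.
def _demo_data_summary_is_ok():
    summary = {
        'crab_1800': {'name': 'crab', 'seed': [1, 2], 'ts': [25.0, 30.0]},
    }
    return data_summary_is_ok(summary, pointings=1, time_slots=1, different_seeds=2)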
def data_summary(all_data_info):
o = {}
for i in all_data_info:
key = i['name'] +'_'+ str(i['tmax'])
if key not in o:
o[key] = {
'name': i['name'],
'tmax': int(i['tmax']),
'ra': float(i['ra']) if 'ra' in i else None,
'dec': float(i['dec']) if 'dec' in i else None,
'data': {
'seed': [],
'ts': [],
'index_value': [],
'index_error': [],
'prefactor_value': [],
'prefactor_error': [],
'pivot_value': [],
'pivot_error': [],
'flux': [],
'eflux': [],
'N_on': [],
'N_off': [],
'N_exc': [],
'alpha': [],
'li_ma': [],
}
}
o[key]['data']['seed'].append(int(i['seed']))
o[key]['data']['ts'].append(float(i['ts']))
o[key]['data']['flux'].append(float(i['flux']))
o[key]['data']['eflux'].append(float(i['eflux']))
o[key]['data']['N_on'].append(float(i['on_count']))
o[key]['data']['N_off'].append(float(i['off_count']))
o[key]['data']['N_exc'].append(float(i['excess_count']))
o[key]['data']['alpha'].append(float(i['alpha']))
o[key]['data']['li_ma'].append(float(i['li_ma']) if i['li_ma'] != '' else 0)
if float(i["ts"]) < 0:
logging.warning("{0:15s} ({1:.0f} on, {2:2.0f} off, {3:3d} seed, {4:4d} tmax): Negative ts {5:.2f}".format(i["name"], float(i["on_count"]), float(i["off_count"]), int(i["seed"]), int(i["tmax"]), float(i["ts"])))
elif i["li_ma"] is None:
logging.warning("{0:15s} ({1:.0f} on, {2:2.0f} off, {3:3d} seed, {4:4d} tmax): Cannot calculate Li&Ma".format(i["name"], float(i["on_count"]), float(i["off_count"]), int(i["seed"]), int(i["tmax"])))
return o
# WARNING: this function augments the input data struct
def data_augmentation(data, bins_number=50):
fields = [
{ 'name': 'ts', 'dyn_bins': False },
{ 'name': 'flux', 'dyn_bins': False },
{ 'name': 'eflux', 'dyn_bins': False },
]
for data_name, d in data.items():
logging.warning(data_name)
if 'hist' not in d:
d['hist'] = {}
if 'stats' not in d:
d['stats'] = {}
for f in fields:
f_name = f['name']
data_arr_ref = d['data'][f_name]
n_bins = dynamic_bin_number(data_arr_ref) if f['dyn_bins'] else bins_number
# counts histogram
counts_hist, bins_edges, bin_index_not_used = stats.binned_statistic(data_arr_ref, data_arr_ref, statistic='count', bins=n_bins)
bins_width = np.array(np.diff(bins_edges), float)
bins_centres = (bins_edges[:-1] + bins_edges[1:])/2
# counts_hist_normalized = counts_hist / bins_width / np.sum(counts_hist)
data_stats = array_stats(data_arr_ref)
d['stats'][f_name] = data_stats
starting_parameters = [1., data_stats['mean'], data_stats['stdev']] # A, mu, sigma
fit_coeff, pvalue_err = fitting_data(gauss, initial_params=starting_parameters, x=bins_centres, y=counts_hist, verbosity=False, name=data_name)
d['hist'][f_name] = {
'n_bins': n_bins,
'counts': counts_hist,
'bins_edges': bins_edges,
'bins_centres': bins_centres,
'bins_width': bins_width,
'fit_coeff': fit_coeff,
'pvalue_err': pvalue_err,
# 'counts_norm': counts_hist_normalized,
# 'fit_coeff_norm': fit_coeff_norm,
# 'pvalue_err_norm': pvalue_err_norm,
}
return data
def array_stats(arr):
stat = {
"n": len(arr),
"mean": statistics.mean(arr),
"stdev": statistics.pstdev(arr),
"median": statistics.median(arr),
}
return stat
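# Illustrative only: a hypothetical quick check of array_stats on a handful of values.
def _demo_array_stats():
    return array_stats([9.1, 10.4, 11.0, 9.8])  # n=4, plus mean/stdev/median of the sample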
def print_data_summary(data):
fields = [
# h_format, v_format, title, sub_t
[ "%15s", "%15s", "fs ref", "==========", ],
[ "%10s", "%10s", "RA", "==", ],
[ "%10s", "%10s", "Dec", "===", ],
[ "%6s", "%6d", "tmax", "====", ],
[ "%6s", "%6d", "seeds", "=====", ],
[ "%16s", "%9.2f±%6.2f", "TS", "==", ],
[ "%18s", "%9.2e±%8.2e", "flux [ph/cm²/s]", "===============", ],
[ "%18s", "%9.2e±%8.2e", "eflux [erg/cm²/s]", "===============", ],
[ '%26s', '%10.2f %7.2f %6.2f', 'TS fitting (A, μ, σ)', '=======', ],
[ '%23s', '%10.2f %5.2f %5.2f', 'TS pvalue (A, μ, σ)', '=======', ],
]
header_fmt = " ".join([r[0] for r in fields]) # headers format
values_fmt = " ".join([r[1] for r in fields]) # values format
print(header_fmt % tuple([r[2] for r in fields])) # titles
print(header_fmt % tuple([r[3] for r in fields])) # sub_titles separator
for d in sorted(data, key=lambda i: (-1*i["tmax"], i["ra"], i["dec"])):
n_seeds = len(d['data']['seed'])
ts_m = array_stats(d['data']['ts'])
flux_m = array_stats(d['data']['flux'])
eflux_m = array_stats(d['data']['eflux'])
# print(d)
print(values_fmt % (d["name"], d["ra"], d["dec"], d["tmax"], n_seeds,
ts_m["mean"], ts_m["stdev"],
flux_m["mean"], flux_m["stdev"],
eflux_m["mean"], eflux_m["stdev"],
d['hist']['ts']['fit_coeff'][0],
d['hist']['ts']['fit_coeff'][1],
abs(d['hist']['ts']['fit_coeff'][2]),
d['hist']['ts']['pvalue_err'][0],
d['hist']['ts']['pvalue_err'][1],
d['hist']['ts']['pvalue_err'][2] ))
def fitting_data(curve_fn, initial_params=[], x=[], y=[], verbosity=False, name=None):
res = curve_fit(curve_fn, x, y, p0=initial_params, full_output=verbosity)
coeff, var_matrix = res[:2]
if (len(res) > 2):
infodict, errmsg, ier = res[2:]
print("infodict: {}\nerrmsg: {}\nier: {}".format(infodict, errmsg, ier))
perr = np.sqrt(np.diag(var_matrix))
print("Curve fit params: {}".format(name))
print("{0:>10s} {1:9s} {2:9s}".format("param no.", "value", "error"))
for i, c in enumerate(coeff):
print("{0:10d} {1:+8.6e} {2:+8.6e}".format(i, c, perr[i]))
return coeff, perr
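# Illustrative only: a hypothetical end-to-end check that fits the gauss model
# (defined just below) to a noiseless Gaussian-shaped curve and recovers A, mu, sigma.
def _demo_fitting_data():
    x = np.linspace(-5., 5., 50)
    y = 3.0 * np.exp(-(x - 1.0) ** 2 / (2. * 0.8 ** 2))
    coeff, perr = fitting_data(gauss, initial_params=[1., 0., 1.], x=x, y=y, name='demo')
    return coeff  # expected approximately [3.0, 1.0, 0.8] (sigma recovered up to sign)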
def gauss(x, *params):
A, mu, sigma = params
exp_num = -1 * (x-mu)**2
exp_den = 2. * sigma**2
return A * | np.exp(exp_num / exp_den) | numpy.exp |
import numpy as np
class Dense():
def __init__(self, no_examples, no_units, prev, activation):
self.no_examples = no_examples
self.no_units = no_units
self.neurons = np.random.randn(no_examples, self.no_units)
self.weights = np.random.randn(prev, self.no_units)*0.01
self.bias = np.zeros((1, self.no_units))*0.01
self.activation = activation
self.dweights = np.zeros(self.weights.shape)
self.dbias = np.zeros(self.bias.shape)
self.error = None
def activate(self):
if self.activation == 'sigmoid':
self.neurons = 1.0 / (1.0 + np.exp(-self.neurons))
elif self.activation == 'tanh':
self.neurons = np.tanh(self.neurons)
elif self.activation == 'relu':
self.neurons = np.maximum(0, self.neurons)
else:
pass
def deractivation(self):
if self.activation == 'sigmoid':
self.neurons = self.neurons*(1 - self.neurons)
elif self.activation == 'tanh':
self.neurons = 1 - np.square(self.neurons)
elif self.activation == 'relu':
self.neurons[self.neurons <= 0] = 0
self.neurons[self.neurons > 0] = 1
else:
pass
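# Illustrative only: a hypothetical smoke test that builds a single Dense layer and
# applies its ReLU activation to the randomly initialised pre-activations.
def _demo_dense_layer():
    layer = Dense(no_examples=4, no_units=3, prev=2, activation='relu')
    layer.activate()
    return layer.neurons.shape  # expected: (4, 3), all entries >= 0 after ReLU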
class Model():
def __init__(self, layers, xdata, ydata):
model = []
self.cost = None
self.xdata = xdata
self.ydata = ydata
for i,layer in enumerate(layers):
if i != 0:
l = Dense(model[-1].no_examples, layer['units'], model[-1].no_units, layer['activation'])
else:
l = Dense(self.xdata.shape[0], layer['units'], self.xdata.shape[1], layer['activation'])
model.append(l)
self.model = model
def forward(self, x_data):
for i,layer in enumerate(self.model):
if i == 0:
layer.neurons = np.dot(x_data, layer.weights) + layer.bias
else:
layer.neurons = np.dot(self.model[i-1].neurons, layer.weights) + layer.bias
layer.activate()
cost = np.sum( | np.square(self.ydata - self.model[-1].neurons) | numpy.square |
# -*- coding: utf-8 -*-
import os
import copy
import numpy as np
import time
import unittest
import numba
import operator
# Allows relative imports when run locally as script
# https://docs.python-guide.org/writing/structure/
#if __name__ == "__main__":
# sys.path.insert(0, os.path.abspath(
# os.path.join(os.path.dirname(__file__), '..')))
#import fractalshades.numpy_utils.xrange as fsx
from fractalshades.numpy_utils.xrange import (
Xrange_array,
Xrange_polynomial,
Xrange_SA,
mpf_to_Xrange
)
import fractalshades.numpy_utils.numba_xr as numba_xr
import fractalshades.models as fsmodels
# Allows relative imports when run locally as script
# https://docs.python-guide.org/writing/structure/
#if __name__ == "__main__":
# sys.path.insert(0, os.path.abspath(
# os.path.join(os.path.dirname(__file__), '..')))
# import fractalshades.numpy_utils.numba_xr as numba_xr
# numba_xr as side effects (defines overload) make sure we only import once
#if "mumba_xr" not in sys.modules:
# import numba_xr
# pass
#else:
# print("ALREADY IMPORTED")
#print(mumba_xr)
#global imported_numba_xr
#if not "imported_numba_xr" in globals():
# import numba_xr
# imported_numba_xr = True
#else:
# print("ALREADY imported")
from_complex = {np.complex64: np.float32,
np.complex128: np.float64}
crossed_dtypes = [
(np.float64, np.float64),
(np.complex128, np.complex128),
(np.float64, np.complex128),
(np.complex128, np.float64)
]
# testing binary operation of reals extended arrays
def generate_random_xr(dtype, nvec=500, max_bin_exp=200, seed=100):
"""
Generates a random Xrange array
dtype: mantissa dtype
nvec: number of pts
max_bin_exp max of base 2 exponent abs
seed : random seed
Return
Xrange array, standard array
"""
rg = np.random.default_rng(seed)
if dtype in from_complex.keys():
r_dtype = from_complex[dtype]
mantissa = ((rg.random([nvec], dtype=r_dtype) * 2. - 1.)
+ 1j * (2. * rg.random([nvec], dtype=r_dtype) - 1.))
else:
mantissa = rg.random([nvec], dtype=dtype) * 2. - 1.
exp = rg.integers(low=-max_bin_exp, high=max_bin_exp, size=[nvec])
xr = Xrange_array(mantissa, exp=exp)
std = mantissa.copy() * (2. ** exp)
return xr, std
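# Illustrative only: a hypothetical quick look at generate_random_xr; the extended
# range array and the plain numpy array it returns should agree once converted back
# to standard floats.
def _demo_generate_random_xr():
    xr, std = generate_random_xr(np.float64, nvec=4, max_bin_exp=10, seed=42)
    np.testing.assert_allclose(xr.to_standard(), std)
    return std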
def _matching(res, expected, almost=False, dtype=None, cmp_op=False, ktol=1.5):
if not cmp_op:
res = res.to_standard()
if almost:
np.testing.assert_allclose(res, expected,
rtol= ktol * np.finfo(dtype).eps)
else:
np.testing.assert_array_equal(res, expected)
@numba.njit
def numba_test_setitem(arr, idx, val_tuple):
arr[idx] = numba_xr.Xrange_scalar(*val_tuple)
@numba.njit
def numba_test_add(a, b, out):
n, = a.shape
for i in range(n):
out[i] = a[i] + b[i]
@numba.njit
def numba_test_iadd(a, b):
n, = a.shape
for i in range(n):
a[i] += b[i]
return a
@numba.njit
def numba_test_sub(a, b, out):
n, = a.shape
for i in range(n):
out[i] = a[i] - b[i]
@numba.njit
def numba_test_mul(a, b, out):
n, = a.shape
for i in range(n):
out[i] = a[i] * b[i]
@numba.njit#(parallel=True)
def numba_test_div(a, b, out):
n, = a.shape
for i in range(n):
out[i] = a[i] / b[i]
@numba.njit
def numba_test_ldexp(m, exp, out):
n, = m.shape
for i in range(n):
out[i] = numba_xr._exp2_shift(m[i], exp[i])
@numba.njit
def numba_test_frexp(m, out_m, out_exp):
n, = m.shape
for i in range(n):
out_m[i], out_exp[i] = numba_xr._frexp(m[i])
@numba.njit
def numba_test_normalize(m, exp, out_m, out_exp):
n, = m.shape
for i in range(n):
out_m[i], out_exp[i] = numba_xr._normalize(m[i], exp[i])
@numba.njit
def numba_test_sqrt(xa, out):
n, = xa.shape
for i in range(n):
out[i] = np.sqrt(xa[i])
@numba.njit
def numba_test_abs(xa, out):
n, = xa.shape
for i in range(n):
out[i] = np.abs(xa[i])
@numba.njit
def numba_test_abs2(xa, out):
n, = xa.shape
for i in range(n):
out[i] = numba_xr.extended_abs2(xa[i])
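# Illustrative only: a hypothetical standalone use of the numba_test_add kernel
# outside the unittest machinery, adding two small Xrange arrays element-wise.
def _demo_numba_add():
    xa, _ = generate_random_xr(np.float64, nvec=8, seed=1)
    xb, _ = generate_random_xr(np.float64, nvec=8, seed=2)
    out = Xrange_array.empty(xa.shape, dtype=np.float64)
    numba_test_add(xa, xb, out)
    return out.to_standard()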
class Test_numba_xr(unittest.TestCase):
def test_setitem(self):
for dtype in [np.float64, np.complex128]:
with self.subTest(dtype=dtype):
nvec = 500
xr, std = generate_random_xr(dtype, nvec=nvec)
xr2 = Xrange_array.zeros(xr.shape, dtype)
for i in range(nvec):
val_tuple = (xr._mantissa[i], xr._exp[i])
numba_test_setitem(xr2, i, val_tuple)
_matching(xr2, std)
def test_ldexp(self):
dtype = np.float64
nvec = 5000
xr, std = generate_random_xr(dtype, nvec=nvec, max_bin_exp=200)
exp = np.asarray(xr["exp"])
out = np.empty(std.shape, dtype)
numba_test_ldexp(std, exp, out)
np.testing.assert_array_equal(out, np.ldexp(std, exp))
numba_test_ldexp(std, -exp, out)
np.testing.assert_array_equal(out, np.ldexp(std, -exp))
def test_frexp(self):
dtype = np.float64
nvec = 5000
xr, std = generate_random_xr(dtype, nvec=nvec, max_bin_exp=200)
outm = np.empty(std.shape, dtype)
outexp = np.empty(std.shape, np.int32)
numba_test_frexp(std, outm, outexp)
np.testing.assert_array_equal(std, outm * (2. ** outexp))
def test_normalize(self):
for dtype in (np.float64, np.complex128): #, np.complex128]: # np.complex64 np.float32
with self.subTest(dtype=dtype):
nvec = 5000
xr, std = generate_random_xr(dtype, nvec=nvec, max_bin_exp=200)
# exp = np.asarray(xr["exp"])
outm = np.empty(xr.shape, dtype)
outexp = np.empty(xr.shape, np.int32)
numba_test_normalize(xr["mantissa"], xr["exp"], outm, outexp)
np.testing.assert_array_equal(std, outm * (2. ** outexp))
# print("outm", outm)
def test_add(self):
for (dtypea, dtypeb) in crossed_dtypes:# [(np.float64, np.float64), np.complex128]: #, np.complex128]: # np.complex64 np.float32
with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
nvec = 10000
xa, stda = generate_random_xr(dtypea, nvec=nvec)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=800)
res = Xrange_array.empty(xa.shape,
dtype=np.result_type(dtypea, dtypeb))
expected = stda + stdb
print("res", res.dtype, type(res))
numba_test_add(xa, xb, res)
# Numba timing without compilation
t_numba = - time.time()
numba_test_add(xa, xb, res)
t_numba += time.time()
# numpy timing
t_np = - time.time()
res_np = xa + xb
t_np += time.time()
_matching(res, expected)
_matching(res_np, expected)
print("t_numba", t_numba)
print("t_numpy", t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
# Test add a scalar
numba_test_add(xa, stdb, res)
_matching(res, expected)
numba_test_add(stda, xb, res)
_matching(res, expected)
# def test_iadd(self):
# for (dtypea, dtypeb) in [(np.float64, np.float64)]:# crossed_dtypes:# [(np.float64, np.float64), np.complex128]: #, np.complex128]: # np.complex64 np.float32
# with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
# nvec = 10000
# xa, stda = generate_random_xr(dtypea, nvec=nvec)
# xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=800)
# res = Xrange_array.empty(xa.shape,
# dtype=np.result_type(dtypea, dtypeb))
# expected = stda + stdb
#
# print("res", res.dtype, type(res))
# # Numba timing without compilation
# numba_test_add(xa, xb, res)
# t_numba = - time.time()
# numba_test_add(xa, xb, res)
# t_numba += time.time()
# # Numba iadd timing without compilation
# numba_test_iadd(xa, xb)
# t_numba_iadd = - time.time()
# numba_test_iadd(xa, xb)
# t_numba_iadd += time.time()
# res_iadd = xa.view(Xrange_array)
#
# _matching(res, expected)
# _matching(res_iadd, expected)
#
# print("t_numba", t_numba)
# print("t_numba iadd", t_numba_iadd)
# expr = (t_numba_iadd < t_numba)
# self.assertTrue(expr, msg="iadd longer than regular add")
# # Test add a scalar
# numba_test_add(xa, stdb, res)
# _matching(res, expected)
# numba_test_add(stda, xb, res)
# _matching(res, expected)
def test_sub(self):
for (dtypea, dtypeb) in crossed_dtypes: #, np.complex128]: # np.complex64 np.float32
with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
nvec = 10000
xa, stda = generate_random_xr(dtypea, nvec=nvec)# , max_bin_exp=250)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=800)
res = Xrange_array.empty(xa.shape,
dtype=np.result_type(dtypea, dtypeb))
expected = stda - stdb
numba_test_sub(xa, xb, res)
# Numba timing without compilation
t_numba = - time.time()
numba_test_sub(xa, xb, res)
t_numba += time.time()
# numpy timing
t_np = - time.time()
res_np = xa - xb
t_np += time.time()
_matching(res, expected)
_matching(res_np, expected)
print("t_numba", t_numba)
print("t_numpy", t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
# Test substract a scalar
numba_test_sub(xa, stdb, res)
_matching(res, expected)
numba_test_sub(stda, xb, res)
_matching(res, expected)
def test_mul(self):
for (dtypea, dtypeb) in crossed_dtypes: #, np.complex128]: # np.complex64 np.float32
with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
nvec = 100000
xa, stda = generate_random_xr(dtypea, nvec=nvec,
max_bin_exp=75)
# Adjust the mantissa to be sure to trigger a renorm for some
# (around 30 %) cases
xa = np.asarray(xa)
exp = np.copy(xa["exp"])
xa["mantissa"] *= 2.**(2 * exp)
xa["exp"] = -exp
xa = xa.view(Xrange_array)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=7800)
res = Xrange_array.empty(xa.shape,
dtype=np.result_type(dtypea, dtypeb))
expected = stda * stdb
numba_test_mul(xa, xb, res)
# Numba timing without compilation
t_numba = - time.time()
numba_test_mul(xa, xb, res)
t_numba += time.time()
# numpy timing
t_np = - time.time()
res_np = xa * xb
t_np += time.time()
_matching(res, expected)
_matching(res_np, expected)
print("t_numba, numpy", t_numba, t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
# Test multiply by a scalar
numba_test_mul(xa, stdb, res)
_matching(res, expected)
numba_test_mul(stda, xb, res)
_matching(res, expected)
def test_div(self):
for (dtypea, dtypeb) in crossed_dtypes: #, np.complex128]: # np.complex64 np.float32
with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
nvec = 100000
xa, stda = generate_random_xr(dtypea, nvec=nvec,
max_bin_exp=75)
# Adjust the mantissa to be sure to trigger a renorm for some
# (around 30 %) cases
xa = np.asarray(xa)
exp = np.copy(xa["exp"])
xa["mantissa"] *= 2.**(2 * exp)
xa["exp"] = -exp
xa = xa.view(Xrange_array)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=7800)
res = Xrange_array.empty(xa.shape,
dtype=np.result_type(dtypea, dtypeb))
expected = stda / stdb
numba_test_div(xa, xb, res)
# Numba timing without compilation
t_numba = - time.time()
numba_test_div(xa, xb, res)
t_numba += time.time()
# numpy timing
t_np = - time.time()
res_np = xa / xb
t_np += time.time()
_matching(res, expected, almost=True, dtype=np.float64,
ktol=2.)
_matching(res_np, expected, almost=True, dtype=np.float64,
ktol=2.)
print("t_numba", t_numba)
print("t_numpy", t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
# Test divide by a scalar
numba_test_div(xa, stdb, res)
_matching(res, expected, almost=True, dtype=np.float64,
ktol=2.)
numba_test_div(stda, xb, res)
_matching(res, expected, almost=True, dtype=np.float64,
ktol=2.)
def test_compare(self):
for (dtypea, dtypeb) in crossed_dtypes: #, np.complex128]: # np.complex64 np.float32
for compare_operator in (
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.ge,
operator.gt
):
with self.subTest(dtypea=dtypea, dtypeb=dtypeb,
operator=compare_operator):
# Only equality test with complex...
if not(compare_operator in (operator.ne, operator.eq)):
if ((dtypea == np.complex128)
or (dtypeb == np.complex128)):
continue
print(compare_operator, dtypea, dtypeb)
nvec = 10000
xa, stda = generate_random_xr(
dtypea, nvec=nvec, max_bin_exp=75)
xb, stdb = generate_random_xr(
dtypeb, nvec=nvec, max_bin_exp=75)
# Modify to allow precise
if (dtypea == np.complex128) and (dtypeb == np.float64):
stdb[:3000] = stda[:3000].real
xb[:3000] = xa[:3000].real
else:
stdb[:3000] = stda[:3000]
xb[:3000] = xa[:3000]
xb[1000:2000] *= (1. + np.finfo(dtypeb).eps)
xb[2000:3000] *= (1. - np.finfo(dtypeb).eps)
stdb[1000:2000] *= (1. + np.finfo(dtypeb).eps)
stdb[2000:3000] *= (1. - np.finfo(dtypeb).eps)
expected = compare_operator(stda, stdb)
res = np.empty_like(expected)
t_np = - time.time()
res_np = compare_operator(xa, xb)
t_np += time.time()
@numba.njit
def numba_cmp(xa, xb, out):
n, = xa.shape
for i in range(n):
out[i] = compare_operator(xa[i], xb[i])
numba_cmp(xa, xb, res)
t_numba = - time.time()
numba_cmp(xa, xb, res)
t_numba += time.time()
np.testing.assert_array_equal(res_np, expected)
np.testing.assert_array_equal(res, expected)
print("t_numba", t_numba)
print("t_numpy", t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
# Test compare with a scalar
numba_cmp(xa, stdb, res)
np.testing.assert_array_equal(res, expected)
numba_cmp(stda, xb, res)
np.testing.assert_array_equal(res, expected)
def test_sqrt(self):
for dtype in (np.float64, np.complex128): # np.complex64 np.float32
with self.subTest(dtype=dtype):
nvec = 10000
xa, stda = generate_random_xr(dtype, nvec=nvec, max_bin_exp=75)
# sqrt not defined for negative reals
if dtype == np.float64:
xa = np.abs(xa)
stda = np.abs(stda)
# Adjust the mantissa to be sure to trigger a renorm for some
# (around 30 %) cases
xa = np.asarray(xa)
exp = np.copy(xa["exp"])
xa["mantissa"] *= 2.**(2 * exp)
xa["exp"] = -exp
xa = xa.view(Xrange_array)
res = Xrange_array.empty(xa.shape, dtype=dtype)
expected = np.sqrt(stda)
numba_test_sqrt(xa, res)
# Numba timing without compilation
t_numba = - time.time()
numba_test_sqrt(xa, res)
t_numba += time.time()
# numpy timing
t_np = - time.time()
res_np = np.sqrt(xa)
t_np += time.time()
_matching(res, expected, almost=True, dtype=np.float64,
ktol=2.)
_matching(res_np, expected, almost=True, dtype=np.float64,
ktol=2.)
print("t_numba", t_numba)
print("t_numpy", t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
def test_abs(self):
for dtype in (np.float64, np.complex128): # np.complex64 np.float32
with self.subTest(dtype=dtype):
nvec = 10000
xa, stda = generate_random_xr(dtype, nvec=nvec, max_bin_exp=75)
# Adjust the mantissa to be sure to trigger a renorm for some
# (around 30 %) cases
xa = np.asarray(xa)
exp = np.copy(xa["exp"])
xa["mantissa"] *= 2.**(2 * exp)
xa["exp"] = -exp
xa = xa.view(Xrange_array)
res = Xrange_array.empty(xa.shape, dtype=dtype)
expected = np.abs(stda)
numba_test_abs(xa, res)
# Numba timing without compilation
t_numba = - time.time()
numba_test_abs(xa, res)
t_numba += time.time()
# numpy timing
t_np = - time.time()
res_np = np.abs(xa)
t_np += time.time()
_matching(res, expected, almost=True, dtype=np.float64,
ktol=4.)
_matching(res_np, expected, almost=True, dtype=np.float64,
ktol=4.)
print("t_numba", t_numba)
print("t_numpy", t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
def test_abs2(self):
for dtype in (np.float64, np.complex128): # np.complex64 np.float32
with self.subTest(dtype=dtype):
nvec = 10000
xa, stda = generate_random_xr(dtype, nvec=nvec, max_bin_exp=75)
# Adjust the mantissa to be sure to trigger a renorm for some
# (around 30 %) cases
xa = np.asarray(xa)
exp = np.copy(xa["exp"])
xa["mantissa"] *= 2.**(2 * exp)
xa["exp"] = -exp
xa = xa.view(Xrange_array)
res = Xrange_array.empty(xa.shape, dtype=dtype)
expected = np.abs(stda) ** 2
numba_test_abs(xa, res)
# Numba timing without compilation
t_numba = - time.time()
numba_test_abs2(xa, res)
t_numba += time.time()
_matching(res, expected, almost=True, dtype=np.float64,
ktol=4.)
def test_expr(self):
for (dtypea, dtypeb) in crossed_dtypes: #, np.complex128]: # np.complex64 np.float32
dtype_res = np.result_type(dtypea, dtypeb)
nvec = 10000
xa, stda = generate_random_xr(dtypea, nvec=nvec)# , max_bin_exp=250)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=800)
res = Xrange_array.empty(xa.shape, dtype=dtype_res)
def get_numba_expr(case):
if case == 0:
def numba_expr(xa, xb, out):
n, = xa.shape
for i in range(n):
out[i] = xa[i] * xb[i] * xa[i]
elif case == 1:
def numba_expr(xa, xb, out):
n, = xa.shape
for i in range(n):
out[i] = xa[i] * xb[i] + xa[i] - 7.8
elif case == 2:
def numba_expr(xa, xb, out):
n, = xa.shape
for i in range(n):
out[i] = (xb[i] * 2.) * (xa[i] + xa[i] * xb[i]) + (xa[i] * xb[i] - 7.8 + xb[i])
elif case == 3:
def numba_expr(xa, xb, out):
n, = xa.shape
for i in range(n):
out[i] = ((xb[i] * 2.) * (xa[i] + np.abs(xa[i] * xb[i]) + 1.)
+ (xa[i] * np.sqrt(np.abs(xb[i]) + 7.8) + xb[i]))
else:
raise ValueError(case)
return numba.njit(numba_expr)
def get_std_expr(case):
if case == 0:
def std_expr(xa, xb):
return xa * xb * xa
elif case == 1:
def std_expr(xa, xb):
return xa * xb + xa - 7.8
elif case == 2:
def std_expr(xa, xb):
return (xb * 2.) * (xa + xa * xb) + (xa * xb - 7.8 + xb)
elif case == 3:
def std_expr(xa, xb):
return ((xb * 2.) * (xa + np.abs(xa * xb) + 1.)
+ (xa * np.sqrt(np.abs(xb) + 7.8) + xb))
else:
raise ValueError(case)
return std_expr
n_case = 4
for case in range(n_case):
with self.subTest(dtypea=dtypea, dtypeb=dtypeb, expr=case):
expected = get_std_expr(case)(stda, stdb)
# numpy timing
t_np = - time.time()
res_np = get_std_expr(case)(xa, xb)
t_np += time.time()
numba_expr = get_numba_expr(case)
numba_expr(xa, xb, res)
# Numba timing without compilation
t_numba = - time.time()
numba_expr(xa, xb, res)
t_numba += time.time()
_matching(res, expected, almost=True, dtype=np.float64,
ktol=2.)
_matching(res_np, expected, almost=True, dtype=np.float64,
ktol=2.)
print("t_numba", t_numba)
print("t_numpy", t_np, t_numba/t_np)
expr = (t_numba < t_np)
self.assertTrue(expr, msg="Numba speed below numpy")
@numba.njit
def numba_test_polyneg(poly):
return -poly
@numba.njit
def numba_test_polyadd(polya, polyb):
return polya + polyb
@numba.njit
def numba_test_polyadd_0(polya, polyb):
return polya + polyb[0]
@numba.njit
def numba_test_polyadd0(polya, polyb):
return polya[0] + polyb
@numba.njit
def numba_test_polycall(poly, val):
return poly.__call__(val)
@numba.njit
def numba_test_polycall0(poly, val):
return poly.__call__(val[0])
@numba.njit
def numba_test_polymul(polya, polyb):
return polya * polyb
@numba.njit
def numba_test_polymul_0(polya, polyb):
return polya * polyb[0]
@numba.njit
def numba_test_polymul0(polya, polyb):
return polya[0] * polyb
@numba.njit
def numba_test_expr(polya, polyb):
# print()
# p = polya * polyb
# q = p + polya
# return q
return polya * polyb + polya #- polyb
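# Illustrative only: a hypothetical direct use of the jitted polynomial helper above,
# outside the test class: multiply two short Xrange_polynomial instances and add back
# the first one.
def _demo_poly_expr():
    pa = Xrange_polynomial([1., 2., 3.], cutdeg=5)
    pb = Xrange_polynomial([4., 5.], cutdeg=5)
    return numba_test_expr(pa, pb).coeffs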
class Test_poly_xr(unittest.TestCase):
def test_neg(self):
for dtype in (np.float64, np.complex128):
with self.subTest(dtype=dtype):
nvec = 100
xa, stda = generate_random_xr(dtype, nvec=nvec)# , max_bin_exp=250)
_P = Xrange_polynomial(xa, cutdeg=nvec-1)
P = np.polynomial.Polynomial(stda)
res = numba_test_polyneg(_P)
expected = -P
_matching(res.coeffs, expected.coef)
# Check that the original array has not been modified
_matching(_P.coeffs, P.coef)
def test_add(self):
for (dtypea, dtypeb) in crossed_dtypes: #((np.float64, np.float64),):#crossed_dtypes:
with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
nvec = 100
xa, stda = generate_random_xr(dtypea, nvec=nvec)# , max_bin_exp=250)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=510)# , max_bin_exp=250)
_Pa = Xrange_polynomial(xa, cutdeg=nvec-1)
Pa = np.polynomial.Polynomial(stda)
_Pb = Xrange_polynomial(xb, cutdeg=nvec-1)
Pb = np.polynomial.Polynomial(stdb)
res = numba_test_polyadd(_Pa, _Pb)
expected = Pa + Pb
_matching(res.coeffs, expected.coef)
# Check that the original array has not been modified
_matching(_Pa.coeffs, Pa.coef)
_matching(_Pb.coeffs, Pb.coef)
for (dtypea, dtypeb) in crossed_dtypes: #((np.float64, np.float64),):#crossed_dtypes:
with self.subTest(dtypea=dtypea, dtypeb=dtypeb, kind="scalar"):
nvec = 100
xa, stda = generate_random_xr(dtypea, nvec=nvec)# , max_bin_exp=250)
xb, stdb = generate_random_xr(dtypeb, nvec=1, seed=5)# , max_bin_exp=250)
_Pa = Xrange_polynomial(xa, cutdeg=nvec-1)
Pa = np.polynomial.Polynomial(stda)
res = numba_test_polyadd_0(_Pa, xb)
expected = Pa + stdb[0]
_matching(res.coeffs, expected.coef)
res = numba_test_polyadd0(xb, _Pa)
_matching(res.coeffs, expected.coef)
def test_op_partial(self):
a = [1., 2., 5., 8.]
_Pa = Xrange_polynomial(a, 10)
Pa = np.polynomial.Polynomial(a)
b = [1., 2.]
_Pb = Xrange_polynomial(b, 10)
Pb = np.polynomial.Polynomial(b)
with self.subTest(op="+"):
res = [numba_test_polyadd(_Pa, _Pb),
numba_test_polyadd(_Pb, _Pa),
numba_test_polyadd(_Pa, _Pa)]
expected = [Pa + Pb , Pb + Pa, Pa + Pa]
for i in range(len(res)):
_matching(res[i].coeffs, expected[i].coef)
with self.subTest(op="*"):
res = [numba_test_polymul(_Pa, _Pb),
numba_test_polymul(_Pb, _Pa),
numba_test_polymul(_Pa, _Pa)]
expected = [Pa * Pb , Pb * Pa, Pa * Pa]
for i in range(len(res)):
_matching(res[i].coeffs, expected[i].coef)
def test_call(self):
for (dtypea, dtypeb) in crossed_dtypes:
with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
nvec = 100
xa, stda = generate_random_xr(dtypea, nvec=10 , max_bin_exp=3)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec , max_bin_exp=5, seed=510)
_Pa = Xrange_polynomial(xa, cutdeg=nvec-1)
Pa = np.polynomial.Polynomial(stda)
# Scalar call test
res = numba_test_polycall0(_Pa, xb).view(Xrange_array)
expected = Pa(stdb[0])
_matching(res, expected)
# Array call test
res = numba_test_polycall(_Pa, xb).view(Xrange_array)
expected = Pa(stdb)
_matching(res, expected)
def test_mul(self):
for (dtypea, dtypeb) in crossed_dtypes:
with self.subTest(dtypea=dtypea, dtypeb=dtypeb):
nvec = 100
xa, stda = generate_random_xr(dtypea, nvec=nvec, seed=110,
max_bin_exp=25)
xb, stdb = generate_random_xr(dtypeb, nvec=nvec, seed=510,
max_bin_exp=25)
_Pa = Xrange_polynomial(xa, cutdeg=nvec-1)
Pa = np.polynomial.Polynomial(stda)
_Pb = Xrange_polynomial(xb, cutdeg=nvec-1)
Pb = np.polynomial.Polynomial(stdb)
res = numba_test_polymul(_Pa, _Pb)
expected = Pa * Pb
_matching(res.coeffs, expected.coef[:nvec], almost=True,
dtype=np.float64, ktol=10.)
# Check that the original array has not been modified
_matching(_Pa.coeffs, Pa.coef)
_matching(_Pb.coeffs, Pb.coef)
for (dtypea, dtypeb) in crossed_dtypes: #((np.float64, np.float64),):#crossed_dtypes:
with self.subTest(dtypea=dtypea, dtypeb=dtypeb, kind="scalar"):
nvec = 100
xa, stda = generate_random_xr(dtypea, nvec=nvec)# , max_bin_exp=250)
xb, stdb = generate_random_xr(dtypeb, nvec=1, seed=510)# , max_bin_exp=250)
_Pa = Xrange_polynomial(xa, cutdeg=nvec-1)
Pa = | np.polynomial.Polynomial(stda) | numpy.polynomial.Polynomial |
import os
import sys
import time
import json
import numpy as np
import cv2
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')
from util import get_detection_from_file,draw,nms
from keras_retinanet import models
# from tensorflow import keras
graph = tf.get_default_graph()
class detectp:
def __init__(self):
with open('settings.json') as json_data_file:
json_data = json.load(json_data_file)
self.model2_path = json_data["MODEL_101"]
self.model2 = models.load_model(self.model2_path, backbone_name='resnet101', convert=True, nms=False)
def detect(self,fpath):
im = cv2.imread(fpath)
sz = 224
        # threshold for non-max-suppression for each model
nms_threshold = 0
# shrink bounding box dimensions by this factor, improves test set performance
shrink_factor = 0.17
# threshold for judging overlap of bounding boxes between different networks (for weighted average)
wt_overlap = 0
# threshold for including boxes from model 1
score_threshold1 = 0.04
# threshold for including boxes from model 2
score_threshold2 = 0.03
# threshold for including isolated boxes from either model
solo_min = 0.15
#boxes_pred1, scores1 = util.get_detection_from_file(fpath, model1, sz)
global graph
with graph.as_default():
boxes_pred2, scores2 = get_detection_from_file(fpath, self.model2, sz)
# indices1 = np.where(scores1 > score_threshold1)[0]
# scores1 = scores1[indices1]
# boxes_pred1 = boxes_pred1[indices1]
# boxes_pred1, scores1 = util.nms(boxes_pred1, scores1, nms_threshold)
indices2 = | np.where(scores2 > score_threshold2) | numpy.where |
"""
Test DOE Driver and Generators.
"""
import unittest
import os
import os.path
import glob
import csv
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.paraboloid_distributed import DistParab
from openmdao.test_suite.groups.parallel_groups import FanInGrouped
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.general_utils import run_driver, printoptions
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class ParaboloidArray(om.ExplicitComponent):
"""
Evaluates the equation f(x,y) = (x-3)^2 + x*y + (y+4)^2 - 3.
Where x and y are xy[0] and xy[1] respectively.
"""
def setup(self):
self.add_input('xy', val=np.array([0., 0.]))
self.add_output('f_xy', val=0.0)
def compute(self, inputs, outputs):
"""
f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
"""
x = inputs['xy'][0]
y = inputs['xy'][1]
outputs['f_xy'] = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
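# Illustrative only: a hypothetical standalone check of ParaboloidArray outside the
# DOE tests below; it relies on OpenMDAO's auto-IVC to set the promoted 'xy' input.
def _demo_paraboloid_array():
    prob = om.Problem()
    prob.model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
    prob.setup()
    prob.set_val('xy', np.array([3.0, -4.0]))
    prob.run_model()
    return prob.get_val('f_xy')  # (3-3)^2 + 3*(-4) + (-4+4)^2 - 3 = -15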
class ParaboloidDiscrete(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=10, tags='xx')
self.add_discrete_input('y', val=0, tags='yy')
self.add_discrete_output('f_xy', val=0, tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
discrete_outputs['f_xy'] = int(f_xy)
class ParaboloidDiscreteArray(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=np.ones((2, )), tags='xx')
self.add_discrete_input('y', val=np.ones((2, )), tags='yy')
self.add_discrete_output('f_xy', val=np.ones((2, )), tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
        discrete_outputs['f_xy'] = f_xy.astype(int)
class TestErrors(unittest.TestCase):
def test_generator_check(self):
prob = om.Problem()
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.FullFactorialGenerator)
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but a class object was found: FullFactorialGenerator")
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.Problem())
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but an instance of Problem was found.")
def test_lhc_criterion(self):
with self.assertRaises(ValueError) as err:
om.LatinHypercubeGenerator(criterion='foo')
self.assertEqual(str(err.exception),
"Invalid criterion 'foo' specified for LatinHypercubeGenerator. "
"Must be one of ['center', 'c', 'maximin', 'm', 'centermaximin', "
"'cm', 'correlation', 'corr', None].")
@use_tempdirs
class TestDOEDriver(unittest.TestCase):
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
def test_no_generator(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 0)
def test_list(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# create DOEDriver using provided list of cases
prob.driver = om.DOEDriver(cases)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_list_errors(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# data does not contain a list
cases = {'desvar': 1.0}
with self.assertRaises(RuntimeError) as err:
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
self.assertEqual(str(err.exception), "Invalid DOE case data, "
"expected a list but got a dict.")
# data contains a list of non-list
cases = [{'desvar': 1.0}]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n{'desvar': 1.0}")
# data contains a list of list, but one has the wrong length
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.y', 1., 'foo']]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n"
"[['p1.x', 1.0], ['p2.y', 1.0, 'foo']]")
# data contains a list of list, but one case has an invalid design var
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"'p2.z' is not a valid design variable:\n"
"[['p1.x', 1.0], ['p2.z', 1.0]]")
# data contains a list of list, but one case has multiple invalid design vars
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.y', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"['p1.y', 'p2.z'] are not valid design variables:\n"
"[['p1.y', 1.0], ['p2.z', 1.0]]")
def test_csv(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for (var, val) in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_csv_array(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', [0., 1.]))
model.add_subsystem('p2', om.IndepVarComp('y', [0., 1.]))
model.add_subsystem('comp1', Paraboloid())
model.add_subsystem('comp2', Paraboloid())
model.connect('p1.x', 'comp1.x', src_indices=[0])
model.connect('p2.y', 'comp1.y', src_indices=[0])
model.connect('p1.x', 'comp2.x', src_indices=[1])
model.connect('p2.y', 'comp2.y', src_indices=[1])
model.add_design_var('p1.x', lower=0.0, upper=1.0)
model.add_design_var('p2.y', lower=0.0, upper=1.0)
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = [
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 1.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 16)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['p1.x'][0], expected_case['p1.x'][0])
self.assertEqual(outputs['p2.y'][0], expected_case['p2.y'][0])
self.assertEqual(outputs['p1.x'][1], expected_case['p1.x'][1])
self.assertEqual(outputs['p2.y'][1], expected_case['p2.y'][1])
def test_csv_errors(self):
# test invalid file name
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator(1.23)
self.assertEqual(str(err.exception),
"'1.23' is not a valid file name.")
# test file not found
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator('nocases.csv')
self.assertEqual(str(err.exception),
"File not found: nocases.csv")
# create problem and a list of DOE cases
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# test CSV file with an invalid design var
header = [var for var, _ in cases[0]]
header[-1] = 'foobar'
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"'foobar' is not a valid design variable.")
# test CSV file with invalid design vars
header = [var + '_bad' for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"%s are not valid design variables." %
str(header))
# test CSV file with invalid values
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([np.ones((2, 2)) * val for _, val in case])
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= LooseVersion("1.14"):
opts = {'legacy': '1.13'}
else:
opts = {}
with printoptions(**opts):
# have to use regex to handle differences in numpy print formats for shape
msg = f"Error assigning p1.x = \[ 0. 0. 0. 0.\]: could not broadcast " \
f"input array from shape \(4.*\) into shape \(1.*\)"
with self.assertRaisesRegex(ValueError, msg):
prob.run_driver()
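# --- Illustrative sketch (standalone, not part of the test class above;
# the file name 'my_cases.csv' is arbitrary) ---
# The file consumed by om.CSVGenerator is plain CSV: a header row naming the
# design variables, then one row of values per case, exactly what the
# csv.writer loops above produce.
import csv

rows = [['x', 'y'],   # header: design variable names
        [0.0, 0.0],   # one case per row
        [0.5, 1.0],
        [1.0, 0.5]]
with open('my_cases.csv', 'w', newline='') as f:
    csv.writer(f).writerows(rows)
# The file can then be passed to a driver via om.DOEDriver(om.CSVGenerator('my_cases.csv')).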
def test_uniform(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=5, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# all values should be between -10 and 10, check expected values for seed = 0
expected = [
{'x': np.array([0.97627008]), 'y': np.array([4.30378733])},
{'x': np.array([2.05526752]), 'y': np.array([0.89766366])},
{'x': np.array([-1.52690401]), 'y': np.array([2.91788226])},
{'x': np.array([-1.24825577]), 'y': np.array([7.83546002])},
{'x': np.array([9.27325521]), 'y': np.array([-2.33116962])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 5)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y'):
assert_near_equal(outputs[name], expected_case[name], 1e-4)
def test_full_factorial(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_full_factorial_factoring(self):
class Digits2Num(om.ExplicitComponent):
"""
Makes a four-digit number from two 2-element vectors.
For single-digit integers this always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 16)
# Testing uniqueness. If all elements are unique, it should be the same length as the
# number of cases
self.assertEqual(len(set(objs)), 16)
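# --- Illustrative sketch (assuming pyDOE2 is installed; the factorial
# generators used here build on it) ---
# With levels=2 and two 2-element design vectors there are 4 scalar factors,
# so the full factorial contains 2**4 = 16 cases, which is what the two
# assertions above check.
from pyDOE2 import fullfact

design = fullfact([2, 2, 2, 2])   # one column per scalar factor, level indices 0/1
print(design.shape)               # (16, 4)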
def test_full_factorial_array(self):
prob = om.Problem()
model = prob.model
model.set_input_defaults('xy', np.array([0., 0.]))
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=np.array([-10., -50.]), upper=np.array([10., 50.]))
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'xy': np.array([-10., -50.])},
{'xy': np.array([0., -50.])},
{'xy': np.array([10., -50.])},
{'xy': np.array([-10., 0.])},
{'xy': np.array([0., 0.])},
{'xy': np.array([10., 0.])},
{'xy': np.array([-10., 50.])},
{'xy': np.array([0., 50.])},
{'xy': np.array([10., 50.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['xy'][0], expected_case['xy'][0])
self.assertEqual(outputs['xy'][1], expected_case['xy'][1])
def test_full_fact_dict_levels(self):
# Specifying levels only for one DV, the other is defaulted
prob = om.Problem()
model = prob.model
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
# size = prob.comm.size
# rank = prob.comm.rank
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels={"y": 3}))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 6)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_case['x'])
self.assertEqual(outputs['y'], expected_case['y'])
self.assertEqual(outputs['f_xy'], expected_case['f_xy'])
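# Note on the dict form of `levels` used above: only 'y' is given 3 levels,
# while any design variable not named in the dict keeps the default of 2,
# so the design has 2 (x) * 3 (y) = 6 cases, matching the assertion.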
def test_generalized_subset(self):
# All DVs have the same number of levels
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels=2, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.0]), 'y': np.array([0.0]), 'f_xy': np.array([22.0])},
{'x': np.array([1.0]), 'y': np.array([1.0]), 'f_xy': np.array([27.0])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 2)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_generalized_subset_dict_levels(self):
# Number of variables specified individually for all DVs (scalars).
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 3, 'y': 6}, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.])},
{'x': np.array([0.]), 'y': np.array([0.4]), 'f_xy': np.array([25.36])},
{'x': np.array([0.]), 'y': np.array([0.8]), 'f_xy': np.array([29.04])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.])},
{'x': np.array([1.]), 'y': np.array([0.4]), 'f_xy': np.array([20.76])},
{'x': np.array([1.]), 'y': np.array([0.8]), 'f_xy': np.array([24.84])},
{'x': np.array([0.5]), 'y': np.array([0.2]), 'f_xy': np.array([20.99])},
{'x': np.array([0.5]), 'y': np.array([0.6]), 'f_xy': np.array([24.71])},
{'x': np.array([0.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertAlmostEqual(outputs[name][0], expected_case[name][0])
def test_generalized_subset_array(self):
# Number of levels specified individually for all DVs (arrays).
class Digits2Num(om.ExplicitComponent):
"""
Makes a four-digit number from two 2-element vectors.
For single-digit integers this always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 5, 'y': 8}, reduction=14))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 104) # The number can be verified with standalone pyDOE2
# Testing uniqueness. If all elements are unique, it should be the same length as the number of cases
self.assertEqual(len(set(objs)), 104)
def test_plackett_burman(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.PlackettBurmanGenerator())
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
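# --- Illustrative sketch (assuming pyDOE2 is installed) ---
# A Plackett-Burman design only visits the low/high value of each factor; for
# two factors it needs 4 runs, encoded as -1/+1 per factor before being mapped
# onto the design-variable bounds, hence the 4 corner cases expected above.
from pyDOE2 import pbdesign

print(pbdesign(2))   # 4 rows of -1/+1, one column per factor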
def test_box_behnken(self):
upper = 10.
center = 1
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp(), promotes=['*'])
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
indep.add_output('z', 0.0)
model.add_subsystem('comp', om.ExecComp('a = x**2 + y - z'), promotes=['*'])
model.add_design_var('x', lower=0., upper=upper)
model.add_design_var('y', lower=0., upper=upper)
model.add_design_var('z', lower=0., upper=upper)
model.add_objective('a')
prob.driver = om.DOEDriver(om.BoxBehnkenGenerator(center=center))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
# The Box-Behnken design for 3 factors involves three blocks, in each of
# which 2 factors are varied through the 4 possible combinations of high & low.
# It also includes centre points (all factors at their central values).
# ref: https://en.wikipedia.org/wiki/Box-Behnken_design
self.assertEqual(len(cases), (3*4)+center)
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([5.]), 'z': np.array([5.])},
]
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'z'):
self.assertEqual(outputs[name], expected_case[name])
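# --- Illustrative sketch (assuming pyDOE2 is installed) ---
# The 3-factor Box-Behnken design is 3 blocks of 4 edge-midpoint runs plus the
# requested center points, i.e. 12 + center runs as asserted above; pyDOE2
# encodes each run as -1/0/+1 per factor.
from pyDOE2 import bbdesign

design = bbdesign(3, center=1)
print(design.shape)   # (13, 3)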
def test_latin_hypercube(self):
samples = 4
bounds = np.array([
[-1, -10], # lower bounds for x and y
[1, 10] # upper bounds for x and y
])
xlb, xub = bounds[0][0], bounds[1][0]
ylb, yub = bounds[0][1], bounds[1][1]
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=xlb, upper=xub)
model.add_design_var('y', lower=ylb, upper=yub)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.options['generator'] = om.LatinHypercubeGenerator(samples=4, seed=0)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
all_buckets = set(range(samples))
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'x': np.array([-0.19861831]), 'y': np.array([-6.42405317])},
{'x': np.array([0.2118274]), 'y': np.array([9.458865])},
{'x': np.array([0.71879361]), 'y': np.array([3.22947057])},
{'x': np.array([-0.72559325]), 'y': np.array([-2.27558409])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['x']
y = outputs['y']
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['x'], 1e-4)
assert_near_equal(y, expected_case['y'], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
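# --- Illustrative sketch (assuming pyDOE2 is installed) ---
# The "one value per bucket" property checked above is the defining feature of
# Latin hypercube sampling: each variable's [0, 1) range is split into
# `samples` equal bins and exactly one sample lands in each bin.
import numpy as np
from pyDOE2 import lhs

samples = 4
design = lhs(2, samples=samples)        # shape (4, 2), values in [0, 1)
bins = (design * samples).astype(int)   # bin index per sample and variable
for col in range(2):
    assert set(bins[:, col]) == set(range(samples))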
def test_latin_hypercube_array(self):
samples = 4
bounds = np.array([
[-10, -50], # lower bounds for x and y
[10, 50] # upper bounds for x and y
])
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('xy', np.array([50., 50.])), promotes=['*'])
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=bounds[0], upper=bounds[1])
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=4, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
all_buckets = set(range(samples))
xlb, xub = bounds[0][0], bounds[1][0]
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
ylb, yub = bounds[0][1], bounds[1][1]
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'xy': np.array([-1.98618312, -32.12026584])},
{'xy': np.array([2.118274, 47.29432502])},
{'xy': np.array([7.18793606, 16.14735283])},
{'xy': np.array([-7.25593248, -11.37792043])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['xy'][0]
y = outputs['xy'][1]
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['xy'][0], 1e-4)
assert_near_equal(y, expected_case['xy'][1], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_center(self):
samples = 4
upper = 10.
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp())
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
model.add_subsystem('comp', Paraboloid())
model.connect('indep.x', 'comp.x')
model.connect('indep.y', 'comp.y')
model.add_design_var('indep.x', lower=0., upper=upper)
model.add_design_var('indep.y', lower=0., upper=upper)
model.add_objective('comp.f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=samples, criterion='c'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), samples)
# the sample space for each variable (0 to upper) should be divided into
# equal size buckets and each variable should have a value in each bucket
bucket_size = upper / samples
all_buckets = set(range(samples))
x_buckets_filled = set()
y_buckets_filled = set()
# with criterion of 'center', each value should be in the center of its bucket
valid_values = [round(bucket_size * (bucket + 1 / 2), 3) for bucket in all_buckets]
for case in cases:
outputs = cr.get_case(case).outputs
x = float(outputs['indep.x'])
y = float(outputs['indep.y'])
x_buckets_filled.add(int(x/bucket_size))
y_buckets_filled.add(int(y/bucket_size))
self.assertTrue(round(x, 3) in valid_values, '%f not in %s' % (x, valid_values))
self.assertTrue(round(y, 3) in valid_values, '%f not in %s' % (y, valid_values))
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_record_bug(self):
# There was a bug that caused values to be recorded in driver_scaled form.
prob = om.Problem()
model = prob.model
ivc = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
ivc.add_output('x', val=1.)
model.add_subsystem('obj_comp', om.ExecComp('y=2*x'), promotes=['*'])
model.add_subsystem('con_comp', om.ExecComp('z=3*x'), promotes=['*'])
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.recording_options['includes'] = ['*']
model.add_design_var('x', lower=0., upper=10., ref=3.0)
model.add_constraint('z', lower=2.0, scaler=13.0)
model.add_objective('y', scaler=-1)
prob.setup(check=True)
prob.run_driver()
cr = om.CaseReader("cases.sql")
final_case = cr.list_cases('driver', out_stream=None)[-1]
outputs = cr.get_case(final_case).outputs
assert_near_equal(outputs['x'], 10.0, 1e-7)
assert_near_equal(outputs['y'], 20.0, 1e-7)
assert_near_equal(outputs['z'], 30.0, 1e-7)
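# Note on the values checked above: with ref=3.0 the driver works with x/3
# (about 3.33 at the last case), scaler=13 turns z=30 into 390 and scaler=-1
# turns y=20 into -20 on the driver side, but the recorder must store the
# unscaled model values (10, 20, 30), which is what the bug had broken.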
def test_discrete_desvar_list(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', 5), ('y', 1)],
[('x', 3), ('y', 6)],
[('x', -1), ('y', 3)],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_discrete_desvar_alltypes(self):
# Make sure we can handle any allowed type for discrete variables.
class PassThrough(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val='abc')
self.add_discrete_output('y', val='xyz')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
discrete_outputs['y'] = discrete_inputs['x']
prob = om.Problem()
model = prob.model
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 'abc')
model.add_subsystem('parab', PassThrough(), promotes=['*'])
model.add_design_var('x')
model.add_constraint('y')
my_obj = Paraboloid()
samples = [[('x', 'abc'), ],
[('x', None), ],
[('x', my_obj, ), ]
]
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = ['abc', None]
for case, expected_value in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_value)
# Can't read/write objects through SQL case.
self.assertEqual(prob['y'], my_obj)
def test_discrete_array_output(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', np.ones((2, ), dtype=int))
indeps.add_discrete_output('y', np.ones((2, ), dtype=int))
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x', np.array([5, 1]))
model.add_design_var('y', np.array([1, 4]))
model.add_objective('f_xy')
recorder = om.SqliteRecorder("cases.sql")
prob.driver.add_recorder(recorder)
prob.add_recorder(recorder)
prob.recording_options['record_inputs'] = True
prob.setup()
prob.run_driver()
prob.record("end")
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('problem', out_stream=None)
case = cr.get_case('end')
inputs = case.inputs
outputs = case.outputs
for name in ('x', 'y'):
self.assertTrue(isinstance(inputs[name], np.ndarray))
self.assertEqual(inputs[name].shape, (2,))
self.assertTrue(isinstance(outputs[name], np.ndarray))
self.assertEqual(outputs[name].shape, (2,))
def test_discrete_arraydesvar_list(self):
prob = om.Problem()
model = prob.model
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', np.array([5, 1])), ('y', np.array([1, 4]))],
[('x', np.array([3, 2])), ('y', np.array([6, -3]))],
[('x', np.array([-1, 0])), ('y', np.array([3, 5]))],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.set_val('x', np.ones((2, ), dtype=int))
prob.set_val('y', np.ones((2, ), dtype=int))
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': np.array([5, 1]), 'y': np.array([1, 4]), 'f_xy': np.array([31, 69])},
{'x': np.array([3, 2]), 'y': np.array([6, -3]), 'f_xy': np.array([115, -7])},
{'x': np.array([-1, 0]), 'y': np.array([3, 5]), 'f_xy': np.array([59, 87])},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name][0], expected_case[name][0])
self.assertEqual(outputs[name][1], expected_case[name][1])
def test_discrete_desvar_csv(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = '\n'.join([" x , y",
"5, 1",
"3, 6",
"-1, 3",
])
# this file contains design variable inputs in CSV format
with open('cases.csv', 'w') as f:
f.write(samples)
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_desvar_indices(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x**2',
x=np.array([1., 2., 3.]),
y=np.zeros(3)), promotes=['*'])
prob.model.add_design_var('x', lower=7.0, upper=11.0, indices=[0])
prob.model.add_objective('y', index=0)
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.setup()
prob.run_driver()
# Last value in fullfactorial DOE is 11, which gives 121.
assert_near_equal(prob.get_val('y'), np.array([121., 4., 9.]))
def test_multidimensional_inputs(self):
# Create a subsystem with multidimensional array inputs
matmul_comp = om.ExecComp('z = matmul(x,y)',
x=np.ones((3, 3)),
y=np.ones((3, 3)),
z=np.ones((3, 3)))
# Single execution test
prob = om.Problem()
prob.model.add_subsystem('matmul', matmul_comp, promotes=['*'])
prob.setup()
prob['x'] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
prob['y'] = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
prob.run_model()
# DOE test
prob2 = om.Problem()
prob2.model.add_subsystem('matmul', matmul_comp, promotes=['*'])
prob2.model.add_design_var('x')
prob2.model.add_design_var('y')
prob2.model.add_objective('z')
prob2.setup()
case_list = [
[('x', prob['x']), ('y', prob['y'])]
]
prob2.driver = om.DOEDriver(case_list)
prob2.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob2.run_driver()
prob2.cleanup()
cr = om.CaseReader("cases.sql")
outputs = cr.get_case(0).outputs
for name in ('x', 'y', 'z'):
assert_near_equal(outputs[name], prob[name])
def test_multi_constraint_doe(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x**2 + b',
x=np.array([1., 2., 3.]),
b=np.array([1., 2., 3.]),
y=np.zeros(3)), promotes=['*'])
prob.model.add_design_var('x', lower=7.0, upper=11.0, indices=[0])
prob.model.add_constraint('b', lower=7., indices=[0])
prob.model.add_constraint('b', upper=11., indices=[-1], alias='TEST')
prob.model.add_objective('y', index=0)
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
for case in cases:
outputs = cr.get_case(case).outputs
assert_near_equal(outputs['b'], np.array([1., 2, 3]))
@use_tempdirs
class TestDOEDriverListVars(unittest.TestCase):
def test_list_problem_vars(self):
# this passes if no exception is raised
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', 5), ('y', 1)],
[('x', 3), ('y', 6)],
[('x', -1), ('y', 3)],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.setup(derivatives=False)
prob.run_driver()
prob.cleanup()
prob.list_problem_vars()
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
@use_tempdirs
class TestParallelDOE4Proc(unittest.TestCase):
N_PROCS = 4
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
def test_indivisible_error(self):
prob = om.Problem()
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 3
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"The total number of processors is not evenly divisible by the "
"specified number of processors per model.\n Provide a number of "
"processors that is a multiple of 3, or specify a number "
"of processors per model that divides into 4.")
def test_minprocs_error(self):
prob = om.Problem(FanInGrouped())
# require 2 procs for the ParallelGroup
prob.model._proc_info['sub'] = (2, None, 1.0)
# run cases on all procs
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 1
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"<model> <class FanInGrouped>: MPI process allocation failed: can't meet "
"min_procs required for the following subsystems: ['sub']")
def test_full_factorial(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3), procs_per_model=1,
run_parallel=True)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = self.expected_fullfact3
size = prob.comm.size
rank = prob.comm.rank
# cases will be split across files for each proc
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // size + (rank < len(expected) % size))
for n in range(num_cases):
outputs = cr.get_case(cases[n]).outputs
idx = n * size + rank # index of expected case
self.assertEqual(outputs['x'], expected[idx]['x'])
self.assertEqual(outputs['y'], expected[idx]['y'])
self.assertEqual(outputs['f_xy'], expected[idx]['f_xy'])
# total number of cases recorded across all procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
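# Note on the bookkeeping above: with run_parallel=True and procs_per_model=1
# the 9 cases are dealt out round-robin, so rank r runs cases r, r+4, r+8
# (3 cases on rank 0, 2 on each of ranks 1-3), records them to its own
# cases.sql_<rank> file, and allgather is used to confirm all 9 were run.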
def test_fan_in_grouped_parallel_2x2(self):
# run cases in parallel with 2 procs per model
# (cases will be split between the 2 parallel model instances)
run_parallel = True
procs_per_model = 2
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
from openmdao.utils.mpi import multi_proc_exception_check
with multi_proc_exception_check(prob.comm):
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
num_cases = 0
# we can run two models in parallel on our 4 procs
num_models = prob.comm.size // procs_per_model
# a separate case file will be written by rank 0 of each parallel model
# (the top two global ranks)
rank = prob.comm.rank
filename = "cases.sql_%d" % rank
if rank < num_models:
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
cr = om.CaseReader(filename)
cases = cr.list_cases('driver')
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // num_models+(rank < len(expected) % num_models))
for n, case in enumerate(cases):
idx = n * num_models + rank # index of expected case
outputs = cr.get_case(case).outputs
for name in ('x1', 'x2', 'c3.y'):
self.assertEqual(outputs[name], expected[idx][name])
else:
self.assertFalse("Cases from rank %d are being written" % rank in output)
self.assertFalse(os.path.exists(filename))
# total number of cases recorded across all requested procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_parallel_4x1(self):
# run cases in parallel with 1 proc per model
# (cases will be split between the 4 serial model instances)
run_parallel = True
procs_per_model = 1
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
rank = prob.comm.rank
# there will be a separate case file for each proc, containing the cases
# run by the instance of the model that runs in serial mode on that proc
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
# we are running 4 models in parallel, each using 1 proc
num_models = prob.comm.size // procs_per_model
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // num_models + (rank < len(expected) % num_models))
for n, case in enumerate(cases):
idx = n * num_models + rank # index of expected case
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x1'], expected[idx]['x1'])
self.assertEqual(outputs['x2'], expected[idx]['x2'])
self.assertEqual(outputs['c3.y'], expected[idx]['c3.y'])
# total number of cases recorded across all requested procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_serial_2x2(self):
# do not run cases in parallel, but with 2 procs per model
# (all cases will run on each of the 2 model instances)
run_parallel = False
procs_per_model = 2
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
# source contrast get averaged
# reset -f
import os
import numpy
import numpy as np
import mne
from mne.io import read_raw_fif
from scipy import stats as stats
from mne.stats import permutation_t_test
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from sklearn.base import clone
from mne.connectivity import spectral_connectivity, seed_target_indices
from operator import itemgetter
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
import re
from mne.connectivity import envelope_correlation
from mne.stats import permutation_cluster_1samp_test
# fs source space
src_fs = mne.read_source_spaces('/Users/boo/Desktop/MEG_data_script/PreProcessed_data/fsaverage-src.fif')
fsave_vertices = [s['vertno'] for s in src_fs]
stc_template = mne.read_source_estimate(
'/Users/boo/Desktop/MEG_data_script/analysis_source_result/stc_template-rh.stc')
stc_template.subject = 'fsaverage'
# label
label_name_list_mtl = ['Hippocampus', 'ParaHippocampal', 'Enterinal', 'Perirhinal']
hemi_pool = ['_lh', '_rh']
label_list_path = []
for r, d, f in os.walk('/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/'):
for ith_hemi in list(range(0, len(hemi_pool))):
for ith_label_path in list(range(0, len(label_name_list_mtl))):
for file in f:
if hemi_pool[ith_hemi] in file and label_name_list_mtl[ith_label_path] in file:
label_list_path.append(os.path.join(r, file))
label_list = []
label_parietal = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Parietal_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Parietal_lh.label')
label_precuneus = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Precuneus_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Precuneus_lh.label')
label_SMA = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/SMA_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/SMA_lh.label')
label_FEF = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/FEF_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/FEF_lh.label')
label_list.append(label_parietal)
label_list.append(label_precuneus)
label_list.append(label_SMA)
label_list.append(label_FEF)
for ith_label in list(range(0, len(label_list_path))):
label_list.append(mne.read_label(label_list_path[ith_label]))
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
# band
iter_freqs = [
('Alpha', 8, 13),
('Beta', 13, 30),
('Low gamma', 30, 60),
('High gamma', 60, 99)
]
method_pool = ['pli'] #'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
# the maximum point for b-lr is 0.28
# the maximum point for lr-b is 0.76
# 150 200 250 300 350 400
time_seed_pool = [0.28, 0.76]
time_sep_pool = [0.375, 0.4, 0.5, 0.6, 0.7] #0.15, 0.2, 0.25, 0.3, 0.35, 0.4
tmin_pool = []
tmax_pool = []
for ith_prep1 in list(range(0, len(time_seed_pool))):
for ith_prep2 in list(range(0, len(time_sep_pool))):
tmin_pool.append(time_seed_pool[ith_prep1] - time_sep_pool[ith_prep2] / 2)
tmax_pool.append(time_seed_pool[ith_prep1] + time_sep_pool[ith_prep2] / 2)
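# Each (seed, separation) pair defines one analysis window centered on the
# seed time, e.g. seed 0.28 s with separation 0.4 s gives 0.08-0.48 s and seed
# 0.76 s with separation 0.4 s gives 0.56-0.96 s; 2 seeds x 5 widths yield the
# 10 windows iterated below.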
curr_tp = 0
for ith_tp in list(range(0, len(tmin_pool))):
curr_tmin = round(tmin_pool[ith_tp], 3)
curr_tmax = round(tmax_pool[ith_tp], 3)
for ith_method in list(range(0, len(method_pool))):
curr_method = method_pool[ith_method]
for ith_band in list(range(0, len(iter_freqs))):
curr_fre_info = iter_freqs[ith_band]
band_name = curr_fre_info[0]
vmin = curr_fre_info[1]
vmax = curr_fre_info[2]
for ith_condition in list(range(0, len(naming_list))):
curr_condition = naming_list[ith_condition]
index_sub = 0
output_array = np.zeros((len(list(range(2, 14))), len(label_list), len(label_list)))
for ith_sub in list(range(2, 14)):
stcs_epoch_morphed_nocrop = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn/stc_ego_epoch_sub' +
str(ith_sub) + '_200hz_' + curr_condition +
'.npy', allow_pickle=True)
stcs_evoke_morphed_nocrop = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn/stc_sourceEstimate_ego_evoke_sub' +
str(ith_sub) + '_200hz_' + curr_condition +
'.npy', allow_pickle=True)
stcs_epoch_morphed_nocrop = stcs_epoch_morphed_nocrop.tolist()
stcs_evoke_morphed_nocrop = stcs_evoke_morphed_nocrop.tolist()
# crop time period
stcs_epoch_morphed = []
for ith_ele in list(range(0, len(stcs_epoch_morphed_nocrop))):
stcs_epoch_morphed.append(
stcs_epoch_morphed_nocrop[ith_ele].crop(tmin=curr_tmin, tmax=curr_tmax))
stcs_evoke_morphed = stcs_evoke_morphed_nocrop.crop(tmin=curr_tmin, tmax=curr_tmax)
seed_idx_pool = []
for ith_seed in list(range(0, len(yaxis_label_list))):
# find the vertex with maximum evoked power within this label
seed_pool_ts_evoke = stcs_evoke_morphed.in_label(label_list[ith_seed])
src_pow = np.sum(seed_pool_ts_evoke.data ** 2, axis=1)
total_seed_vertice_list = seed_pool_ts_evoke.vertices[0].tolist() + seed_pool_ts_evoke.vertices[
1].tolist()
seed_vertno = total_seed_vertice_list[np.argmax(src_pow)]
total_wb_vertice_list = stcs_evoke_morphed.vertices[0].tolist() + stcs_evoke_morphed.vertices[
1].tolist()
seed_idx_pool.append(np.searchsorted(total_wb_vertice_list, seed_vertno))
# create max epoch array for conn
conn_array = np.zeros((len(yaxis_label_list), len(yaxis_label_list), 1))
for ith_curr_seed in list(range(0, len(yaxis_label_list))):
max_epoch_array = np.zeros(
(np.shape(stcs_epoch_morphed)[0], 1, np.shape(stcs_evoke_morphed)[1]))
epoch_array = np.zeros(
(np.shape(stcs_epoch_morphed)[0], len(yaxis_label_list), np.shape(stcs_evoke_morphed)[1]))
for ith_epoch in list(range(0, np.shape(stcs_epoch_morphed)[0])):
max_epoch_array[ith_epoch, 0, ...] = stcs_epoch_morphed[ith_epoch].data[
seed_idx_pool[ith_curr_seed], ...]
for ith_other_seed in list(range(0, len(yaxis_label_list))):
epoch_array[ith_epoch, ith_other_seed, ...] = stcs_epoch_morphed[ith_epoch].data[
seed_idx_pool[ith_other_seed], ...]
# create indices
comb_ts = list(zip(max_epoch_array, epoch_array))
indices = seed_target_indices([0], np.arange(1, 13))
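# seed_target_indices([0], np.arange(1, 13)) pairs signal 0 (the max-power
# seed vertex prepended via comb_ts) with signals 1-12 (the 12 ROI time
# courses), so `con` comes back with one value per ROI for the averaged
# frequency band (faverage=True).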
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
comb_ts, method=curr_method, sfreq=200, fmin=vmin, fmax=vmax, mode='fourier',
indices=indices, faverage=True) # fourier
conn_array[ith_curr_seed, ...] = con
output_array[index_sub, ...] = conn_array[..., 0]
index_sub = index_sub + 1
np.save('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + curr_condition + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy',
output_array)
curr_tp = curr_tp + 1
## watching
import os
import numpy
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
method_pool = ['pli'] #'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
iter_freqs = [
('Alpha', 8, 13),
('Beta', 13, 30),
('Low gamma', 30, 60),
('High gamma', 60, 99)
]
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
yaxis_label = ['Parietal-SMA', 'Parietal-FEF', 'Precuneus-SMA','Precuneus-FEF',
'ERC(R)-SMA', 'ERC(R)-FEF', 'ERC(R)-Parietal', 'ERC(R)-Precuneus']
fontsize = 7
time_seed_pool = [0.28, 0.76]
time_sep_pool = [0.375, 0.4, 0.5, 0.6, 0.7] #[0.15, 0.2, 0.25, 0.3, 0.35, 0.4]
tmin_pool = []
tmax_pool = []
for ith_prep1 in list(range(0, len(time_seed_pool))):
for ith_prep2 in list(range(0, len(time_sep_pool))):
tmin_pool.append(time_seed_pool[ith_prep1] - time_sep_pool[ith_prep2] / 2)
tmax_pool.append(time_seed_pool[ith_prep1] + time_sep_pool[ith_prep2] / 2)
for ith_band in list(range(0, len(iter_freqs))):
curr_fre_info = iter_freqs[ith_band]
band_name = curr_fre_info[0]
plot_array = np.zeros((10, len(yaxis_label)))
title_array = np.array(range(10), dtype='<U20')
ith_position=0
for ith_method in list(range(0, len(method_pool))):
curr_method = method_pool[ith_method]
for ith_tp in list(range(0, len(tmin_pool))):
curr_tmin = round(tmin_pool[ith_tp], 3)
curr_tmax = round(tmax_pool[ith_tp], 3)
curr_array_b = np.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_b' + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy')
curr_array_l = np.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_l' + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy')
curr_array_r = np.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_r' + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy')
output_array_b_lr = curr_array_b - (curr_array_l + curr_array_r) / 2
statistic, pvalue = stats.ttest_1samp(output_array_b_lr, 0, axis=0)
plot_array[ith_position, ...] = np.array(
(statistic[0][2], statistic[0][3], statistic[1][2], statistic[1][3],
statistic[10][2], statistic[10][3], statistic[10][0], statistic[10][1]))
title_array[ith_position]= np.array((str(curr_tmin) + '-' + str(curr_tmax) + 's(' + curr_method + ')'))
ith_position = ith_position+1
fig, axes = plt.subplots(nrows=1, ncols=10, figsize=(30, 3)) # figsize=(16, 8.5)
ith_plot = 0
for ax in axes.flat:
ax.set_xticks(np.arange(len(yaxis_label)))
ax.set_xticklabels(yaxis_label, rotation=90, fontsize=fontsize)
ax.bar(yaxis_label, plot_array[ith_plot], width=0.6, color='0.5', edgecolor='black', linewidth=1, capsize=10)
ax.set_ylim([-3, 3])
ax.axhline(y=2.2, ls='--', linewidth=1, color='r')
ax.axhline(y=-2.2, ls='--', linewidth=1, color='r')
ax.set_title(title_array[ith_plot], fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ith_plot = ith_plot+1
plt.subplots_adjust(left=.03, right=.97, top=0.9, bottom=0.35, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/connectivity_' + band_name + '.png') # bbox_inches='tight'
plt.close()
## make figure horizontal bar
import os
import numpy
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
fontsize = 17
time_seed_pool = [0.28, 0.76]
band_name = 'Beta'
curr_method = 'pli'
tmin_t1 = round(time_seed_pool[0] - 0.2, 3)
tmax_t1 = round(time_seed_pool[0] + 0.2, 3)
tmin_t2 = round(time_seed_pool[1] - 0.2, 3)
tmax_t2 = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_l_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_r_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_b_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_l_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_r_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
output_array_b_lr_t1 = curr_array_b_t1 - (curr_array_l_t1 + curr_array_r_t1) / 2
output_array_b_lr_t2 = curr_array_b_t2 - (curr_array_l_t2 + curr_array_r_t2) / 2
statistic_t1, pvalue_t1 = stats.ttest_1samp(output_array_b_lr_t1, 0, axis=0)
statistic_t2, pvalue_t2 = stats.ttest_1samp(output_array_b_lr_t2, 0, axis=0)
mean_t1 = np.mean(output_array_b_lr_t1, axis=0)
mean_t2 = np.mean(output_array_b_lr_t2, axis=0)
se_t1 = np.std(output_array_b_lr_t1, axis=0)/ np.sqrt(12)
se_t2 = np.std(output_array_b_lr_t2, axis=0)/ np.sqrt(12)
# stats.ttest_rel(output_array_b_lr_t1[..., 10,0], output_array_b_lr_t2[..., 10,0])
stats.ttest_1samp(output_array_b_lr_t2[..., 3,0], 0)
# plot_array_t1 = [statistic_t1[3][0], statistic_t1[2][0], statistic_t1[8][0], statistic_t1[9][0], statistic_t1[11][0], statistic_t1[10][0]]
# plot_array_t2 = [statistic_t2[3][0], statistic_t2[2][0], statistic_t2[8][0], statistic_t2[9][0], statistic_t2[11][0], statistic_t2[10][0]]
t1_str = str(tmin_t1)+' ~ '+str(tmax_t1)+'s'
t2_str = str(tmin_t2)+' ~ '+str(tmax_t2)+'s'
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
# yaxis_label = ['FEF-Parietal', 'SMA-Parietal', 'HPC(R)-Parietal', 'PHC(R)-Parietal', 'PRC(R)-Parietal',
# 'ERC(R)-Parietal']
yaxis_label = ['FEF-Precuneus', 'SMA-Precuneus', 'HPC(R)-Precuneus', 'PHC(R)-Precuneus', 'PRC(R)-Precuneus',
'ERC(R)-Precuneus']
ith_region = 1
dataFrame_mean = pd.DataFrame(data=[[mean_t1[3][ith_region], mean_t2[3][ith_region]], [mean_t1[2][ith_region], mean_t2[2][ith_region]], \
[mean_t1[8][ith_region], mean_t2[8][ith_region]], [mean_t1[9][ith_region], mean_t2[9][ith_region]], \
[mean_t1[11][ith_region], mean_t2[11][ith_region]], [mean_t1[10][ith_region], mean_t2[10][ith_region]]],
index=yaxis_label,
columns=[t1_str, t2_str])
dataFrame_se = pd.DataFrame(data=[[se_t1[3][ith_region], se_t2[3][ith_region]], [se_t1[2][ith_region], se_t2[2][ith_region]], \
[se_t1[8][ith_region], se_t2[8][ith_region]], [se_t1[9][ith_region], se_t2[9][ith_region]], \
[se_t1[11][ith_region], se_t2[11][ith_region]], [se_t1[10][ith_region], se_t2[10][ith_region]]],
index=yaxis_label,
columns=[t1_str, t2_str])
handle = dataFrame_mean.plot.barh(xerr=dataFrame_se, figsize=(6, 6), legend=False, color=['darkgreen', 'red'])
handle.spines['right'].set_visible(False)
handle.spines['top'].set_visible(False)
handle.set_yticklabels(yaxis_label, rotation=0, fontsize=fontsize)
handle.set_xticks([-0.15, 0, 0.1])
handle.set_xlabel('t value', fontsize=fontsize)
handle.axvline(x=0, ls='-', linewidth=0.5, color='black')
handle.invert_yaxis() # labels read top-to-bottom
handle.tick_params(labelsize=fontsize)
handle.set_aspect('auto')
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.35, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_Precuneus_roi_' + band_name + '_' + '.png') # bbox_inches='tight'
plt.close()
## make figure vertical bar - old
import os
import numpy
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
for ith_time_p in list(range(0, len(time_seed_pool))):
band_name = band_list[ith_band]
tmin = round(time_seed_pool[ith_time_p] - 0.2, 3)
tmax = round(time_seed_pool[ith_time_p] + 0.2, 3)
curr_array_b = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin) + '_' + str(tmax) + '.npy')
curr_array_l = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin) + '_' + str(tmax) + '.npy')
curr_array_r = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin) + '_' + str(tmax) + '.npy')
if ith_time_p == 0:
# color = 'red'
output_array_contrast = curr_array_b - (curr_array_l + curr_array_r) / 2
if ith_time_p == 1:
# color = 'darkgreen'
output_array_contrast = (curr_array_l + curr_array_r) / 2 - curr_array_b
mean = np.mean(output_array_contrast, axis=0)
se = np.std(output_array_contrast, axis=0) / np.sqrt(12)
# statistic
statistic, pvalue = stats.ttest_1samp(output_array_contrast, 0, axis=0)
# stats.ttest_rel(output_array_b_lr_t1[..., 10,0], output_array_b_lr_t2[..., 10,0])
stat_fef, pval_fef = stats.ttest_1samp(output_array_contrast[..., 3, ith_region], 0)
stat_sma, pval_sma = stats.ttest_1samp(output_array_contrast[..., 2, ith_region], 0)
stat_hpc, pval_hpc = stats.ttest_1samp(output_array_contrast[..., 8, ith_region], 0)
stat_phc, pval_phc = stats.ttest_1samp(output_array_contrast[..., 9, ith_region], 0)
stat_prc, pval_prc = stats.ttest_1samp(output_array_contrast[..., 11, ith_region], 0)
stat_erc, pval_erc = stats.ttest_1samp(output_array_contrast[..., 10, ith_region], 0)
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
label_x = ['FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC']
color = ['limegreen', 'limegreen', 'red', 'red', 'red', 'red']
value_y = [mean[3][ith_region], mean[2][ith_region],
mean[8][ith_region], mean[9][ith_region],
mean[11][ith_region], mean[10][ith_region]]
value_errorbar = [se[3][ith_region], se[2][ith_region],
se[8][ith_region], se[9][ith_region],
se[11][ith_region], se[10][ith_region]]
fig, ax = plt.subplots(figsize=(7, 5.5))
ax.bar([1, 2, 4, 5, 6, 7], value_y, width=0.5, yerr=value_errorbar, capsize=3, color=color) # (89/255, 88/255, 89/255)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([1, 2, 4, 5, 6, 7])
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
ax.set_yticks([-0.08, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('PLI', fontsize=fontsize)
# ax.axvline(x=0, ls='-', linewidth=0.5, color='black')
# ax.invert_xaxis() # labels read top-to-bottom
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '_' + str(time_seed_pool[ith_time_p]) + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## make figure vertical bar - new - paired t test
import os
import numpy
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tmin_early = round(time_seed_pool[0] - 0.2, 3)
tmax_early = round(time_seed_pool[0] + 0.2, 3)
tmin_late = round(time_seed_pool[1] - 0.2, 3)
tmax_late = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_l_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_r_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_b_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_l_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_r_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
output_array_contrast_early = curr_array_b_early - (curr_array_l_early + curr_array_r_early) / 2
output_array_contrast_late = curr_array_b_late - (curr_array_l_late + curr_array_r_late) / 2
mean_early = np.mean(output_array_contrast_early, axis=0)
mean_late = np.mean(output_array_contrast_late, axis=0)
se_early = np.std(output_array_contrast_early, axis=0) / np.sqrt(12)
se_late = np.std(output_array_contrast_late, axis=0) / np.sqrt(12)
# two sample t test
# statistic, pvalue = stats.ttest_1samp(output_array_contrast_early, 0, axis=0)
# # stats.ttest_rel(output_array_b_lr_t1[..., 10,0], output_array_b_lr_t2[..., 10,0])
# stat_fef, pval_fef = stats.ttest_1samp(, 0)
# stat_sma, pval_sma = stats.ttest_1samp(output_array_contrast_early[..., 2, ith_region], 0)
# stat_hpc, pval_hpc = stats.ttest_1samp(output_array_contrast_early[..., 8, ith_region], 0)
# stat_phc, pval_phc = stats.ttest_1samp(output_array_contrast_early[..., 9, ith_region], 0)
# stat_prc, pval_prc = stats.ttest_1samp(output_array_contrast_early[..., 11, ith_region], 0)
# stat_erc, pval_erc = stats.ttest_1samp(output_array_contrast_early[..., 10, ith_region], 0)
# paired t test
stat_fef, pval_fef = stats.ttest_rel(output_array_contrast_early[..., 3, ith_region], output_array_contrast_late[..., 3, ith_region])
stat_sma, pval_sma = stats.ttest_rel(output_array_contrast_early[..., 2, ith_region], output_array_contrast_late[..., 2, ith_region])
stat_hpc, pval_hpc = stats.ttest_rel(output_array_contrast_early[..., 8, ith_region], output_array_contrast_late[..., 8, ith_region])
stat_phc, pval_phc = stats.ttest_rel(output_array_contrast_early[..., 9, ith_region], output_array_contrast_late[..., 9, ith_region])
stat_erc, pval_erc = stats.ttest_rel(output_array_contrast_early[..., 10, ith_region], output_array_contrast_late[..., 10, ith_region])
stat_prc, pval_prc = stats.ttest_rel(output_array_contrast_early[..., 11, ith_region], output_array_contrast_late[..., 11, ith_region])
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' fef' + ' tval:' + str(stat_fef) + ' pval:' + str(pval_fef))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' sma' + ' tval:' + str(stat_sma) + ' pval:' + str(pval_sma))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' hpc' + ' tval:' + str(stat_hpc) + ' pval:' + str(pval_hpc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' phc' + ' tval:' + str(stat_phc) + ' pval:' + str(pval_phc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' prc' + ' tval:' + str(stat_prc) + ' pval:' + str(pval_prc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' erc' + ' tval:' + str(stat_erc) + ' pval:' + str(pval_erc))
# reference
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
# array
label_x = ['HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA']
color_early = ['skyblue', 'skyblue', 'skyblue', 'skyblue', 'gold', 'gold']
color_late = ['blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod']
value_y_early = [mean_early[8][ith_region], mean_early[9][ith_region], mean_early[11][ith_region], mean_early[10][ith_region],
mean_early[3][ith_region], mean_early[2][ith_region]]
value_y_late = [mean_late[8][ith_region], mean_late[9][ith_region], mean_late[11][ith_region], mean_late[10][ith_region],
mean_late[3][ith_region], mean_late[2][ith_region]]
value_errorbar_early = [se_early[8][ith_region], se_early[9][ith_region], se_early[11][ith_region], se_early[10][ith_region],
se_early[3][ith_region], se_early[2][ith_region]]
value_errorbar_late = [se_late[8][ith_region], se_late[9][ith_region], se_late[11][ith_region], se_late[10][ith_region],
se_late[3][ith_region], se_late[2][ith_region]]
width = 0.25 # the width of the bars
ind = np.arange(len(value_y_early))
fig, ax = plt.subplots(figsize=(10, 4))
ax.bar(ind - width / 2, value_y_early, width, yerr=value_errorbar_early, capsize=3, color=color_early)
ax.bar(ind + width / 2, value_y_late, width, yerr=value_errorbar_late, capsize=3, color=color_late)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks(ind)
if ith_band==0:
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
else:
ax.set_xticklabels([])
ax.set_yticks([-0.17, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('Back - Left/Right', fontsize=fontsize)
# ax.axvline(x=0, ls='-', linewidth=0.5, color='black')
# ax.invert_xaxis() # labels read top-to-bottom
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## make figure vertical bar - new - anova-like
import os
import numpy
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tmin_early = round(time_seed_pool[0] - 0.2, 3)
tmax_early = round(time_seed_pool[0] + 0.2, 3)
tmin_late = round(time_seed_pool[1] - 0.2, 3)
tmax_late = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_l_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_r_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_b_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_l_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_r_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
output_array_contrast_early = curr_array_b_early - (curr_array_l_early + curr_array_r_early) / 2
output_array_contrast_late = curr_array_b_late - (curr_array_l_late + curr_array_r_late) / 2
mean_early = np.mean(output_array_contrast_early, axis=0)
mean_late = np.mean(output_array_contrast_late, axis=0)
se_early = np.std(output_array_contrast_early, axis=0) / np.sqrt(12)
se_late = np.std(output_array_contrast_late, axis=0) / np.sqrt(12)
# reference
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
# array
label_x = ['HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA']
color = ['blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod', 'blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod']
value_y = [mean_early[8][ith_region], mean_early[9][ith_region], mean_early[11][ith_region], mean_early[10][ith_region],
mean_early[3][ith_region], mean_early[2][ith_region], mean_late[8][ith_region], mean_late[9][ith_region],
mean_late[11][ith_region], mean_late[10][ith_region], mean_late[3][ith_region], mean_late[2][ith_region]]
value_errorbar = [se_early[8][ith_region], se_early[9][ith_region], se_early[11][ith_region], se_early[10][ith_region],
se_early[3][ith_region], se_early[2][ith_region], se_late[8][ith_region], se_late[9][ith_region],
se_late[11][ith_region], se_late[10][ith_region], se_late[3][ith_region], se_late[2][ith_region]]
width = 0.5 # the width of the bars
ind = np.arange(len(value_y))
fig, ax = plt.subplots(figsize=(12, 4))
ax.bar([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14], value_y, width, yerr=value_errorbar, capsize=3, color=color)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14])
if ith_band==0:
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
else:
ax.set_xticklabels([])
ax.set_yticks([-0.17, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('Back - Left/Right', fontsize=fontsize)
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## anova two way
import os
import numpy
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import (pairwise_tukeyhsd, MultiComparison)
fontsize = 25
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
label_x = ['FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC']
for ith_region in list(range(0, len(seed_pool))): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tmin_t1 = round(time_seed_pool[0] - 0.2, 3)
tmax_t1 = round(time_seed_pool[0] + 0.2, 3)
tmin_t2 = round(time_seed_pool[1] - 0.2, 3)
tmax_t2 = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_l_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_r_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_b_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_l_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_r_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
array_t1_fef = curr_array_b_t1[..., 3, ith_region] - (curr_array_l_t1[..., 3, ith_region] + curr_array_r_t1[..., 3, ith_region])/2
array_t1_sma = curr_array_b_t1[..., 2, ith_region] - (curr_array_l_t1[..., 2, ith_region] + curr_array_r_t1[..., 2, ith_region])/2
array_t1_hpc = curr_array_b_t1[..., 8, ith_region] - (curr_array_l_t1[..., 8, ith_region] + curr_array_r_t1[..., 8, ith_region])/2
array_t1_phc = curr_array_b_t1[..., 9, ith_region] - (curr_array_l_t1[..., 9, ith_region] + curr_array_r_t1[..., 9, ith_region])/2
array_t1_prc = curr_array_b_t1[..., 11, ith_region] - (curr_array_l_t1[..., 11, ith_region] + curr_array_r_t1[..., 11, ith_region])/2
array_t1_erc = curr_array_b_t1[..., 10, ith_region] - (curr_array_l_t1[..., 10, ith_region] + curr_array_r_t1[..., 10, ith_region])/2
array_t2_fef = curr_array_b_t2[..., 3, ith_region] - (curr_array_l_t2[..., 3, ith_region] + curr_array_r_t2[..., 3, ith_region])/2
array_t2_sma = curr_array_b_t2[..., 2, ith_region] - (curr_array_l_t2[..., 2, ith_region] + curr_array_r_t2[..., 2, ith_region])/2
array_t2_hpc = curr_array_b_t2[..., 8, ith_region] - (curr_array_l_t2[..., 8, ith_region] + curr_array_r_t2[..., 8, ith_region])/2
array_t2_phc = curr_array_b_t2[..., 9, ith_region] - (curr_array_l_t2[..., 9, ith_region] + curr_array_r_t2[..., 9, ith_region])/2
array_t2_prc = curr_array_b_t2[..., 11, ith_region] - (curr_array_l_t2[..., 11, ith_region] + curr_array_r_t2[..., 11, ith_region])/2
array_t2_erc = curr_array_b_t2[..., 10, ith_region] - (curr_array_l_t2[..., 10, ith_region] + curr_array_r_t2[..., 10, ith_region])/2
statistic, pvalue = stats.ttest_1samp(array_t2_sma, 0, axis=0)
create_array = {'value': np.concatenate((array_t1_fef, array_t1_sma, array_t1_hpc, array_t1_phc, array_t1_prc, array_t1_erc,
array_t2_fef, array_t2_sma, array_t2_hpc, array_t2_phc, array_t2_prc, array_t2_erc)),
'area': np.concatenate((np.repeat('fef', 12), np.repeat('sma', 12), np.repeat('hpc', 12), np.repeat('phc', 12), np.repeat('prc', 12), np.repeat('erc', 12),
np.repeat('fef', 12), np.repeat('sma', 12), np.repeat('hpc', 12), np.repeat('phc', 12), np.repeat('prc', 12), np.repeat('erc', 12),)),
'time': np.concatenate((np.repeat('t1', 12*6), np.repeat('t2', 12*6)))}
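        # -- completion sketch (not recovered code): the cell stops after assembling
        # create_array, but the imports above (ols, sm, MultiComparison) suggest a
        # standard two-way ANOVA over area x time, e.g.:
        df_anova = pd.DataFrame(create_array)
        model = ols('value ~ C(area) + C(time) + C(area):C(time)', data=df_anova).fit()
        anova_table = sm.stats.anova_lm(model, typ=2)
        print('seed:' + seed_pool[ith_region] + ' band:' + band_name)
        print(anova_table)
        # post-hoc Tukey HSD across areas, collapsing over time
        mc = MultiComparison(df_anova['value'], df_anova['area'])
        print(mc.tukeyhsd())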
"""
explore steady-state dynamics
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as scio
from mpl_toolkits.mplot3d import Axes3D
g=9.81
theta=np.deg2rad(30.0)
gx = g*np.sin(theta)
gz = g*np.cos(theta)
rho_s = 2700.0
rho_f = 1000.0
rho_a = 2000.0
#rho = 2000.0
delta = 0.05
mu = 5.0*1.e-3
alpha = rho_a*delta*np.sqrt(gx)
k0 = 80.0*1.e-9
a0 = 0.005
sig0 = 1000.0
compress = .01/1000.
m_crit = 0.64
phi = np.deg2rad(43.0)
def plot_meqn():
m_critv = np.linspace(0.0,1.0)
a = 1.0 +0.0J
b = alpha/mu + 0.0J
c = -1.0 + 0.0J
    d = (m_critv-1.0)*alpha/mu + 0.0J  # vectorised over m_critv (the scalar m_crit here was a typo)
q = (3.*c - b**2)/9.0
r = (c*b - 3.0*d)/6.0 - b**3/27.0
s1 = (r + np.sqrt(q**3 + r**2))**(1./3.)
s2 = (r - np.sqrt(q**3 + r**2))**(1./3.)
#q = (3.*c - b**2)/9.0
#r = -27*d + b*(9.0*c-2.0*b**2)
#discriminant = q**3 + r**2
#s = r + np.sqrt(discriminant)
#t = r - np.sqrt(discriminant)
#term1 = np.sqrt(3.0)*((-t + s)/2.0)
#r13 = 2.0*np.sqrt(q)
#x1 = (-term1 + r13*np.cos(q**3/3.0))
x1 = (s1+s2) - b/3.0
meqn = (1.0 - x1**2)
#import pdb;pdb.set_trace()
    plt.plot(m_critv,m_critv,'r')
    plt.plot(m_critv,meqn,'b')
plt.show()
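def meqn_from_roots(m_crit_val):
    # Cross-check sketch (not in the original script): the cubic solved analytically in
    # plot_meqn() can also be handled with np.roots; pick a real root x in [0, 1] so
    # that m_eqn = 1 - x**2 stays physical.  Which root is relevant may depend on the
    # parameter regime, so treat this as a verification aid only.
    b = alpha / mu
    c = -1.0
    d = (m_crit_val - 1.0) * alpha / mu
    roots = np.roots([1.0, b, c, d])
    real_roots = roots[np.abs(roots.imag) < 1e-9].real
    candidates = real_roots[(real_roots >= 0.0) & (real_roots <= 1.0)]
    return 1.0 - candidates[0] ** 2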
def quiver_plot():
def meqn04mcrit(m_crit):
a = 1.0 +0.0J
b = alpha/mu + 0.0J
c = -1.0 + 0.0J
d = (m_crit-1.0)*alpha/mu + 0.0J
q = (3.*c - b**2)/9.0
r = (c*b - 3.0*d)/6.0 - b**3/27.0
s1 = (r + np.sqrt(q**3 + r**2))**(1./3.)
s2 = (r - np.sqrt(q**3 + r**2))**(1./3.)
x1 = (s1+s2) - b/3.0
meqn = np.real(1.0 - x1**2)
return meqn
mcrit = 0.64
meqn0 = meqn04mcrit(mcrit)
    # rho and k are not defined in this scope; assume the aggregate density and the
    # reference permeability from the module header (rho_a, k0).
    rho = rho_a
    k = k0
    A = np.sqrt(g)*mu/(rho*g*k)
    C = k/(compress*mu*np.sqrt(g))
    B = rho*np.sqrt(g)/mu
m = np.linspace(0,1)
p = np.linspace(-1.,1.)
M,P = np.meshgrid(m,p)
u0 = B*gx/(2.0*g*(1-meqn0))
m0 = meqn0
p0 = 0.0
    # import pdb; pdb.set_trace()  # debugger breakpoint from development, disabled
fM = (2./A)*M*P
    fP = -3.*C*P - 3.*A*C*(M-meqn0)*u0  # use the steady-state meqn0 and u0 computed above
plt.quiver(M,P,fM,fP)
plt.show()
def integrate(m0,h0,u0,pe0,npoints=100,tend=10.0):
def rhs(mp,hp,up,pep):
pp = pep + rho_f*gz*hp
rhop = mp*rho_s + (1.0-mp)*rho_f
shear = 2.*up/hp
sigbed = max(0.0,rhop*gz*hp - pp)
sigbedc = rho_s*(shear*delta)**2.0 + sigbed
N = shear*mu/(sigbedc)
meqn = m_crit/(1.0+np.sqrt(N))
compress = a0/(mp*(sigbed+sig0))
k = k0*np.exp((0.6-mp)/0.04)
f = np.ones(4)
rhorat = (rhop-rho_f)/rhop
f[0] = 2.*k*mp*pep/(mu*hp**2)
f[1] = rhorat*2.0*k*pep/(mu*hp)
f[2] = gx - max(0.,gz*rhorat - pep/(rhop*hp))*max(0.0,np.tan(phi+np.arctan(mp-meqn))) - (1.-mp)*2.0*mu*up/(rhop*hp**2)
f[3] = -(3.*k/(compress*mu*hp**2))*(1.0 - 0.5*compress*rho_f*gz*hp*rhorat)*pep - 3*up*np.tan(mp-meqn)/(compress*hp)
#import pdb;pdb.set_trace()
tt=5.
return (f,meqn)
t = np.linspace(0.0,tend,npoints)
dt = t[1]-t[0]
m = np.zeros(npoints)
h = np.zeros(npoints)
u = np.zeros(npoints)
pe = np.zeros(npoints)
m_eqn = np.zeros(npoints)
m[0] = m0
h[0] = h0
u[0] = u0
pe[0]= pe0
m_eqn[0]=m_crit
    for n in range(1, npoints):  # range (not xrange) for Python 3 compatibility
(f,meqn) = rhs(m[n-1],h[n-1],u[n-1],pe[n-1])
m[n] = max(0.0,m[n-1] + dt*f[0])
m[n] = min(1.0,m[n])
h[n] = h[n-1] + dt*f[1]
u[n] = max(0.0,u[n-1] + dt*f[2])
pe[n] = pe[n-1] + dt*f[3]
m_eqn[n] = meqn
#import pdb;pdb.set_trace()
p = pe + rho_f*gz*h
rho = m*rho_s + (1.0-m)*rho_f
pa = pe/(gz*h*rho-rho_f*(gz*h))
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(m[0],u[0],pa[0],c='r',marker='o')
ax.scatter(m,u,pe,c='b',marker='o')
ax.set_xlabel('m')
ax.set_ylabel('u')
ax.set_zlabel('pa')
fig = plt.figure(2)
ax = fig.add_subplot(111)
#plt.plot(u[0],pe[0],'ro')
plt.plot(u,pa,'bo-')
ax.set_xlabel('u')
ax.set_ylabel('pa')
fig = plt.figure(3)
ax = fig.add_subplot(111)
#plt.plot(u[0],pe[0],'ro')
plt.plot(m,pa,'bo-')
ax.set_xlabel('m')
ax.set_ylabel('pa')
fig = plt.figure(4)
ax = fig.add_subplot(111)
#plt.plot(u[0],pe[0],'ro')
plt.plot(t,pa,'b-')
ax.set_xlabel('t')
ax.set_ylabel('pa')
fig = plt.figure(5)
ax = fig.add_subplot(111)
#plt.plot(u[0],pe[0],'ro')
plt.plot(t,u,'b-')
ax.set_xlabel('t')
ax.set_ylabel('u')
fig = plt.figure(6)
ax = fig.add_subplot(111)
#plt.plot(u[0],pe[0],'ro')
plt.plot(t,m,'b-',t,m_eqn,'r-')
ax.set_xlabel('t')
ax.set_ylabel('m, m_eqn')
fig = plt.figure(7)
ax = fig.add_subplot(111)
#plt.plot(u[0],pe[0],'ro')
plt.plot(t,h,'b-')
ax.set_xlabel('t')
ax.set_ylabel('h')
plt.show()
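# Usage sketch (not from the original script): hypothetical initial conditions for a
# loosely packed layer with hydrostatic initial pore pressure.  Values are illustrative only.
def example_integrate_run():
    m0 = 0.62    # initial solid volume fraction, just below m_crit
    h0 = 1.0     # flow depth
    u0 = 0.0     # initial velocity
    pe0 = 0.0    # initial excess pore pressure
    integrate(m0, h0, u0, pe0, npoints=500, tend=20.0)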
def integrate2(m0,h0,u0,pe0,npoints=100,tend=10.0):
def fobject(q,mn,hn,un,pen,dt,mu,compress,k0,a0,gz,rho_f,rho_s,delta,m_crit):
mp=q[0]
hp=q[1]
up=q[2]
pep=q[3]
pp = pep + rho_f*gz*hp
rhop = mp*rho_s + (1.0-mp)*rho_f
shear = 2.*up/hp
sigbed = max(0.0,rhop*gz*hp - pp)
sigbedc = rho_s*(shear*delta)**2.0 + sigbed
N = shear*mu/(sigbedc)
meqn = m_crit/(1.0+np.sqrt(N))
compress = a0/(mp*(sigbed+sig0))
k = k0*np.exp((0.6-mp)/0.04)
rhorat = (rhop-rho_f)/rhop
psi0 = 2.*k*mp*pep/(mu*hp**2)
psi1 = rhorat*2.0*k*pep/(mu*hp)
psi2 = gx - max(0.,gz*rhorat - pep/(rhop*hp))*max(0.0,np.tan(phi+np.arctan(mp-meqn))) - (1.-mp)*2.0*mu*up/(rhop*hp**2)
psi3 = -(3.*k/(compress*mu*hp**2))*(1.0 - 0.5*compress*rho_f*gz*hp*rhorat)*pep - 3.*up*np.tan(mp-meqn)/(compress*hp)
f = np.ones(4)
f[0] = mn + dt*psi0 - mp
f[1] = hn + dt*psi1 - hp
f[2] = un + dt*psi2 - up
f[3] = pen + dt*psi3 - pep
return f
t = np.linspace(0.0,tend,npoints)
dt = t[1]-t[0]
m = np.zeros(npoints)
h = np.zeros(npoints)
u = np.zeros(npoints)
pe = np.zeros(npoints)
m_eqn = | np.zeros(npoints) | numpy.zeros |
from dataloader_utils import Gender, HeartPart, EndPhase
from enum import Enum
import numpy as np
import math
import cv2
# --------------------------------------
# Shape (contour) similarity
# --------------------------------------
def __areas(curve1, curve2):
# floats come in
# find the corners of the bbox
def _bbox(cv):
mins = np.min(cv, axis=0)
maxs = np.max(cv, axis=0)
x_min, y_min = mins[0], mins[1]
x_max, y_max = maxs[0], maxs[1]
return x_min, y_min, x_max, y_max
box1 = _bbox(curve1)
box2 = _bbox(curve2)
xr = max(box1[2], box2[2])
yb = max(box1[3], box2[3])
xl = min(box1[0], box2[0])
    yu = min(box1[1], box2[1])  # top of the joint bounding box is the smaller y_min (was max)
# shift and rescale the curves (DC, JC will not change)
curve1[:, 0] = (curve1[:, 0] - xl) / (xr - xl + 1e-5)
curve1[:, 1] = (curve1[:, 1] - yu) / (yb - yu + 1e-5)
curve2[:, 0] = (curve2[:, 0] - xl) / (xr - xl + 1e-5)
curve2[:, 1] = (curve2[:, 1] - yu) / (yb - yu + 1e-5)
# map the coordinates to 410 x 410 mask
image1 = np.zeros((410, 410), dtype=np.uint8)
curve1 = curve1 * 400 + 5
cv2.drawContours(image1, [np.expand_dims(curve1, axis=1).astype(np.int32)], -1, (255, 0, 0), cv2.FILLED)
image2 = np.zeros((410, 410), dtype=np.uint8)
curve2 = curve2 * 400 + 5
cv2.drawContours(image2, [np.expand_dims(curve2, axis=1).astype(np.int32)], -1, (255, 0, 0), cv2.FILLED)
A = (image1 // 255 == 1).astype(np.float32)
B = (image2 // 255 == 1).astype(np.float32)
area1 = np.sum(A)
area2 = np.sum(B)
area_inter = np.sum(A * B)
area_union = area1 + area2 - area_inter
return area_union, area_inter, area1, area2
def dice(curve1, curve2): # can be viewed as F1 score
"""
Calculate the dice metric for the two curves.
:param curve1: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:param curve2: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:return: a real number (the dice value)
"""
_, inter, a1, a2 = __areas(curve1, curve2)
# dice metric
return 2.0 * inter / (a1 + a2)
def jaccard(curve1, curve2): # aka. Tanimoto index
"""
Calculate the jaccard metric for the two curves.
:param curve1: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:param curve2: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:return: a real number (the jaccard index)
"""
union, inter, _, _ = __areas(curve1, curve2)
# dice metric
return inter / union
def hausdorff(curve1, curve2): # aka. Pompeiu-Hausdorff distance
"""
Calculate the Hausdorff distance between two curves. (https://en.wikipedia.org/wiki/Hausdorff_distance)
:param curve1: a numpy matrix with shape (N, 2), points are in x, y format
:param curve2: a numpy matrix with shape (N, 2), points are in x, y format
:return: a real number (hausdorff distance)
"""
N2 = curve2.shape[0]
temp = np.expand_dims(curve1, 2)
temp = np.repeat(temp, N2, 2)
temp = temp - curve2.T
distances = temp[:, 0, :] ** 2 + temp[:, 1, :] ** 2
d1 = np.max(np.min(distances, 0))
d2 = np.max(np.min(distances, 1))
return math.sqrt(max(d1, d2))
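def _contour_similarity_demo():
    # Sketch (not part of the original module): compare a circle with a slightly
    # shifted copy of itself using the three metrics above.  Copies are passed to
    # dice/jaccard because __areas rescales its input curves in place.
    theta = np.linspace(0.0, 2.0 * np.pi, 200, endpoint=False)
    circle = np.stack([50.0 + 20.0 * np.cos(theta), 50.0 + 20.0 * np.sin(theta)], axis=1)
    shifted = circle + np.array([3.0, 0.0])
    print('dice     :', dice(circle.copy(), shifted.copy()))
    print('jaccard  :', jaccard(circle.copy(), shifted.copy()))
    print('hausdorff:', hausdorff(circle.copy(), shifted.copy()))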
# --------------------------------------
# Volume calculation
# --------------------------------------
def ratio(pixel_spacing: tuple, slice_thickness: float, gap: float) -> (float, float):
ratio_slice = pixel_spacing[0] * pixel_spacing[1] * slice_thickness / 1000.0 # mm^3 -> ml conversion
ratio_gap = pixel_spacing[0] * pixel_spacing[1] * gap / 1000.0
return ratio_slice, ratio_gap
def bsa(height, weight): # Mosteller BSA
if not(height is None or weight is None):
return math.sqrt(height * weight / 3600.0)
else:
return None
def area_triangular(curve):
"""
Calculates the area of a closed curve based on
crossproducts.
:param curve: a numpy matrix with shape (N, 2), points are in x, y format
elements are floats
:return: area
"""
# calculate center of mass
crm = np.sum(curve, axis=0) / curve.shape[0]
# vector between crm and a point of the curve
r = curve - crm
# side vector
curve_mtx_shifted = np.ones_like(curve)
curve_mtx_shifted[0] = curve[-1]
curve_mtx_shifted[1:] = curve[0:-1]
dr = curve - curve_mtx_shifted
# vector product
rxdr = np.cross(r, dr)
# sum up the pieces of triangulars
return np.abs(0.5 * np.sum(rxdr))
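def _area_triangular_check():
    # Quick sanity check (not from the original file): a unit square traversed
    # counter-clockwise should give an area of 1.0.
    square_curve = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    assert abs(area_triangular(square_curve) - 1.0) < 1e-9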
def convert_to_hierarchical(contours):
"""
convert list of contours into a hierarchical structure
slice > frame > heartpart -- Contour
:param contours: list of Contour objects
:return: a hierarchical structure which contains Contour objects
"""
hierarchical_contours = {}
for contour in contours:
if not(contour.slice in hierarchical_contours.keys()):
hierarchical_contours[contour.slice] = {}
if not(contour.frame in hierarchical_contours[contour.slice].keys()):
hierarchical_contours[contour.slice][contour.frame] = {}
hierarchical_contours[contour.slice][contour.frame][contour.part] = contour
return hierarchical_contours
def calculate_contour_area(curve: np.ndarray):
"""
calculate area with triangulars
:param curve: numpy matrix (N, 2)
:return: area of the closed curve
"""
return area_triangular(curve)
def grouping(hierarchical_contours, calculate_area):
"""
Determines the contour which phase belongs to (systole or diastole).
Calculates the areas of each contour.
:param hierarchical_contours: a hierarchical structure which contains Contour objects
(slice > frame > heartpart -- Contour)
:param calculate_area: function to calculate area of the contour
:return: hierarchical structure with areas (slice > heartpart > phase -- area)
"""
def set_endphase(slice, frame, part, phase):
hierarchical_contours[slice][frame][part].phase = phase
hierarchical_contours[slice][frame][part].corresponding_image.phase = phase
contour_areas = {}
slices = hierarchical_contours.keys()
for slice in slices:
contour_areas[slice] = {}
for part in HeartPart:
areas = []
frames = []
contour_areas[slice][part] = {}
for frame in hierarchical_contours[slice].keys():
if part in hierarchical_contours[slice][frame]:
curve = hierarchical_contours[slice][frame][part]
frames.append(frame)
areas.append(calculate_area(curve.contour_mtx))
if len(areas) > 1:
contour_areas[slice][part][EndPhase.DIA] = max(areas)
contour_areas[slice][part][EndPhase.SYS] = min(areas)
set_endphase(slice, frames[areas.index(max(areas))], part, EndPhase.DIA)
set_endphase(slice, frames[areas.index(min(areas))], part, EndPhase.SYS)
elif len(areas) == 1:
ds = np.array([frames[0] - 0, frames[0] - 20, frames[0] - 9]) # this is a heuristic
                idx = np.argmin(np.abs(ds))  # index of the nearest reference frame (0, 20, or 9)
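                # Completion sketch -- the original file is truncated here.  Assume the
                # single available frame is end-diastolic when closest to frame 0 or 20
                # and end-systolic when closest to frame 9 (typical 20-frame cine);
                # this is an assumption, not recovered code.
                phase = EndPhase.SYS if idx == 2 else EndPhase.DIA
                contour_areas[slice][part][phase] = areas[0]
                set_endphase(slice, frames[0], part, phase)
    return contour_areas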
#!/usr/bin/env python
from __future__ import print_function
import sys
import numpy as np
import os, glob
import caffe
import lmdb
from PIL import Image
import argparse
import random
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--label', action="store_true", help='Whether the input images are labels')
parser.add_argument('--list_file', type=str, help='Path to a file containing list of images')
parser.add_argument('--image_dir', type=str, default=None, help='Path to image folder')
parser.add_argument('--search_string', type=str, default='*.png', help='Wildcard. eg. train/*/*.png')
parser.add_argument('--output_dir', type=str, default='image-lmdb', help='Path to output folder')
parser.add_argument('--label_dict', type=str, default=None, help='Label type translation. eg. {17:0, 19:1}')
parser.add_argument('--width', type=int, default=None, help='Output Image Width')
parser.add_argument('--height', type=int, default=None, help='Output Image Height')
parser.add_argument('--rand_seed', type=int, default=0, help='Rand seed for shuffling')
parser.add_argument('--shuffle', action="store_true", help='Shuffle list of images')
return parser.parse_args()
def create_lut(args):
if args.label_dict:
lut = np.zeros(256, dtype=np.uint8)
for k in range(256):
lut[k] = k
for k in args.label_dict.keys():
lut[k] = args.label_dict[k]
return lut
else:
return None
def create_lmdb(args, image_indices):
if args.label_dict:
lut = create_lut(args)
in_db = lmdb.open(args.output_dir, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
for in_idx, in_ in enumerate(image_indices):
print('{} {} '.format(in_idx, in_), end='')
im = Image.open(in_) # or load whatever ndarray you need
if args.label:
                im = np.array(im, dtype=np.uint8)
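                # -- completion sketch: the original script is truncated here ---------
                # Everything below follows the usual Caffe LMDB recipe (optional label
                # remap, resize, HWC->CHW, wrap in a Datum, write under a sortable key)
                # and is an assumption, not recovered code.
                if args.label_dict:
                    im = lut[im]
                if args.width and args.height:
                    im = np.array(Image.fromarray(im).resize(
                        (args.width, args.height), Image.NEAREST), dtype=np.uint8)
                im = im[np.newaxis, ...]              # labels are single channel: 1 x H x W
            else:
                im = np.array(im, dtype=np.uint8)     # RGB image
                if args.width and args.height:
                    im = np.array(Image.fromarray(im).resize(
                        (args.width, args.height), Image.BILINEAR), dtype=np.uint8)
                im = im[:, :, ::-1]                   # RGB -> BGR, as Caffe expects
                im = im.transpose((2, 0, 1))          # HWC -> CHW
            im_dat = caffe.io.array_to_datum(im)
            in_txn.put('{:0>10d}'.format(in_idx).encode('ascii'),
                       im_dat.SerializeToString())
            print('done')
    in_db.close()
def main():
    # Hypothetical driver; the original main() did not survive the truncation.
    args = get_arguments()
    if args.label_dict:
        import ast
        args.label_dict = ast.literal_eval(args.label_dict)
    if args.list_file:
        with open(args.list_file) as f:
            image_indices = [line.strip() for line in f if line.strip()]
    else:
        image_indices = sorted(glob.glob(os.path.join(args.image_dir, args.search_string)))
    if args.shuffle:
        random.seed(args.rand_seed)
        random.shuffle(image_indices)
    create_lmdb(args, image_indices)
if __name__ == '__main__':
    main()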
from __future__ import print_function, division, absolute_import
try:
import typing
except ImportError:
import collections as typing
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import colors
from matplotlib import patches
from matplotlib.tight_layout import get_renderer
from numbers import Number
import functools
import distutils
import warnings
def generate_samples(seed=0, n_samples=10000, n_categories=3):
"""Generate artificial samples assigned to set intersections
Parameters
----------
seed : int
A seed for randomisation
n_samples : int
Number of samples to generate
n_categories : int
Number of categories (named "cat0", "cat1", ...) to generate
Returns
-------
DataFrame
Field 'value' is a weight or score for each element.
Field 'index' is a unique id for each element.
Index includes a boolean indicator mask for each category.
Note: Further fields may be added in future versions.
See Also
--------
generate_counts : Generates the counts for each subset of categories
corresponding to these samples.
"""
rng = np.random.RandomState(seed)
df = pd.DataFrame({'value': np.zeros(n_samples)})
for i in range(n_categories):
r = rng.rand(n_samples)
df['cat%d' % i] = r > rng.rand()
df['value'] += r
df.reset_index(inplace=True)
df.set_index(['cat%d' % i for i in range(n_categories)], inplace=True)
return df
def generate_counts(seed=0, n_samples=10000, n_categories=3):
"""Generate artificial counts corresponding to set intersections
Parameters
----------
seed : int
A seed for randomisation
n_samples : int
Number of samples to generate statistics over
n_categories : int
Number of categories (named "cat0", "cat1", ...) to generate
Returns
-------
Series
Counts indexed by boolean indicator mask for each category.
See Also
--------
generate_samples : Generates a DataFrame of samples that these counts are
derived from.
"""
df = generate_samples(seed=seed, n_samples=n_samples,
n_categories=n_categories)
return df.value.groupby(level=list(range(n_categories))).count()
def generate_data(seed=0, n_samples=10000, n_sets=3, aggregated=False):
warnings.warn('generate_data was replaced by generate_counts in version '
'0.3 and will be removed in version 0.4.',
DeprecationWarning)
if aggregated:
return generate_counts(seed=seed, n_samples=n_samples,
n_categories=n_sets)
else:
return generate_samples(seed=seed, n_samples=n_samples,
n_categories=n_sets)['value']
def from_indicators(indicators, data=None):
"""Load category membership indicated by a boolean indicator matrix
This loader also supports the case where the indicator columns can be
derived from `data`.
.. versionadded:: 0.6
Parameters
----------
indicators : DataFrame-like of booleans, Sequence of str, or callable
Specifies the category indicators (boolean mask arrays) within
``data``, i.e. which records in ``data`` belong to which categories.
If a list of strings, these should be column names found in ``data``
whose values are boolean mask arrays.
If a DataFrame, its columns should correspond to categories, and its
index should be a subset of those in ``data``, values should be True
where a data record is in that category, and False or NA otherwise.
If callable, it will be applied to ``data`` after the latter is
converted to a Series or DataFrame.
data : Series-like or DataFrame-like, optional
If given, the index of category membership is attached to this data.
It must have the same length as `indicators`.
If not given, the series will contain the value 1.
Returns
-------
DataFrame or Series
`data` is returned with its index indicating category membership.
It will be a Series if `data` is a Series or 1d numeric array or None.
Notes
-----
Categories with indicators that are all False will be removed.
Examples
--------
>>> import pandas as pd
>>> from upsetplot import from_indicators
Just indicators
>>> indicators = {"cat1": [True, False, True, False],
... "cat2": [False, True, False, False],
... "cat3": [True, True, False, False]}
>>> from_indicators(indicators)
cat1 cat2 cat3
True False True 1.0
False True True 1.0
True False False 1.0
False False False 1.0
Name: ones, dtype: float64
Where indicators are included within data, specifying columns by name
>>> data = pd.DataFrame({"value": [5, 4, 6, 4], **indicators})
>>> from_indicators(["cat1", "cat3"], data=data)
value cat1 cat2 cat3
cat1 cat3
True True 5 True False True
False True 4 False True True
True False 6 True False False
False False 4 False False False
Making indicators out of all boolean columns
>>> from_indicators(lambda data: data.select_dtypes(bool), data=data)
value cat1 cat2 cat3
cat1 cat2 cat3
True False True 5 True False True
False True True 4 False True True
True False False 6 True False False
False False False 4 False False False
Using a dataset with missing data, we can use missingness as an indicator
>>> data = pd.DataFrame({"val1": [pd.NA, .7, pd.NA, .9],
... "val2": ["male", pd.NA, "female", "female"],
... "val3": [pd.NA, pd.NA, 23000, 78000]})
>>> from_indicators(pd.isna, data=data)
val1 val2 val3
val1 val2 val3
True False True <NA> male <NA>
False True True 0.7 <NA> <NA>
True False False <NA> female 23000
False False False 0.9 female 78000
"""
if data is not None:
data = _convert_to_pandas(data)
if callable(indicators):
if data is None:
raise ValueError("data must be provided when indicators is "
"callable")
indicators = indicators(data)
try:
indicators[0]
except Exception:
pass
else:
if isinstance(indicators[0], (str, int)):
if data is None:
raise ValueError("data must be provided when indicators are "
"specified as a list of columns")
if isinstance(indicators, tuple):
raise ValueError("indicators as tuple is not supported")
# column array
indicators = data[indicators]
indicators = pd.DataFrame(indicators).fillna(False).infer_objects()
# drop all-False (should we be dropping all-True also? making an option?)
indicators = indicators.loc[:, indicators.any(axis=0)]
if not all(dtype.kind == 'b' for dtype in indicators.dtypes):
raise ValueError('The indicators must all be boolean')
if data is not None:
if not (isinstance(indicators.index, pd.RangeIndex)
and indicators.index[0] == 0
and indicators.index[-1] == len(data) - 1):
# index is specified on indicators. Need to align it to data
if not indicators.index.isin(data.index).all():
raise ValueError("If indicators.index is not the default, "
"all its values must be present in "
"data.index")
indicators = indicators.reindex(index=data.index, fill_value=False)
else:
data = pd.Series(np.ones(len(indicators)), name="ones")
indicators.set_index(list(indicators.columns), inplace=True)
data.index = indicators.index
return data
def _convert_to_pandas(data, copy=True):
is_series = False
if hasattr(data, 'loc'):
if copy:
data = data.copy(deep=False)
is_series = data.ndim == 1
elif len(data):
try:
is_series = isinstance(data[0], Number)
except KeyError:
is_series = False
if is_series:
data = pd.Series(data)
else:
data = pd.DataFrame(data)
return data
def from_memberships(memberships, data=None):
"""Load data where each sample has a collection of category names
The output should be suitable for passing to `UpSet` or `plot`.
Parameters
----------
memberships : sequence of collections of strings
Each element corresponds to a data point, indicating the sets it is a
member of. Each category is named by a string.
data : Series-like or DataFrame-like, optional
If given, the index of category memberships is attached to this data.
It must have the same length as `memberships`.
If not given, the series will contain the value 1.
Returns
-------
DataFrame or Series
`data` is returned with its index indicating category membership.
It will be a Series if `data` is a Series or 1d numeric array.
The index will have levels ordered by category names.
Examples
--------
>>> from upsetplot import from_memberships
>>> from_memberships([
... ['cat1', 'cat3'],
... ['cat2', 'cat3'],
... ['cat1'],
... []
... ])
cat1 cat2 cat3
True False True 1
False True True 1
True False False 1
False False False 1
Name: ones, dtype: ...
>>> # now with data:
>>> import numpy as np
>>> from_memberships([
... ['cat1', 'cat3'],
... ['cat2', 'cat3'],
... ['cat1'],
... []
... ], data=np.arange(12).reshape(4, 3))
0 1 2
cat1 cat2 cat3
True False True 0 1 2
False True True 3 4 5
True False False 6 7 8
False False False 9 10 11
"""
df = pd.DataFrame([{name: True for name in names}
for names in memberships])
for set_name in df.columns:
if not hasattr(set_name, 'lower'):
raise ValueError('Category names should be strings')
if df.shape[1] == 0:
raise ValueError('Require at least one category. None were found.')
df.sort_index(axis=1, inplace=True)
df.fillna(False, inplace=True)
df = df.astype(bool)
df.set_index(list(df.columns), inplace=True)
if data is None:
return df.assign(ones=1)['ones']
data = _convert_to_pandas(data)
if len(data) != len(df):
raise ValueError('memberships and data must have the same length. '
'Got len(memberships) == %d, len(data) == %d'
% (len(memberships), len(data)))
data.index = df.index
return data
def from_contents(contents, data=None, id_column='id'):
"""Build data from category listings
Parameters
----------
contents : Mapping (or iterable over pairs) of strings to sets
Keys are category names, values are sets of identifiers (int or
string).
data : DataFrame, optional
If provided, this should be indexed by the identifiers used in
`contents`.
id_column : str, default='id'
The column name to use for the identifiers in the output.
Returns
-------
DataFrame
`data` is returned with its index indicating category membership,
including a column named according to id_column.
If data is not given, the order of rows is not assured.
Notes
-----
The order of categories in the output DataFrame is determined from
`contents`, which may have non-deterministic iteration order.
Examples
--------
>>> from upsetplot import from_contents
>>> contents = {'cat1': ['a', 'b', 'c'],
... 'cat2': ['b', 'd'],
... 'cat3': ['e']}
>>> from_contents(contents)
id
cat1 cat2 cat3
True False False a
True False b
False False c
False True False d
False True e
>>> import pandas as pd
>>> contents = {'cat1': [0, 1, 2],
... 'cat2': [1, 3],
... 'cat3': [4]}
>>> data = pd.DataFrame({'favourite': ['green', 'red', 'red',
... 'yellow', 'blue']})
>>> from_contents(contents, data=data)
id favourite
cat1 cat2 cat3
True False False 0 green
True False 1 red
False False 2 red
False True False 3 yellow
False True 4 blue
"""
cat_series = [pd.Series(True, index=list(elements), name=name)
for name, elements in contents.items()]
if not all(s.index.is_unique for s in cat_series):
raise ValueError('Got duplicate ids in a category')
concat = pd.concat
if distutils.version.LooseVersion(pd.__version__) >= '0.23.0':
# silence the warning
concat = functools.partial(concat, sort=False)
df = concat(cat_series, axis=1)
if id_column in df.columns:
raise ValueError('A category cannot be named %r' % id_column)
df.fillna(False, inplace=True)
cat_names = list(df.columns)
if data is not None:
if set(df.columns).intersection(data.columns):
raise ValueError('Data columns overlap with category names')
if id_column in data.columns:
raise ValueError('data cannot contain a column named %r' %
id_column)
not_in_data = df.drop(data.index, axis=0, errors='ignore')
if len(not_in_data):
raise ValueError('Found identifiers in contents that are not in '
'data: %r' % not_in_data.index.values)
df = df.reindex(index=data.index).fillna(False)
df = concat([data, df], axis=1)
df.index.name = id_column
return df.reset_index().set_index(cat_names)
def _aggregate_data(df, subset_size, sum_over):
"""
Returns
-------
df : DataFrame
full data frame
aggregated : Series
aggregates
"""
_SUBSET_SIZE_VALUES = ['auto', 'count', 'sum']
if subset_size not in _SUBSET_SIZE_VALUES:
raise ValueError('subset_size should be one of %s. Got %r'
% (_SUBSET_SIZE_VALUES, subset_size))
if df.ndim == 1:
# Series
input_name = df.name
df = pd.DataFrame({'_value': df})
if subset_size == 'auto' and not df.index.is_unique:
raise ValueError('subset_size="auto" cannot be used for a '
'Series with non-unique groups.')
if sum_over is not None:
raise ValueError('sum_over is not applicable when the input is a '
'Series')
if subset_size == 'count':
sum_over = False
else:
sum_over = '_value'
else:
# DataFrame
if sum_over is False:
raise ValueError('Unsupported value for sum_over: False')
elif subset_size == 'auto' and sum_over is None:
sum_over = False
elif subset_size == 'count':
if sum_over is not None:
raise ValueError('sum_over cannot be set if subset_size=%r' %
subset_size)
sum_over = False
elif subset_size == 'sum':
if sum_over is None:
raise ValueError('sum_over should be a field name if '
'subset_size="sum" and a DataFrame is '
'provided.')
gb = df.groupby(level=list(range(df.index.nlevels)), sort=False)
if sum_over is False:
aggregated = gb.size()
aggregated.name = 'size'
elif hasattr(sum_over, 'lower'):
aggregated = gb[sum_over].sum()
else:
raise ValueError('Unsupported value for sum_over: %r' % sum_over)
if aggregated.name == '_value':
aggregated.name = input_name
return df, aggregated
def _check_index(df):
# check all indices are boolean
if not all(set([True, False]) >= set(level)
for level in df.index.levels):
raise ValueError('The DataFrame has values in its index that are not '
'boolean')
df = df.copy(deep=False)
# XXX: this may break if input is not MultiIndex
kw = {'levels': [x.astype(bool) for x in df.index.levels],
'names': df.index.names,
}
if hasattr(df.index, 'codes'):
# compat for pandas <= 0.20
kw['codes'] = df.index.codes
else:
kw['labels'] = df.index.labels
df.index = pd.MultiIndex(**kw)
return df
def _scalar_to_list(val):
if not isinstance(val, (typing.Sequence, set)) or isinstance(val, str):
val = [val]
return val
def _get_subset_mask(agg, min_subset_size, max_subset_size,
min_degree, max_degree,
present, absent):
"""Get a mask over subsets based on size, degree or category presence"""
subset_mask = True
if min_subset_size is not None:
subset_mask = np.logical_and(subset_mask, agg >= min_subset_size)
if max_subset_size is not None:
subset_mask = np.logical_and(subset_mask, agg <= max_subset_size)
if (min_degree is not None and min_degree >= 0) or max_degree is not None:
degree = agg.index.to_frame().sum(axis=1)
if min_degree is not None:
subset_mask = np.logical_and(subset_mask, degree >= min_degree)
if max_degree is not None:
subset_mask = np.logical_and(subset_mask, degree <= max_degree)
if present is not None:
for col in _scalar_to_list(present):
subset_mask = np.logical_and(
subset_mask,
agg.index.get_level_values(col).values)
if absent is not None:
for col in _scalar_to_list(absent):
exclude_mask = np.logical_not(
agg.index.get_level_values(col).values)
subset_mask = np.logical_and(subset_mask, exclude_mask)
return subset_mask
def _filter_subsets(df, agg,
min_subset_size, max_subset_size,
min_degree, max_degree):
subset_mask = _get_subset_mask(agg,
min_subset_size=min_subset_size,
max_subset_size=max_subset_size,
min_degree=min_degree,
max_degree=max_degree,
present=None, absent=None)
if subset_mask is True:
return df, agg
agg = agg[subset_mask]
df = df[df.index.isin(agg.index)]
return df, agg
def _process_data(df, sort_by, sort_categories_by, subset_size,
sum_over, min_subset_size=None, max_subset_size=None,
min_degree=None, max_degree=None, reverse=False):
df, agg = _aggregate_data(df, subset_size, sum_over)
total = agg.sum()
df = _check_index(df)
totals = [agg[agg.index.get_level_values(name).values.astype(bool)].sum()
for name in agg.index.names]
totals = pd.Series(totals, index=agg.index.names)
# filter subsets:
df, agg = _filter_subsets(df, agg,
min_subset_size, max_subset_size,
min_degree, max_degree)
# sort:
if sort_categories_by == 'cardinality':
totals.sort_values(ascending=False, inplace=True)
elif sort_categories_by is not None:
raise ValueError('Unknown sort_categories_by: %r' % sort_categories_by)
df = df.reorder_levels(totals.index.values)
agg = agg.reorder_levels(totals.index.values)
if sort_by == 'cardinality':
agg = agg.sort_values(ascending=False)
elif sort_by == 'degree':
index_tuples = sorted(agg.index,
key=lambda x: (sum(x),) + tuple(reversed(x)))
agg = agg.reindex(pd.MultiIndex.from_tuples(index_tuples,
names=agg.index.names))
elif sort_by is None:
pass
else:
raise ValueError('Unknown sort_by: %r' % sort_by)
# add '_bin' to df indicating index in agg
# XXX: ugly!
def _pack_binary(X):
X = pd.DataFrame(X)
out = 0
for i, (_, col) in enumerate(X.items()):
out *= 2
out += col
return out
df_packed = _pack_binary(df.index.to_frame())
data_packed = _pack_binary(agg.index.to_frame())
df['_bin'] = pd.Series(df_packed).map(
pd.Series(np.arange(len(data_packed))[::-1 if reverse else 1],
index=data_packed))
if reverse:
agg = agg[::-1]
return total, df, agg, totals
def _multiply_alpha(c, mult):
r, g, b, a = colors.to_rgba(c)
a *= mult
return colors.to_hex((r, g, b, a), keep_alpha=True)
class _Transposed:
"""Wrap an object in order to transpose some plotting operations
Attributes of obj will be mapped.
Keyword arguments when calling obj will be mapped.
The mapping is not recursive: callable attributes need to be _Transposed
again.
"""
def __init__(self, obj):
self.__obj = obj
def __getattr__(self, key):
return getattr(self.__obj, self._NAME_TRANSPOSE.get(key, key))
def __call__(self, *args, **kwargs):
return self.__obj(*args, **{self._NAME_TRANSPOSE.get(k, k): v
for k, v in kwargs.items()})
_NAME_TRANSPOSE = {
'width': 'height',
'height': 'width',
'hspace': 'wspace',
'wspace': 'hspace',
'hlines': 'vlines',
'vlines': 'hlines',
'bar': 'barh',
'barh': 'bar',
'xaxis': 'yaxis',
'yaxis': 'xaxis',
'left': 'bottom',
'right': 'top',
'top': 'right',
'bottom': 'left',
'sharex': 'sharey',
'sharey': 'sharex',
'get_figwidth': 'get_figheight',
'get_figheight': 'get_figwidth',
'set_figwidth': 'set_figheight',
'set_figheight': 'set_figwidth',
'set_xlabel': 'set_ylabel',
'set_ylabel': 'set_xlabel',
'set_xlim': 'set_ylim',
'set_ylim': 'set_xlim',
'get_xlim': 'get_ylim',
'get_ylim': 'get_xlim',
'set_autoscalex_on': 'set_autoscaley_on',
'set_autoscaley_on': 'set_autoscalex_on',
}
def _transpose(obj):
if isinstance(obj, str):
return _Transposed._NAME_TRANSPOSE.get(obj, obj)
return _Transposed(obj)
def _identity(obj):
return obj
class UpSet:
"""Manage the data and drawing for a basic UpSet plot
Primary public method is :meth:`plot`.
Parameters
----------
data : pandas.Series or pandas.DataFrame
Elements associated with categories (a DataFrame), or the size of each
subset of categories (a Series).
Should have MultiIndex where each level is binary,
corresponding to category membership.
If a DataFrame, `sum_over` must be a string or False.
orientation : {'horizontal' (default), 'vertical'}
If horizontal, intersections are listed from left to right.
sort_by : {'cardinality', 'degree', None}
If 'cardinality', subset are listed from largest to smallest.
If 'degree', they are listed in order of the number of categories
intersected. If None, the order they appear in the data input is
used.
.. versionchanged:: 0.5
Setting None was added.
sort_categories_by : {'cardinality', None}
Whether to sort the categories by total cardinality, or leave them
in the provided order.
.. versionadded:: 0.3
subset_size : {'auto', 'count', 'sum'}
Configures how to calculate the size of a subset. Choices are:
'auto' (default)
If `data` is a DataFrame, count the number of rows in each group,
unless `sum_over` is specified.
If `data` is a Series with at most one row for each group, use
the value of the Series. If `data` is a Series with more than one
row per group, raise a ValueError.
'count'
Count the number of rows in each group.
'sum'
Sum the value of the `data` Series, or the DataFrame field
specified by `sum_over`.
sum_over : str or None
If `subset_size='sum'` or `'auto'`, then the intersection size is the
sum of the specified field in the `data` DataFrame. If a Series, only
None is supported and its value is summed.
min_subset_size : int, optional
Minimum size of a subset to be shown in the plot. All subsets with
a size smaller than this threshold will be omitted from plotting.
Size may be a sum of values, see `subset_size`.
.. versionadded:: 0.5
max_subset_size : int, optional
Maximum size of a subset to be shown in the plot. All subsets with
a size greater than this threshold will be omitted from plotting.
.. versionadded:: 0.5
min_degree : int, optional
Minimum degree of a subset to be shown in the plot.
.. versionadded:: 0.5
max_degree : int, optional
Maximum degree of a subset to be shown in the plot.
.. versionadded:: 0.5
facecolor : 'auto' or matplotlib color or float
Color for bar charts and active dots. Defaults to black if
axes.facecolor is a light color, otherwise white.
.. versionchanged:: 0.6
Before 0.6, the default was 'black'
other_dots_color : matplotlib color or float
Color for shading of inactive dots, or opacity (between 0 and 1)
applied to facecolor.
.. versionadded:: 0.6
shading_color : matplotlib color or float
Color for shading of odd rows in matrix and totals, or opacity (between
0 and 1) applied to facecolor.
.. versionadded:: 0.6
with_lines : bool
Whether to show lines joining dots in the matrix, to mark multiple
categories being intersected.
element_size : float or None
Side length in pt. If None, size is estimated to fit figure
intersection_plot_elements : int
The intersections plot should be large enough to fit this many matrix
elements. Set to 0 to disable intersection size bars.
.. versionchanged:: 0.4
Setting to 0 is handled.
totals_plot_elements : int
The totals plot should be large enough to fit this many matrix
elements.
show_counts : bool or str, default=False
Whether to label the intersection size bars with the cardinality
of the intersection. When a string, this formats the number.
For example, '%d' is equivalent to True.
show_percentages : bool, default=False
Whether to label the intersection size bars with the percentage
of the intersection relative to the total dataset.
This may be applied with or without show_counts.
    label_position : str, optional
        Position of the category labels when using vertical orientation.
    tsep : str, default=','
        Thousands separator used when formatting counts.
    dec : str, default='.'
        Decimal separator used when formatting counts and percentages.
    digits : int, default=1
        Number of digits shown when percentages are displayed.
    totals_label_position : str, optional
        Position of the labels on the totals plot when counts are shown.
    totals_label_rotation : float, optional
        Rotation of the totals labels, in degrees.
    intersections_label_position : str, optional
        Position of the labels on the intersection size plot when counts are
        shown.
    intersections_label_rotation : float, optional
        Rotation of the intersection size labels, in degrees.
.. versionadded:: 0.4
"""
_default_figsize = (10, 6)
def __init__(self, data, orientation='horizontal', sort_by='degree',
sort_categories_by='cardinality',
subset_size='auto', sum_over=None,
min_subset_size=None, max_subset_size=None,
min_degree=None, max_degree=None,
facecolor='auto', other_dots_color=.18, shading_color=.05,
with_lines=True, element_size=32,
intersection_plot_elements=6, totals_plot_elements=2,
show_counts='', show_percentages=False,
label_position=None,
tsep=',',
dec='.',
digits=1,
totals_label_position=None,
totals_label_rotation=None,
intersections_label_position=None,
intersections_label_rotation=None,
scatter_kws=None
):
        self.__tsep__ = tsep  # use the provided separators (they were previously hard-coded)
        self.__dec__ = dec
self.__digits__ = digits
self._horizontal = orientation == 'horizontal'
self.__totals_label_position__ = (totals_label_position
if totals_label_position
else ('left' if self._horizontal
else 'top'))
self.__totals_label_rotation__ = (totals_label_rotation if
totals_label_rotation else 0)
self.__intersections_label_position__ = (intersections_label_position
if intersections_label_position
else ('top'
if self._horizontal
else 'right'))
self.__intersections_label_rotation__ = (intersections_label_rotation
if intersections_label_rotation
else 0)
self._reorient = _identity if self._horizontal else _transpose
if facecolor == 'auto':
bgcolor = matplotlib.rcParams.get('axes.facecolor', 'white')
r, g, b, a = colors.to_rgba(bgcolor)
lightness = colors.rgb_to_hsv((r, g, b))[-1] * a
facecolor = 'black' if lightness >= .5 else 'white'
self._facecolor = facecolor
self._shading_color = (_multiply_alpha(facecolor, shading_color)
if isinstance(shading_color, float)
else shading_color)
self._other_dots_color = (_multiply_alpha(facecolor, other_dots_color)
if isinstance(other_dots_color, float)
else other_dots_color)
self._with_lines = with_lines
self._element_size = element_size
self._totals_plot_elements = totals_plot_elements
self._subset_plots = [{'type': 'default',
'id': 'intersections',
'elements': intersection_plot_elements}]
if not intersection_plot_elements:
self._subset_plots.pop()
self._show_counts = show_counts
self._show_percentages = show_percentages
self.__scatter_kws__ = scatter_kws
self.__label_position__ = label_position
# format data
# -----------
data = data.astype(int).dot(data.columns + ",").str.rstrip(',')
data = from_memberships(data.str.split(','), data=data)
(self.total, self._df, self.intersections,
self.totals) = _process_data(data,
sort_by=sort_by,
sort_categories_by=sort_categories_by,
subset_size=subset_size,
sum_over=sum_over,
min_subset_size=min_subset_size,
max_subset_size=max_subset_size,
min_degree=min_degree,
max_degree=max_degree,
reverse=not self._horizontal)
self.subset_styles = [{"facecolor": facecolor}
for i in range(len(self.intersections))]
self.subset_legend = [] # pairs of (style, label)
def _swapaxes(self, x, y):
if self._horizontal:
return x, y
return y, x
def style_subsets(self, present=None, absent=None,
min_subset_size=None, max_subset_size=None,
min_degree=None, max_degree=None,
facecolor=None, edgecolor=None, hatch=None,
linewidth=None, linestyle=None, label=None):
"""Updates the style of selected subsets' bars and matrix dots
Parameters are either used to select subsets, or to style them with
attributes of :class:`matplotlib.patches.Patch`, apart from label,
which adds a legend entry.
Parameters
----------
present : str or list of str, optional
Category or categories that must be present in subsets for styling.
absent : str or list of str, optional
Category or categories that must not be present in subsets for
styling.
min_subset_size : int, optional
Minimum size of a subset to be styled.
max_subset_size : int, optional
Maximum size of a subset to be styled.
min_degree : int, optional
Minimum degree of a subset to be styled.
max_degree : int, optional
Maximum degree of a subset to be styled.
facecolor : str or matplotlib color, optional
Override the default UpSet facecolor for selected subsets.
edgecolor : str or matplotlib color, optional
Set the edgecolor for bars, dots, and the line between dots.
hatch : str, optional
Set the hatch. This will apply to intersection size bars, but not
to matrix dots.
linewidth : int, optional
Line width in points for edges.
linestyle : str, optional
Line style for edges.
label : str, optional
If provided, a legend will be added
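        Examples
        --------
        A hypothetical sketch (``'cat1'`` is an illustrative category name;
        ``upset`` is an instance of this class): draw every subset containing
        ``cat1`` in red and add a legend entry for it.
        >>> upset.style_subsets(present='cat1', facecolor='red',
        ...                     label='contains cat1')  # doctest: +SKIP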
"""
style = {"facecolor": facecolor, "edgecolor": edgecolor,
"hatch": hatch,
"linewidth": linewidth, "linestyle": linestyle}
style = {k: v for k, v in style.items() if v is not None}
mask = _get_subset_mask(self.intersections,
present=present, absent=absent,
min_subset_size=min_subset_size,
max_subset_size=max_subset_size,
min_degree=min_degree, max_degree=max_degree)
for idx in np.flatnonzero(mask):
self.subset_styles[idx].update(style)
if label is not None:
if "facecolor" not in style:
style["facecolor"] = self._facecolor
for i, (other_style, other_label) in enumerate(self.subset_legend):
if other_style == style:
if other_label != label:
self.subset_legend[i] = (style,
other_label + '; ' + label)
break
else:
self.subset_legend.append((style, label))
def _plot_bars(self, ax, data, title, colors=None, use_labels=False):
ax = self._reorient(ax)
ax.set_autoscalex_on(False)
data_df = pd.DataFrame(data)
if self._horizontal:
data_df = data_df.loc[:, ::-1] # reverse: top row is top of stack
# TODO: colors should be broadcastable to data_df shape
if callable(colors):
colors = colors(range(data_df.shape[1]))
elif isinstance(colors, (str, type(None))):
colors = [colors] * len(data_df)
if self._horizontal:
colors = reversed(colors)
x = np.arange(len(data_df))
cum_y = None
all_rects = []
for (name, y), color in zip(data_df.items(), colors):
rects = ax.bar(x, y, .5, cum_y,
color=color, zorder=10,
label=name if use_labels else None,
align='center')
cum_y = y if cum_y is None else cum_y + y
all_rects.extend(rects)
self._label_sizes(ax, rects,
self.__intersections_label_position__,
self.__intersections_label_rotation__
)
ax.xaxis.set_visible(False)
ax.grid(b=None, which='major', axis='both', linestyle='-', alpha=.3)
ax.set_axisbelow(True) # to put the grid below the plot
for x in ['top', 'bottom', 'right']:
ax.spines[self._reorient(x)].set_visible(False)
tick_axis = ax.yaxis
tick_axis.grid(True)
ax.set_ylabel(title)
return all_rects
def _plot_stacked_bars(self, ax, by, sum_over, colors, title):
df = self._df.set_index("_bin").set_index(by, append=True, drop=False)
gb = df.groupby(level=list(range(df.index.nlevels)), sort=True)
if sum_over is None and "_value" in df.columns:
data = gb["_value"].sum()
elif sum_over is None:
data = gb.size()
else:
data = gb[sum_over].sum()
data = data.unstack(by).fillna(0)
if isinstance(colors, str):
colors = matplotlib.cm.get_cmap(colors)
elif isinstance(colors, typing.Mapping):
colors = data.columns.map(colors).values
if pd.isna(colors).any():
raise KeyError("Some labels mapped by colors: %r" %
data.columns[pd.isna(colors)].tolist())
self._plot_bars(ax, data=data, colors=colors, title=title,
use_labels=True)
handles, labels = ax.get_legend_handles_labels()
if self._horizontal:
# Make legend order match visual stack order
ax.legend(reversed(handles), reversed(labels))
else:
ax.legend()
def add_stacked_bars(self, by, sum_over=None, colors=None, elements=3,
title=None):
"""Add a stacked bar chart over subsets when :func:`plot` is called.
Used to plot categorical variable distributions within each subset.
.. versionadded:: 0.6
Parameters
----------
by : str
Column name within the dataframe for color coding the stacked bars,
containing discrete or categorical values.
sum_over : str, optional
Ordinarily the bars will chart the size of each group. sum_over
may specify a column which will be summed to determine the size
of each bar.
colors : Mapping, list-like, str or callable, optional
The facecolors to use for bars corresponding to each discrete
label, specified as one of:
Mapping
Maps from label to matplotlib-compatible color specification.
list-like
A list of matplotlib colors to apply to labels in order.
str
The name of a matplotlib colormap name.
callable
When called with the number of labels, this should return a
list-like of that many colors. Matplotlib colormaps satisfy
this callable API.
None
Uses the matplotlib default colormap.
elements : int, default=3
Size of the axes counted in number of matrix elements.
title : str, optional
The axis title labelling bar length.
Returns
-------
None
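        Examples
        --------
        A hypothetical sketch (``'gender'`` is an illustrative column of the
        input dataframe; ``upset`` is an instance of this class): colour each
        intersection bar by the distribution of that column, using a named
        matplotlib colormap.
        >>> upset.add_stacked_bars(by='gender', colors='Pastel1',
        ...                        title='Count by gender')  # doctest: +SKIP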
"""
# TODO: allow sort_by = {"lexical", "sum_squares", "rev_sum_squares",
# list of labels}
self._subset_plots.append({'type': 'stacked_bars',
'by': by,
'sum_over': sum_over,
'colors': colors,
'title': title,
'id': 'extra%d' % len(self._subset_plots),
'elements': elements})
def add_catplot(self, kind, value=None, elements=3, **kw):
"""Add a seaborn catplot over subsets when :func:`plot` is called.
Parameters
----------
kind : str
One of {"point", "bar", "strip", "swarm", "box", "violin", "boxen"}
value : str, optional
Column name for the value to plot (i.e. y if
orientation='horizontal'), required if `data` is a DataFrame.
elements : int, default=3
Size of the axes counted in number of matrix elements.
**kw : dict
Additional keywords to pass to :func:`seaborn.catplot`.
Our implementation automatically determines 'ax', 'data', 'x', 'y'
and 'orient', so these are prohibited keys in `kw`.
Returns
-------
None
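        Examples
        --------
        A hypothetical sketch (``'value'`` is an illustrative numeric column;
        ``upset`` is an instance of this class): overlay a seaborn boxplot of
        that column within each subset.
        >>> upset.add_catplot(kind='box', value='value')  # doctest: +SKIP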
"""
assert not set(kw.keys()) & {'ax', 'data', 'x', 'y', 'orient'}
if value is None:
if '_value' not in self._df.columns:
raise ValueError('value cannot be set if data is a Series. '
'Got %r' % value)
else:
if value not in self._df.columns:
raise ValueError('value %r is not a column in data' % value)
self._subset_plots.append({'type': 'catplot',
'value': value,
'kind': kind,
'id': 'extra%d' % len(self._subset_plots),
'elements': elements,
'kw': kw})
def _check_value(self, value):
if value is None and '_value' in self._df.columns:
value = '_value'
elif value is None:
raise ValueError('value can only be None when data is a Series')
return value
def _plot_catplot(self, ax, value, kind, kw):
df = self._df
value = self._check_value(value)
kw = kw.copy()
if self._horizontal:
kw['orient'] = 'v'
kw['x'] = '_bin'
kw['y'] = value
else:
kw['orient'] = 'h'
kw['x'] = value
kw['y'] = '_bin'
import seaborn
kw['ax'] = ax
getattr(seaborn, kind + 'plot')(data=df, **kw)
ax = self._reorient(ax)
if value == '_value':
ax.set_ylabel('')
ax.xaxis.set_visible(False)
for x in ['top', 'bottom', 'right']:
ax.spines[self._reorient(x)].set_visible(False)
tick_axis = ax.yaxis
tick_axis.grid(True)
def make_grid(self, fig=None):
"""Get a SubplotSpec for each Axes, accounting for label text width
"""
n_cats = len(self.totals)
n_inters = len(self.intersections)
if fig is None:
fig = plt.gcf()
# Determine text size to determine figure size / spacing
r = get_renderer(fig)
text_kw = {"size": matplotlib.rcParams['xtick.labelsize']}
# adding "x" ensures a margin
t = fig.text(0, 0, '\n'.join(str(label) + "x"
for label in self.totals.index.values),
**text_kw)
textw = t.get_window_extent(renderer=r).width
t.remove()
figw = self._reorient(fig.get_window_extent(renderer=r)).width
sizes = np.asarray([p['elements'] for p in self._subset_plots])
fig = self._reorient(fig)
non_text_nelems = len(self.intersections) + self._totals_plot_elements
if self._element_size is None:
colw = (figw - textw) / non_text_nelems
else:
render_ratio = figw / fig.get_figwidth()
colw = self._element_size / 72 * render_ratio
figw = colw * (non_text_nelems + np.ceil(textw / colw) + 1)
fig.set_figwidth(figw / render_ratio)
fig.set_figheight((colw * (n_cats + sizes.sum())) /
render_ratio)
text_nelems = int(np.ceil(figw / colw - non_text_nelems))
# print('textw', textw, 'figw', figw, 'colw', colw,
# 'ncols', figw/colw, 'text_nelems', text_nelems)
GS = self._reorient(matplotlib.gridspec.GridSpec)
gridspec = GS(*self._swapaxes(n_cats + (sizes.sum() or 0),
n_inters + text_nelems +
self._totals_plot_elements),
hspace=1)
if self._horizontal:
out = {'matrix': gridspec[-n_cats:, -n_inters:],
'shading': gridspec[-n_cats:, :],
'totals': gridspec[-n_cats:, :self._totals_plot_elements],
'gs': gridspec}
cumsizes = np.cumsum(sizes[::-1])
for start, stop, plot in zip(np.hstack([[0], cumsizes]), cumsizes,
self._subset_plots[::-1]):
out[plot['id']] = gridspec[start:stop, -n_inters:]
else:
out = {'matrix': gridspec[-n_inters:, :n_cats],
'shading': gridspec[:, :n_cats],
'totals': gridspec[:self._totals_plot_elements, :n_cats],
'gs': gridspec}
            cumsizes = np.cumsum(sizes)
            for start, stop, plot in zip(np.hstack([[0], cumsizes]), cumsizes,
                                         self._subset_plots):
                out[plot['id']] = gridspec[-n_inters:,
                                           start + n_cats:stop + n_cats]
        return out
import pandas as pd
from src.tools.config_loader import Configuration
from operator import or_ as union
from functools import reduce
import numpy as np
config = Configuration.get_instance()
io = config["IO"]
local_config = config["CostCurveConfig"]
column_map = {"id": "id",
"source": "source",
"geographical_label": "geographical_label",
"year": "year",
"production_capacity": "production_capacity",
"amount": "amount",
"cost": "cost",
"lat": "lat",
"lon": "lon"}
def create_scenario_dataframes_geco(scenario):
"""
Reads GECO dataset and creates a dataframe of the given scenario
"""
df_sc = pd.read_csv(io["scenario_geco_path"])
df_sc_europe = df_sc.loc[df_sc["Country"] == "EU28"]
df_scenario = df_sc_europe.loc[df_sc_europe["Scenario"] == scenario]
return df_scenario
def unique_scenarios():
"""
Find unique scenarios in the GECO dataset
"""
return pd.read_csv(io["scenario_geco_path"]).Scenario.unique()
def fetch_objective_value(df, fuel, year):
"""
Get specific energy production for the desired fuel in the given year
"""
if fuel in ["Natural Gas", "natural gas"]:
fuel = "Gas"
if fuel == "Fossil fuels":
return df.loc[(df.Level1 == "Fossil fuels") & (df.Year == year)].Value.sum()
elif fuel in ["Gas", "Coal", "Biomass"]:
return df.loc[(df.Level2 == fuel) & (df.Year == year)].Value.values[0]
def close_powerplants(df, objective, capacity_factor, fuel, year):
"""
    Simple algorithm to close power plants based on a given objective value
"""
df = df.copy()
power = objective * 1000 / (capacity_factor * 8.6)
if fuel == "Coal" and year >= 2038:
drop_index = df.loc[(df["geographical_label"] == "DE") & (df["source"].isin(["Hard Coal", "Lignite"]))].index
df = df.drop(drop_index)
while df.production_capacity.sum() > power:
min_year = df.year.min()
drop_index_year = df.loc[(df["year"] == min_year)].index
df_year = df.loc[drop_index_year]
min_prod = df_year.production_capacity.min()
drop_index = df_year.loc[(df_year["production_capacity"] == min_prod)].index
df = df.drop(drop_index)
return df.index
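# Example (hypothetical numbers; ``df_coal`` stands for a dataframe of coal
# plants in the column layout mapped above): keep retiring the oldest and
# smallest plants until the remaining capacity fits the objective.
#
#   kept_idx = close_powerplants(df_coal, objective=120.0, capacity_factor=0.4,
#                                fuel="Coal", year=2040)
#   df_coal_scenario = df_coal.loc[kept_idx]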
def map_capacity_factor(fuel):
"""
Get capacity factor of the plant from the config file
"""
return local_config["Scenario"]["CapacityFactors"][fuel]
def close_power_plants_per_fuel(df, fuel, year, scenario_df):
"""
Apply the close power plants algorithm per fuel
"""
if fuel == "Coal":
df_cut = df.loc[df.source.isin(["Hard Coal", "Lignite"])].copy()
else:
df_cut = df.loc[df.source == fuel].copy()
if fuel in ["Lignite", "Hard Coal", "Coal", "Coals"]:
fuel_s = "Coal"
elif fuel in ["Bioenergy"]:
fuel_s = "Biomass"
elif fuel in ["Natural Gas"]:
fuel_s = "Gas"
capacity_factor = map_capacity_factor(fuel_s)
objective = fetch_objective_value(scenario_df, fuel_s, year)
index = close_powerplants(df_cut, objective, capacity_factor, fuel, year)
return index
def idx_union(mylist):
"""
    Support function to create an index
"""
idx = reduce(union, (index for index in mylist))
return idx
def create_scenario_data_by_points(data, year, scenario):
"""
Creates a scenario dataset at point resolution
"""
final_index = create_index_for_scenario_mapping(data, year, scenario)
return data.loc[final_index]
def query_scenario_data(scenario):
"""
Get the desired scenario
"""
scenario_data = create_scenario_dataframes_geco(scenario)
return scenario_data
def create_index_for_scenario_mapping(data, year, scenario):
"""
Creates an updated index to create a scenario dataset
"""
scendata = query_scenario_data(scenario)
idx_dic = {"others": data[~(data["source"].isin(["Lignite", "Hard Coal", "Natural Gas", "Bioenergy"]))].index}
for fuel in ["Coal", "Natural Gas", "Bioenergy"]:
idx_dic[fuel] = close_power_plants_per_fuel(data, fuel, year, scendata)
idx_list = list(idx_dic.values())
final_index = idx_union(idx_list)
return final_index
def create_scenario_data_by_clusters(data, year, scenario, step=50):
"""
Creates scenarios using clustered points
"""
data = data.copy()
data["amount"] = data["amount"] / data["production_capacity"]
scendata = create_scenario_dataframes_geco(scenario)
for fuel in ["Coal", "Natural Gas", "Bioenergy"]:
if fuel in ["Lignite", "Hard Coal", "Coal", "Coals"]:
fuel_s = "Coal"
values = data.loc[data.source.isin(["Lignite", "Hard Coal"]), "production_capacity"]
elif fuel in ["Bioenergy"]:
fuel_s = "Biomass"
values = data.loc[data.source == fuel, "production_capacity"]
elif fuel in ["Natural Gas"]:
fuel_s = "Gas"
values = data.loc[data.source == fuel, "production_capacity"]
objective = fetch_objective_value(scendata, fuel_s, year)
capacity_factor = map_capacity_factor(fuel_s)
new_series = calculate_production_change(values, objective, capacity_factor, step=step)
data.update(new_series)
data["amount"] = data["amount"] * data["production_capacity"]
return data
def calculate_production_change(values, objective, capacity_factor, step=50):
"""
Porcentual changes of production for the cluster scenario production
"""
new_vals = values.values
index = values.index
power = objective * 1000 / (capacity_factor * 8.6)
total_sum = np.sum(new_vals)
i = 0
s = new_vals.shape[0]
if power > total_sum:
def test(x, y):
return x > y
else:
def test(x, y):
return x < y
step = -step
while test(power, total_sum):
new_vals[i % s] = max(new_vals[i % s] + step, 0)
        total_sum = np.sum(new_vals)
        i += 1
    return pd.Series(new_vals, index=index)
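# Example (hypothetical numbers; assumes the sketched return above, i.e. the
# updated capacities as a Series): shrink or grow clustered capacities in
# 50-unit steps until they match the power implied by the scenario objective;
# keeping the original index lets ``DataFrame.update`` above write the values
# back in place.
#
#   caps = pd.Series([400.0, 250.0, 100.0], index=[10, 11, 12])
#   new_caps = calculate_production_change(caps, objective=300.0,
#                                          capacity_factor=0.5, step=50)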
import os.path as osp
import chainer
import chainer.functions as F
from fcn import data
from fcn.models import FCN8s
import numpy as np
class FCN8sAtOnce(FCN8s):
pretrained_model = osp.expanduser(
'~/data/models/chainer/fcn8s-atonce_from_caffe.npz')
def __call__(self, x):
# conv1
h = F.relu(self.conv1_1(x))
conv1_1 = h
h = F.relu(self.conv1_2(conv1_1))
conv1_2 = h
h = F.max_pooling_2d(conv1_2, 2, stride=2, pad=0)
pool1 = h # 1/2
# conv2
h = F.relu(self.conv2_1(pool1))
conv2_1 = h
h = F.relu(self.conv2_2(conv2_1))
conv2_2 = h
h = F.max_pooling_2d(conv2_2, 2, stride=2, pad=0)
pool2 = h # 1/4
# conv3
h = F.relu(self.conv3_1(pool2))
conv3_1 = h
h = F.relu(self.conv3_2(conv3_1))
conv3_2 = h
h = F.relu(self.conv3_3(conv3_2))
conv3_3 = h
h = F.max_pooling_2d(conv3_3, 2, stride=2, pad=0)
pool3 = h # 1/8
# conv4
h = F.relu(self.conv4_1(pool3))
h = F.relu(self.conv4_2(h))
h = F.relu(self.conv4_3(h))
h = F.max_pooling_2d(h, 2, stride=2, pad=0)
pool4 = h # 1/16
# conv5
h = F.relu(self.conv5_1(pool4))
h = F.relu(self.conv5_2(h))
h = F.relu(self.conv5_3(h))
h = F.max_pooling_2d(h, 2, stride=2, pad=0)
pool5 = h # 1/32
# fc6
h = F.relu(self.fc6(pool5))
h = F.dropout(h, ratio=.5)
fc6 = h # 1/32
# fc7
h = F.relu(self.fc7(fc6))
h = F.dropout(h, ratio=.5)
fc7 = h # 1/32
# score_fr
h = self.score_fr(fc7)
score_fr = h # 1/32
# score_pool3
scale_pool3 = 0.0001 * pool3 # XXX: scale to train at once
h = self.score_pool3(scale_pool3)
score_pool3 = h # 1/8
# score_pool4
scale_pool4 = 0.01 * pool4 # XXX: scale to train at once
h = self.score_pool4(scale_pool4)
score_pool4 = h # 1/16
# upscore2
h = self.upscore2(score_fr)
upscore2 = h # 1/16
# score_pool4c
h = score_pool4[:, :,
5:5 + upscore2.data.shape[2],
5:5 + upscore2.data.shape[3]]
score_pool4c = h # 1/16
# fuse_pool4
h = upscore2 + score_pool4c
fuse_pool4 = h # 1/16
# upscore_pool4
h = self.upscore_pool4(fuse_pool4)
upscore_pool4 = h # 1/8
# score_pool4c
h = score_pool3[:, :,
9:9 + upscore_pool4.data.shape[2],
9:9 + upscore_pool4.data.shape[3]]
score_pool3c = h # 1/8
# fuse_pool3
h = upscore_pool4 + score_pool3c
fuse_pool3 = h # 1/8
# upscore8
h = self.upscore8(fuse_pool3)
upscore8 = h # 1/1
# score
h = upscore8[:, :, 31:31 + x.shape[2], 31:31 + x.shape[3]]
score = h # 1/1
return score
def init_from_vgg16(self, vgg16):
for l in self.children():
if l.name.startswith('conv'):
l1 = getattr(vgg16, l.name)
l2 = getattr(self, l.name)
assert l1.W.shape == l2.W.shape
assert l1.b.shape == l2.b.shape
l2.W.data[...] = l1.W.data[...]
l2.b.data[...] = l1.b.data[...]
elif l.name in ['fc6', 'fc7']:
l1 = getattr(vgg16, l.name)
l2 = getattr(self, l.name)
assert l1.W.size == l2.W.size
assert l1.b.size == l2.b.size
l2.W.data[...] = l1.W.data.reshape(l2.W.shape)[...]
l2.b.data[...] = l1.b.data.reshape(l2.b.shape)[...]
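    # Example usage (a sketch; assumes the ``fcn`` package's VGG16 model and a
    # downloaded pretrained weights file):
    #
    #   vgg16 = fcn.models.VGG16()
    #   chainer.serializers.load_npz(vgg16.pretrained_model, vgg16)
    #   model = FCN8sAtOnce(n_class=21)
    #   model.init_from_vgg16(vgg16)
    #
    # The conv weights are copied as-is and fc6/fc7 are reshaped into their
    # fully-convolutional counterparts.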
@classmethod
def download(cls):
return data.cached_download(
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vZ1RJdXotZkNhSEk',
path=cls.pretrained_model,
md5='5f3ffdc7fae1066606e1ef45cfda548f',
)
def predict(self, imgs):
lbls = list()
for img in imgs:
with chainer.using_config('train', False), \
chainer.function.no_backprop_mode():
x = chainer.Variable(self.xp.asarray(img[np.newaxis]))
score = self.__call__(x)[0].data
score = chainer.cuda.to_cpu(score)
            lbl = np.argmax(score, axis=0)
            lbls.append(lbl)
        return lbls
import numpy as np
import cv2
import cv2.aruco as aruco
import serial
import sys, time, math
# ------------------------------------------------------------------------------
# define variables:
marker_size = 3.35 # - [cm]
sercon = True
port = 'COM3'
camera = 1
camX, camY = 1280, 720
armID = 11
pressed = False
# ------------------------------------------------------------------------------
# define functions:
if sercon:
ser = serial.Serial(port, 9600) # COM port, baud rate (9600 for Arduino)
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R):
assert (isRotationMatrix(R))
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
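# Quick sanity check (illustrative): a pure 90-degree rotation about z should
# come back as roll=0, pitch=0, yaw=pi/2.
#
#   Rz = np.array([[0., -1., 0.],
#                  [1.,  0., 0.],
#                  [0.,  0., 1.]])
#   rotationMatrixToEulerAngles(Rz)   # -> array([0., 0., 1.5707963...])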
def rotate(X, theta):
r1 = rotationMatrixToEulerAngles(theta*R_flip)
r2 = rotationMatrixToEulerAngles(theta)
    '''Rotate the vector `X` by the rotation matrix `theta`, comparing the result with and without the R_flip correction.'''
if np.size(X) == 3:# and np.size(theta) == 3: # axis == 'x': return
'''
cx, sx = np.cos(theta[0]), np.sin(theta[0])
cy, sy = np.cos(theta[1]), np.sin(theta[1])
cz, sz = np.cos(theta[2]), np.sin(theta[2])
# attempting a combination of XYZ-rotation:
rot_matrix = (np.array([[cz*cx, cz*sy*sx-sz*cx, cz*sy*cx+sz*sx],
[sz*cy, sz*sy*sx+cz*cx, sz*sy*cx-cz*sx],
[-sy, cy*sx, cy*cx]]))
rot1 = np.dot(X, R_flip*rot_matrix)
rot2 = np.dot(X, rot_matrix.T)'''
cx, sx = np.cos(r1[0]), np.sin(r1[0])
cy, sy = np.cos(r1[1]), np.sin(r1[1])
cz, sz = np.cos(r1[2]), np.sin(r1[2])
# attempting a combination of XYZ-rotation:
rot_matrix = (np.array([[cz*cx, cz*sy*sx-sz*cx, cz*sy*cx+sz*sx],
[sz*cy, sz*sy*sx+cz*cx, sz*sy*cx-cz*sx],
[-sy, cy*sx, cy*cx]]))
rot1 = np.dot(X, rot_matrix)
cx, sx = np.cos(r2[0]), np.sin(r2[0])
cy, sy = np.cos(r2[1]), np.sin(r2[1])
cz, sz = | np.cos(r2[2]) | numpy.cos |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import pandas as pd
import numpy as np
import itertools
import json
import copy
from pprint import pprint
#values for the chi-square distribution, as can also be found in the Table from https://en.wikipedia.org/wiki/Chi-squared_distribution
thresholds=dict()
thresholds[0.90]=2.706
thresholds[0.95]=3.841
thresholds[0.99]=6.635
thresholds[0.999]=10.828
#add new column for AND-gate by temporarily extending the dataset with one extra column operating as the AND-gate
def mergeAND (df, tomerge):
dfextended=df.copy()
tomergeindices=[]
for k in tomerge:
index=df.columns.get_loc(k)
tomergeindices.append(index)
dataset = df.values
newcolumn = np.zeros((len(dataset), 1))
for i in range(len(dataset)):
alltrue = True
for k in range(len(tomerge)):
if dataset[i, tomergeindices[k]] == False:
alltrue=False
if alltrue==True:
newcolumn[i,0]=1
dfextended['AND'] = newcolumn
return dfextended
#add new column for OR-gate by temporarily extending the dataset with one extra column operating as the OR-gate
def mergeOR (df, tomerge):
dfextended=df.copy()
tomergeindices=[]
for k in tomerge:
index=df.columns.get_loc(k)
tomergeindices.append(index)
dataset = df.values
newcolumn = np.zeros((len(dataset), 1))
for i in range(len(dataset)):
onetrue = False
for k in range(len(tomerge)):
if dataset[i, tomergeindices[k]] == True:
onetrue=True
if onetrue==True:
newcolumn[i,0]=1
dfextended['OR'] = newcolumn
return dfextended
# generator of sets
def getavailablesets (df, splitter, seen):
availablesets = []
availables=[]
for node in list(df):
if node not in seen: # seen contains names, not indices.
if node!=splitter:
if node!='AND':
if node!='OR':
availables.append(node)
for l in range(2, 6): #2 to 5 items as input in one gate
for subset in itertools.combinations(availables, l):
availablesets.append(subset)
return availablesets
def getstratum(df, splitter, test_attribute, attribute_values, context=None):
for key in attribute_values:
df = df.loc[(df[key]==attribute_values[key])]
c = np.ones((2,2))
for testvalue in range(2):
for splitvalue in range(2):
df_temp = (df.loc[(df[test_attribute]==testvalue) & (df[splitter]==splitvalue)])
count = df_temp.shape[0]
c[(1-testvalue), (1-splitvalue)] = count
return c
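# Example (illustrative column names): the returned 2x2 table counts the joint
# outcomes of the test attribute and the splitter within one stratum,
#
#   c = getstratum(df, splitter='failure', test_attribute='alarm',
#                  attribute_values={'age_high': 1})
#   # c[0, 0] = #(alarm=1, failure=1), c[1, 1] = #(alarm=0, failure=0), etc.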
# calculates pamh score
def pamh(counts):
#calculate numerator
sumnumerator = 0.
denominator = 0.
for stratum in counts:
        if np.sum(stratum[:, 0]) > 0 and np.sum(stratum[:, 1]) > 0:
            n = np.sum(stratum)
            # Mantel-Haenszel terms: observed minus expected count of the
            # (test=1, split=1) cell, and the corresponding variance
            sumnumerator += stratum[0, 0] - np.sum(stratum[0, :]) * np.sum(stratum[:, 0]) / n
            denominator += (np.sum(stratum[0, :]) * np.sum(stratum[1, :]) *
                            np.sum(stratum[:, 0]) * np.sum(stratum[:, 1])) / (n ** 2 * (n - 1))
    return sumnumerator ** 2 / denominator
#<NAME>
#
#
# 2019-11-17
# -----------------------------------------------------------------------------
# This function computes the logarithmic (or ignorance) score. Predictive distributions can
# be considered as Gaussian, Gamma distributed, Empirical or "Loi des fuites"
# (a Gamma distribution + a Dirac at zero, suitable for daily precip), and Kernel distribution.
#
# input:
# calculation: mxn matrix; m = number of simulations
# n = number of member in ensemble
# observation: mx1 vector; m = number of records
# case: - 'Normal'
# - 'Gamma'
# - 'Kernel'
# - 'Fuites' is made for daily precipitation exclusively
# - 'Empirical'
# thres: probability density threshold below which we consider that the
# event was missed by the forecasting system. This value must be
# small (e.g.: 0.0001 means that f(obs) given the forecasts is
# only 0.0001 --> not forecasted).
# By default, thres = 0 and the logarithmic score is unbounded.
# opt_case - if 'case' = 'Fuites', opt_cas is the threshold to determine data
# which contributed to gamma distribution and those who are part of the
# Dirac impulsion
# - if 'case' = 'empirical', opt_cas needed is the number of bins
# in which to divide the ensemble, by default, it will be the
# number of members (Nan excluded). opt_cas have to be an integer
# superior to 1.
#
# output:
# loga: the logarithmic score (n*1 matrix)
#           ind_miss: Booleans to point out days for which the event was missed according
# to the threshold specified by the user (1= missed) (n*1 matrix)
#
# Reference:
# 'Empirical' case is based on Roulston and Smith (2002) with
# modifications -> quantile and members with similar values
# -----------------------------------------------------------------------------
# History
#
# MAB June 19: Added 2 cases for the empirical distribution: the
# observation can either be the smallest or the largest member of the
# augmented ensemble, in which case we can't use the "DeltaX = X(S+1) -
# X(S-1);" equation.
# -----------------------------------------------------------------------------
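# Example (hypothetical numbers): score a 2-day, 3-member ensemble forecast
# against observations using the empirical distribution and a small miss
# threshold,
#
#   ens = [[1.0, 1.2, 0.8],
#          [2.0, 2.5, 1.5]]
#   obs = [1.1, 3.0]
#   loga, ind_miss = score_log(ens, obs, 'Empirical', thres=1e-4)
#
# The second day's observation lies outside the ensemble range, so it is
# flagged as missed and scored with the threshold probability.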
import numpy as np
from scipy.stats import norm, gamma, gaussian_kde
import sys
def score_log(calculation, observation, case, thres=0., opt_case=None):
# transform input into numpy array
calculation = np.array(calculation, dtype='float64')
observation = np.array(observation, dtype='float64')
dim1 = calculation.shape
if len(dim1) == 1:
calculation = calculation.reshape((1,dim1[0]))
dim2 = observation.shape
if len(dim2) == 0:
observation = observation.reshape((1,1))
elif len(dim2) == 1:
observation = observation.reshape((dim2[0],1))
# preparation
n = np.size(calculation, axis=0)
loga = np.empty(n)
loga[:] = np.nan
ind_miss = np.empty(n)
ind_miss[:] = np.nan
# test input arguments are correct
if len(observation) != n:
        sys.exit("Error! The length of the record of observations doesn't match the length of the forecasting period.")
if thres == 0:
print('Logarithmic score is unbounded')
elif (thres < 0) or (thres > 1):
sys.exit('Threshold has to be between 0 and 1.')
# calcuation depending on the case
if case == 'Empirical':
# if no opt_case is given, number of bins are determined by the number of nonNaN members
if opt_case == None:
print('Bins used for empirical method determined by ensemble members')
        elif (opt_case < 2) or (not isinstance(opt_case, int)):
            sys.exit('Format of opt_case is not valid.')
        if not isinstance(thres, float):
            sys.exit('Format of threshold is not valid. thres needs to be a single float between 0 and 1.')
# loop over the records
for j in range(n):
            # determine whether the observation lies within the min/max bounds of the ensemble
if (~np.all(np.isnan(calculation[j,:]))) and (~np.isnan(observation[j])):
if (np.nanmin(calculation[j,:]) <= observation[j]) and (observation[j] <= np.nanmax(calculation[j,:])):
ind_miss[j] = 0
# suppress NaN from the ensemble to determine the number of members
sample_nonnan = calculation[j,:][~np.isnan(calculation[j,:])]
sort_sample_nonnan = np.sort(sample_nonnan)
# transform data, if bins are specified by user in the opt_case argument
if opt_case != None:
sort_sample_nonnan = np.quantile(sort_sample_nonnan, np.arange(0, 1, 1/opt_case))
# number of bins
N = len(sort_sample_nonnan)
                # if all members of the forecast and the observation are the same -> perfect forecast
if len(np.unique(np.append(sort_sample_nonnan, observation[j]))) == 1:
proba_obs = 1
else:
# if some members are equal, modify slightly the value
if len(np.unique(sort_sample_nonnan)) != len(sort_sample_nonnan):
uni_sample = np.unique(sort_sample_nonnan)
bins = np.append(uni_sample, np.inf)
hist, binedges = np.histogram(sort_sample_nonnan, bins)
idxs, = np.where(hist > 1)
new_sample = uni_sample
for idx in idxs:
new_val = uni_sample[idx] + 0.01 * np.random.rand(hist[idx]-1)
new_sample = np.append(new_sample, new_val)
sort_sample_nonnan = np.sort(new_sample)
# find position of the observation in the ensemble
X = np.sort(np.concatenate((sort_sample_nonnan, observation[j])))
S, = np.where(X == observation[j])
# if observation is at the first or last position of the ensemble -> threshold prob
if S[0] == len(X)-1:
proba_obs = thres
elif S[0] == 0:
proba_obs = thres
else:
#if the observation falls between two members or occupies the first or last rank
if len(S) == 1:
# If the observation is between the augmented ensemble bounds
DeltaX = X[S[0]+1] - X[S[0]-1]
proba_obs = min(1/(DeltaX * (N+1)),1)
# if observation is equal to one member, choose the maximum of the probability density associated
elif len(S) == 2:
if S[0] == 0:
DeltaX = X[S[1]+1] - X[S[1]]
elif S[1] == len(X)-1:
DeltaX = X[S[0]] - X[S[0]-1]
else:
DeltaX1 = X[S[1]+1] - X[S[1]]
DeltaX2 = X[S[0]] - X[S[0]-1]
DeltaX = min(DeltaX1,DeltaX2)
proba_obs = min(1/(DeltaX * (N+1)),1)
# test if probability below threshold
if proba_obs < thres:
proba_obs = thres
ind_miss[j] = 1
# if observation is outside of the bound of the ensemble
else:
ind_miss[j] = 1
proba_obs = thres
# calculate the logarithmus
loga[j] = - np.log2(proba_obs)
# if all values are nan in ensemble
else:
loga[j] = np.nan
ind_miss[j] = np.nan
elif case == 'Normal':
if (opt_case != None):
sys.exit('No optional case possible for Normal distribution')
for j in range(n):
# filter non nan values
sample_nonnan = calculation[j,:][~np.isnan(calculation[j,:])]
# if there are values in the ensemble which are not nan
if (len(sample_nonnan) > 0) and (~np.isnan(observation[j])):
# perfect forecast, all member values equal the observation
                if len(np.unique(np.append(sample_nonnan, observation[j]))) == 1:
                    proba_obs = 1
                    ind_miss[j] = 0
                else:
                    # fit a normal distribution to the ensemble members
                    mu = np.mean(sample_nonnan)
                    sigma = np.std(sample_nonnan)
                    proba_obs = norm.pdf(observation[j], mu, sigma)
                    ind_miss[j] = 0
                    # test if probability below threshold
                    if proba_obs < thres:
                        proba_obs = thres
                        ind_miss[j] = 1
                # calculate the logarithmus
                loga[j] = - np.log2(proba_obs)
            # if all values are nan in ensemble
            else:
                loga[j] = np.nan
                ind_miss[j] = np.nan
"""
voxel.py
-----------
Convert meshes to a simple voxel data structure and back again.
"""
import numpy as np
from . import util
from . import remesh
from . import caching
from . import grouping
from .constants import log, _log_time
class Voxel(object):
def __init__(self, *args, **kwargs):
self._data = caching.DataStore()
self._cache = caching.Cache(id_function=self._data.crc)
@caching.cache_decorator
def marching_cubes(self):
"""
A marching cubes Trimesh representation of the voxels.
No effort was made to clean or smooth the result in any way;
it is merely the result of applying the scikit-image
measure.marching_cubes function to self.matrix.
Returns
---------
meshed: Trimesh object representing the current voxel
object, as returned by marching cubes algorithm.
"""
meshed = matrix_to_marching_cubes(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return meshed
@property
def pitch(self):
# stored as TrackedArray with a single element
return self._data['pitch'][0]
@pitch.setter
def pitch(self, value):
self._data['pitch'] = value
@property
def shape(self):
"""
The shape of the matrix for the current voxel object.
Returns
---------
shape: (3,) int, what is the shape of the 3D matrix
for these voxels
"""
return self.matrix.shape
@caching.cache_decorator
def filled_count(self):
"""
Return the number of voxels that are occupied.
Returns
--------
filled: int, number of voxels that are occupied
"""
return int(self.matrix.sum())
@caching.cache_decorator
def volume(self):
"""
What is the volume of the filled cells in the current voxel object.
Returns
---------
volume: float, volume of filled cells
"""
volume = self.filled_count * (self.pitch**3)
return volume
@caching.cache_decorator
def points(self):
"""
The center of each filled cell as a list of points.
Returns
----------
points: (self.filled, 3) float, list of points
"""
points = matrix_to_points(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return points
def point_to_index(self, point):
"""
Convert a point to an index in the matrix array.
Parameters
----------
point: (3,) float, point in space
Returns
---------
index: (3,) int tuple, index in self.matrix
"""
point = np.asanyarray(point)
if point.shape != (3,):
raise ValueError('to_index requires a single point')
index = np.round((point - self.origin) /
self.pitch).astype(int)
index = tuple(index)
return index
def is_filled(self, point):
"""
Query a point to see if the voxel cell it lies in is filled or not.
Parameters
----------
point: (3,) float, point in space
Returns
---------
is_filled: bool, is cell occupied or not
"""
index = self.point_to_index(point)
in_range = (np.array(index) < np.array(self.shape)).all()
if in_range:
is_filled = self.matrix[index]
else:
is_filled = False
return is_filled
class VoxelMesh(Voxel):
def __init__(self,
mesh,
pitch,
max_iter=10,
size_max=None,
method='subdivide'):
"""
A voxel representation of a mesh that will track changes to
the mesh.
At the moment the voxels are not filled in and only represent
the surface.
Parameters
----------
mesh: Trimesh object
pitch: float, how long should each edge of the voxel be
size_max: float, maximum size (in mb) of a data structure that
may be created before raising an exception
"""
super(VoxelMesh, self).__init__()
self._method = method
self._data['mesh'] = mesh
self._data['pitch'] = pitch
self._data['max_iter'] = max_iter
@caching.cache_decorator
def matrix_surface(self):
"""
The voxels on the surface of the mesh as a 3D matrix.
Returns
---------
matrix: self.shape np.bool, if a cell is True it is occupied
"""
matrix = sparse_to_matrix(self.sparse_surface)
return matrix
@caching.cache_decorator
def matrix_solid(self):
"""
The voxels in a mesh as a 3D matrix.
Returns
---------
matrix: self.shape np.bool, if a cell is True it is occupied
"""
matrix = sparse_to_matrix(self.sparse_solid)
return matrix
@property
def matrix(self):
"""
A matrix representation of the surface voxels.
In the future this is planned to return a filled voxel matrix
if the source mesh is watertight, and a surface voxelization
otherwise.
Returns
---------
matrix: self.shape np.bool, cell occupancy
"""
if self._data['mesh'].is_watertight:
return self.matrix_solid
return self.matrix_surface
@property
def origin(self):
"""
The origin of the voxel array.
Returns
------------
origin: (3,) float, point in space
"""
populate = self.sparse_surface
return self._cache['origin']
@caching.cache_decorator
def sparse_surface(self):
"""
Filled cells on the surface of the mesh.
Returns
----------------
voxels: (n, 3) int, filled cells on mesh surface
"""
if self._method == 'ray':
func = voxelize_ray
elif self._method == 'subdivide':
func = voxelize_subdivide
else:
raise ValueError('voxelization method incorrect')
voxels, origin = func(
mesh=self._data['mesh'],
pitch=self._data['pitch'],
max_iter=self._data['max_iter'][0])
self._cache['origin'] = origin
return voxels
@caching.cache_decorator
def sparse_solid(self):
"""
Filled cells inside and on the surface of mesh
Returns
----------------
filled: (n, 3) int, filled cells in or on mesh.
"""
filled = fill_voxelization(self.sparse_surface)
return filled
def as_boxes(self, solid=False):
"""
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
-----------
solid: bool, if True return boxes for sparse_solid
Returns
---------
mesh: Trimesh object made up of one box per filled cell.
"""
if solid:
filled = self.sparse_solid
else:
filled = self.sparse_surface
# center points of voxels
centers = (filled * self.pitch).astype(np.float64)
centers += self.origin - (self.pitch / 2.0)
mesh = multibox(centers=centers, pitch=self.pitch)
return mesh
def show(self, solid=False):
"""
Convert the current set of voxels into a trimesh for visualization
and show that via its built- in preview method.
"""
self.as_boxes(solid=solid).show()
@_log_time
def voxelize_subdivide(mesh,
pitch,
max_iter=10,
edge_factor=2.0):
"""
Voxelize a surface by subdividing a mesh until every edge is
shorter than: (pitch / edge_factor)
Parameters
-----------
mesh: Trimesh object
pitch: float, side length of a single voxel cube
max_iter: int, cap maximum subdivisions or None for no limit.
edge_factor: float,
Returns
-----------
voxels_sparse: (n,3) int, (m,n,p) indexes of filled cells
origin_position: (3,) float, position of the voxel
grid origin in space
"""
max_edge = pitch / edge_factor
if max_iter is None:
longest_edge = np.linalg.norm(mesh.vertices[mesh.edges[:, 0]] -
mesh.vertices[mesh.edges[:, 1]],
axis=1).max()
max_iter = max(int(np.ceil(np.log2(longest_edge / max_edge))), 0)
# get the same mesh sudivided so every edge is shorter
# than a factor of our pitch
v, f = remesh.subdivide_to_size(mesh.vertices,
mesh.faces,
max_edge=max_edge,
max_iter=max_iter)
# convert the vertices to their voxel grid position
hit = v / pitch
# Provided edge_factor > 1 and max_iter is large enough, this is
# sufficient to preserve 6-connectivity at the level of voxels.
hit = np.round(hit).astype(int)
# remove duplicates
unique, inverse = grouping.unique_rows(hit)
# get the voxel centers in model space
occupied_index = hit[unique]
origin_index = occupied_index.min(axis=0)
origin_position = origin_index * pitch
voxels_sparse = (occupied_index - origin_index)
return voxels_sparse, origin_position
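# Example (a sketch; assumes the `trimesh` package is available):
#
#   mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
#   sparse, origin = voxelize_subdivide(mesh, pitch=0.1)
#   matrix = sparse_to_matrix(sparse)   # (m, n, p) boolean occupancy grid
#
# ``sparse`` holds the integer (i, j, k) indices of occupied cells and
# ``origin`` is the world-space position of cell (0, 0, 0).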
def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs):
"""
Voxelize a mesh in the region of a cube around a point. When fill=True,
uses proximity.contains to fill the resulting voxels so may be meaningless
for non-watertight meshes. Useful to reduce memory cost for small values of
pitch as opposed to global voxelization.
Parameters
-----------
mesh : trimesh.Trimesh
Source geometry
point : (3, ) float
Point in space to voxelize around
pitch : float
Side length of a single voxel cube
radius : int
Number of voxel cubes to return in each direction.
kwargs : parameters to pass to voxelize_subdivide
Returns
-----------
voxels : (m, m, m) bool
Array of local voxels where m=2*radius+1
origin_position : (3,) float
Position of the voxel grid origin in space
"""
from scipy import ndimage
# make sure point is correct type/shape
point = np.asanyarray(point, dtype=np.float64).reshape(3)
# this is a gotcha- radius sounds a lot like it should be in
# float model space, not int voxel space so check
if not isinstance(radius, int):
raise ValueError('radius needs to be an integer number of cubes!')
# Bounds of region
bounds = np.concatenate((point - (radius + 0.5) * pitch,
point + (radius + 0.5) * pitch))
# faces that intersect axis aligned bounding box
faces = list(mesh.triangles_tree.intersection(bounds))
# didn't hit anything so exit
if len(faces) == 0:
return np.array([], dtype=np.bool), np.zeros(3)
local = mesh.submesh([[f] for f in faces], append=True)
# Translate mesh so point is at 0,0,0
local.apply_translation(-point)
sparse, origin = voxelize_subdivide(local, pitch, **kwargs)
matrix = sparse_to_matrix(sparse)
# Find voxel index for point
center = np.round(-origin / pitch).astype(np.int64)
# pad matrix if necessary
prepad = np.maximum(radius - center, 0)
postpad = np.maximum(center + radius + 1 - matrix.shape, 0)
    matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1),
                    mode='constant')
# (c) 2017 <NAME>
import numpy as np
from scipy.special import digamma
from scipy.stats import poisson, gamma
from matplotlib import pyplot as plt
euler = 0.577215664901532
t = np.linspace(0, 10, 1000)
plt.plot(t, t * np.exp(t))
import numpy as np
from pycce.constants import HBAR, PI2, ELECTRON_GYRO
from pycce.utilities import dimensions_spinvectors, expand
def expanded_single(ivec, gyro, mfield, self_tensor, detuning=.0):
"""
Function to compute the single bath spin term.
Args:
ivec (ndarray with shape (3, n, n)): Spin vector of the bath spin in the full Hilbert space of the cluster.
gyro (float or ndarray with shape (3, 3)):
mfield (ndarray wtih shape (3,): Magnetic field of type ``mfield = np.array([Bx, By, Bz])``.
self_tensor (ndarray with shape (3, 3)): tensor of self-interaction of type IPI where I is bath spin.
detuning (float): Additional term of d*Iz allowing to simulate different energy splittings of bath spins.
Returns:
ndarray with shape (n, n): Single bath spin term.
"""
if isinstance(gyro, (float, int)):
hzeeman = -gyro / PI2 * (mfield[0] * ivec[0] + mfield[1] * ivec[1] + mfield[2] * ivec[2])
# else assume tensor
else:
gsvec = np.einsum('ij,jkl->ikl', gyro / PI2, ivec, dtype=np.complex128)
hzeeman = np.einsum('lij,ljk->ik', mfield, gsvec, dtype=np.complex128)
hself = 0
if ivec[2, 0, 0] > 0.5:
v_ivec = np.einsum('ij,jkl->ikl', self_tensor, ivec, dtype=np.complex128)
hself = np.einsum('lij,ljk->ik', ivec, v_ivec, dtype=np.complex128)
if detuning:
hself += detuning * ivec[2]
return hself + hzeeman
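# Example (illustrative numbers, comment-only sketch): for a spin-1/2 bath
# nucleus ``ivec`` holds its three (n, n) spin matrices expanded to the cluster
# Hilbert space; with a scalar gyromagnetic ratio and a field along z,
#
#   hamil = expanded_single(ivec, gyro=ELECTRON_GYRO,
#                           mfield=np.array([0., 0., 500.]),
#                           self_tensor=np.zeros((3, 3)))
#
# only the Zeeman term contributes, since the quadrupole self-interaction is
# skipped for spin-1/2 (``ivec[2, 0, 0]`` is not greater than 0.5).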
# def bath_single(bath, vectors, mfield):
# """
# Compute isolated bath spin terms for all spins in the bath
#
# Args:
# bath (BathArray): Array of the bath spins in the given cluster.
# vectors (array-like): array of expanded spin vectors, each with shape (3, n, n).
# mfield (ndarray wtih shape (3,): Magnetic field of type ``mfield = np.array([Bx, By, Bz])``.
#
# Returns:
# ndarray with shape (n, n): All single bath spin terms.
#
# """
# hsingle = 0
#
# for j, n in enumerate(bath):
# ivec = vectors[j]
# hsingle += expanded_single(ivec, n.gyro, mfield, n['Q'], n.detuning)
#
# return hsingle
def dipole_dipole(coord_1, coord_2, g1, g2, ivec_1, ivec_2):
"""
Compute dipole_dipole interactions between two bath spins.
Args:
coord_1 (ndarray with shape (3,)): Coordinates of the first spin.
coord_2 (ndarray with shape (3,)): Coordinates of the second spin.
g1 (float): Gyromagnetic ratio of the first spin.
g2 (float): Gyromagnetic ratio of the second spin.
ivec_1 (ndarray with shape (3, n, n)): Spin vector of the first spin in the full Hilbert space of the cluster.
ivec_2 (ndarray with shape (3, n, n)): Spin vector of the second spin in the full Hilbert space of the cluster.
Returns:
ndarray with shape (n, n): Dipole-dipole interactions.
"""
pre = g1 * g2 * HBAR / PI2
pos = coord_1 - coord_2
r = np.linalg.norm(pos)
    p_tensor = -pre * (3 * np.outer(pos, pos) - np.eye(3) * r ** 2) / (r ** 5)
    p_ivec = np.einsum('ij,jkl->ikl', p_tensor, ivec_2, dtype=np.complex128)
    hdd = np.einsum('lij,ljk->ik', ivec_1, p_ivec, dtype=np.complex128)
    return hdd
import os
import sys
import numpy as np
import random
import json
import argparse
import pickle
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(libpath + '/../pyRender/lib')
sys.path.append(libpath + '/../pyRender/src')
sys.path.append(libpath + '/..')
import objloader
import mesh_utils
import time
import h5py
import math
import tensorflow as tf
sys.path.append(os.path.join(libpath, 'tf_ops/nn_distance'))
import tf_nndistance
np.random.seed(0)
start_time = time.time()
##############h5 file handles
def save_dataset(fname, pcs):
cloud = np.stack([pc for pc in pcs])
    fout = h5py.File(fname, 'w')
fout.create_dataset('data', data=cloud, compression='gzip', dtype='float32')
fout.close()
def load_h5(h5_filename):
    f = h5py.File(h5_filename, 'r')
data = f['data'][:]
return data
################################
with open('../shapenetcore_v2_split.json') as json_file:
data = json.load(json_file)
SHAPENET_BASEDIR = '/orion/group/ShapeNetManifold_10000_simplified/'
parser = argparse.ArgumentParser()
parser.add_argument('--category', default='chair', help='Which class')
parser.add_argument('--data_split', default = "test", help='which data split to use')
parser.add_argument('--dump_dir', default='dump_chair_ranked_cd/', help='dump folder path [dump]')
parser.add_argument('--fitting_dump_dir', default='test_rank_point2mesh/', help='dump folder path after fitting')
# parser.add_argument('--fitting_dump_dir', default='deformation_parallel_newcost_2cd/', help='dump folder path after fitting')
parser.add_argument('--to_deform', default=True, help='with or without deformation')
parser.add_argument('--num_neighbors', type=int, default=3, help='Number of neighbors to retrieve')
FLAGS = parser.parse_args()
OBJ_CAT = FLAGS.category
DATA_SPLIT = FLAGS.data_split
NUM_NEIGHBORS = FLAGS.num_neighbors
DUMP_DIR = str(FLAGS.dump_dir)
print(DUMP_DIR)
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
FITTING_DUMP_DIR = os.path.join(DUMP_DIR, FLAGS.fitting_dump_dir)
if not os.path.exists(FITTING_DUMP_DIR): os.mkdir(FITTING_DUMP_DIR)
LOG_FOUT = open(os.path.join(FITTING_DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
TO_DEFORM = FLAGS.to_deform
print("Deform "+str(TO_DEFORM))
# if TO_DEFORM:
# print("ERROR. Please run evaluate_fitting_deform.py instead.")
# exit()
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
data = data[DATA_SPLIT]
num_categories = len(list(data.keys()))
cat_idx = -1
for i in range(num_categories):
if (data[str(i)]["category"] == OBJ_CAT):
cat_idx = str(i)
break
# #Retrieved neighbor indices
# pickle_in = open(os.path.join(DUMP_DIR, "neighbors.pickle"),"rb")
# neighbors_idxs = pickle.load(pickle_in)
shapes = data[str(cat_idx)]
synsetid = shapes["synsetid"]
model_names = shapes["model_names"]
num_samples = shapes["num_samples"]
NUM_POINT = 2048
####Get candidates
pickle_in = open('../candidate_generation/candidates_'+DATA_SPLIT+'_'+OBJ_CAT+'_testrank.pickle',"rb")
database_candidate_idxs = pickle.load(pickle_in)
pickle_in.close()
NUM_CANDIDATES = len(database_candidate_idxs[0])
####Get pre-computed deformed chamfer distance
FOL = "chamfer_distance_deformed_candidates/"
pickle_in = open(os.path.join(FOL, "testrank_candidates_"+DATA_SPLIT +"_"+OBJ_CAT+"_point2mesh.pickle"), "rb")
database_deformedCD_costs = pickle.load(pickle_in)
pickle_in.close()
pickle_in = open(os.path.join(FOL, "testrank_candidates_"+DATA_SPLIT +"_"+OBJ_CAT+"_point2mesh_undeformed.pickle"), "rb")
database_CD_costs = pickle.load(pickle_in)
pickle_in.close()
def chamfer_loss(pc1, pc2):
""" pred: BxNx3,
label: BxNx3, """
dists_forward,_,dists_backward,_ = tf_nndistance.nn_distance(pc1, pc2)
# loss = dists_forward+dists_backward
loss = tf.reduce_mean(dists_forward+dists_backward, axis=1)
return loss
with tf.Graph().as_default():
with tf.device('/gpu:0'):
pointclouds_pl_1 = tf.placeholder(tf.float32, shape=(NUM_CANDIDATES, NUM_POINT, 3))
pointclouds_pl_2 = tf.placeholder(tf.float32, shape=(NUM_CANDIDATES, NUM_POINT, 3))
chamfer_distance = chamfer_loss(pointclouds_pl_1, pointclouds_pl_2)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Init variables
init = tf.global_variables_initializer()
sess.run(init)
ops = {'pointclouds_pl_1': pointclouds_pl_1,
'pointclouds_pl_2': pointclouds_pl_2,
'chamfer_distance': chamfer_distance,
}
# fname = DATA_SPLIT +"_"+OBJ_CAT+"_meshsampled.h5"
fname = '../candidate_generation/' + DATA_SPLIT +"_"+OBJ_CAT + '.h5'
OBJ_POINTCLOUDS = load_h5(fname)
print(OBJ_POINTCLOUDS.shape)
all_cd = []
all_deformed_cd = []
all_cd_ranks = []
all_deformed_cd_ranks = []
for i in range(len(model_names)):
# pc_ref = OBJ_POINTCLOUDS[i]
# all_pc_ref = []
# for _ in range(NUM_CANDIDATES):
# all_pc_ref.append(pc_ref)
# all_pc_ref = np.array(all_pc_ref)
# database_candidates_idx_i = database_candidate_idxs[i]
# all_pc_src = []
# for j in range(NUM_CANDIDATES):
# pc_src = OBJ_POINTCLOUDS[database_candidates_idx_i[j]]
# all_pc_src.append(pc_src)
# all_pc_src = np.array(all_pc_src)
# ##CD before deformation
# feed_dict = {ops['pointclouds_pl_1']: all_pc_ref,
# ops['pointclouds_pl_2']: all_pc_src,}
# chamfer_distances = sess.run([ops['chamfer_distance']], feed_dict=feed_dict)
# chamfer_distances = np.array(chamfer_distances)[0]
chamfer_distances = database_CD_costs[i]
chamfer_distance_idx_sorted = np.argsort(chamfer_distances)
retrieved_neighbors_idx = chamfer_distance_idx_sorted[:NUM_NEIGHBORS]
##Deformed CD
deformed_chamfer_distances = database_deformedCD_costs[i]
deformed_chamfer_distances[np.argwhere(deformed_chamfer_distances==-1)] = 1e16 #those with invalid deformation
retrieved_chamfer_distances = chamfer_distances[retrieved_neighbors_idx]
retrieved_deformed_chamfer_distances = deformed_chamfer_distances[retrieved_neighbors_idx]
if (np.max(retrieved_deformed_chamfer_distances) > 1000):
continue
deformed_chamfer_distance_idx_sorted = np.argsort(deformed_chamfer_distances)
    CD_ranks = np.empty_like(chamfer_distance_idx_sorted)
    CD_ranks[chamfer_distance_idx_sorted] = np.arange(len(chamfer_distances))
    deformed_CD_ranks = np.empty_like(deformed_chamfer_distance_idx_sorted)
    deformed_CD_ranks[deformed_chamfer_distance_idx_sorted] = np.arange(len(deformed_chamfer_distances))
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 1 15:07:30 2021
@author: <NAME>
Modify the beta variable defined below to see how the wavefront pattern changes with the source speed.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import warnings
def sr_v_inv_trans2(u):
'''
2-d special relativity inverse velocity transforamtion
u: Sigma'系在Sigma系中的,x方向牵连速度/光速 (即几何单位制下的速度,或者beta)
在Sigma'系中发光,要变换到Sigma系,的洛伦兹变换。
'''
assert u <= 1, '超光速不允许!'
gamma = 1/np.sqrt(1-u**2)
def transform(vxp, vyp):
'''
        vxp, vyp: velocity components in frame Sigma' divided by c
                  (i.e. numbers in geometric units).
        '''
        assert np.max(np.sqrt(vxp**2 + vyp**2)) <= 1, 'Faster-than-light speeds are not allowed!'
vy = vyp/(gamma*(1+u*vxp))
vx = (vxp+u)/(1+u*vxp)
return vx, vy
return transform
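# Quick check (illustrative): light emitted along +y in the source frame, with
# the source moving at beta = 0.5, is aberrated forward in the lab frame while
# keeping |v| = c:
#
#   trans = sr_v_inv_trans2(0.5)
#   vx, vy = trans(0.0, 1.0)   # -> (0.5, ~0.866)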
class WaveFront():
# def ellipse(a, x0, y0, e=0):
# thetas = np.linspace(0, 2*np.pi, 300)
# c = a * e#np.sqrt(a**2-b**2)
# b = np.sqrt(a**2 - c**2)
# x = a*np.cos(thetas) + x0
# y = b*np.sin(thetas) + y0
# return x, y, c
def __init__(self, x, y, c, v_src=None, t0=0, life=np.inf, wf_n=15, color='black', show_v=True, ax=None):
'''
Parameters
----------
x : initial coordinate x.
y : initial coordinate x.
c : velocity of the wave.
wf_n : number of points on the wavefront.
life: the life span of a wavefront.
color : color of the points on the wavefront.
ax : matplotlib.axes._subplots.AxesSubplot
axis
'''
self.x = x
self.y = y
self.c = c
self.ax = ax
r0 = c*t0
self.life = life
self.t = t0
self.dead = False
        if v_src is None:
            warnings.warn('Source velocity not given; relativistic beaming cannot be taken into account.')
v_src = 0
self.v_inv_tran = sr_v_inv_trans2(v_src)
if self.ax == None:
self.ax = plt.gca()
self.r = r0
self.wf_n = wf_n
self.color = color
self.show_v = show_v
# xs, ys, _ = WaveFront.ellipse(self.r, self.x, self.y)
# plot points on the wavefront
self.xs, self.ys = np.ones(self.wf_n)*self.x, np.ones(self.wf_n)*self.y
self.thetas = np.linspace(0, 2*np.pi, self.wf_n)
self.vxps, self.vyps = self.c * np.cos(self.thetas), self.c * np.sin(self.thetas)
self.vxs, self.vys = self.v_inv_tran(self.vxps, self.vyps)
self.wf_pt, = self.ax.plot(self.xs, self.ys, linestyle='', marker='.', markersize=7, color=self.color)
# plot arrows showing the velocity of points (photons) on the wavefront
if self.show_v:
self.wf_arrs = []
for xsi, ysi, vxsi, vysi in zip(self.xs, self.ys, self.vxs, self.vys):
arr = self.ax.arrow(xsi, ysi, vxsi, vysi)
self.wf_arrs.append(arr)
# plot the wavefront
self.lxs, self.lys = np.ones(300)*self.x, np.ones(300)*self.y
self.lthetas = np.linspace(0, 2*np.pi, 300)
self.lvxps, self.lvyps = self.c * np.cos(self.lthetas), self.c * np.sin(self.lthetas)
self.lvxs, self.lvys = self.v_inv_tran(self.lvxps, self.lvyps)
self.wf_line, = self.ax.plot(self.lxs, self.lys, 'k--')
self.update(t0)
def update(self, dt=.1):
if self.t >= self.life and not self.dead:
self.dead = True
# self.circle.remove()
# self.circle.set_linestyle('')
self.wf_line.remove()
self.wf_pt.remove()
if self.show_v:
for arr in self.wf_arrs:
arr.remove()
if not self.dead:
self.r += self.c * dt
self.t += dt
# xs, ys, _ = WaveFront.ellipse(self.r, self.x, self.y)
self.xs += self.vxs*dt
self.ys += self.vys*dt
self.wf_pt.set_xdata(self.xs)
self.wf_pt.set_ydata(self.ys)
self.lxs += self.lvxs*dt
self.lys += self.lvys*dt
self.wf_line.set_xdata(self.lxs)
self.wf_line.set_ydata(self.lys)
if self.show_v:
for arr in self.wf_arrs:
arr.remove()
self.wf_arrs = []
for xsi, ysi, vxsi, vysi in zip(self.xs, self.ys, self.vxs, self.vys):
arr = self.ax.arrow(xsi, ysi, vxsi, vysi)
self.wf_arrs.append(arr)
class Source():
txt_disp = .2 #text displacement
def __init__(self, x, y, v, c, f, theta=0, wflife=np.inf, ax=None, autoview=False, show_wf_v=True):
self.x = x
self.y = y
self.v = v # the velocity of source
self.theta = theta # the direction of velocity of source
self.vx = v*np.cos(theta)
self.vy = v*np.sin(theta)
self.c = c # the velocity of wave
self.f = f
self.u = lambda t: np.cos(2*np.pi*self.f*t) #u is the displacement
self.ax = ax
self.t = 0
self.wflife = wflife #life of wavefronts
self.p = 1/f # p is period
self.autoview = autoview
self.show_wf_v = show_wf_v
if self.ax == None:
self.ax = plt.gca()
self.src_pt, = plt.plot([x], [y], marker='*', markersize=10)
self.src_txt = plt.text(x, y-Source.txt_disp, 'src', fontsize=12, horizontalalignment='center', verticalalignment='top')
self.wavefronts = []
self.wavefronts.append(WaveFront(self.x, self.y, self.c, v_src=self.v, life=self.wflife, ax=self.ax, show_v=self.show_wf_v))
def update(self, dt=.1):
assert dt < self.p
# wavefront propagate
for i, wavefront in enumerate(self.wavefronts):
wavefront.update(dt)
# source move
self.x += self.vx * dt
self.y += self.vy * dt
self.src_pt.set_xdata([self.x])
self.src_pt.set_ydata([self.y])
self.src_txt.set_x(self.x)
self.src_txt.set_y(self.y-Source.txt_disp)
# generate new wavefront if needed
if (self.t+dt) // self.p - self.t // self.p == 1:
t0 = self.t+dt - ((self.t+dt)//self.p)*self.p
x0 = self.x - self.vx * t0
y0 = self.y - self.vy * t0
self.wavefronts.append(WaveFront(x0, y0, self.c, t0=t0, v_src=self.v, life=self.wflife, ax=self.ax, show_v=self.show_wf_v))
self.t += dt
if self.autoview:
# recompute the ax.dataLim
self.ax.relim()
# update ax.viewLim using the new dataLim
self.ax.autoscale_view()
# dt = .05
# ts = np.arange(0, 9.5, dt)
# fig = plt.figure(figsize=(6, 6))
# ax = fig.subplots()
# ax.set_aspect('equal')
# ax.set_xlim([-10, 13])
# ax.set_ylim([-10, 10])
# src = Source(x=0, y=0, v=.9, c=1, f=.3, wflife=12, ax=ax, theta=0)#np.pi/4)
# def update(t):
# src.update(dt)
# src.ax.set_title('t={:.2f}'.format(t))
# ani = animation.FuncAnimation(
# fig, update, frames=ts, interval=.5*dt*1e3,
# blit=False, save_count=len(ts), repeat=False)
# at t=0 the source should be at the origin
show_k = True
show_kp = True
show_phi = False
show_phase = False  # isophase (equal-phase) surfaces
show_realdir = True
show_z = True
show_z_cont_label = False #show z contour label on the lines
isophase_n = 10
beta = .7 #.9#.999#.5#.9
gamma = 1/np.sqrt(1-beta**2)
freq = .5 #实际是freq/c
c = 1
dct = .05
cts = np.arange(0, 15, dct)
fig = plt.figure(figsize=(10, 8))
ax = fig.subplots()
ax.set_aspect('equal')
ax.set_xlim([-10, 13])
ax.set_ylim([-10, 10])
x = np.linspace(-13, 13, 100)
y = x
app = 10
X, Y = np.meshgrid(x, y)
dx = (x[1]-x[0])/2.
dy = (y[1]-y[0])/2.
extent = [x[0]-dx, x[-1]+dx, y[0]-dy, y[-1]+dy]
cos_phi = X/np.sqrt(X**2+Y**2)
sin_phi = Y/np.sqrt(X**2+Y**2)
cos_realang = lambda ct: (X-beta*ct)/np.sqrt((X-beta*ct)**2+Y**2)
sin_realang = lambda ct: Y/np.sqrt((X-beta*ct)**2+Y**2)
cos_theta_p = lambda ct: gamma*(X-beta*ct)/np.sqrt(gamma**2*(X-beta*ct)**2+Y**2)
sin_theta_p = lambda ct: Y/np.sqrt(gamma**2*(X-beta*ct)**2+Y**2)
omega_over_omegap = lambda ct: gamma*(1+beta*cos_theta_p(ct))
cos_theta = lambda ct: 1/omega_over_omegap(ct)*gamma*(beta+cos_theta_p(ct))
sin_theta = lambda ct: 1/omega_over_omegap(ct)*sin_theta_p(ct)
phase = lambda ct: omega_over_omegap(ct)*(ct - X*cos_theta(ct) - Y*sin_theta(ct))
z = lambda ct: 1/omega_over_omegap(ct) - 1
z = lambda ct: -np.log(omega_over_omegap(ct))  # from here on, the name z actually stores ln(1+z)
phi0 = np.arctan2(np.sqrt(2*gamma**2), -np.sqrt(gamma-1))
zmin, zmax = np.min(z(0)), np.max(z(0))
n_colorbar_ticks = 8
ticks = np.arange(n_colorbar_ticks+1)
ticks = ticks / n_colorbar_ticks * (zmax-zmin) + zmin  # here the ticks are values of z
ticklabels = np.exp(ticks) - 1
ticklabels += 5e-5
ticklabels = ['{:.2f}'.format(ticklabel) for ticklabel in ticklabels]
ticks *= -1  # we plot -z, so the ticks get a minus sign
lines = ticks.copy()
line_per_clabel = 3
def get_z0line(ct):
l = 20
x0 = beta*ct
    xs = [x0 + l*np.cos(phi0), x0, x0 + l*np.cos(phi0)]
    ys = [l*np.sin(phi0), 0, -l*np.sin(phi0)]
    return xs, ys
import itertools
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
notna,
)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [
Series(dtype=np.float64, name="a"),
Series([np.nan] * 5),
Series([1.0] * 5),
Series(range(5, 0, -1)),
Series(range(5)),
Series([np.nan, 1.0, np.nan, 1.0, 1.0]),
Series([np.nan, 1.0, np.nan, 2.0, 3.0]),
Series([np.nan, 1.0, np.nan, 3.0, 2.0]),
]
def create_dataframes():
return [
DataFrame(columns=["a", "a"]),
            DataFrame(np.arange(15).reshape((5, 3)), columns=["a", "a", 99]),
        ] + [DataFrame(s) for s in create_series()]
    def is_constant(x):
        values = x.values.ravel("K")
        return len(set(values[notna(values)])) == 1
    return [
        (x, is_constant(x), x.notna().all().all())
        for x in itertools.chain(create_dataframes(), create_series())
    ]
"""
Test shifting utility functions.
"""
import pytest
import numpy as np
from tensortools.cpwarp import padded_shifts
@pytest.mark.parametrize(
"shift", np.linspace(3, 3, 10)
)
def test_shifts_1d(shift):
x = np.array([1, 2, 3, 4, 5], dtype="float")
wx = [i - shift for i in range(5)]
xs = np.empty_like(x)
padded_shifts.apply_shift(x, shift, xs)
np.testing.assert_allclose(
xs, np.interp(wx, np.arange(5), x))
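# For example, shifting x = [1, 2, 3, 4, 5] right by 1.0 pads with the edge
# value: apply_shift yields [1, 1, 2, 3, 4], which is exactly
# np.interp([-1, 0, 1, 2, 3], np.arange(5), x).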
@pytest.mark.parametrize(
"shift", np.linspace(3, 3, 10)
)
def test_shifts_2d(shift):
"""
Tests shift operation on a matrix, with shifting
applied to the first dimension.
"""
x = np.array([1, 2, 3, 4, 5], dtype="float")
x = np.tile(x[None, :], (2, 1)).T
wx = [i - shift for i in range(5)]
xs = np.empty_like(x)
padded_shifts.apply_shift(x, shift, xs)
y = np.interp(wx, np.arange(5), x[:, 0])
np.testing.assert_allclose(xs, np.column_stack((y, y)))
def test_transpose_shifts():
x = np.array([1, 2, 3, 4, 5], dtype="float")
xs = np.empty_like(x)
# test shift right by 1.0
W = np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
])
padded_shifts.trans_shift(x, 1.0, xs)
np.testing.assert_allclose(xs, np.dot(W.T, x))
padded_shifts.apply_shift(x, 1.0, xs)
np.testing.assert_allclose(xs, np.dot(W, x))
# test shift right by 1.1
W = np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.1, 0.9, 0.0, 0.0, 0.0],
[0.0, 0.1, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.1, 0.9, 0.0],
])
padded_shifts.trans_shift(x, 1.1, xs)
np.testing.assert_allclose(xs, np.dot(W.T, x))
padded_shifts.apply_shift(x, 1.1, xs)
np.testing.assert_allclose(xs, np.dot(W, x))
# test shift right by 0.7
W = np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.7, 0.3, 0.0, 0.0, 0.0],
[0.0, 0.7, 0.3, 0.0, 0.0],
[0.0, 0.0, 0.7, 0.3, 0.0],
[0.0, 0.0, 0.0, 0.7, 0.3],
])
padded_shifts.trans_shift(x, 0.7, xs)
np.testing.assert_allclose(xs, np.dot(W.T, x))
padded_shifts.apply_shift(x, 0.7, xs)
np.testing.assert_allclose(xs, np.dot(W, x))
# test shift left by 1.0
W = np.array([
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
])
padded_shifts.trans_shift(x, -1.0, xs)
np.testing.assert_allclose(xs, np.dot(W.T, x))
padded_shifts.apply_shift(x, -1.0, xs)
np.testing.assert_allclose(xs, np.dot(W, x))
# test shift left by 1.3
W = np.array([
[0.0, 0.7, 0.3, 0.0, 0.0],
[0.0, 0.0, 0.7, 0.3, 0.0],
[0.0, 0.0, 0.0, 0.7, 0.3],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
])
padded_shifts.trans_shift(x, -1.3, xs)
np.testing.assert_allclose(xs, np.dot(W.T, x))
padded_shifts.apply_shift(x, -1.3, xs)
np.testing.assert_allclose(xs, np.dot(W, x))
# test shift left by 0.4
W = np.array([
[0.6, 0.4, 0.0, 0.0, 0.0],
[0.0, 0.6, 0.4, 0.0, 0.0],
[0.0, 0.0, 0.6, 0.4, 0.0],
[0.0, 0.0, 0.0, 0.6, 0.4],
[0.0, 0.0, 0.0, 0.0, 1.0],
])
padded_shifts.trans_shift(x, -0.4, xs)
np.testing.assert_allclose(xs, np.dot(W.T, x))
padded_shifts.apply_shift(x, -0.4, xs)
np.testing.assert_allclose(xs, np.dot(W, x))
def test_grams():
probe_data = [
# No shift.
(0, np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
])),
# Shift right by 1.0
(1, np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
])),
# Shift right by 2.0
(2, np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
])),
# Shift left by 1.0
(-1, np.array([
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
])),
# Shift left by 2.0
(-2, np.array([
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
])),
# Shift right by 0.3
(0.3, np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.3, 0.7, 0.0, 0.0, 0.0],
[0.0, 0.3, 0.7, 0.0, 0.0],
[0.0, 0.0, 0.3, 0.7, 0.0],
[0.0, 0.0, 0.0, 0.3, 0.7],
])),
# Shift right by 1.3
(1.3, np.array([
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.3, 0.7, 0.0, 0.0, 0.0],
[0.0, 0.3, 0.7, 0.0, 0.0],
[0.0, 0.0, 0.3, 0.7, 0.0],
])),
# Shift left by 0.3
(-0.3, np.array([
[0.7, 0.3, 0.0, 0.0, 0.0],
[0.0, 0.7, 0.3, 0.0, 0.0],
[0.0, 0.0, 0.7, 0.3, 0.0],
[0.0, 0.0, 0.0, 0.7, 0.3],
[0.0, 0.0, 0.0, 0.0, 1.0],
])),
# Shift left by 1.3
(-1.3, np.array([
[0.0, 0.7, 0.3, 0.0, 0.0],
[0.0, 0.0, 0.7, 0.3, 0.0],
[0.0, 0.0, 0.0, 0.7, 0.3],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
])),
]
# Test each example.
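    # shift_gram is expected to return W.T @ W in compact banded form: a 2 x n
    # array whose first row holds the superdiagonal (prepended with a zero) and
    # whose second row holds the main diagonal, matching `expected` below.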
for shift, W in probe_data:
WtW = np.dot(W.T, W)
expected = np.row_stack(
[np.concatenate(([0.0], np.diag(WtW, 1))), np.diag(WtW, 0)])
actual = padded_shifts.shift_gram(shift, 5, np.empty((2, 5)))
np.testing.assert_allclose(actual, expected)
def test_sym_bmat_mul():
S = np.array([
[1, 2, 1, 0, 0, 0, 0, 0],
[2, 1, 1, 4, 0, 0, 0, 0],
[1, 1, 5, 1, 1, 0, 0, 0],
[0, 4, 1, 4, 1, 8, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 8, 1, 7, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 3],
[0, 0, 0, 0, 0, 1, 3, 9],
]).astype('float')
    x = np.random.randn(8)
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for optimizers such as TransposeOptimizer."""
import unittest
import numpy as np
from onnx import helper, numpy_helper, TensorProto, OperatorSetIdProto
from parameterized import parameterized
from backend_test_base import Tf2OnnxBackendTestBase
from common import unittest_main, group_nodes_by_type, check_opset_min_version, check_opset_max_version, get_test_config
from tf2onnx import utils, constants
from tf2onnx.graph import GraphUtil
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
class OptimizerTests(Tf2OnnxBackendTestBase):
"""Run original model proto and modified model proto with onnxruntime, compare the results."""
def run_and_compare(self, output_names_with_port, onnx_feed_dict, origin_proto, op_type,
remaining_op_num, debug=False, rtol=1e-07):
utils.make_sure(op_type is not None, "op_type should be specified")
utils.make_sure(remaining_op_num is not None, "remaining_op_num should be specified")
utils.make_sure(self.config.is_onnxruntime_backend, "only onnxruntime is supported to test transpose optimizer")
origin_model_path = self.save_onnx_model(origin_proto, onnx_feed_dict, postfix="_origin")
expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict, output_names_with_port)
new_proto, new_graph = GraphUtil.optimize_model_proto(origin_proto, catch_errors=False, return_graph=True)
self.assertTrue(new_proto, msg="model proto after optimizer should not be None")
new_model_path = self.save_onnx_model(new_proto, onnx_feed_dict, postfix="_opt")
current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)
actual = self.run_onnxruntime(new_model_path, onnx_feed_dict, output_names_with_port)
for expected_val, actual_val in zip(expected, actual):
self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=1e-5)
self.assertEqual(expected_val.dtype, actual_val.dtype)
self.assertEqual(expected_val.shape, actual_val.shape)
self.assertTrue(current[op_type] == remaining_op_num,
msg="Expect " + str(remaining_op_num) + " " + op_type + " ops left, but actually " + str(
current[op_type]) + " left")
self.assert_shapes_correct(new_graph, allow_missing=False, run_checker=True)
return new_proto
@staticmethod
def _make_onnx_const(np_val, output_name):
node = helper.make_node(
'Constant',
inputs=[],
outputs=[output_name],
value=helper.make_tensor(
name=output_name,
data_type=utils.map_numpy_to_onnx_dtype(np_val.dtype),
dims=np_val.shape,
vals=np_val.flatten().astype(np_val.dtype).tolist(),
),
)
return node
def make_model(self, graph, producer_name="onnx-tests"):
imp = OperatorSetIdProto()
imp.version = self.config.opset
model_proto = helper.make_model(graph, producer_name=producer_name, opset_imports=[imp])
try:
model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.config.opset, model_proto.ir_version)
except: # pylint: disable=bare-except
pass
return model_proto
    # Transpose Optimizer Tests Start
def run_transpose_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
remaining_transpose_num=None, debug=False, rtol=1e-07):
return self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type="Transpose",
remaining_op_num=remaining_transpose_num, debug=debug, rtol=rtol)
def check_transpose_perm(self, model_proto, expected_perm):
for node in model_proto.graph.node:
if node.op_type == "Transpose":
perm = list(node.attribute[0].ints)
self.assertEqual(perm, expected_perm)
@parameterized.expand([
((2, 3, 4, 5), [0, 3, 1, 2], [0, 2, 3, 1]),
((2, 3, 4, 5, 6), [0, 4, 1, 2, 3], [0, 2, 3, 4, 1]),
])
def test_transpose_with_concat(self, input_shape, perm, inner_perm):
input_shape_with_trans = [input_shape[i] for i in perm]
for axis in range(len(input_shape)):
output_before_trans = list(input_shape)
output_before_trans[axis] *= 2
output_shape = [output_before_trans[i] for i in perm]
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=inner_perm, name="trans")
node2 = helper.make_node("Concat", ["Y", "input_data2"], ["Z"], axis=axis, name="concat")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm, name="trans2")
graph = helper.make_graph(
[node1, node2, node3],
"test_transpose_with_concat",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape_with_trans),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, input_shape),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape_with_trans).astype(np.float32),
"input_data2": np.random.randn(*input_shape).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_add1(self, input_shape, perm_input, perm_output):
# when transpose follows with a broadcasting op
# reshape is needed when switching transpose with this op and op need broadcast its inputs
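        # e.g. with input_shape=(2, 3, 4, 5) and perm [0, 2, 3, 1], Y has shape
        # (2, 4, 5, 3) and the (3,)-vector input_data2 broadcasts along its last
        # axis; once the Transposes are removed, the optimizer has to reshape
        # that vector (to (1, 3, 1, 1)) so it broadcasts along the channel axis
        # of the untransposed input instead.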
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=perm_input, name="trans")
node2 = helper.make_node("Add", ["Y", "input_data2"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node1, node2, node3],
"transpose_with_shape",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, (input_shape[1],)),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, input_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape).astype(np.float32),
"input_data2": np.random.randn(input_shape[1]).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_add2(self, input_shape1, input_shape2, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=perm_input, name="trans")
node2 = helper.make_node("Add", ["Y", "input_data2"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans2")
output_shape = input_shape1
graph = helper.make_graph(
[node1, node2, node3],
"transpose_with_shape",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, input_shape2),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape1).astype(np.float32),
"input_data2": np.random.randn(*input_shape2).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_relu(self, shape, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Relu", ["Y"], ["Z"], name="relu")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"relu-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_leaky_relu(self, shape, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("LeakyRelu", ["Y"], ["Z"], alpha=0.02, name="relu")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"LeakyRelu-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "QuantizeLinear")
def test_transpose_quantize(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array(0.75, dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array(3, dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("QuantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="quantize")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"quantize-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.UINT8, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [0, 2, 1], [0, 2, 1]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "QuantizeLinear with axis")
def test_transpose_quantize_with_axis(self, shape, perm_input, perm_output):
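        # Per-axis quantization: to remove the Transposes while keeping outputs
        # identical, the optimizer must also remap QuantizeLinear's `axis`
        # attribute according to the permutation (the per-channel dimension
        # moves when the layout changes).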
scale = numpy_helper.from_array(np.array([0.75, 0.1, 2.3, 0.3], dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array([2, 4, 6, 8], dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("QuantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="quantize", axis=1)
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"quantize-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.UINT8, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "DequantizeLinear")
def test_transpose_dequantize(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array(0.75, dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array(3, dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("DequantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="dequantize")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"dequantize-test",
[helper.make_tensor_value_info("X", TensorProto.UINT8, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randint(0, 100, shape, np.uint8)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [0, 2, 1], [0, 2, 1]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "DequantizeLinear with axis")
def test_transpose_dequantize_with_axis(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array([0.75, 0.1, 2.3, 0.3], dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array([2, 4, 6, 8], dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("DequantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="dequantize", axis=1)
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"dequantize-test",
[helper.make_tensor_value_info("X", TensorProto.UINT8, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randint(0, 100, shape, np.uint8)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
([2, 3, 4], [1, 2, 1], [1], [0, 2, 1], [0, 2, 1]),
([2, 3, 4, 5], [1, 2, 1, 2], [1], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(9, "Slice in opset 9 and takes 'axes, 'start' and 'ends' as attributes")
def test_transpose_slice(self, input_shape, slice_size, axes, perm_input, perm_output):
axes = np.array(axes, dtype=np.int64)
starts = np.array([0] * axes.size, dtype=np.int64)
ends = []
for i in range(axes.size):
ends.append(slice_size[axes[i]])
ends = np.array(ends, dtype=np.int64)
output_shape = input_shape.copy()
for axis in axes:
output_shape[perm_input[axis]] = slice_size[axis]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Slice", ["Y"], ["Z"], starts=starts, ends=ends, axes=axes, name="slice")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"slice-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
[
helper.make_tensor("starts", TensorProto.INT64, starts.shape, starts),
helper.make_tensor("ends", TensorProto.INT64, ends.shape, ends),
helper.make_tensor("axes", TensorProto.INT64, axes.shape, axes)
]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
([2, 3, 4], [1, 2, 1], [1], [0, 2, 1], [0, 2, 1]),
([2, 3, 4, 5], [1, 2, 1, 2], [1], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "Slice in opset 10 can accept dynamic 'start' and 'ends'")
def test_transpose_slice_opset_10(self, input_shape, slice_size, axes, perm_input, perm_output):
axes = np.array(axes, dtype=np.int32)
starts = np.array([0] * axes.size, dtype=np.int32)
ends = []
for i in range(axes.size):
ends.append(slice_size[axes[i]])
ends = np.array(ends, dtype=np.int32)
output_shape = input_shape.copy()
for axis in axes:
output_shape[perm_input[axis]] = slice_size[axis]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Slice", ["Y", "starts", "ends", "axes"], ["Z"], name="slice")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"slice-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
[
helper.make_tensor("starts", TensorProto.INT32, starts.shape, starts),
helper.make_tensor("ends", TensorProto.INT32, ends.shape, ends),
helper.make_tensor("axes", TensorProto.INT32, axes.shape, axes)
]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), (2, 0, 1), (1, 2, 0)),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(8, "Max in opset 10 supports broadcasting")
def test_transpose_max(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
const_3_val = np.random.randn(*input_shape2).astype(np.float32)
const_3 = helper.make_tensor("const_3", TensorProto.FLOAT, input_shape2, const_3_val.flatten())
const_3_node = helper.make_node("Constant", [], ["const_3"], value=const_3, name="const_3")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "const_3", "const_2", "const_1"], ["Z"], name="max")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, const_2_node, const_3_node, node1, node2, node3],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(8, "Max in opset 10 supports broadcasting")
def test_transpose_max_input_non_const(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "non_const", "const_2", "const_1"], ["Z"], name="max")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, const_2_node, node1, node2, node3],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"non_const": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(8, "Max in opset 10 supports broadcasting")
def test_transpose_max_no_cancel(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "non_const", "const_2", "const_1"], ["Z"], name="max")
output_shape = [None] * len(input_shape1)
graph = helper.make_graph(
[const_1_node, const_2_node, node1, node2],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"non_const": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=2)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_transpose_merge(self, input_shape1, input_shape2, perm):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node1 = helper.make_node("Transpose", ["X"], ["Y_1"], perm=perm, name="trans_1")
node2 = helper.make_node("Mul", ["Y", "Y_1"], ["OUT"], name="mul")
output_shape = input_shape2
graph = helper.make_graph(
[node0, node1, node2],
"transpose-merge-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_mul_as_square(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans")
node1 = helper.make_node("Mul", ["Y", "Y"], ["Z"], name="mul")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans_1")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-mul-as-sqr-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_mul_broadcastable_const(self, shape, perm_input, perm_output):
const = numpy_helper.from_array(np.random.random((1, shape[1])).astype(np.float32), name='const')
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans")
node1 = helper.make_node("Mul", ["Y", "const"], ["Z"], name="mul")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans_1")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-mul-const-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
[const],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1]),
((2, 3, 4, 5), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
def test_transpose_with_shape(self, shape, perm):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Shape", ["Y"], ["Z"], name="shape")
graph = helper.make_graph(
[node1, node2],
"transpose_with_shape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, [len(shape)])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), [2, 0, 1]),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_transpose_with_identity(self, input_shape, output_shape, perm):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Identity", ["Y"], ["Z"], name="identity")
graph = helper.make_graph(
[node1, node2],
"transpose_with_identity",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_sqrt(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans1")
node1 = helper.make_node("Sqrt", ["Y"], ["Z"], name="sqrt")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-sqrt-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 3, 4), [4, 3], [0, 2, 1], [1, 0]),
((1, 3, 4, 5), (4, 5, 3), [0, 2, 3, 1], [1, 2, 0]),
((1, 3, 4, 5, 6), (4, 5, 6, 3), [0, 2, 3, 4, 1], [1, 2, 3, 0]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze1(self, input_shape, output_shape, perm, expected_perm):
# squeeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[0])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((1, 3, 4), (1, 4, 1, 3, 1, 1), [2, 0, 1], [0, 4, 5], [2, 3, 0, 1, 4, 5]),
((1, 3, 4, 5), (1, 1, 4, 5, 1, 3, 1), [0, 2, 3, 1], [0, 4, 6], [0, 1, 4, 5, 2, 3, 6]),
((1, 3, 4, 5, 6), (1, 1, 4, 5, 1, 6, 1, 3), [0, 2, 3, 4, 1], [0, 4, 6], [0, 1, 4, 5, 6, 7, 2, 3]),
])
def test_transpose_with_unsqueeze(self, input_shape, output_shape, perm, axes_val, expected_perm):
# unsqueeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
if self.config.opset <= 12:
node2 = helper.make_node("Unsqueeze", ["Y"], ["Z"], name="unsqueeze", axes=axes_val)
nodes = [node1, node2]
else:
axes = self._make_onnx_const(np.array(axes_val, dtype=np.int64), "axes")
node2 = helper.make_node("Unsqueeze", ["Y", "axes"], ["Z"], name="unsqueeze")
nodes = [axes, node1, node2]
graph = helper.make_graph(
nodes,
"transpose_with_unsqueeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((1, 3, 4), [4, 3], [0, 2, 1], [1, 0]),
((1, 3, 4, 5), (4, 5, 3), [0, 2, 3, 1], [1, 2, 0]),
((1, 3, 4, 5, 6), (4, 5, 6, 3), [0, 2, 3, 4, 1], [1, 2, 3, 0]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze1_13(self, input_shape, output_shape, perm, expected_perm):
# squeeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([0], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 4, 1, 5), (3, 5, 4), [0, 2, 3, 1], [0, 2, 1]),
((3, 4, 1, 5, 6), (3, 5, 6, 4), [0, 2, 3, 4, 1], [0, 2, 3, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze2(self, input_shape, output_shape, perm, expected_perm):
# squeeze the second dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 4, 1, 5), (3, 5, 4), [0, 2, 3, 1], [0, 2, 1]),
((3, 4, 1, 5, 6), (3, 5, 6, 4), [0, 2, 3, 4, 1], [0, 2, 3, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze2_13(self, input_shape, output_shape, perm, expected_perm):
# squeeze the second dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 1, 4, 5), (3, 4, 5), [0, 2, 3, 1]),
((3, 1, 4, 5, 6), (3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze3(self, input_shape, output_shape, perm):
# squeeze the last dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[len(input_shape) - 1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 4, 5), (3, 4, 5), [0, 2, 3, 1]),
((3, 1, 4, 5, 6), (3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze3_13(self, input_shape, output_shape, perm):
# squeeze the last dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([len(input_shape) - 1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 1, 5), (3, 5), [0, 2, 3, 1]),
((3, 1, 1, 5, 4), (3, 5, 4), [0, 2, 3, 4, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze4(self, input_shape, output_shape, perm):
# squeeze the two dims
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1, len(input_shape) - 1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 1, 5), (3, 5), [0, 2, 3, 1]),
((3, 1, 1, 5, 4), (3, 5, 4), [0, 2, 3, 4, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze4_13(self, input_shape, output_shape, perm):
# squeeze the two dims
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([1, len(input_shape) - 1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((10, 3, 4), [0, 2, 1], [0, 2, 1]),
((10, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((10, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_loop(self, shape, perm_input, perm_output):
def _define_loop_graph(external_inputs):
# external_inputs: external node which will be used by this graph
# graph without loop carried
# computation
# for(...){a = external_inputs[i]; b = trans(a), c = squeeze(b)}, c is scan output
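            # The body transposes every slice and the outer graph transposes the
            # stacked scan output back, so the optimizer should cancel the pair
            # and leave no Transpose behind (remaining_transpose_num=0 below).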
node1 = helper.make_node("Gather", [external_inputs[0], "loop_iter_num"], ["Y0"])
node2 = helper.make_node("Transpose", ["Y0"], ["Z0"], perm=perm_input)
# graph output
if get_test_config().opset <= 12:
node3 = helper.make_node("Squeeze", ["Z0"], ["scan_output"], axes=[0])
const_node = None
else:
const_tensor = helper.make_tensor(name='const', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], dtype=np.int64))
const_node = helper.make_node("Constant", [], ["axes_const"], value=const_tensor, name="const")
node3 = helper.make_node("Squeeze", ["Z0", "axes_const"], ["scan_output"])
node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"])
node5 = helper.make_node("Identity", ["loop_condition"], ["loop_carried_output"])
nodes = [node1, node2, node3, node4, node5]
if const_node is not None:
nodes.append(const_node)
graph = helper.make_graph(
nodes,
"loop_subgraph",
[helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)), # iteration_num
helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()), # condition
helper.make_tensor_value_info("loop_carried", TensorProto.BOOL, ()) # loop_carried
],
[helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("loop_carried_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("scan_output", TensorProto.FLOAT, ["unknown"] * (len(shape) - 1))
],
)
return graph
def _make_loop(external_inputs, outputs):
trip_cnt = self._make_onnx_const(np.array(10, dtype=np.int64), "trip_cnt")
            cond = self._make_onnx_const(np.array(True, dtype=np.bool_), "cond")
sub_graph = _define_loop_graph(external_inputs)
loop_node = helper.make_node("Loop", ["trip_cnt", "cond", "cond"], outputs,
name="loop", body=sub_graph)
return trip_cnt, cond, loop_node
nodes = _make_loop(["array"], ["loop_carried", "scan_out"])
res = helper.make_node("Transpose", ["scan_out"], ["Y"], perm=perm_output, name="trans")
graph = helper.make_graph(
[*nodes, res],
"transpose_with_loop",
[helper.make_tensor_value_info("array", TensorProto.FLOAT, ["unknow"] * len(shape))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, ["unknow"] * len(shape))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"array": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [4, 2, 3], [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [2, 4, 5, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [2, 4, 5, 6, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_trans_with_sub(self, io_shape, const_shape_base, perm_input, perm_output):
const_shapes = []
for i in range(len(const_shape_base)):
const_shapes.append(const_shape_base[i:])
for trans_is_first_input in [True, False]:
for const_shape in const_shapes:
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_a")
const_tensor = helper.make_tensor(name='const', data_type=TensorProto.FLOAT, dims=const_shape,
vals=np.random.randn(*const_shape).flatten().astype(np.float32))
node2 = helper.make_node("Constant", [], ["const"], value=const_tensor, name="const")
if trans_is_first_input:
node3 = helper.make_node("Sub", ["Y", "const"], ["Z"], name="sub")
else:
node3 = helper.make_node("Sub", ["const", "Y"], ["Z"], name="sub")
node4 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_b")
graph = helper.make_graph(
[node1, node2, node3, node4],
"test_trans_with_sub",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, io_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, io_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*io_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), [2, 4, 5, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [2, 4, 5, 6, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_trans_with_sub_input_non_const(self, io_shape, non_const_shape_base, perm_input, perm_output):
non_const_shapes = []
for i in range(len(non_const_shape_base) - 1):
non_const_shapes.append(non_const_shape_base[i:])
for trans_is_first_input in [True, False]:
for non_const_shape in non_const_shapes:
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_a")
if trans_is_first_input:
node2 = helper.make_node("Sub", ["Y", "non_const"], ["Z"], name="sub")
else:
node2 = helper.make_node("Sub", ["non_const", "Y"], ["Z"], name="sub")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_b")
graph = helper.make_graph(
[node1, node2, node3],
"test_trans_with_sub_input_non_const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, io_shape),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, non_const_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, io_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*io_shape).astype(np.float32),
"non_const": np.random.randn(*non_const_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_input_non_const(self, input_shape1, input_shape2, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Add", ["Y", "A"], ["Z"], name="add")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[node0, node1, node2],
"transpose-add-test-input-non-const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("A", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"A": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [4, 2, 3], [2, 0, 1], [1, 2, 0]),
((1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_input_const(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = np.random.randn(*input_shape2).astype(np.float32)
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, input_shape2, const_1_val.flatten())
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Add", ["Y", "const_1"], ["Z"], name="add")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, node0, node1, node2],
"transpose-add-test-input-const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 5, 3, 3), (16, 5, 3, 3), (1, 16, 1, 1), (1, 1, 1, 16), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 5, 3, 3, 3), (16, 5, 3, 3, 3), (1, 16, 1, 1, 1), (1, 1, 1, 1, 16), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_conv_1(self, input_shape, weights_shape, output_shape,
const_shape, perm_input, perm_output):
# case where bias's dim is 1D and can be merged into Conv
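        # const_b (e.g. shape (1, 1, 1, 16)) varies only along the channel axis
        # of the NHWC-style tensor, so once the Transposes are removed it is a
        # per-output-channel bias and can be folded into the Conv's bias input.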
const_b_val = np.random.randn(*const_shape).astype(np.float32)
const_b = helper.make_tensor("const_b", TensorProto.FLOAT, const_shape, const_b_val.flatten())
const_b_node = helper.make_node("Constant", [], ["const_b"], value=const_b, name="const_b")
node0 = helper.make_node("Conv", ["x", "W"], ["X"], name="conv", pads=[0] * 2 * (len(input_shape) - 2))
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Add", ["Y", "const_b"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[const_b_node, node0, node1, node2, node3],
"transpose-add-test-with-conv-1",
[helper.make_tensor_value_info("x", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("W", TensorProto.FLOAT, weights_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"x": np.random.randn(*input_shape).astype(np.float32),
"W": np.random.randn(*weights_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_conv_2(self, input_shape, weights_shape, output_shape,
const_shape, perm_input, perm_output):
# case where bias's dim is not 1D and can't be merged into Conv
# add handler just remove the transpose around Add node
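        # const_b here (e.g. shape (1, 3, 3, 1)) varies over the spatial axes as
        # well, so it cannot become Conv's 1-D per-channel bias; removing the
        # Transposes effectively permutes the constant instead.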
const_b_val = np.random.randn(*const_shape).astype(np.float32)
const_b = helper.make_tensor("const_b", TensorProto.FLOAT, const_shape, const_b_val.flatten())
const_b_node = helper.make_node("Constant", [], ["const_b"], value=const_b, name="const_b")
node0 = helper.make_node("Conv", ["x", "W"], ["X"], name="conv", pads=[0] * 2 * (len(input_shape) - 2))
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Add", ["Y", "const_b"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[const_b_node, node0, node1, node2, node3],
"transpose-add-test-with-conv-2",
[helper.make_tensor_value_info("x", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("W", TensorProto.FLOAT, weights_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"x": np.random.randn(*input_shape).astype(np.float32),
"W": np.random.randn(*weights_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(10, "pad")
def test_transpose_pad(self, input_shape, output_shape, pads, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y"], ["Z"], pads=pads, name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(11, "pad")
def test_transpose_pad11(self, input_shape, output_shape, pads, perm_input, perm_output):
pads_val = np.array(pads, dtype=np.int64)
pads_tensor = helper.make_tensor("Pads", TensorProto.INT64, [len(input_shape) * 2], pads_val)
pads_const = helper.make_node("Constant", [], ["Pads"], value=pads_tensor, name="Pads")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y", "Pads"], ["Z"], name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2, pads_const],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(11, "pad")
def test_transpose_pad11_non_const_pads(self, input_shape, output_shape, pads, perm_input, perm_output):
pads_val = np.array(pads, dtype=np.int64)
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y", "Pads"], ["Z"], name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("Pads", TensorProto.INT64, pads_val.shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"],
{
"X": np.random.randn(*input_shape).astype(np.float32),
"Pads": pads_val
},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_reciprocal(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans1")
node1 = helper.make_node("Reciprocal", ["Y"], ["Z"], name="reciprocal")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reciprocal-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 1, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_reducemean(self, input_shape, output_shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceMean", ["Y"], ["Z"], axes=list(range(1, len(input_shape) - 1)),
keepdims=1, name="reducemean")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducemean-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [1], [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 4, 1), [2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 3, 1, 1), [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 1, 1, 1), [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 5, 6), [1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [1, 2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 1, 1, 1, 1), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(12, "ReduceSum from opset <= 12 has axes as an attribute")
def test_transpose_reducesum(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceSum", ["Y"], ["Z"], axes=axes,
keepdims=1, name="reducesum")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducesum-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 3, 4, 5), (1, 3, 4), [2], [0, 2, 3, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3), [1, 2], [0, 2, 3, 1], [0, 1]),
((1, 3, 4, 5), (), [0, 1, 2, 3], [0, 2, 3, 1], []),
((1, 3, 4, 5, 6), (1, 3, 5, 6), [1], [0, 2, 3, 4, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3), [1, 2, 3], [0, 2, 3, 4, 1], [0, 1]),
((1, 3, 4, 5, 6), (), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], []),
])
def test_transpose_reducemax(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceMax", ["Y"], ["Z"], axes=axes,
keepdims=0, name="reducemax")
if perm_output:
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
else:
node2 = helper.make_node("Identity", ["Z"], ["res"], name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducemax-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_transpose_argmax(self):
input_shape = [1, 2, 3, 4]
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
node1 = helper.make_node("ArgMax", ["Y"], ["Z"], axis=3, keepdims=0, name="argmax")
node2 = helper.make_node("Cast", ["Z"], ["res"], to=TensorProto.INT32, name="cast")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-argmax-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.INT32, [1, 3, 4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_transpose_tile(self):
input_shape = [1, 2, 3, 4]
repeats_value = [3, 6, 5, 11]
repeats_tensor = helper.make_tensor("A", TensorProto.INT64, [len(input_shape)], repeats_value)
repeats_const = helper.make_node("Constant", [], ["A"], value=repeats_tensor, name="repeats_const")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
node1 = helper.make_node("Tile", ["Y", "A"], ["Z"], name="tile")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=[0, 3, 1, 2], name="trans_2")
graph = helper.make_graph(
[repeats_const, node0, node1, node2],
"transpose-tile-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, [3, 22, 18, 20])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [1], [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 4, 1), [2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 3, 1, 1), [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 1, 1, 1), [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 5, 6), [1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [1, 2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 1, 1, 1, 1), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "ReduceSum from opset >= 13 has axes as an input")
def test_transpose_reducesum_opset_13(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceSum", ["Y", "axes"], ["Z"], keepdims=1, name="reducesum")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
axes = np.array(axes, dtype=np.int64)
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducesum-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
[helper.make_tensor("axes", TensorProto.INT64, axes.shape, axes)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), [2, 0, 1]),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_trans_output_as_graph_outputs(self, input_shape, output_shape, perm):
"""
If transpose's output is graph's output, don't optimize it.
"""
trans = helper.make_node("Transpose", ["X"], ["Y"], name="trans", perm=perm)
graph_proto = helper.make_graph(
[trans],
"trans-to-graph-output",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, output_shape)],
)
graph = GraphUtil.create_graph_from_onnx_graph(graph_proto)
# remove identity to graph output
identity_op = graph.get_node_by_output(graph.outputs[0])
graph.outputs = [identity_op.input[0]]
graph.remove_node(identity_op.name)
optimized_graph = GraphUtil.optimize_graph(graph)
self.assertTrue(optimized_graph, msg="graph after optimizer should not be None")
trans_cnt = len(group_nodes_by_type(optimized_graph)["Transpose"])
self.assertTrue(trans_cnt == 1, msg="Expect 1 Transpose ops left, but actually " + str(trans_cnt) + " left")
@parameterized.expand([
((2, 3, 4, 1), (2, 3, 4, 1), [0, 3, 1, 2]),
((2, 1, 1, 4), (2, 1, 1, 4), [0, 3, 1, 2]),
((2, 3, 4, 1), (2, -1, -1, 1), [0, 3, 1, 2]),
((2, 3, 4, 2, 1), (2, 3, 4, 2, 1), [0, 4, 1, 2, 3]),
((2, 1, 1, 1, 4), (2, 1, 1, 1, 4), [0, 4, 1, 2, 3]),
((2, 3, 4, 2, 1), (2, -1, -1, -1, 1), [0, 4, 1, 2, 3]),
])
def test_trans_can_be_replaced_with_reshape1(self, input_shape_np, input_shape, perm):
# test trans-NHWC
result_shape = [input_shape[i] for i in perm]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
graph = helper.make_graph(
[node1],
"test_trans_can_be_replaced_with_reshape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, result_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"X": np.random.randn(*input_shape_np).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 1, 3, 4), (2, 1, 3, 4), [0, 2, 3, 1]),
((2, 4, 1, 1), (2, 4, 1, 1), [0, 2, 3, 1]),
((2, 1, 3, 4), (2, 1, -1, -1), [0, 2, 3, 1]),
((2, 1, 3, 4, 2), (2, 1, 3, 4, 2), [0, 2, 3, 4, 1]),
((2, 4, 1, 1, 1), (2, 4, 1, 1, 1), [0, 2, 3, 4, 1]),
((2, 1, 3, 4, 2), (2, 1, -1, -1, -1), [0, 2, 3, 4, 1]),
])
def test_trans_can_be_replaced_with_reshape2(self, input_shape_np, input_shape, perm):
# test trans-NCHW
result_shape = [input_shape[i] for i in perm]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
graph = helper.make_graph(
[node1],
"test_trans_can_be_replaced_with_reshape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, result_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"X": np.random.randn(*input_shape_np).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 6, 8), [2, 0, 1], [1, 2, 0]),
((1, 6, 8, 9), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 6, 8, 9, 2), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_two_transposes_switch_with_mul(self, shape, perm_input, perm_output):
const_node = self._make_onnx_const(np.array(np.random.random(6), dtype=np.float32), "const_10")
node0 = helper.make_node("Transpose", ["u1"], ["v1"], perm=perm_input, name="trans_0")
node1 = helper.make_node("Transpose", ["u2"], ["v2"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Mul", ["v1", "v2"], ["x"], name="mul_1")
node3 = helper.make_node("Mul", ["x", const_node.output[0]], ["y"], name="mul_2")
node4 = helper.make_node("Transpose", ["y"], ["res"], perm=perm_output, name="trans_3")
graph = helper.make_graph(
[const_node, node0, node1, node2, node3, node4],
"test-transpose-mul",
[helper.make_tensor_value_info("u1", TensorProto.FLOAT, shape),
helper.make_tensor_value_info("u2", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"u1": np.random.randn(*shape).astype(np.float32),
"u2": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 6, 8), (8, 1, 6), [2, 0, 1], [1, 2, 0]),
((1, 6, 8, 9), (1, 8, 9, 6), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 6, 8, 9, 2), (1, 8, 9, 2, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_many_transposes_and_constant_switch_with_sum(self, input_shape1, input_shape2, perm_input, perm_output):
constnode = self._make_onnx_const(np.array(np.random.random(input_shape2), dtype=np.float32), "v4")
node0 = helper.make_node("Transpose", ["u1"], ["v1"], perm=perm_input, name="trans_0")
node1 = helper.make_node("Transpose", ["u2"], ["v2"], perm=perm_input, name="trans_1")
node11 = helper.make_node("Transpose", ["u3"], ["v3"], perm=perm_input, name="trans_2")
node2 = helper.make_node("Sum", ["v1", "v2", "v3", "v4"], ["x"], name="sum_1")
node3 = helper.make_node("Sum", ["x", "v1"], ["y"], name="sum_2")
node4 = helper.make_node("Transpose", ["y"], ["res"], perm=perm_output, name="trans_4")
output_shape = input_shape1
graph = helper.make_graph(
[constnode, node0, node1, node11, node2, node3, node4],
"test-transpose-mul",
[helper.make_tensor_value_info("u1", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("u2", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("u3", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"u1": np.random.randn(*input_shape1).astype(np.float32),
"u2": np.random.randn(*input_shape1).astype(np.float32),
"u3": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
    # Transpose Optimizer Tests End
# Identity Optimizer Tests Start
def run_identity_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
remaining_identity_num=None, debug=False, rtol=1e-07):
self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type="Identity",
remaining_op_num=remaining_identity_num, debug=debug, rtol=rtol)
def test_identity_non_graph_output(self):
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="add")
node2 = helper.make_node("Identity", ["Y"], ["Z"], name="identity")
node3 = helper.make_node("Shape", ["Z"], ["Z1"], name="shape")
graph = helper.make_graph(
[node1, node2, node3],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=0)
def test_identity_unremovable_identity(self):
# should not remove!!
node1 = helper.make_node("Identity", ["X"], ["Y"], name="identity")
graph = helper.make_graph(
[node1],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3, 4, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Y"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=1)
def test_identity_output_as_multiple_graph_outputs(self):
        # Handle the case where both Identity nodes are graph outputs:
        #        Add
        #       /   \
        #  Identity  Identity
        # We can remove at most one Identity in this case.
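        # Put differently: one Identity can be folded into "Y", but the other must stay
        # so the graph still exposes two distinct output names, which is what
        # remaining_identity_num=1 asserts below.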
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="identity")
node2 = helper.make_node("Identity", ["Y"], ["Z1"], name="identity2")
node3 = helper.make_node("Identity", ["Y"], ["Z2"], name="identity3")
graph = helper.make_graph(
[node1, node2, node3],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (2, 3, 4, 5)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (2, 3, 4, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1", "Z2"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=1)
def test_identity_in_subgraph_non_graph_output(self):
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="add")
iter_num_value = np.array(1, dtype=np.int64)
node2 = helper.make_node(
'Constant',
inputs=[],
outputs=['iterate_num_value'],
value=helper.make_tensor(
name='iterate_num_value',
data_type=TensorProto.INT64,
dims=iter_num_value.shape,
vals=iter_num_value.flatten().astype(np.int64).tolist(),
),
)
        cond_value = np.array(True, dtype=np.bool_)
node3 = helper.make_node(
'Constant',
inputs=[],
outputs=['cond_value'],
value=helper.make_tensor(
name='cond_value',
data_type=TensorProto.BOOL,
                dims=cond_value.shape,
                vals=cond_value.flatten().astype(np.bool_).tolist(),
),
)
# sub graph
sub_node1 = helper.make_node("Add", ["loop_var_1", "loop_var_1"], ["SubY"], name="sub_add")
sub_node2 = helper.make_node("Identity", ["SubY"], ["SubIdentity1"], name="sub_identity_1")
sub_node3 = helper.make_node("Identity", ["SubIdentity1"], ["loop_var_out_1"], name="sub_identity_2")
sub_node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"], name="sub_identity_3")
sub_graph = helper.make_graph(
[sub_node1, sub_node2, sub_node3, sub_node4],
"identity_subgraph-test",
[helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)), # iteration_num
helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()), # condition
helper.make_tensor_value_info("loop_var_1", TensorProto.FLOAT, ()), # loop-carried dependency
],
[helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("loop_var_out_1", TensorProto.FLOAT, ())
],
)
# sub graph ends
loop_node = helper.make_node("Loop", ["iterate_num_value", "cond_value", "Y"], ["loop_var_1_output"],
name="loop", body=sub_graph)
node4 = helper.make_node("Identity", ["loop_var_1_output"], ["Z"], name="identity")
node5 = helper.make_node("Shape", ["Z"], ["Z1"], name="shape")
graph = helper.make_graph(
[node1, node2, node3, loop_node, node4, node5],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=0)
# Identity Optimizer Tests End
# Merge Duplicated Nodes Optimizer Tests Start
def run_merge_duplicated_nodes_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
op_type=None, remaining_op_num=None, debug=False, rtol=1e-07,
graph_validator=None):
new_proto = self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type=op_type,
remaining_op_num=remaining_op_num, debug=debug, rtol=rtol)
if graph_validator:
self.assertTrue(graph_validator(new_proto.graph))
def test_duplicated_duplicated_input(self):
# same input or not
node0 = helper.make_node('Add', inputs=["X", "X"], outputs=["value0"])
node1 = helper.make_node('Add', inputs=["X", "X"], outputs=["value1"])
node2 = helper.make_node('Add', inputs=["value1", "X"], outputs=["value2"])
node3 = helper.make_node("Mul", ["value0", "value2"], ["value3"])
node4 = helper.make_node("Mul", ["value1", "value3"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4],
"test_duplicated_duplicated_input",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (5, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="Add", remaining_op_num=2)
def test_duplicated_duplicated_attributes(self):
# same attr or not
node0 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value0"], axes=[0], keepdims=0)
node1 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value1"], axes=[0], keepdims=0)
node2 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value2"], axes=[1], keepdims=0)
node3 = helper.make_node('Add', inputs=["value0", "value1"], outputs=["value3"])
node4 = helper.make_node("Mul", ["value2", "value3"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4],
"test_duplicated_duplicated_attributes",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (5,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="ReduceMin", remaining_op_num=2)
def _check_initializer_num(self, graph_proto, num):
return num == len(graph_proto.initializer)
def test_duplicated_duplicated_constant(self):
const_val = np.array([1, 2, 3], dtype=np.float32)
tensor_1 = helper.make_tensor("tensor_1", TensorProto.FLOAT, const_val.shape, const_val)
tensor_2 = helper.make_tensor("tensor_2", TensorProto.FLOAT, const_val.shape, const_val)
tensor_3 = helper.make_tensor("tensor_3", TensorProto.FLOAT, const_val.shape, const_val)
tensor_4 = helper.make_tensor("tensor_4", TensorProto.FLOAT, const_val.shape, const_val)
node0 = helper.make_node('Constant', inputs=[], outputs=["value0"], value=tensor_1)
node1 = helper.make_node('Constant', inputs=[], outputs=["value1"], value=tensor_2)
node2 = helper.make_node('Constant', inputs=[], outputs=["value2"], value=tensor_3)
node3 = helper.make_node('Constant', inputs=[], outputs=["value3"], value=tensor_4)
node4 = helper.make_node("Mul", ["value0", "value1"], ["output1"])
node5 = helper.make_node("Mul", ["value2", "output1"], ["output2"])
node6 = helper.make_node("Mul", ["value3", "output2"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4, node5, node6],
"test_duplicated_duplicated_constant",
[],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (3,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {}, model_proto, op_type="Constant", remaining_op_num=0,
graph_validator=lambda g: self._check_initializer_num(g, 1))
def test_duplicated_duplicated_constant_and_initializer(self):
const_val = np.array([1, 2, 3], dtype=np.float32)
tensor_1 = helper.make_tensor("value0", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
tensor_2 = helper.make_tensor("value1", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
tensor_3 = helper.make_tensor("value2", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
tensor_4 = helper.make_tensor("value3", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
node0 = helper.make_node('Constant', inputs=[], outputs=["value0"], value=tensor_1)
node1 = helper.make_node('Constant', inputs=[], outputs=["value1"], value=tensor_2)
node4 = helper.make_node("Mul", ["value0", "value1"], ["output1"])
node5 = helper.make_node("Mul", ["value2", "output1"], ["output2"])
node6 = helper.make_node("Mul", ["value3", "output2"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node4, node5, node6],
"test_duplicated_duplicated_constant",
[helper.make_tensor_value_info("value2", TensorProto.FLOAT, (3,))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (3,))],
[tensor_3, tensor_4]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {}, model_proto, op_type="Constant", remaining_op_num=0,
graph_validator=lambda g: self._check_initializer_num(g, 2))
def test_duplicated_node_is_graph_output(self):
node0 = helper.make_node('Add', inputs=["X", "X"], outputs=["value0"])
node1 = helper.make_node('Add', inputs=["X", "X"], outputs=["value1"])
node2 = helper.make_node('Add', inputs=["value1", "X"], outputs=["value2"])
graph = helper.make_graph(
[node0, node1, node2],
"test_duplicated_node_is_graph_output",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("value1", TensorProto.FLOAT, (5, 5)),
helper.make_tensor_value_info("value2", TensorProto.FLOAT, (5, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["value1", "value2"],
{"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="Add", remaining_op_num=2)
@check_opset_min_version(10, "Dropout in opset 10 produces mask of 'bool' type")
def test_duplicated_different_output_length(self):
node0 = helper.make_node('Dropout', inputs=["X"], outputs=["value0"])
node1 = helper.make_node('Dropout', inputs=["X"], outputs=["value1", "mask"])
node2 = helper.make_node('Dropout', inputs=["value1"], outputs=["value2"])
graph = helper.make_graph(
[node0, node1, node2],
"test_duplicated_different_output_length",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("value1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("mask", TensorProto.BOOL, (5,)),
helper.make_tensor_value_info("value2", TensorProto.FLOAT, (5,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["value1", "mask", "value2"],
{"X": np.random.randn(5).astype(np.float32)},
model_proto,
op_type="Dropout", remaining_op_num=2)
def test_duplicated_need_multiple_run(self):
node00 = helper.make_node('Log', inputs=["X"], outputs=["value00"])
node01 = helper.make_node('Log', inputs=["value00"], outputs=["value01"])
node02 = helper.make_node('Log', inputs=["value01"], outputs=["value02"])
node10 = helper.make_node('Log', inputs=["X"], outputs=["value10"])
node11 = helper.make_node('Log', inputs=["value10"], outputs=["value11"])
node12 = helper.make_node('Log', inputs=["value11"], outputs=["value12"])
res = helper.make_node('Add', inputs=["value02", "value12"], outputs=["res"])
graph = helper.make_graph(
[node00, node01, node02, node10, node11, node12, res],
"test_duplicated_node_is_graph_output",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, (5,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["res"], {"X": np.random.randn(5).astype(np.float32)},
model_proto,
op_type="Log", remaining_op_num=3)
# Merge Duplicated Nodes Optimizer Tests End
# Reshape Optimizer Tests Start
@parameterized.expand([
(["dims12", "dim0_unsq"], 0, 1, 3), # Reshape [3, 7, 11] -> [7, 11, 3]
(["dim0_unsq", "dims12"], 2, 0, 2), # Reshape [3, 7, 11] -> [11, 3, 7]
])
def test_reshape_opt(self, concat_order, gather_i, starts, ends):
x_shape = [3, 7, 11]
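        # The graph built below computes the Reshape target from the input's own Shape
        # via Gather/Slice/Unsqueeze/Concat; the reshape optimizer is expected to fold
        # all of that away, hence the check for zero remaining "Shape" ops.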
node0 = helper.make_node("Shape", ["X"], ["S"])
g_indices_tensor = helper.make_tensor(name='g_indices_tensor', data_type=TensorProto.INT64, dims=[],
vals=np.array([gather_i], np.int64))
starts_tensor = helper.make_tensor(name='starts_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([starts], np.int64))
ends_tensor = helper.make_tensor(name='ends_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([ends], np.int64))
axes_tensor = helper.make_tensor(name='axes_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], np.int64))
node1 = helper.make_node("Constant", [], ["g_indices"], value=g_indices_tensor)
node2 = helper.make_node("Constant", [], ["starts"], value=starts_tensor)
node3 = helper.make_node("Constant", [], ["ends"], value=ends_tensor)
node4 = helper.make_node("Constant", [], ["axes"], value=axes_tensor)
node5 = helper.make_node("Gather", ["S", "g_indices"], ["dim0"])
if self.config.opset >= 10:
node6 = helper.make_node("Slice", ["S", "starts", "ends", "axes"], ["dims12"])
else:
node6 = helper.make_node("Slice", ["S"], ["dims12"], starts=[starts], ends=[ends], axes=[0])
if self.config.opset >= 13:
node7 = helper.make_node("Unsqueeze", ["dim0", "axes"], ["dim0_unsq"])
else:
node7 = helper.make_node("Unsqueeze", ["dim0"], ["dim0_unsq"], axes=[0])
node8 = helper.make_node("Concat", concat_order, ["dims120"], axis=0)
node9 = helper.make_node("Reshape", ["X", "dims120"], ["Y"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4, node5, node6, node7, node8, node9],
"test_reshape_opt1",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["Y"], {"X": np.random.randn(*x_shape).astype(np.float32)},
model_proto, op_type="Shape", remaining_op_num=0)
def test_reshape_opt_with_mul(self):
x_shape = [7, 10, 20, 30]
node0 = helper.make_node("Shape", ["X"], ["S"])
g_indices_tensor = helper.make_tensor(name='g_indices_tensor', data_type=TensorProto.INT64, dims=[2],
vals=np.array([1, 2], np.int64))
starts_tensor = helper.make_tensor(name='starts_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], np.int64))
ends_tensor = helper.make_tensor(name='ends_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([1], np.int64))
axes_tensor = helper.make_tensor(name='axes_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], np.int64))
five_tensor = helper.make_tensor(name='five_tensor', data_type=TensorProto.INT32, dims=[],
vals=np.array([5], np.int32))
six_tensor = helper.make_tensor(name='six_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([6], np.int64))
node1 = helper.make_node("Constant", [], ["g_indices"], value=g_indices_tensor)
node2 = helper.make_node("Constant", [], ["starts"], value=starts_tensor)
node3 = helper.make_node("Constant", [], ["ends"], value=ends_tensor)
node4 = helper.make_node("Constant", [], ["axes"], value=axes_tensor)
node5 = helper.make_node("Constant", [], ["five"], value=five_tensor)
node55 = helper.make_node("Constant", [], ["six"], value=six_tensor)
node6 = helper.make_node("Gather", ["S", "g_indices"], ["dims12"])
node7 = helper.make_node("ReduceProd", ["dims12"], ["dims12_prod"], axes=[0])
if self.config.opset >= 10:
node8 = helper.make_node("Slice", ["S", "starts", "ends", ""], ["dim0"])
else:
node8 = helper.make_node("Slice", ["S"], ["dim0"], starts=[0], ends=[1])
node9 = helper.make_node("Cast", ["dim0"], ["dim0_cast"], to=TensorProto.INT32)
if self.config.opset >= 13:
node10 = helper.make_node("Squeeze", ["dim0_cast", "axes"], ["dim0_sq"])
else:
node10 = helper.make_node("Squeeze", ["dim0_cast"], ["dim0_sq"], axes=[0])
node11 = helper.make_node("Mul", ["dim0_sq", "five"], ["five_dim0"])
if self.config.opset >= 13:
node12 = helper.make_node("Unsqueeze", ["five_dim0", "axes"], ["five_dim0_unsq"])
else:
node12 = helper.make_node("Unsqueeze", ["five_dim0"], ["five_dim0_unsq"], axes=[0])
node13 = helper.make_node("Cast", ["five_dim0_unsq"], ["five_dim0_cast"], to=TensorProto.INT64)
node14 = helper.make_node("Concat", ["five_dim0_cast", "dims12_prod", "six"], ["shape"], axis=0)
node15 = helper.make_node("Reshape", ["X", "shape"], ["Y"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4, node5, node55, node6, node7, node8, node9, node10,
node11, node12, node13, node14, node15],
"test_reshape_opt1",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, 10, 20, 30])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["Y"], {"X": np.random.randn(*x_shape).astype(np.float32)},
model_proto, op_type="Shape", remaining_op_num=0)
# Reshape Optimizer Tests End
# Const Fold Optimizer Tests Start
def test_const_fold_trans_with_const1(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Transpose", ["const"], ["value1"])
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_trans_with_const1",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_const_fold_trans_with_const2(self):
# need multiple optimization run
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Transpose", ["const"], ["value1"])
node3 = helper.make_node("Transpose", ["value1"], ["value2"])
node4 = helper.make_node("Add", ["value2", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3, node4],
"test_const_fold_trans_with_const2",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_const_fold_node_is_output(self):
# need multiple optimization run
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Transpose", ["const"], ["value1"])
node3 = helper.make_node("Transpose", ["value1"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_node_is_output",
[],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {},
model_proto, remaining_transpose_num=0)
def test_const_fold_concat(self):
shape = (6, 4)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
const_tensor2 = helper.make_tensor(name='const_tensor2', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Constant", [], ["const2"], value=const_tensor2)
node3 = helper.make_node("Concat", ["const", "const2", "const"], ["value1"], axis=1)
node4 = helper.make_node("Add", ["value1", "inp"], ["res"])
graph = helper.make_graph(
[node1, node2, node3, node4],
"test_const_fold_trans_with_const2",
[helper.make_tensor_value_info("inp", TensorProto.FLOAT, [6, 12])],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, [6, 12])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"inp": np.random.randn(6, 12).astype(np.float32)}, model_proto,
"Concat", 0)
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_const_fold_unsqueeze_with_const(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Unsqueeze", ["const"], ["value1"], axes=[0, 2, 3])
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_unsqueeze_with_const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, (1, 6, 1, 1, 6))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"X": np.random.randn(1).astype(np.float32)}, model_proto,
"Unsqueeze", 0)
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_const_fold_unsqueeze_with_const_13(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
axes = self._make_onnx_const(np.array([0, 2, 3], dtype=np.int64), "axes")
node2 = helper.make_node("Unsqueeze", ["const", "axes"], ["value1"])
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3, axes],
"test_const_fold_unsqueeze_with_const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, (1, 6, 1, 1, 6))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"X": np.random.randn(1).astype(np.float32)}, model_proto,
"Unsqueeze", 0)
def test_const_fold_cast_with_const(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Cast", ["const"], ["value1"], to=TensorProto.INT64)
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_cast_with_const",
[helper.make_tensor_value_info("X", TensorProto.INT64, shape)],
[helper.make_tensor_value_info("res", TensorProto.INT64, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"X": np.random.randn(*shape).astype(np.int64)}, model_proto,
"Cast", 0)
def test_const_fold_split(self):
shape = (2, 6, 1)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(2, 6, 1).flatten().astype(np.float32))
node0 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node1 = helper.make_node("Split", ["const"], ["out1", "out2", "out3"], axis=1)
node2 = helper.make_node("Sum", ["inp", "out1", "out2", "out3"], ["out4"])
graph = helper.make_graph(
[node0, node1, node2],
"test_const_fold_split",
[helper.make_tensor_value_info("inp", TensorProto.FLOAT, (2, 2, 1))],
[helper.make_tensor_value_info("out4", TensorProto.FLOAT, (2, 2, 1))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["out4"], {"inp": np.random.randn(2, 2, 1).astype(np.float32)}, model_proto,
"Split", 0)
def test_const_fold_split_one(self):
shape = (2, 6, 1)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(2, 6, 1).flatten().astype(np.float32))
node0 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node1 = helper.make_node("Split", ["const"], ["out1"], axis=1)
node2 = helper.make_node("Sum", ["inp", "out1"], ["out4"])
graph = helper.make_graph(
[node0, node1, node2],
"test_const_fold_split",
[helper.make_tensor_value_info("inp", TensorProto.FLOAT, (2, 6, 1))],
[helper.make_tensor_value_info("out4", TensorProto.FLOAT, (2, 6, 1))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["out4"], {"inp": np.random.randn(2, 6, 1).astype(np.float32)}, model_proto,
"Split", 0)
@check_opset_min_version(13, "Split changed in opset 13")
def test_const_fold_split_const_splits_13(self):
shape = (2, 6, 1)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(2, 6, 1).flatten().astype(np.float32))
node0 = helper.make_node("Constant", [], ["const"], value=const_tensor)
const_splits = helper.make_tensor(name='const_tensor', data_type=TensorProto.INT64, dims=[3],
vals=np.array([1, 3, 2], np.int64))
node1 = helper.make_node("Constant", [], ["splits"], value=const_splits)
node2 = helper.make_node("Split", ["const", "splits"], ["out1", "out2", "out3"], axis=1)
node3 = helper.make_node("Sum", ["inp", "out2"], ["out4"])
graph = helper.make_graph(
[node0, node1, node2, node3],
"test_const_fold_split",
[helper.make_tensor_value_info("inp", TensorProto.FLOAT, (2, 3, 1))],
[helper.make_tensor_value_info("out4", TensorProto.FLOAT, (2, 3, 1))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["out4"], {"inp": np.random.randn(2, 3, 1).astype(np.float32)}, model_proto,
"Split", 0)
@check_opset_max_version(12, "Split changed in opset 13")
def test_const_fold_split_const_splits(self):
shape = (2, 6, 1)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(2, 6, 1).flatten().astype(np.float32))
node0 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Split", ["const"], ["out1", "out2", "out3"], axis=1, split=[1, 3, 2])
node3 = helper.make_node("Sum", ["inp", "out2"], ["out4"])
graph = helper.make_graph(
[node0, node2, node3],
"test_const_fold_split",
[helper.make_tensor_value_info("inp", TensorProto.FLOAT, (2, 3, 1))],
[helper.make_tensor_value_info("out4", TensorProto.FLOAT, (2, 3, 1))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["out4"], {"inp": np.random.randn(2, 3, 1).astype(np.float32)}, model_proto,
"Split", 0)
# Const Fold Optimizer Tests End
# Const Dequantize Optimizer Tests Start
@check_opset_min_version(10, "DequantizeLinear")
def test_const_dequantize_reshape(self):
inputval = numpy_helper.from_array(np.random.randint(0, 100, (2, 3, 4, 5), np.uint8), name='X')
scale = numpy_helper.from_array(np.array(0.75, dtype=np.float32), name='scale')
        zero_point = numpy_helper.from_array(np.array(3, dtype=np.uint8), name='zero_point')
"""sequence/chipseq formats"""
import io
from .util import open_file, chunkify
import numpy
import pandas as pd
import os
import tempfile
import subprocess
def normalize_strand(x):
if x == 1 or x == 0 or x == -1:
return x
if x == "+":
return 1
elif x == "-":
return -1
else:
return 0
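# Illustrative behaviour: normalize_strand("+") -> 1, normalize_strand("-") -> -1,
# any other string (e.g. ".") -> 0, while the integers 1, 0 and -1 pass through
# unchanged.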
class BedEntry(object):
__slots__ = ["refseq", "position", "length", "strand", "score", "name"]
def __init__(self, chr, chrStart, chrEnd, name=None, strand=None, score=None):
self.refseq = chr
pos = int(chrStart)
self.position = pos
self.length = int(chrEnd) - pos
if strand:
self.strand = normalize_strand(strand)
else:
self.strand = normalize_strand(self.length > 0)
self.score = numpy.nan
if name is None:
name = "Noname"
self.name = name
if score is not None:
self.score = score
def get_read_length(self):
return self.length
def __len__(self):
return self.length
def __repr__(self):
return "BedEntry(%s, %i, %i)" % (
repr(self.refseq),
self.position,
self.position + self.length,
)
    def __str__(self):
        return (
            "Bed Entry chr=%s, start=%i, length=%i, strand=%s, score=%s, name=%s"
            % (
                self.refseq,
                self.position,
                self.length,
                self.strand,
                self.score,
                self.name,
            )
        )
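# Minimal usage sketch (hypothetical values):
#   entry = BedEntry("chr1", 100, 150, name="peak_1", strand="+", score=7.0)
#   entry.position == 100, len(entry) == 50, entry.strand == 1, entry.score == 7.0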
def read_bed(filenameOrFileObject, report_progress=False):
res = []
for e in read_bed_iterator(filenameOrFileObject, report_progress):
res.append(e)
return res
def read_bed_iterator(filenameOrFileObject, report_progress=False):
fo = open_file(filenameOrFileObject, "rb")
for row in chunkify(fo, b"\n"):
if row.startswith(b"track"):
trackInfo = row
elif row.startswith(b"#"): # not really a comment character...
continue
else:
try:
if not row:
continue
row = row.split(b"\t")
e = BedEntry(row[0], row[1], row[2]) # bed does start at 0
try:
e.name = row[3]
e.score = float(row[4])
except IndexError:
pass
except ValueError:
pass
try:
e.strand = normalize_strand(row[5])
except IndexError:
pass
yield e
except Exception as e:
raise ValueError("Could not parse row: %s" % row)
def write_bed_header(file_handle, track_name):
file_handle.write(
('track name="%s" description="" useScore=0\n' % track_name).encode("utf-8")
)
def write_bed_entry_short(file_handle, entry, name_to_chromosome_lookup_or_none=None):
if name_to_chromosome_lookup_or_none:
chr = name_to_chromosome_lookup_or_none[entry.refseq]
else:
chr = entry.refseq
out = [
chr, # chromosome
entry.position, # start
entry.position + len(entry), # end
]
file_handle.write("\t".join((str(x) for x in out)) + "\n")
def write_bed_entry_long(file_handle, entry, name_to_chromosome_lookup_or_none=None):
if name_to_chromosome_lookup_or_none:
chr = name_to_chromosome_lookup_or_none[entry.refseq]
else:
chr = entry.refseq
out = [
chr, # chromosome
entry.position, # start
entry.position + len(entry), # end
entry.name,
"." if | numpy.isnan(entry.score) | numpy.isnan |
from robot.ur_robot import URRobot
from vision.realsense_d415_tcp import RealsenseD415TCP
import utils.utils as utils
import vision.utils as visionutils
from utils.config_loader import ConfigLoader
from datetime import datetime
import numpy as np
import cv2
import json
import time
from scipy import optimize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import argparse
import os
class Configuration(object):
def __init__(self, config_file):
        config = ConfigLoader.load(config_file)
        # creates an instance whose attributes mirror the config dictionary
for k, v in config.items():
setattr(self, k, v)
# Checkerboard size as a tuple.
self.checkerboard_size = (self.checkerboard_size, self.checkerboard_size)
self.reference_point_offset = np.array(self.reference_point_offset)
self.tool_orientation = np.array(self.tool_orientation)
self.workspace_limits = np.array(self.workspace_limits)
@staticmethod
def dump_sample_file():
config = {
"calibration_type": "EYE_IN_HAND",
"robot_config_file":"PATH/TO/FILE",
"camera_config_file":"PATH/TO/FILE",
"workspace_limits": [[-0.490, 0.390], [-0.645, -0.185], [0.462, 0.710]],
"calib_grid_step": 0.05,
"reference_point_offset": [[0.74550],[-0.00895],[0.04900], [1]],
"tool_orientation": [1.226,-2.890,0.00],
"checkerboard_size": 3
}
with open('configurations/sample_configuration.json', 'w') as fp:
json.dump(config, fp)
def calibrate(config):
# Construct 3D calibration grid across workspace
gridspace_x = np.linspace(config.workspace_limits[0][0], config.workspace_limits[0][1], int(1 + (config.workspace_limits[0][1] - config.workspace_limits[0][0])/config.calib_grid_step))
gridspace_y = np.linspace(config.workspace_limits[1][0], config.workspace_limits[1][1], int(1 + (config.workspace_limits[1][1] - config.workspace_limits[1][0])/config.calib_grid_step))
gridspace_z = np.linspace(config.workspace_limits[2][0], config.workspace_limits[2][1], int(1 + (config.workspace_limits[2][1] - config.workspace_limits[2][0])/config.calib_grid_step))
calib_grid_x, calib_grid_y, calib_grid_z = np.meshgrid(gridspace_x, gridspace_y, gridspace_z)
num_calib_grid_pts = calib_grid_x.shape[0]*calib_grid_x.shape[1]*calib_grid_x.shape[2]
calib_grid_x.shape = (num_calib_grid_pts,1)
calib_grid_y.shape = (num_calib_grid_pts,1)
calib_grid_z.shape = (num_calib_grid_pts,1)
calib_grid_pts = np.concatenate((calib_grid_x, calib_grid_y, calib_grid_z), axis=1)
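    # calib_grid_pts now holds one XYZ tool target per point of the discretized
    # workspace lattice (one sample roughly every calib_grid_step along each axis).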
#measured_pts: points generated by sampling out of the config.workspace_limits[] + checkerboard offset from tool.
    # It is the tool position when the picture is taken; ideally this is the position of the center of the checkerboard in robot world coordinates.
# This is achieved easily when the camera is fixed and the robot moves the checkerboard in the image.
# As the robot position + checkerboard offset from tool = the position of the center of the fixed checkerboard in robot world coordinates.
measured_pts = []
    #observed_pts: This is the position X,Y,Z in meters of the center of the checkerboard with respect to the origin of the camera in the camera world coordinates.
observed_pts = []
#observed_pix: Pixel locations of the center of the checkerboard in the image.
observed_pix = []
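    # measured_pts / observed_pts / observed_pix are the point correspondences that the
    # later calibration fit uses to estimate the camera-to-robot transform (the fitting
    # step itself lies outside this excerpt).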
print(f'Going to calibrate in {num_calib_grid_pts} different points.')
# Connect to the robot
print('Connecting to robot...')
robot = URRobot(config.robot_config_file)
# Slow down robot to SAFE values
robot.activate_safe_mode()
robot.go_home()
# Connect to the camera
print('Connecting to camera...')
camera = RealsenseD415TCP(config.camera_config_file)
# Move robot to each calibration point in workspace
print('Collecting data...')
for calib_pt_idx in range(num_calib_grid_pts):
tool_position = calib_grid_pts[calib_pt_idx,:]
print('Calibration point: ', calib_pt_idx, '/', num_calib_grid_pts)
robot.move_to_pose(tool_position, config.tool_orientation)
time.sleep(1)
# Wait for a coherent pair of frames: depth and color
camera_color_img, camera_depth_img = camera.get_state()
        if camera_color_img is not None and camera_depth_img is not None:
checkerboard_pix = visionutils.find_checkerboard(camera_color_img, config.checkerboard_size)
if checkerboard_pix is not None:
checkerboard_z = camera_depth_img[checkerboard_pix[1]][checkerboard_pix[0]]
checkerboard_x = np.multiply(checkerboard_pix[0]-camera.intrinsics[0][2], checkerboard_z/camera.intrinsics[0][0])
checkerboard_y = np.multiply(checkerboard_pix[1]-camera.intrinsics[1][2], checkerboard_z/camera.intrinsics[1][1])
if checkerboard_z != 0:
observed_pts.append([checkerboard_x, checkerboard_y, checkerboard_z])
observed_pix.append(checkerboard_pix)
# Get current robot pose
current_pose = robot.get_cartesian_pose()
if config.calibration_type == "EYE_IN_HAND":
rot_vec = np.array(current_pose)
rot_vec.shape = (1,6)
T_be = utils.V2T(rot_vec)
invT_be = np.linalg.inv(T_be)
# Save calibration point and observed checkerboard center
checker2tool = np.dot(invT_be, config.reference_point_offset)
checker2tool = checker2tool[:3, 0]
measured_pts.append(checker2tool)
print('Measured points: ', checker2tool)
else: # "EYE_TO_HAND"
tool_position = current_pose[:3] + config.reference_point_offset.flatten()[:3]
measured_pts.append(tool_position)
print('Measured points: ', tool_position)
# Save calibration point and observed checkerboard center
print('Observed points: ', [checkerboard_x,checkerboard_y,checkerboard_z])
print('Checkerboard pix: ', checkerboard_pix)
else:
print('checkerboard Z == 0')
else:
print('No checkerboard found')
else:
print('No depth or color frames')
    # Move robot back to home pose
    robot.go_home()
measured_pts = np.asarray(measured_pts)
    observed_pts = np.asarray(observed_pts)
import unittest
import numpy
from pyscf import gto, scf
from cqcpy import ft_utils
from cqcpy import utils
from kelvin.ccsd import ccsd
from kelvin.scf_system import SCFSystem
from kelvin import cc_utils
from numpy import einsum
try:
from lattice.hubbard import Hubbard1D
from kelvin.hubbard_system import HubbardSystem
has_lattice = True
except ImportError:
has_lattice = False
class FakeHubbardSystem(object):
def __init__(self, sys, M=None):
self.M = M
self.sys = sys
def has_g(self):
return self.sys.has_g()
def has_u(self):
return self.sys.has_u()
def has_r(self):
return self.sys.has_r()
def verify(self, T, mu):
return self.sys.verify(T, mu)
def const_energy(self):
return self.sys.const_energy()
def get_mp1(self):
E1 = self.sys.get_mp1()
beta = 1.0/self.sys.T
mu = self.sys.mu
en = self.sys.g_energies_tot()
fo = ft_utils.ff(beta, en, mu)
extra = 0.5*numpy.einsum('ijij,i,j->', self.M, fo, fo)
return E1 + extra
def g_energies_tot(self):
return self.sys.g_energies_tot()
def g_fock_tot(self):
beta = 1.0/self.sys.T
mu = self.sys.mu
en = self.sys.g_energies_tot()
fo = ft_utils.ff(beta, en, mu)
return self.sys.g_fock_tot() + 0.5*einsum('piqi,i->pq', self.M, fo)
def g_aint_tot(self):
U = self.sys.g_aint_tot()
return U + self.M
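# FakeHubbardSystem perturbs the wrapped system's antisymmetrized integrals by M (with
# matching corrections to the MP1 energy and Fock matrix), so the test below can check
# the 2-RDM contraction against a central finite difference of the energy in M.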
class FTCC2RDMTest(unittest.TestCase):
def setUp(self):
self.thresh = 1e-13
@unittest.skipUnless(has_lattice, "Lattice module cannot be found")
def test_hubbard(self):
U = 1.0
T = 1.0
L = 2
mu = 0.5
Mg = numpy.zeros((2*L, 2*L, 2*L, 2*L))
for i in range(L):
Mg[i, L+i, i, L+i] = -2.0
Mg[L+i, i, L+i, i] = -2.0
Mg = Mg - Mg.transpose((0, 1, 3, 2))
hub = Hubbard1D(2, 1.0, U)
Pa = numpy.zeros((2, 2))
Pb = numpy.zeros((2, 2))
Pa[0, 0] = 1.0
Pb[1, 1] = 1.0
sys = HubbardSystem(T, hub, Pa, Pb, mu=mu, orbtype='g')
cmat = utils.block_diag(sys.ua, sys.ub)
Mg = sys._transform1(Mg, cmat)
cc = ccsd(sys, T=T, mu=mu, iprint=0, max_iter=80, econv=1e-9)
E, Ecc = cc.run()
cc._ft_ccsd_lambda()
cc._g_ft_1rdm()
cc._g_ft_2rdm()
P2tot = cc.full_2rdm()
E2 = 0.25*numpy.einsum('pqrs,rspq->', P2tot, Mg)
out = E2
d = 5e-4
sysf = FakeHubbardSystem(sys, M=d*Mg)
ccf = ccsd(sysf, T=T, mu=mu, iprint=0, max_iter=80, econv=1e-9)
Ef, Eccf = ccf.run()
sysb = FakeHubbardSystem(sys, M=-d*Mg)
ccb = ccsd(sysb, T=T, mu=mu, iprint=0, max_iter=80, econv=1e-9)
Eb, Eccb = ccb.run()
ref = (Ef - Eb)/(2*d)
error = abs(ref - out) / abs(ref)
self.assertTrue(error < 1e-6, "Error: {}".format(error))
def test_Be_gen(self):
T = 0.8
beta = 1.0/T
mu = 0.04
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G')
m = scf.RHF(mol)
m.conv_tol = 1e-12
m.scf()
sys = SCFSystem(m, T, mu, orbtype='g')
ccsdT = ccsd(sys, T=T, mu=mu, iprint=0)
ccsdT.run()
ccsdT._ft_ccsd_lambda()
ccsdT._g_ft_2rdm()
F, I = cc_utils.ft_integrals(sys, sys.g_energies_tot(), beta, mu)
ref = (0.25/beta)*einsum('cdab,abcd->', ccsdT.P2[0], I.vvvv)
ref += (0.5/beta)*einsum('ciab,abci->', ccsdT.P2[1], I.vvvo)
ref += (0.5/beta)*einsum('bcai,aibc->', ccsdT.P2[2], I.vovv)
ref += (0.25/beta)*einsum('ijab,abij->', ccsdT.P2[3], I.vvoo)
ref += (1.0/beta)*einsum('bjai,aibj->', ccsdT.P2[4], I.vovo)
ref += (0.25/beta)*einsum('abij,ijab->', ccsdT.P2[5], I.oovv)
ref += (0.5/beta)*einsum('jkai,aijk->', ccsdT.P2[6], I.vooo)
ref += (0.5/beta)*einsum('kaij,ijka->', ccsdT.P2[7], I.ooov)
ref += (0.25/beta)*einsum('klij,ijkl->', ccsdT.P2[8], I.oooo)
Inew = sys.g_aint_tot()
out1 = (0.25)*einsum('pqrs,rspq->', ccsdT.n2rdm, Inew)
Inew = sys.g_int_tot()
out2 = (0.5)*einsum('pqrs,rspq->', ccsdT.n2rdm, Inew)
diff1 = abs(ref - out1)
diff2 = abs(ref - out2)
self.assertTrue(diff1 < self.thresh, "Error in 2rdm: {}".format(diff1))
self.assertTrue(diff2 < self.thresh, "Error in 2rdm: {}".format(diff2))
def test_Be_gen_active(self):
T = 0.05
beta = 1.0/T
mu = 0.04
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G')
m = scf.RHF(mol)
m.conv_tol = 1e-11
m.scf()
sys = SCFSystem(m, T, mu, orbtype='g')
athresh = 1e-20
ccsdT = ccsd(sys, T=T, mu=mu, iprint=0, damp=0.16, tconv=1e-10,
athresh=athresh, ngrid=40, max_iter=80)
ccsdT.run()
ccsdT._ft_ccsd_lambda()
ccsdT._g_ft_2rdm()
en = sys.g_energies_tot()
fo = ft_utils.ff(beta, en, mu)
fv = ft_utils.ffv(beta, en, mu)
focc = [x for x in fo if x > athresh]
fvir = [x for x in fv if x > athresh]
iocc = [i for i, x in enumerate(fo) if x > athresh]
ivir = [i for i, x in enumerate(fv) if x > athresh]
F, I = cc_utils.ft_active_integrals(sys, en, focc, fvir, iocc, ivir)
ref = (0.25/beta)*einsum('cdab,abcd->', ccsdT.P2[0], I.vvvv)
ref += (0.5/beta)*einsum('ciab,abci->', ccsdT.P2[1], I.vvvo)
ref += (0.5/beta)*einsum('bcai,aibc->', ccsdT.P2[2], I.vovv)
ref += (0.25/beta)*einsum('ijab,abij->', ccsdT.P2[3], I.vvoo)
ref += (1.0/beta)*einsum('bjai,aibj->', ccsdT.P2[4], I.vovo)
ref += (0.25/beta)*einsum('abij,ijab->', ccsdT.P2[5], I.oovv)
ref += (0.5/beta)*einsum('jkai,aijk->', ccsdT.P2[6], I.vooo)
        ref += (0.5/beta)*einsum('kaij,ijka->', ccsdT.P2[7], I.ooov)
# -*- encoding: utf-8 -*-
import multiprocessing
import glob
import os
import re
import sys
import time
import warnings
import numpy as np
import pynisher
from autosklearn.constants import BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, \
MULTILABEL_CLASSIFICATION, CLASSIFICATION_TASKS, BAC_METRIC, F1_METRIC
from autosklearn.evaluation.util import calculate_score
from autosklearn.util import StopWatch
from autosklearn.ensembles.ensemble_selection import EnsembleSelection
from autosklearn.util.logging_ import get_logger
class EnsembleBuilder(multiprocessing.Process):
def __init__(self, backend, dataset_name, task_type, metric,
limit, ensemble_size=None, ensemble_nbest=None,
seed=1, shared_mode=False, max_iterations=-1, precision="32",
low_precision=True):
super(EnsembleBuilder, self).__init__()
self.backend = backend
self.dataset_name = dataset_name
self.task_type = task_type
self.metric = metric
self.limit = limit
self.ensemble_size = ensemble_size
self.ensemble_nbest = ensemble_nbest
self.seed = seed
self.shared_mode = shared_mode
self.max_iterations = max_iterations
self.precision = precision
self.low_precision = low_precision
logger_name = 'EnsembleBuilder(%d):%s' % (self.seed, self.dataset_name)
self.logger = get_logger(logger_name)
def run(self):
buffer_time = 5
time_left = self.limit - buffer_time
safe_ensemble_script = pynisher.enforce_limits(
wall_time_in_s=int(time_left), logger=self.logger)(self.main)
safe_ensemble_script()
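    # pynisher enforces the wall-clock budget (minus a small buffer) on main(), so a
    # stuck ensemble-building loop cannot run past the configured time limit.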
def main(self):
watch = StopWatch()
watch.start_task('ensemble_builder')
used_time = 0
time_iter = 0
index_run = 0
num_iteration = 0
current_num_models = 0
last_hash = None
current_hash = None
dir_ensemble = os.path.join(self.backend.temporary_directory,
'.auto-sklearn',
'predictions_ensemble')
dir_valid = os.path.join(self.backend.temporary_directory,
'.auto-sklearn',
'predictions_valid')
dir_test = os.path.join(self.backend.temporary_directory,
'.auto-sklearn',
'predictions_test')
paths_ = [dir_ensemble, dir_valid, dir_test]
dir_ensemble_list_mtimes = []
self.logger.debug('Starting main loop with %f seconds and %d iterations '
'left.' % (self.limit - used_time, num_iteration))
while used_time < self.limit or (self.max_iterations > 0 and
self.max_iterations >= num_iteration):
num_iteration += 1
self.logger.debug('Time left: %f', self.limit - used_time)
self.logger.debug('Time last ensemble building: %f', time_iter)
# Reload the ensemble targets every iteration, important, because cv may
# update the ensemble targets in the cause of running auto-sklearn
# TODO update cv in order to not need this any more!
targets_ensemble = self.backend.load_targets_ensemble()
# Load the predictions from the models
exists = [os.path.isdir(dir_) for dir_ in paths_]
if not exists[0]: # all(exists):
self.logger.debug('Prediction directory %s does not exist!' %
dir_ensemble)
time.sleep(2)
used_time = watch.wall_elapsed('ensemble_builder')
continue
if self.shared_mode is False:
dir_ensemble_list = sorted(glob.glob(os.path.join(
dir_ensemble, 'predictions_ensemble_%s_*.npy' % self.seed)))
if exists[1]:
dir_valid_list = sorted(glob.glob(os.path.join(
dir_valid, 'predictions_valid_%s_*.npy' % self.seed)))
else:
dir_valid_list = []
if exists[2]:
dir_test_list = sorted(glob.glob(os.path.join(
dir_test, 'predictions_test_%s_*.npy' % self.seed)))
else:
dir_test_list = []
else:
dir_ensemble_list = sorted(os.listdir(dir_ensemble))
dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []
# Check the modification times because predictions can be updated
# over time!
old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
dir_ensemble_list_mtimes = []
# The ensemble dir can contain non-model files. We filter them and
# use the following list instead
dir_ensemble_model_files = []
for dir_ensemble_file in dir_ensemble_list:
if dir_ensemble_file.endswith("/"):
dir_ensemble_file = dir_ensemble_file[:-1]
if not dir_ensemble_file.endswith(".npy"):
self.logger.info('Error loading file (not .npy): %s', dir_ensemble_file)
continue
dir_ensemble_model_files.append(dir_ensemble_file)
basename = os.path.basename(dir_ensemble_file)
dir_ensemble_file = os.path.join(dir_ensemble, basename)
mtime = os.path.getmtime(dir_ensemble_file)
dir_ensemble_list_mtimes.append(mtime)
if len(dir_ensemble_model_files) == 0:
self.logger.debug('Directories are empty')
time.sleep(2)
used_time = watch.wall_elapsed('ensemble_builder')
continue
if len(dir_ensemble_model_files) <= current_num_models and \
old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
self.logger.debug('Nothing has changed since the last time')
time.sleep(2)
used_time = watch.wall_elapsed('ensemble_builder')
continue
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# TODO restructure time management in the ensemble builder,
# what is the time of index_run actually needed for?
watch.start_task('index_run' + str(index_run))
watch.start_task('ensemble_iter_' + str(num_iteration))
# List of num_runs (which are in the filename) which will be included
# later
include_num_runs = []
backup_num_runs = []
model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy')
if self.ensemble_nbest is not None:
# Keeps track of the single scores of each model in our ensemble
scores_nbest = []
# The indices of the model that are currently in our ensemble
indices_nbest = []
# The names of the models
model_names = []
model_names_to_scores = dict()
model_idx = 0
for model_name in dir_ensemble_model_files:
if model_name.endswith("/"):
model_name = model_name[:-1]
basename = os.path.basename(model_name)
try:
with open(os.path.join(dir_ensemble, basename), 'rb') as fh:
if self.precision == "16":
predictions = np.load(fh).astype(dtype=np.float16)
elif self.precision == "32":
predictions = np.load(fh).astype(dtype=np.float32)
elif self.precision == "64":
predictions = np.load(fh).astype(dtype=np.float64)
else:
predictions = np.load(fh)
score = calculate_score(targets_ensemble, predictions,
self.task_type, self.metric,
predictions.shape[1])
except Exception as e:
self.logger.warning('Error loading %s: %s - %s',
basename, type(e), e)
score = -1
model_names_to_scores[model_name] = score
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if self.ensemble_nbest is not None:
if score <= 0.001:
self.logger.info('Model only predicts at random: ' +
model_name + ' has score: ' + str(score))
backup_num_runs.append((automl_seed, num_run))
# If we have less models in our ensemble than ensemble_nbest add
# the current model if it is better than random
elif len(scores_nbest) < self.ensemble_nbest:
scores_nbest.append(score)
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
model_names.append(model_name)
else:
# Take the worst performing model in our ensemble so far
idx = np.argmin(np.array([scores_nbest]))
# If the current model is better than the worst model in
# our ensemble replace it by the current model
if scores_nbest[idx] < score:
self.logger.info(
'Worst model in our ensemble: %s with score %f '
'will be replaced by model %s with score %f',
model_names[idx], scores_nbest[idx], model_name,
score)
# Exclude the old model
del scores_nbest[idx]
scores_nbest.append(score)
del include_num_runs[idx]
del indices_nbest[idx]
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
del model_names[idx]
model_names.append(model_name)
# Otherwise exclude the current model from the ensemble
else:
# include_num_runs.append(True)
pass
else:
# Load all predictions that are better than random
if score <= 0.001:
# include_num_runs.append(True)
self.logger.info('Model only predicts at random: ' +
model_name + ' has score: ' +
str(score))
backup_num_runs.append((automl_seed, num_run))
else:
include_num_runs.append((automl_seed, num_run))
model_idx += 1
# If there is no model better than random guessing, we have to use
# all models which do random guessing
if len(include_num_runs) == 0:
include_num_runs = backup_num_runs
indices_to_model_names = dict()
indices_to_run_num = dict()
for i, model_name in enumerate(dir_ensemble_model_files):
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if (automl_seed, num_run) in include_num_runs:
num_indices = len(indices_to_model_names)
indices_to_model_names[num_indices] = model_name
indices_to_run_num[num_indices] = (automl_seed, num_run)
try:
all_predictions_train, all_predictions_valid, all_predictions_test =\
self.get_all_predictions(dir_ensemble,
dir_ensemble_model_files,
dir_valid, dir_valid_list,
dir_test, dir_test_list,
include_num_runs,
model_and_automl_re,
self.precision)
except IOError as e:
print(e)
self.logger.error('Could not load the predictions.')
continue
if len(include_num_runs) == 0:
self.logger.error('All models do just random guessing')
time.sleep(2)
continue
else:
ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
task_type=self.task_type,
metric=self.metric)
try:
ensemble.fit(all_predictions_train, targets_ensemble,
include_num_runs)
self.logger.info(ensemble)
except ValueError as e:
self.logger.error('Caught ValueError: ' + str(e))
used_time = watch.wall_elapsed('ensemble_builder')
time.sleep(2)
continue
except IndexError as e:
self.logger.error('Caught IndexError: ' + str(e))
used_time = watch.wall_elapsed('ensemble_builder')
time.sleep(2)
continue
except Exception as e:
self.logger.error('Caught error! %s', str(e))
used_time = watch.wall_elapsed('ensemble_builder')
time.sleep(2)
continue
# Output the score
self.logger.info('Training performance: %f' % ensemble.train_score_)
self.logger.info('Building the ensemble took %f seconds' %
watch.wall_elapsed('ensemble_iter_' + str(num_iteration)))
# Set this variable here to avoid re-running the ensemble builder
# every two seconds in case the ensemble did not change
current_num_models = len(dir_ensemble_model_files)
ensemble_predictions = ensemble.predict(all_predictions_train)
if sys.version_info[0] == 2:
ensemble_predictions.flags.writeable = False
current_hash = hash(ensemble_predictions.data)
else:
current_hash = hash(ensemble_predictions.data.tobytes())
# Only output a new ensemble and new predictions if the output of the
# ensemble would actually change!
# TODO this is neither safe (collisions, tests only with the ensemble
# prediction, but not the ensemble), implement a hash function for
# each possible ensemble builder.
if last_hash is not None:
if current_hash == last_hash:
self.logger.info('Ensemble output did not change.')
time.sleep(2)
continue
else:
last_hash = current_hash
else:
last_hash = current_hash
# Save the ensemble for later use in the main auto-sklearn module!
self.backend.save_ensemble(ensemble, index_run, self.seed)
# Save predictions for valid and test data set
if len(dir_valid_list) == len(dir_ensemble_model_files):
all_predictions_valid = np.array(all_predictions_valid)
ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
if self.low_precision:
if self.task_type in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION]:
ensemble_predictions_valid[ensemble_predictions_valid < 1e-4] = 0.
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(ensemble_predictions_valid.shape, dtype=np.int32)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_valid.shape[1] == 1):
bin_array[ensemble_predictions_valid >= 0.5] = 1
else:
sample_num = ensemble_predictions_valid.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_valid[i, :])
bin_array[i, j] = 1
ensemble_predictions_valid = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_valid.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_valid.size > 1000000:
precision = 4
else:
# File size maximally 2.1MB
precision = 6
self.backend.save_predictions_as_txt(ensemble_predictions_valid,
'valid', index_run, prefix=self.dataset_name,
precision=precision)
else:
self.logger.info('Could not find as many validation set predictions (%d)'
'as ensemble predictions (%d)!.',
len(dir_valid_list), len(dir_ensemble_model_files))
del all_predictions_valid
if len(dir_test_list) == len(dir_ensemble_model_files):
all_predictions_test = np.array(all_predictions_test)
"""Linear Predictive Coding analysis and resynthesis for audio."""
import numpy as np
import scipy.signal
def lpcfit(x, p=12, h=128, w=None, overlaps=True):
"""Perform LPC analysis of short-time windows of a waveform.
Args:
x: 1D np.array containing input audio waveform.
p: int, order of LP models to fit.
h: int, hop in samples between successive short-time windows.
w: int, analysis window length. Defaults to 2 x h.
overlaps: bool, if true, residuals are overlap-added between
windows (for a continuous excitation), otherwise only the
residual for each hop portion is kept (for perfect reconstruction).
Returns:
a: np.array of (n_frames, p + 1) containing the LPC filter coefficients for
each frame.
g: np.array of (n_frames,) giving the gain for each frame.
e: np.array of (n_frames * h + (w - h),) giving the normalized-energy
excitation (residual).
"""
if not w:
w = 2 * h
npts = x.shape[0]
nhops = int(npts/h)
# Pad x with zeros so that we can extract complete w-length windows from it.
x = np.hstack([np.zeros(int((w-h)/2)), x, np.zeros(int(w-h/2))])
a = np.zeros((nhops, p+1))
g = np.zeros(nhops)
if overlaps:
e = np.zeros((nhops - 1) * h + w)
else:
e = np.zeros(npts)
# Pre-emphasis
pre = [1, -0.9]
x = scipy.signal.lfilter(pre, 1 , x)
for hop in np.arange(nhops):
# Extract segment of signal.
xx = x[hop * h + np.arange(w)]
# Apply hanning window
wxx = xx * np.hanning(w)
# Form autocorrelation (calculates *way* too many points)
rxx = np.correlate(wxx, wxx, 'full')
# Extract just the points we need (middle p+1 points).
rxx = rxx[w - 1 + np.arange(p + 1)]
# Setup the normal equations
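# (These are the Yule-Walker equations R a = r for the AR coefficients; the explicit
# Toeplitz inverse is fine for small p, though np.linalg.solve or a Levinson-Durbin
# recursion would be cheaper and numerically safer.)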
coeffs = np.dot(np.linalg.inv(scipy.linalg.toeplitz(rxx[:-1])), rxx[1:])
# Calculate residual by filtering windowed xx
aa = np.hstack([1.0, -coeffs])
if overlaps:
rs = scipy.signal.lfilter(aa, 1, wxx)
else:
rs = scipy.signal.lfilter(aa, 1, xx[int((w - h) / 2) + np.arange(h)])
G = np.sqrt(np.mean(rs**2))
# Save filter, gain and residual
a[hop] = aa
g[hop] = G
if overlaps:
e[hop * h + np.arange(w)] += rs / G
else:
e[hop *h + np.arange(h)] = rs / G
# Throw away first (win-hop)/2 pts if in overlap mode
# for proper synchronization of resynth
if overlaps:
e = e[int((w - h) / 2):]
return a, g, e
def lpcsynth(a, g, e=None, h=128, overlaps=True):
"""Resynthesize a short-time LPC analysis to audio.
Args:
a: np.array of (nframes, order + 1) giving the per-frame LPC filter
coefficients.
g: np.array of (nframes,) giving the gain for each frame.
e: np.array of (nframes * hop + (window - hop)) giving the excitation
signal to feed into the filters. If a scalar, an impulse train with the
specified period is used. Defaults to Gaussian white noise.
h: int, hop between successive reconstruction frames, in samples.
Reconstruction window is always 2 * h.
overlaps: bool. If true, successive frames are windowed and overlap-
added. If false, we assume e contains exact residuals for each
window, so reconstructions are similarly truncated and concatenated.
Returns:
1D np.array of the resynthesized waveform.
"""
w = 2 * h
nhops, p = a.shape
npts = nhops * h + w
# Excitation needs extra half-window at the end if in overlap mode
nepts = npts + overlaps*(w - h)
if e is None:
e = np.random.randn(nepts)
elif type(e) == int:
period = e
e = np.sqrt(period) * (
np.mod(np.arange(nepts), period) == 0).astype(float)
else:
nepts = e.shape[0]
npts = nepts + h
# Try to make sure we don't run out of e (in ov mode)
e = np.hstack([e, np.zeros(w)])
d = np.zeros(npts)
for hop in np.arange(nhops):
hbase = hop * h
#print d.shape, hbase, hop, nhops
oldbit = d[hbase + np.arange(h)]
aa = a[hop, :]
G = g[hop]
if overlaps:
d[hbase + np.arange(w)] += np.hanning(w) * (
G * scipy.signal.lfilter([1], aa, e[hbase + np.arange(w)]))
else:
d[hbase + np.arange(h)] = G * scipy.signal.lfilter(
    [1], aa, e[hbase + np.arange(h)])
# De-emphasis (must match pre-emphasis in lpcfit)
pre = [1, -0.9]
d = scipy.signal.lfilter([1], pre, d)
return d
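# --- Illustrative round-trip sketch (added for clarity; not part of the original module).
# A minimal analysis/resynthesis example on a synthetic waveform; real use would pass
# mono audio samples instead of noise.
def _lpc_roundtrip_demo():
    x = np.random.randn(4096)               # stand-in for a 1-D mono waveform
    a, g, e = lpcfit(x, p=12, h=128)         # per-frame LPC filters, gains, residual
    y = lpcsynth(a, g, e, h=128)             # resynthesize from the true residual
    y_buzz = lpcsynth(a, g, e=100, h=128)    # or excite with a 100-sample impulse train
    return y, y_buzz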
def lpcBHenc(E, H=None, W=256, viz=False):
"""
% P = lpcBHenc(E,H,W,viz) Encode LPC residual as buzz/hiss pitch periods
% E is a residual from LPC encoding. P is an encoding
% which, for every H samples, returns an integer pitch period
% or 0 for frames judged as noisy. Pitch is found via autocorrelation
% over a window of W points
% 2001-03-19 <EMAIL>
"""
if not H:
H = int(W / 2)
nhops = int(E.shape[0]/H)
P = np.zeros(nhops)
pmin = 2
pmax = 127
pdthresh = 0.2
# Pad so that each W-point frame is centered around hop * H.
ee = np.hstack([np.zeros(int(W / 2)), E, np.zeros(int(W / 2))])
for hop in np.arange(nhops):
xx = ee[hop * H + np.arange(W)]
rxx = np.correlate(xx, xx, 'full')
'''
Implementation of weighted generalized canonical correlation analysis as
described in:
@inproceedings{benton2016learning,
title={Learning multiview embeddings of twitter users},
author={<NAME> and <NAME> and <NAME>},
year={2016},
organization={ACL}
}
<NAME>
8/6/2016
10/17/2016: added incremental PCA flag for when one has many, many views
'''
import pickle, gzip, os, sys, time
import numpy as np
import scipy
import scipy.sparse
import scipy.linalg
import argparse
class WeightedGCCA:
'''
Weighted generalized canonical correlation analysis (WGCCA).
Implemented with batch SVD.
'''
def __init__(self, V, F, k, eps, viewWts=None, verbose=True):
self.V = V # Number of views
self.F = F # Number of features per view
self.k = k # Dimensionality of embedding we want to learn
# Regularization for each view
try:
if len(eps) == self.V:
self.eps = [np.float32(e) for e in eps]
else:
self.eps = [np.float32(eps) for i in range(self.V)] # Assume eps is same for each view
except:
self.eps = [np.float32(eps) for i in range(self.V)] # Assume eps is same for each view
self.W = [np.float32(v) for v in viewWts] if viewWts else [np.float32(1.) for v in range(V)] # How much we should weight each view -- defaults to equal weighting
self.U = None # Projection from each view to shared space
self.G = None # Embeddings for training examples
self.G_scaled = None # Scaled by SVs of covariance matrix sum
self.verbose = verbose
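    # Illustrative usage sketch (an assumption for clarity, not from the original script):
    # given view matrices X1, X2, X3 with one row per training example,
    #
    #   wgcca = WeightedGCCA(V=3, F=[X1.shape[1], X2.shape[1], X3.shape[1]],
    #                        k=50, eps=1e-8, viewWts=[1.0, 1.0, 0.5])
    #   wgcca._compute([X1, X2, X3])
    #   G = wgcca.G        # shared embeddings, one row per example
    #   U1 = wgcca.U[0]    # projects view-1 features into the shared space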
def _compute(self, views, K=None, incremental=False):
'''
Compute G by first taking low-rank decompositions of each view, stacking
them, then computing the SVD of this matrix.
'''
# K ignores those views we have no data for. If it is not provided,
# then we use all views for all examples. All we need to know is
# K^{-1/2}, which just weights each example based on number of non-zero
# views. Will fail if there are any empty examples.
if K is None:
K = np.float32(np.ones((views[0].shape[0], len(views))))
else:
K = np.float32(K)
# We do not want to count missing views we are downweighting heavily/zeroing out, so scale K by W
K = K.dot(np.diag(self.W))
Ksum = np.sum(K, axis=1)
# If we have some missing rows after weighting, then make these small & positive.
Ksum[Ksum==0.] = 1.e-8
K_invSqrt = scipy.sparse.dia_matrix( ( 1. / np.sqrt( Ksum ), np.asarray([0]) ), shape=(K.shape[0], K.shape[0]) )
# Left singular vectors for each view along with scaling matrices
As = []
Ts = []
Ts_unnorm = []
N = views[0].shape[0]
_Stilde = np.float32(np.zeros(self.k))
_Gprime = np.float32(np.zeros((N, self.k)))
_Stilde_scaled = np.float32(np.zeros(self.k))
_Gprime_scaled = np.float32(np.zeros((N, self.k)))
# Take SVD of each view, to calculate A_i and T_i
for i, (eps, view) in enumerate(zip(self.eps, views)):
A, S, B = scipy.linalg.svd(view, full_matrices=False, check_finite=False)
# Find T by just manipulating singular values. Matrices are all diagonal,
# so this should be fine.
S_thin = S[:self.k]
S2_inv = 1. / (np.multiply( S_thin, S_thin ) + eps)
T = np.diag(
np.sqrt(
np.multiply( np.multiply( S_thin, S2_inv ), S_thin )
)
)
# Keep singular values
T_unnorm = np.diag( S_thin + eps )
if incremental:
ajtj = K_invSqrt.dot( np.sqrt(self.W[i]) * A.dot(T) )
ajtj_scaled = K_invSqrt.dot( np.sqrt(self.W[i]) * A.dot(T_unnorm) )
_Gprime, _Stilde = WeightedGCCA._batch_incremental_pca(ajtj,
_Gprime,
_Stilde)
_Gprime_scaled, _Stilde_scaled = WeightedGCCA._batch_incremental_pca(ajtj_scaled,
_Gprime_scaled,
_Stilde_scaled)
else:
# Keep the left singular vectors of view j
As.append(A[:,:self.k])
Ts.append(T)
Ts_unnorm.append(T_unnorm)
if self.verbose:
print ('Decomposed data matrix for view %d' % (i))
if incremental:
self.G = _Gprime
self.G_scaled = _Gprime_scaled
self.lbda = _Stilde
self.lbda_scaled = _Stilde_scaled
else:
# In practice M_tilde may be really big, so we would
# like to perform this SVD incrementally, over examples.
M_tilde = K_invSqrt.dot( np.bmat( [ np.sqrt(w) * A.dot(T) for w, A, T in zip(self.W, As, Ts) ] ) )
Q, R = scipy.linalg.qr( M_tilde, mode='economic')
# Ignore right singular vectors
U, lbda, V_toss = scipy.linalg.svd(R, full_matrices=False, check_finite=False)
self.G = Q.dot(U[:,:self.k])
self.lbda = lbda
# Unnormalized version of G -> captures covariance between views
M_tilde = K_invSqrt.dot( np.bmat( [ np.sqrt(w) * A.dot(T) for w, A, T in zip(self.W, As, Ts_unnorm) ] ) )
Q, R = scipy.linalg.qr( M_tilde, mode='economic')
# Ignore right singular vectors
U, lbda, V_toss = scipy.linalg.svd(R, full_matrices=False, check_finite=False)
self.lbda_scaled = lbda
self.G_scaled = self.G.dot(np.diag(self.lbda_scaled[:self.k]))
if self.verbose:
print ('Decomposed M_tilde / solved for G')
self.U = [] # Mapping from views to latent space
self.U_unnorm = [] # Mapping, without normalizing variance
self._partUs = []
# Now compute canonical weights
for idx, (eps, f, view) in enumerate(zip(self.eps, self.F, views)):
R = scipy.linalg.qr(view, mode='r')[0]
Cjj_inv = np.linalg.inv( (R.transpose().dot(R) + eps * np.eye( f )) )
pinv = Cjj_inv.dot( view.transpose() )
self._partUs.append(pinv)
self.U.append(pinv.dot( self.G ))
self.U_unnorm.append(pinv.dot( self.G_scaled ))
if self.verbose:
print ('Solved for U in view %d' % (idx))
@staticmethod
def _batch_incremental_pca(x, G, S):
r = G.shape[1]
b = x.shape[0]
xh = G.T.dot(x)
H = x - G.dot(xh)
J, W = scipy.linalg.qr(H, overwrite_a=True, mode='full', check_finite=False)
Q = np.bmat( [[np.diag(S), xh], [np.zeros((b,r), dtype=np.float32), W]] )
from io import BytesIO
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.legend import Legend
import numpy as np
from haikubot.utils.string_cleaner import (
clean_characters,
clean_words,
camel_case_clean,
)
from haikubot.utils.color import string_to_color_hex
def get_authors(haikus):
authors = []
for haiku in haikus:
author = haiku[2]
if author not in authors:
authors.append(author)
return authors
def set_legend(ax, line, author):
    # Attach a legend entry for this author's line (the original body referenced
    # undefined names copied from an example).
    leg = Legend(ax, [line], [author], loc="lower right", frameon=False)
    ax.add_artist(leg)
def clean_graph(graph):
maxVal = np.amax(graph)
clean = np.delete(graph, np.argwhere(graph == maxVal))
return np.append(clean, maxVal)
def plot_author(ax, graph, author, anonymous=True):
clean = clean_graph(graph)
lastX = len(clean) - 1
maxVal = np.amax(graph)
ax.plot(clean, color=string_to_color_hex(author))
ax.scatter(lastX, maxVal, color=string_to_color_hex(author))
if not anonymous:
ax.text(
lastX,
maxVal,
s=author,
color=string_to_color_hex(author),
ha="right",
va="bottom",
)
def generate_timeline(haikus, anonymous=True):
authors = get_authors(haikus)
graphs = np.zeros((len(haikus), len(authors)))
current = graphs[0]
# Walk the haikus in order, crediting each one to its author and snapshotting the running counts
for (index, haiku) in enumerate(haikus):
author = haiku[2]
current[authors.index(author)] += 1
graphs[index] = np.copy(current)
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar(object):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray(object):
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(object):
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(object):
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
# same with the default, lots of times to get rid of possible
# pre-existing stack in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
def test_promote_types_strings(self):
assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S11'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'S4'))
assert_(not np.can_cast('b', 'S3'))
assert_(np.can_cast('u1', 'S3'))
assert_(not np.can_cast('u1', 'S2'))
assert_(np.can_cast('u2', 'S5'))
assert_(not np.can_cast('u2', 'S4'))
assert_(np.can_cast('u4', 'S10'))
assert_(not np.can_cast('u4', 'S9'))
assert_(np.can_cast('u8', 'S20'))
assert_(not np.can_cast('u8', 'S19'))
assert_(np.can_cast('i1', 'S4'))
assert_(not np.can_cast('i1', 'S3'))
assert_(np.can_cast('i2', 'S6'))
assert_(not np.can_cast('i2', 'S5'))
assert_(np.can_cast('i4', 'S11'))
assert_(not np.can_cast('i4', 'S10'))
assert_(np.can_cast('i8', 'S21'))
assert_(not np.can_cast('i8', 'S20'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'U4'))
assert_(not np.can_cast('b', 'U3'))
assert_(np.can_cast('u1', 'U3'))
assert_(not np.can_cast('u1', 'U2'))
assert_(np.can_cast('u2', 'U5'))
assert_(not np.can_cast('u2', 'U4'))
assert_(np.can_cast('u4', 'U10'))
assert_(not np.can_cast('u4', 'U9'))
assert_(np.can_cast('u8', 'U20'))
assert_(not np.can_cast('u8', 'U19'))
assert_(np.can_cast('i1', 'U4'))
assert_(not np.can_cast('i1', 'U3'))
assert_(np.can_cast('i2', 'U6'))
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
import os
from fluids import *
import numpy as np
from math import pi, log10, log
from random import uniform
from numpy.testing import assert_allclose
from scipy.constants import *
from scipy.optimize import *
from scipy.interpolate import *
from fluids import fluids_data_dir
from fluids.core import Engauge_2d_parser
from fluids.optional.pychebfun import *
import pytest
def log_uniform(low, high):
return 10**uniform(log10(low), log10(high))
def test_fittings():
K = entrance_beveled_orifice(Di=0.1, do=.07, l=0.003, angle=45)
assert_allclose(K, 1.2987552913818574)
### Exits
assert_allclose(exit_normal(), 1.0)
K_helix = helix(Di=0.01, rs=0.1, pitch=.03, N=10, fd=.0185)
assert_allclose(K_helix, 14.525134924495514)
K_spiral = spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
assert_allclose(K_spiral, 7.950918552775473)
### Contractions
K_sharp = contraction_sharp(Di1=1, Di2=0.4)
assert_allclose(K_sharp, 0.5301269161591805)
K_beveled = contraction_beveled(Di1=0.5, Di2=0.1, l=.7*.1, angle=120)
assert_allclose(K_beveled, 0.40946469413070485)
### Expansions (diffusers)
K_sharp = diffuser_sharp(Di1=.5, Di2=1)
assert_allclose(K_sharp, 0.5625)
K = diffuser_curved(Di1=.25**0.5, Di2=1., l=2.)
assert_allclose(K, 0.2299781250000002)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07)
assert_allclose(K, 0.06873244301714816)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07, fd2=.08)
assert_allclose(K, 0.06952256647393829)
# Misc
K1 = Darby3K(NPS=2., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K2 = Darby3K(NPS=12., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K3 = Darby3K(NPS=12., Re=10000., K1=950, Ki=0.25, Kd=4)
Ks = [1.1572523963562353, 0.819510280626355, 0.819510280626355]
assert_allclose([K1, K2, K3], Ks)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000, name='fail')
tot = sum([Darby3K(NPS=2., Re=1000, name=i) for i in Darby.keys()])
assert_allclose(tot, 67.96442287975898)
K1 = Hooper2K(Di=2., Re=10000., name='Valve, Globe, Standard')
K2 = Hooper2K(Di=2., Re=10000., K1=900, Kinfty=4)
assert_allclose([K1, K2], [6.15, 6.09])
tot = sum([Hooper2K(Di=2., Re=10000., name=i) for i in Hooper.keys()])
assert_allclose(tot, 46.18)
with pytest.raises(Exception):
Hooper2K(Di=2, Re=10000)
with pytest.raises(Exception):
Hooper2K(Di=2., Re=10000, name='fail')
K2 = change_K_basis(K1=32.68875692997804, D1=.01, D2=.02)
assert_allclose(K2, 523.0201108796487)
### Entrances
def test_entrance_distance_45_Miller():
from fluids.fittings import entrance_distance_45_Miller
K = entrance_distance_45_Miller(Di=0.1, Di0=0.14)
assert_allclose(K, 0.24407641818143339)
def test_entrance_distance():
K1 = entrance_distance(0.1, t=0.0005)
assert_allclose(K1, 1.0154100000000004)
assert_allclose(entrance_distance(Di=0.1, t=0.05), 0.57)
K = entrance_distance(Di=0.1, t=0.0005, method='Miller')
assert_allclose(K, 1.0280427936730414)
K = entrance_distance(Di=0.1, t=0.0005, method='Idelchik')
assert_allclose(K, 0.9249999999999999)
K = entrance_distance(Di=0.1, t=0.0005, l=.02, method='Idelchik')
assert_allclose(K, 0.8475000000000001)
K = entrance_distance(Di=0.1, t=0.0005, method='Harris')
assert_allclose(K, 0.8705806231290558, 3e-3)
K = entrance_distance(Di=0.1, method='Crane')
assert_allclose(K, 0.78)
with pytest.raises(Exception):
entrance_distance(Di=0.1, t=0.01, method='BADMETHOD')
def test_entrance_rounded():
K = entrance_rounded(Di=0.1, rc=0.0235)
assert_allclose(K, 0.09839534618360923)
assert_allclose(entrance_rounded(Di=0.1, rc=0.2), 0.03)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Miller')
assert_allclose(K, 0.057734448458542094)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Swamee')
assert_allclose(K, 0.06818838227156554)
K = entrance_rounded(Di=0.1, rc=0.01, method='Crane')
assert_allclose(K, .09)
K = entrance_rounded(Di=0.1, rc=0.01, method='Harris')
assert_allclose(K, 0.04864878230217168)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Harris')
assert_allclose(K, 0.0)
K = entrance_rounded(Di=0.1, rc=0.01, method='Idelchik')
assert_allclose(K, 0.11328005177738182)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Idelchik')
assert_allclose(K, 0.03)
with pytest.raises(Exception):
entrance_rounded(Di=0.1, rc=0.01, method='BADMETHOD')
def test_entrance_beveled():
K = entrance_beveled(Di=0.1, l=0.003, angle=45)
assert_allclose(K, 0.45086864221916984)
K = entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
assert_allclose(K, 0.3995000000000001)
def test_entrance_sharp():
assert_allclose(entrance_sharp(), 0.57)
with pytest.raises(Exception):
entrance_sharp(method='BADMETHOD')
for method in ['Swamee', 'Blevins', 'Idelchik', 'Crane']:
assert_allclose(0.5, entrance_sharp(method=method))
entrance_sharp(method='Miller') # Don't bother checking a value for the Miller method
def test_entrance_angled():
K_30_Idelchik = 0.9798076211353316
assert_allclose(entrance_angled(30), K_30_Idelchik)
assert_allclose(entrance_angled(30, method='Idelchik'), K_30_Idelchik)
with pytest.raises(Exception):
entrance_angled(30, method='BADMETHOD')
### Bends
def test_bend_rounded_Crane():
K = bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
assert_allclose(K, 0.09321910015613409)
K_max = bend_rounded_Crane(Di=.400, rc=.4*25, angle=30)
K_limit = bend_rounded_Crane(Di=.400, rc=.4*20, angle=30)
assert_allclose(K_max, K_limit)
def test_bend_rounded_Miller():
# Miller examples - 9.12
D = .6
Re = Reynolds(V=4, D=D, nu=1.14E-6)
kwargs = dict(Di=D, bend_diameters=2, angle=90, Re=Re, roughness=.02E-3)
K = bend_rounded_Miller(L_unimpeded=30*D, **kwargs)
assert_allclose(K, 0.1513266131915296, rtol=1e-4)# 0.150 in Miller- 1% difference due to fd
K = bend_rounded_Miller(L_unimpeded=0*D, **kwargs)
assert_allclose(K, 0.1414607344374372, rtol=1e-4) # 0.135 in Miller - Difference mainly from Co interpolation method, OK with that
K = bend_rounded_Miller(L_unimpeded=2*D, **kwargs)
assert_allclose(K, 0.09343184457353562, rtol=1e-4) # 0.093 in miller
def test_bend_rounded():
### Bends
K_5_rc = [bend_rounded(Di=4.020, rc=4.0*5, angle=i, fd=0.0163) for i in [15, 30, 45, 60, 75, 90]]
K_5_rc_values = [0.07038212630028828, 0.10680196344492195, 0.13858204974134541, 0.16977191374717754, 0.20114941557508642, 0.23248382866658507]
assert_allclose(K_5_rc, K_5_rc_values)
K_10_rc = [bend_rounded(Di=34.500, rc=36*10, angle=i, fd=0.0106) for i in [15, 30, 45, 60, 75, 90]]
K_10_rc_values = [0.061075866683922314, 0.10162621862720357, 0.14158887563243763, 0.18225270014527103, 0.22309967045081655, 0.26343782210280947]
assert_allclose(K_10_rc, K_10_rc_values)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, fd=0.0163)
assert_allclose(K, 0.106920213333191)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5)
assert_allclose(K, 0.11532121658742862)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5, method='Miller')
assert_allclose(K, 0.10276501180879682)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Crane')
assert_allclose(K, 0.08959057097762159)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Ito')
assert_allclose(K, 0.10457946464978755)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Swamee')
assert_allclose(K, 0.055429466248839564)
def test_bend_miter():
K_miters = [bend_miter(i) for i in [150, 120, 90, 75, 60, 45, 30, 15]]
K_miter_values = [2.7128147734758103, 2.0264994448555864, 1.2020815280171306, 0.8332188430731828, 0.5299999999999998, 0.30419633092708653, 0.15308822558050816, 0.06051389308126326]
assert_allclose(K_miters, K_miter_values)
K = bend_miter(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20, method='Miller')
assert_allclose(K, 0.2944060416245167)
K = bend_miter(Di=.05, angle=45, Re=1e6, roughness=1e-5, method='Crane')
assert_allclose(K, 0.28597953150073047)
K = bend_miter(angle=45, Re=1e6, method='Rennels')
assert_allclose(K, 0.30419633092708653)
with pytest.raises(Exception):
bend_miter(angle=45, Re=1e6, method='BADMETHOD')
def test_bend_miter_Miller():
K = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K, 0.2944060416245167)
K_default_L_unimpeded = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5)
assert_allclose(K, K_default_L_unimpeded)
K_high_angle = bend_miter_Miller(Di=.6, angle=120, Re=1e6, roughness=1e-5, L_unimpeded=20)
K_higher_angle = bend_miter_Miller(Di=.6, angle=150, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K_high_angle, K_higher_angle)
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_rounded_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(500):
Di = log_uniform(1e-5, 100)
rc = uniform(0, 100)
angle = uniform(0, 180)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_rounded_Miller(Di=Di, rc=rc, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_miter_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(10**3):
Di = log_uniform(1e-5, 100)
angle = uniform(0, 120)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_miter_Miller(Di=Di, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
### Diffusers
def test_diffuser_conical():
K1 = diffuser_conical(Di1=.1**0.5, Di2=1, angle=10., fd=0.020)
K2 = diffuser_conical(Di1=1/3., Di2=1, angle=50, fd=0.03) # 2
K3 = diffuser_conical(Di1=2/3., Di2=1, angle=40, fd=0.03) # 3
K4 = diffuser_conical(Di1=1/3., Di2=1, angle=120, fd=0.0185) # #4
K5 = diffuser_conical(Di1=2/3., Di2=1, angle=120, fd=0.0185) # Last
K6 = diffuser_conical(Di1=.1**0.5, Di2=1, l=3.908, fd=0.020)
Ks = [0.12301652230915454, 0.8081340270019336, 0.32533470783539786, 0.812308728765127, 0.3282650135070033, 0.12300865396254032]
assert_allclose([K1, K2, K3, K4, K5, K6], Ks)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, angle=1800., fd=0.020)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, fd=0.020)
K1 = diffuser_conical_staged(Di1=1., Di2=10., DEs=[2,3,4,5,6,7,8,9], ls=[1,1,1,1,1,1,1,1,1], fd=0.01)
K2 = diffuser_conical(Di1=1., Di2=10.,l=9, fd=0.01)
Ks = [1.7681854713484308, 0.973137914861591]
assert_allclose([K1, K2], Ks)
    # Idelchik
Ks_Idelchik = [diffuser_conical(Di1=.1**0.5, Di2=1, l=l, method='Idelchik') for l in [.1, .5, 1, 2, 3, 4, 5, 20]]
Ks_Idelchik_expect = [0.8617385829640242, 0.9283647028367953, 0.7082429168951839, 0.291016580744589, 0.18504484868875992, 0.147705693811332, 0.12911637682462676, 0.17]
assert_allclose(Ks_Idelchik, Ks_Idelchik_expect, rtol=1e-2)
### Contractions
def test_contraction_conical_Crane():
K2 = contraction_conical_Crane(Di1=0.0779, Di2=0.0525, l=0)
assert_allclose(K2, 0.2729017979998056)
def test_contraction_round():
K_round = contraction_round(Di1=1, Di2=0.4, rc=0.04)
assert_allclose(K_round, 0.1783332490866574)
K = contraction_round(Di1=1, Di2=0.4, rc=0.04, method='Miller')
assert_allclose(K, 0.085659530512986387)
K = contraction_round(Di1=1, Di2=0.4, rc=0.04, method='Idelchik')
assert_allclose(K, 0.1008)
with pytest.raises(Exception):
contraction_round(Di1=1, Di2=0.4, rc=0.04, method='BADMETHOD')
def test_contraction_round_Miller():
K = contraction_round_Miller(Di1=1, Di2=0.4, rc=0.04)
assert_allclose(K, 0.085659530512986387)
def test_contraction_conical():
K_conical1 = contraction_conical(Di1=0.1, Di2=0.04, l=0.04, fd=0.0185)
K_conical2 = contraction_conical(Di1=0.1, Di2=0.04, angle=73.74, fd=0.0185)
    assert_allclose([K_conical1, K_conical2], [0.15779041548350314, 0.15779101784158286])
import collections
import functools
import numpy as np
import sklearn.cluster
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from .layers import QuaternionRotation, QuaternionRotoinversion
from .losses import mean_exp_rsq
from .utils import hash_sample
class PointRotations:
"""Finds rotations that leave a point cloud unchanged up to a permutation.
This method optimizes a set of unit quaternions to match the
distribution of transformed points to the set of unrotated
points. Quaternions are then clustered by their axis of rotation
and merged into N-fold rotation symmetries.
:param num_rotations: Number of plain rotations (and rotoinversions, if enabled) to consider
:param quaternion_dim: Optimizer dimension for quaternions (higher may make optimization easier at the cost of more expensive optimization steps)
:param include_inversions: If True, include rotoinversions as well as rotations
:param loss: Loss function to use; see :py:mod:`symmys.losses`
"""
def __init__(self, num_rotations, quaternion_dim=8, include_inversions=True,
loss=mean_exp_rsq):
self.num_rotations = num_rotations
self.quaternion_dim = quaternion_dim
self.include_inversions = include_inversions
self.loss = loss
self._model_dict = None
@property
def model(self):
"""Return the tensorflow model that will perform rotations."""
if self._model_dict is None:
self._model_dict = self.build_model()
return self._model_dict['model']
@property
def rotation_layer(self):
"""Return the tensorflow.keras layer for rotations."""
if self._model_dict is None:
self._model_dict = self.build_model()
return self._model_dict['rotation_layer']
@property
def rotoinversion_layer(self):
"""Return the tensorflow.keras layer for rotoinversions."""
if self._model_dict is None:
self._model_dict = self.build_model()
return self._model_dict['rotoinversion_layer']
def build_model(self):
"""Create the tensorflow model.
This method can be replaced by child classes to experiment
with different network architectures. The returned result
should be a dictionary containing at least:
- `model`: a `tensorflow.keras.models.Model` instance that replicates a given set of input points
- `rotation_layer`: a layer with a `quaternions` attribute to be read
- `rotoinversion_layer` (if inversions are enabled): a layer with a `quaternions` attribute to be read
"""
result = {}
inp = last = keras.layers.Input(shape=(3,))
result['rotation_layer'] = rot_layer = QuaternionRotation(
self.num_rotations, quaternion_dim=self.quaternion_dim)
if self.include_inversions:
result['rotoinversion_layer'] = conj_rot_layer = QuaternionRotoinversion(
self.num_rotations, quaternion_dim=self.quaternion_dim)
last = tf.concat([rot_layer(last), conj_rot_layer(last)], 1)
else:
last = rot_layer(last)
result['model'] = keras.models.Model(inp, last)
return result
def fit(self, points, epochs=1024, early_stopping_steps=16,
validation_split=.3, hash_sample_N=128,
reference_fraction=.1, optimizer='adam', batch_size=256,
valid_symmetries=12, extra_callbacks=[]):
"""Fit rotation quaternions and analyze the collective symmetries of a set of input points.
This method builds a rotation model, fits it to the given
data, and groups the found quaternions by their axis and
rotation angle.
After fitting, a map of symmetries will be returned: a
dictionary of {N-fold: [axes]} containing all the axes about
which each observed symmetry were found.
:param points: Input points to analyze:: (N, 3) numpy array-like sequence
:param epochs: Maximum number of epochs to train
:param early_stopping_steps: Patience (in epochs) for early stopping criterion; training halts when the validation set loss does not improve for this many epochs
:param validation_split: Fraction of training data to use for calculating validation loss
:param hash_sample_N: Minimum number of points to use as reference data for the loss function (see :py:func:`hash_sample`)
:param reference_fraction: Fraction of given input data to be hashed to form the reference data
:param optimizer: Tensorflow/keras optimizer name or instance
:param batch_size: Batch size for optimization
:param valid_symmetries: Maximum degree of symmetry (N) that will be considered when identifying N-fold rotations
:param extra_callbacks: Additional tensorflow callbacks to use during optimization
"""
points = np.asarray(points)
N = len(points)
reference_N = int(reference_fraction*N)
reference, train = points[:reference_N], points[reference_N:]
reference = hash_sample(reference, hash_sample_N)
reduce_lr_patience = int(early_stopping_steps/3.)
callbacks = [
keras.callbacks.EarlyStopping(
patience=early_stopping_steps, monitor='val_loss'),
keras.callbacks.ReduceLROnPlateau(
patience=reduce_lr_patience, monitor='val_loss', factor=.5, verbose=False),
] + extra_callbacks
try:
import tensorflow_addons as tfa
callbacks.append(tfa.callbacks.TQDMProgressBar(
show_epoch_progress=False, update_per_second=1))
except ImportError:
pass
model = self.model
loss = self.loss(model.output, reference)
model.add_loss(loss)
model.compile(optimizer, loss=None)
model.fit(
train, train, validation_split=validation_split, verbose=False,
batch_size=batch_size, callbacks=callbacks, epochs=epochs)
self.history = model.history.history
if isinstance(valid_symmetries, int):
valid_symmetries = range(1, valid_symmetries + 1)
Ns = np.array(list(sorted(valid_symmetries)))
symmetries = collections.defaultdict(list)
for (symmetry, axis) in zip(*self._cluster_quaternions(
self.rotation_layer.quaternions.numpy(), Ns)):
if symmetry == 1:
continue
symmetries[symmetry].append(axis)
if self.include_inversions:
for (symmetry, axis) in zip(*self._cluster_quaternions(
self.rotoinversion_layer.quaternions.numpy(), Ns)):
symmetries[-symmetry].append(axis)
self.symmetries = dict(symmetries)
return self.symmetries
@staticmethod
def _cluster_quaternions(quats, Ns, tolerance=.0125):
axes = quats[:, 1:].copy()
axes /= np.linalg.norm(axes, axis=-1, keepdims=True)
filt = np.logical_and(np.isfinite(quats[:, 0]), np.all(np.isfinite(axes), axis=-1))
quats, axes = quats[filt], axes[filt]
distances = 1 - np.abs(np.sum(axes[:, np.newaxis]*axes[np.newaxis], axis=-1))
        distances = np.clip(distances, 0, 1)
import numpy as np
# t04 is identical to t01 except for several factors.
def t04(parmod,ps,x,y,z):
"""
A data-based model of the external (i.e., without earth's contribution) part of the
magnetospheric magnetic field, calibrated by
(1) solar wind pressure pdyn (nanopascals),
(2) dst (nanotesla),
(3) byimf,
(4) bzimf (nanotesla)
(5-10) indices w1 - w6, calculated as time integrals from the beginning of a storm
see the reference (3) below, for a detailed definition of those variables
:param parmod: The elements are explained above.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
Computed as a sum of contributions from principal field sources.
Assembled: March 25, 2004; Updated: August 2 & 31, December 27, 2004.
A bug eliminated March 14, 2005 (might cause compilation problems with some fortran compilers)
Attention: The model is based on data taken sunward from x=-15Re, and hence becomes invalid at larger tailward distances !!! *
REFERENCES:
(1) <NAME>, A new data-based model of the near magnetosphere magnetic field:
1. Mathematical structure.
2. Parameterization and fitting to observations. JGR v. 107(A8), 1176/1179, doi:10.1029/2001JA000219/220, 2002.
(2) <NAME>, <NAME>, <NAME>, Storm-time distortion of the
inner magnetosphere: How severe can it get ? JGR v. 108(A5), 1209, doi:10.1029/2002JA009808, 2003.
(3) <NAME> and <NAME>, Modeling the dynamics of the inner magnetosphere during
strong geomagnetic storms, J. Geophys. Res., v. 110 (A3), A03208, doi: 10.1029/2004JA010798, 2005.
"""
a = np.array([
1.00000,5.44118,0.891995,9.09684,0.00000,-7.18972,12.2700,
-4.89408,0.00000,0.870536,1.36081,0.00000,0.688650,0.602330,
0.00000,0.316346,1.22728,-0.363620E-01,-0.405821,0.452536,
0.755831,0.215662,0.152759,5.96235,23.2036,11.2994,69.9596,
0.989596,-0.132131E-01,0.985681,0.344212E-01,1.02389,0.207867,
1.51220,0.682715E-01,1.84714,1.76977,1.37690,0.696350,0.343280,
3.28846,111.293,5.82287,4.39664,0.383403,0.648176,0.318752E-01,
0.581168,1.15070,0.843004,0.394732,0.846509,0.916555,0.550920,
0.180725,0.898772,0.387365,2.26596,1.29123,0.436819,1.28211,
1.33199,.405553,1.6229,.699074,1.26131,2.42297,.537116,.619441])
iopgen,ioptt,iopb,iopr = [0.]*4
pdyn=parmod[0]
dst_ast=parmod[1]*0.8-13*np.sqrt(pdyn)
bximf,byimf,bzimf=[0.,parmod[2],parmod[3]]
w1,w2,w3,w4,w5,w6 = parmod[4:10]
pss,xx,yy,zz = [ps,x,y,z]
return extern(iopgen,ioptt,iopb,iopr,a,69,pdyn,dst_ast,bximf,byimf,bzimf,
w1,w2,w3,w4,w5,w6,pss,xx,yy,zz)
def extern(iopgen,iopt,iopb,iopr,a,ntot,pdyn,dst,bximf,byimf,bzimf,w1,w2,w3,w4,w5,w6,ps,x,y,z):
"""
:param iopgen: general option flag:
iopgen=0 - calculate total field
iopgen=1 - dipole shielding only
iopgen=2 - tail field only
iopgen=3 - birkeland field only
iopgen=4 - ring current field only
iopgen=5 - interconnection field only
:param iopt: tail field flag:
iopt=0 - both modes
iopt=1 - mode 1 only
iopt=2 - mode 2 only
:param iopb: birkeland field flag:
iopb=0 - all 4 terms
iopb=1 - region 1, modes 1 and 2
iopb=2 - region 2, modes 1 and 2
:param iopr: ring current flag:
iopr=0 - both src and prc
iopr=1 - src only
iopr=2 - prc only
"""
# common /tail/ dxshift1,dxshift2,d,deltady ! the common blocks forward nonlinear parameters
# common /birkpar/ xkappa1,xkappa2
# common /rcpar/ sc_sy,sc_as,phi
# common /g/ g
# common /rh0/ rh0
global dxshift1, dxshift2, d, deltady
global xkappa1, xkappa2
global sc_sy, sc_pr, phi
global g
global rh0
a0_a,a0_s0,a0_x0 = [34.586,1.1960,3.4397] # Shue et al. parameters
dsig = 0.005
rh0,rh2 = [8.0,-5.2]
xappa = (pdyn/2.)**a[22] # overall scaling parameter
rh0 = 7.5 # tail hinging distance
g = 35.0 # tail warping parameter
xappa3=xappa**3
xx=x*xappa
yy=y*xappa
zz=z*xappa
sps=np.sin(ps)
x0=a0_x0/xappa
am=a0_a/xappa
s0=a0_s0
# Calculate "imf" components outside the magnetopause layer (hence begin with "o")
# They are needed only if the point (x,y,z) is within the transition magnetopause layer or outside the magnetosphere:
factimf=a[19]
oimfx=0.
oimfy=byimf*factimf
oimfz=bzimf*factimf
r=np.sqrt(x**2+y**2+z**2)
xss=x
zss=z
# begin iterative search of unwarped coords (to find sigma)
dd = 1.
while dd > 1e-6:
xsold=xss
zsold=zss
rh=rh0+rh2*(zss/r)**2
sinpsas=sps/(1+(r/rh)**3)**0.33333333
cospsas=np.sqrt(1-sinpsas**2)
zss=x*sinpsas+z*cospsas
xss=x*cospsas-z*sinpsas
dd=np.abs(xss-xsold)+np.abs(zss-zsold)
rho2=y**2+zss**2
asq=am**2
xmxm=am+xss-x0
if xmxm < 0: xmxm = 0 # the boundary is a cylinder tailward of x=x0-am
axx0=xmxm**2
aro=asq+rho2
sigma=np.sqrt((aro+axx0+np.sqrt((aro+axx0)**2-4.*asq*axx0))/(2.*asq))
# Now, there are three possible cases:
# (1) inside the magnetosphere
# (2) in the boundary layer
# (3) outside the magnetosphere and b.layer
# First of all, consider the cases (1) and (2):
if sigma < (s0+dsig): # cases (1) or (2); calculate the model field (with the potential "penetrated" interconnection field):
bxcf,bycf,bzcf = [0.]*3
if iopgen <= 1:
cfx,cfy,cfz = shlcar3x3(xx,yy,zz,ps) # dipole shielding field
bxcf=cfx*xappa3
bycf=cfy*xappa3
bzcf=cfz*xappa3
bxt1,byt1,bzt1,bxt2,byt2,bzt2 = [0.]*6
if (iopgen == 0) | (iopgen == 2):
dstt = -20.
if dst < dstt: dstt = dst
znam = np.abs(dstt)**0.37
dxshift1=a[23]-a[24]/znam
dxshift2=a[25]-a[26]/znam
d=a[35]*np.exp(-w1/a[36])+a[68]
deltady=4.7
bxt1,byt1,bzt1,bxt2,byt2,bzt2 = deformed(iopt,ps,xx,yy,zz)
bxr11,byr11,bzr11, bxr12,byr12,bzr12, bxr21,byr21,bzr21, bxr22,byr22,bzr22 = [0.]*12
if (iopgen == 0) | (iopgen == 3):
znam = np.abs(dst)
if dst >= -20: znam = 20.
xkappa1=a[31]*(znam/20)**a[32]
xkappa2=a[33]*(znam/20)**a[34]
# Birkeland field (two modes for r1 and two modes for r2)
bxr11,byr11,bzr11, bxr12,byr12,bzr12, bxr21,byr21,bzr21, bxr22,byr22,bzr22 = \
birk_tot(iopb,ps,xx,yy,zz)
bxsrc,bysrc,bzsrc, bxprc,byprc,bzprc = [0.]*6
if (iopgen == 0) | (iopgen == 4):
phi=a[37]
znam=np.abs(dst)
if dst >= -20: znam = 20
sc_sy=a[27]*(20/znam)**a[28]*xappa
sc_pr=a[29]*(20/znam)**a[30]*xappa
# shielded ring current (src and prc)
bxsrc,bysrc,bzsrc, bxprc,byprc,bzprc = full_rc(iopr,ps,xx,yy,zz)
hximf,hyimf,hzimf = [0.]*3
if (iopgen == 0) | (iopgen == 5):
# These are components of the penetrated field per unit of the penetration coefficient.
# In other words, these are derivatives of the penetration field components with respect
# to the penetration coefficient. We assume that only the transverse component of the
# field penetrates inside.
hximf,hyimf,hzimf = [0.,byimf,bzimf]
# Now, add up all the components:
dlp1=(pdyn/2)**a[20]
dlp2=(pdyn/2)**a[21]
tamp1=a[1]+a[2]*dlp1+a[3]*a[38]*w1/np.sqrt(w1**2+a[38]**2)+a[4]*dst
tamp2=a[5]+a[6]*dlp2+a[7]*a[39]*w2/np.sqrt(w2**2+a[39]**2)+a[8]*dst
a_src=a[9] +a[10]*a[40]*w3/np.sqrt(w3**2+a[40]**2)+a[11]*dst
a_prc=a[12]+a[13]*a[41]*w4/np.sqrt(w4**2+a[41]**2)+a[14]*dst
a_r11=a[15]+a[16]*a[42]*w5/np.sqrt(w5**2+a[42]**2)
a_r21=a[17]+a[18]*a[43]*w6/np.sqrt(w6**2+a[43]**2)
bbx=a[0]*bxcf + tamp1*bxt1+tamp2*bxt2 + a_src*bxsrc+a_prc*bxprc + a_r11*bxr11+a_r21*bxr21 + a[19]*hximf
bby=a[0]*bycf + tamp1*byt1+tamp2*byt2 + a_src*bysrc+a_prc*byprc + a_r11*byr11+a_r21*byr21 + a[19]*hyimf
bbz=a[0]*bzcf + tamp1*bzt1+tamp2*bzt2 + a_src*bzsrc+a_prc*bzprc + a_r11*bzr11+a_r21*bzr21 + a[19]*hzimf
# And we have the total external field.
# Now, let us check whether we have the case (1). if yes - we are done:
if sigma < (s0-dsig): # (x,y,z) is inside the magnetosphere
bx,by,bz = [bbx,bby,bbz]
else: # this is the most complex case: we are inside the interpolation region
fint=0.5*(1.-(sigma-s0)/dsig)
fext=0.5*(1.+(sigma-s0)/dsig)
qx,qy,qz = dipole(ps,x,y,z)
bx=(bbx+qx)*fint+oimfx*fext -qx
by=(bby+qy)*fint+oimfy*fext -qy
bz=(bbz+qz)*fint+oimfz*fext -qz
# The cases (1) and (2) are exhausted; the only remaining possibility is now the case (3):
else:
qx,qy,qz = dipole(ps,x,y,z)
bx=oimfx-qx
by=oimfy-qy
bz=oimfz-qz
return bx,by,bz
def shlcar3x3(x,y,z, ps):
"""
This subroutine returns the shielding field for the earth's dipole, represented by
2x3x3=18 "cartesian" harmonics, tilted with respect to the z=0 plane (nb#4, p.74)
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:param ps: geo-dipole tilt angle in radius.
:return: bx,by,bz. Field components in GSM system, in nT.
"""
# The 36 coefficients enter in pairs in the amplitudes of the "cartesian" harmonics (A(1)-A(36).
# The 14 nonlinear parameters (A(37)-A(50) are the scales Pi,Ri,Qi,and Si entering the arguments of exponents, sines, and cosines in each of the
# 18 "cartesian" harmonics plus two tilt angles for the cartesian harmonics (one for the psi=0 mode and another for the psi=90 mode)
a = np.array([
-901.2327248,895.8011176,817.6208321,-845.5880889,-83.73539535,
86.58542841,336.8781402,-329.3619944,-311.2947120,308.6011161,
31.94469304,-31.30824526,125.8739681,-372.3384278,-235.4720434,
286.7594095,21.86305585,-27.42344605,-150.4874688,2.669338538,
1.395023949,-.5540427503,-56.85224007,3.681827033,-43.48705106,
5.103131905,1.073551279,-.6673083508,12.21404266,4.177465543,
5.799964188,-.3977802319,-1.044652977,.5703560010,3.536082962,
-3.222069852,9.620648151,6.082014949,27.75216226,12.44199571,
5.122226936,6.982039615,20.12149582,6.150973118,4.663639687,
15.73319647,2.303504968,5.840511214,.8385953499E-01,.3477844929])
p1,p2,p3, r1,r2,r3, q1,q2,q3, s1,s2,s3 = a[36:48]
t1,t2 = a[48:50]
cps=np.cos(ps)
sps=np.sin(ps)
s2ps=2*cps # modified here (sin(2*ps) instead of sin(3*ps))
st1=np.sin(ps*t1)
ct1=np.cos(ps*t1)
st2=np.sin(ps*t2)
ct2=np.cos(ps*t2)
x1=x*ct1-z*st1
z1=x*st1+z*ct1
x2=x*ct2-z*st2
z2=x*st2+z*ct2
# make the terms in the 1st sum ("perpendicular" symmetry):
# i=1:
sqpr= np.sqrt(1/p1**2+1/r1**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx1 =-sqpr*expr*cyp*szr
hy1 = expr/p1*syp*szr
fz1 =-expr*cyp/r1*czr
hx1 = fx1*ct1+fz1*st1
hz1 =-fx1*st1+fz1*ct1
sqpr= np.sqrt(1/p1**2+1/r2**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx2 =-sqpr*expr*cyp*szr
hy2 = expr/p1*syp*szr
fz2 =-expr*cyp/r2*czr
hx2 = fx2*ct1+fz2*st1
hz2 =-fx2*st1+fz2*ct1
sqpr= np.sqrt(1/p1**2+1/r3**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx3 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy3 = expr/p1*syp*(z1*czr+x1/r3*szr/sqpr)
fz3 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx3 = fx3*ct1+fz3*st1
hz3 =-fx3*st1+fz3*ct1
# i=2:
sqpr= np.sqrt(1/p2**2+1/r1**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx4 =-sqpr*expr*cyp*szr
hy4 = expr/p2*syp*szr
fz4 =-expr*cyp/r1*czr
hx4 = fx4*ct1+fz4*st1
hz4 =-fx4*st1+fz4*ct1
sqpr= np.sqrt(1/p2**2+1/r2**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx5 =-sqpr*expr*cyp*szr
hy5 = expr/p2*syp*szr
fz5 =-expr*cyp/r2*czr
hx5 = fx5*ct1+fz5*st1
hz5 =-fx5*st1+fz5*ct1
sqpr= np.sqrt(1/p2**2+1/r3**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx6 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy6 = expr/p2*syp*(z1*czr+x1/r3*szr/sqpr)
fz6 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx6 = fx6*ct1+fz6*st1
hz6 =-fx6*st1+fz6*ct1
# i=3:
sqpr= np.sqrt(1/p3**2+1/r1**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx7 =-sqpr*expr*cyp*szr
hy7 = expr/p3*syp*szr
fz7 =-expr*cyp/r1*czr
hx7 = fx7*ct1+fz7*st1
hz7 =-fx7*st1+fz7*ct1
sqpr= np.sqrt(1/p3**2+1/r2**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx8 =-sqpr*expr*cyp*szr
hy8 = expr/p3*syp*szr
fz8 =-expr*cyp/r2*czr
hx8 = fx8*ct1+fz8*st1
hz8 =-fx8*st1+fz8*ct1
sqpr= np.sqrt(1/p3**2+1/r3**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx9 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy9 = expr/p3*syp*(z1*czr+x1/r3*szr/sqpr)
fz9 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx9 = fx9*ct1+fz9*st1
hz9 =-fx9*st1+fz9*ct1
a1=a[0]+a[1]*cps
a2=a[2]+a[3]*cps
a3=a[4]+a[5]*cps
a4=a[6]+a[7]*cps
a5=a[8]+a[9]*cps
a6=a[10]+a[11]*cps
a7=a[12]+a[13]*cps
a8=a[14]+a[15]*cps
a9=a[16]+a[17]*cps
bx=a1*hx1+a2*hx2+a3*hx3+a4*hx4+a5*hx5+a6*hx6+a7*hx7+a8*hx8+a9*hx9
by=a1*hy1+a2*hy2+a3*hy3+a4*hy4+a5*hy5+a6*hy6+a7*hy7+a8*hy8+a9*hy9
bz=a1*hz1+a2*hz2+a3*hz3+a4*hz4+a5*hz5+a6*hz6+a7*hz7+a8*hz8+a9*hz9
# make the terms in the 2nd sum ("parallel" symmetry):
# i=1
sqqs= np.sqrt(1/q1**2+1/s1**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx1 =-sqqs*exqs*cyq*czs *sps
hy1 = exqs/q1*syq*czs *sps
fz1 = exqs*cyq/s1*szs *sps
hx1 = fx1*ct2+fz1*st2
hz1 =-fx1*st2+fz1*ct2
    sqqs= np.sqrt(1/q1**2+1/s2**2)
import os
import numpy as np
import random
from shapely.geometry import Polygon, MultiPolygon, LineString, MultiLineString, Point
from shapely.ops import polygonize, cascaded_union
from scipy.spatial.qhull import Delaunay
from crowddynamics.core.distance import distance_circle_line
from crowddynamics.simulation.agents import Agents, AgentGroup, Circular
from crowddynamics.core.geometry import geom_to_linear_obstacles
from crowddynamics.core.sampling import triangle_area_cumsum, random_sample_triangle
from crowddynamics.core.vector2D import length
from crowddynamics.core.distance import distance_circle_line, distance_circles
from finlandia_talo import FinlandiaTalo2ndFloor, FinlandiaTalo2ndFloorField
# Import Finlandia Hall floor field
field = FinlandiaTalo2ndFloorField()
# Import obstacles
obstacles = field.obstacles
# Minimal radius of a leader
max_r = 0.27
# Number of guides
n_guides = 10
# Number of times spawned leaders are allowed to overlap each other before the program is
# terminated.
#overlaps = n_guides * 20
overlaps = 10000
# Bound box representing the room. Used later in making Voronoi tessalation.
width = 150
height = 70
boundbox = Polygon([(0, 0), (0, height), (width, height), (width, 0)])
# Create a grid structure over the room geometry.
# Cell size in the grid determines the resolution of the micro-macro converted data
cell_size = 10
m = np.round(width / cell_size)
n = np.round(height / cell_size)
m = m.astype(int)
n = n.astype(int)
X = np.linspace(0, width, m + 1)
Y = np.linspace(0, height, n + 1)
hlines = [((x1, yi), (x2, yi)) for x1, x2 in zip(X[:-1], X[1:]) for yi in Y]
vlines = [((xi, y1), (xi, y2)) for y1, y2 in zip(Y[:-1], Y[1:]) for xi in X]
grids = list(polygonize(MultiLineString(hlines + vlines)))
# Number of cells
n_cells = len(grids)
# Load followers positions and radius
agents = np.load('agents_initialization_conference.npy')
positions = agents['position']
radii = agents['radius']
# Guides' spawn areas (shapely polygons)
guide_spawns = []
# Leader's spawn points
spawn_points = []
# Guides' spawn areas (cell numbers) (that intersect with the hexagon)
cells = []
# Check which cells intersect with the Finlandia floor field
for i in range(n_cells):
print(i)
cell = i
polygons = []
for j in range(8):
poly = field.spawns[j].intersection(grids[cell])
if not poly.is_empty:
polygons.append(poly)
spawn_poly = cascaded_union(polygons)
if not spawn_poly.is_empty:
guide_spawns.append(spawn_poly)
cells.append(cell)
print(cells)
# Loop through all the feasible cells and check if 10 guides can be positioned to them.
for i in range(len(guide_spawns)):
print(cells[i])
spawn_points = []
for j in range(n_guides):
n_spawnpoints = len(spawn_points)
geom = guide_spawns[i] - obstacles.buffer(max_r)
k = 0 # set overlaps counter to zero (the total number of overlaps, when positioning all guides)
if isinstance(geom, MultiPolygon):
n_polygons = len(geom)
for l in range(n_polygons):
vertices = np.asarray(geom[l].convex_hull.exterior)
delaunay = Delaunay(vertices)
mesh = vertices[delaunay.simplices]
if l == 0:
meshes = mesh
else:
meshes = np.concatenate((mesh, meshes), axis=0)
# Computes cumulative sum of the areas of the triangle mesh.
weights = triangle_area_cumsum(meshes)
weights /= weights[-1]
while k < overlaps:
                distances = []  # temporarily store distances from the spawned point to the previously spawned guides
# During a single spawn, the number of times the guide overlaps with an obstacle/guide
n_overlaps = 0
# Spawn a random point for the guide.
x = np.random.random()
rand_triangle = np.searchsorted(weights, x)
a, b, c = meshes[rand_triangle]
spawn_point = random_sample_triangle(a, b, c)
#print(spawn_point)
if n_spawnpoints != 0: # if there are no other spawned guides skip this step
for l in range(0, n_spawnpoints):
d = length(spawn_point - spawn_points[l])
h = d - 2 * max_r
distances.append(h)
distances_array = distances
distances_array = np.asarray(distances_array)
n_overlaps += len(np.where(distances_array < 0)[0])
for obstacle in obstacles:
obstacle = list(obstacle.coords)
n_obstacle_points = len(obstacle)
for l in range(0, n_obstacle_points):
if l == n_obstacle_points - 1:
h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[l]),
np.asarray(obstacle[0]))
else:
h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[l]),
np.asarray(obstacle[l + 1]))
if h < 0.0:
n_overlaps += 1
for agent in range(len(radii)):
#print(positions[agent])
#print(radii[agent])
#print(spawn_point)
#print(max_r)
h, _ = distance_circles(positions[agent], radii[agent], spawn_point, max_r)
if h < 0.0:
n_overlaps += 1
if n_overlaps == 0:
# Append the point to spawn points
print("{}{}{}".format('Leader number ', j+1, ' fits in the cell'))
spawn_points.append([spawn_point[0], spawn_point[1]])
break
k += 1
if k == overlaps:
print("{}{}{}".format('Leader number ', j+1, ' does not fit in the cell'))
break
else:
vertices = np.asarray(geom.convex_hull.exterior)
delaunay = Delaunay(vertices)
mesh = vertices[delaunay.simplices]
weights = triangle_area_cumsum(mesh)
weights /= weights[-1]
while k < overlaps:
                distances = []  # temporarily store distances from the spawned point to the previously spawned guides
n_overlaps = 0 # for each attempt to position the guide, set number of overlaps to zero
# Spawn a random point for the guide
x = np.random.random()
rand_triangle = np.searchsorted(weights, x)
a, b, c = mesh[rand_triangle]
spawn_point = random_sample_triangle(a, b, c)
#print(spawn_point)
if n_spawnpoints != 0:
for l in range(0, n_spawnpoints):
d = length(spawn_point - spawn_points[l])
h = d - 2 * max_r
distances.append(h)
distances_array = distances
distances_array = np.asarray(distances_array)
n_overlaps += len(np.where(distances_array < 0)[0])
for obstacle in obstacles:
obstacle = list(obstacle.coords)
n_obstacle_points = len(obstacle)
for l in range(0, n_obstacle_points):
if l == n_obstacle_points - 1:
h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[l]),
np.asarray(obstacle[0]))
else:
h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[l]),
                                                    np.asarray(obstacle[l + 1]))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import os
import numpy as np
import tensorflow as tf
from l2l import util
from l2l.tools.custom_tst import *
from l2l.datasets import *
from l2l.experiments.rnn_clever_recursive import *
class TestRnnCleverRecursive(CustomTest):
def setUp(self):
return super().setUp()
def test_call_optimizer_twice(self):
cells = get_optimizer()
cells_2 = get_optimizer()
self.assertTrue(True)
tf.reset_default_graph()
def test_run_optimizer_build(self):
# we only build the graph, but never run it
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.int32, shape=[None, 10])
coordinates, _ = util.get_variables(get_model_loss, {'x': x,
'y': y,
'custom_variables': None})
loss = get_model_loss(x, y, custom_variables=coordinates)
grads = util.get_grads(loss, coordinates)
optimizer = get_optimizer()
hidden = util.get_lstm_state_tuples(optimizer, grads[0])
output, state = optimizer(util.reshape_inputs(grads[0]), hidden)
self.assertTrue(True)
tf.reset_default_graph()
def test_run_optimizer(self):
# we just build the graph here
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.int32, shape=[None, 10])
coordinates, _ = util.get_variables(get_model_loss, {'x': x,
'y': y,
'custom_variables': None})
loss = get_model_loss(x, y, custom_variables=coordinates)
grads = util.get_grads(loss, coordinates)
output, state, delta = run_optimizer(grads[0])
self.assertTrue(True)
tf.reset_default_graph()
def test_run_optimizer_different_states(self):
# test if the graph can be built without any
# errors
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.int32, shape=[None, 10])
coordinates, _ = util.get_variables(get_model_loss, {'x': x,
'y': y,
'custom_variables': None})
loss = get_model_loss(x, y, custom_variables=coordinates)
grads = util.get_grads(loss, coordinates)
outputs, states, delta = run_optimizer_for_different_states(grads)
self.assertTrue(True)
tf.reset_default_graph()
def test_run_loop(self):
# build the complete running loop graph
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.int32, shape=[None, 10])
input_arguments = {'x': x, 'y': y}
coordinates, _ = util.get_variables(get_model_loss, {'x': x,
'y': y,
'custom_variables': None})
model_loss = get_model_loss(x, y, custom_variables=coordinates)
loop_loss = run_loop(input_arguments, coordinates)
self.assertTrue(True)
def test_run_one_iteration_loop(self):
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.int32, shape=[None, 10])
input_arguments = {'x': x, 'y': y}
coordinates, constants = util.get_variables(get_model_loss, {'x': x,
'y': y,
'custom_variables': None})
loss, reset_loop_vars, update_op, final_vars, final_loss = run_loop(
input_arguments, coordinates, constants)
with tf.name_scope("reset"):
vars = coordinates
if len(constants) > 0:
                vars = np.concatenate((coordinates, constants))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 14:07:02 2019
@author: marcoaqil
"""
import numpy as np
from psychopy import visual
from psychopy import tools
class ApertureStim(object):
def __init__(self, session):
self.session = session
self.session.win.allowStencil = True
self.aperture_factor = self.session.settings['PRF stimulus settings'].get('aperture_factor_of_prf_size')
self.aperture_stimulus = visual.Aperture(win=self.session.win,
size=(self.session.size_prf_pix*self.aperture_factor,
self.session.size_prf_pix*self.aperture_factor),
pos=(self.session.x_loc_pix,
self.session.y_loc_pix))
self.aperture_stimulus.enabled = False
def draw(self):
pass
# self.aperture_stimulus.enabled = True
class PRFStim(object):
def __init__(self, session,
squares_in_bar=2 ,
bar_width_deg=1.25,
tex_nr_pix=2048,
flicker_frequency=6,
**kwargs):
self.session = session
self.squares_in_bar = squares_in_bar
self.bar_width_deg = bar_width_deg
self.tex_nr_pix = tex_nr_pix
self.flicker_frequency = flicker_frequency
#calculate the bar width in pixels, with respect to the texture
self.bar_width_in_pixels = tools.monitorunittools.deg2pix(bar_width_deg, self.session.monitor)*self.tex_nr_pix/self.session.win.size[1]
#construct basic space for textures
bar_width_in_radians = np.pi*self.squares_in_bar
bar_pixels_per_radian = bar_width_in_radians/self.bar_width_in_pixels
pixels_ls = np.linspace((-self.tex_nr_pix/2)*bar_pixels_per_radian,(self.tex_nr_pix/2)*bar_pixels_per_radian,self.tex_nr_pix)
tex_x, tex_y = np.meshgrid(pixels_ls, pixels_ls)
        # construct textures, also making sure that the single-square bar is centered in the middle
if squares_in_bar==1:
self.sqr_tex = np.sign(np.sin(tex_x-np.pi/2) * np.sin(tex_y))
self.sqr_tex_phase_1 = np.sign(np.sin(tex_x-np.pi/2) * np.sin(tex_y+np.sign(np.sin(tex_x-np.pi/2))*np.pi/4))
self.sqr_tex_phase_2 = np.sign(np.sign(np.abs(tex_x-np.pi/2)) * np.sin(tex_y+np.pi/2))
else:
self.sqr_tex = np.sign(np.sin(tex_x) * np.sin(tex_y))
self.sqr_tex_phase_1 = np.sign(np.sin(tex_x) * np.sin(tex_y+np.sign(np.sin(tex_x))*np.pi/4))
self.sqr_tex_phase_2 = np.sign(np.sign(np.abs(tex_x)) * np.sin(tex_y+np.pi/2))
bar_start_idx=int(np.round(self.tex_nr_pix/2-self.bar_width_in_pixels/2))
bar_end_idx=int(bar_start_idx+self.bar_width_in_pixels)+1
self.sqr_tex[:,:bar_start_idx] = 0
self.sqr_tex[:,bar_end_idx:] = 0
self.sqr_tex_phase_1[:,:bar_start_idx] = 0
self.sqr_tex_phase_1[:,bar_end_idx:] = 0
self.sqr_tex_phase_2[:,:bar_start_idx] = 0
self.sqr_tex_phase_2[:,bar_end_idx:] = 0
#construct stimuli with psychopy and textures in different position/phases
self.checkerboard_1 = visual.GratingStim(self.session.win,
tex=self.sqr_tex,
units='pix',
size=[self.session.win.size[1],self.session.win.size[1]])
self.checkerboard_2 = visual.GratingStim(self.session.win,
tex=self.sqr_tex_phase_1,
units='pix',
size=[self.session.win.size[1],self.session.win.size[1]])
self.checkerboard_3 = visual.GratingStim(self.session.win,
tex=self.sqr_tex_phase_2,
units='pix',
size=[self.session.win.size[1],self.session.win.size[1]])
#for reasons of symmetry, some stimuli (4 and 8 in the order) are generated differently if the bar has only one square
if self.squares_in_bar!=1:
self.checkerboard_4 = visual.GratingStim(self.session.win,
tex=np.fliplr(self.sqr_tex_phase_1),
units='pix',
size=[self.session.win.size[1],self.session.win.size[1]])
self.checkerboard_8 = visual.GratingStim(self.session.win,
tex=-np.fliplr(self.sqr_tex_phase_1),
units='pix',
size=[self.session.win.size[1],self.session.win.size[1]])
else:
self.checkerboard_4 = visual.GratingStim(self.session.win,
                                                 tex=np.flipud(self.sqr_tex_phase_1),
'''
Created on December 2019.
@author: <NAME> <<EMAIL>>
https://github.com/tayebiarasteh/
'''
import numpy as np
from Layers.Base import *
class ReLU(base_layer):
def __init__(self):
super().__init__()
pass
def forward(self, input_tensor):
'''
returns the input_tensor for the next layer.
'''
self.input_tensor = input_tensor
return np.maximum(0, input_tensor) #element-wise
def backward(self, error_tensor):
'''
returns the error_tensor for the next layer.
'''
        return np.where(self.input_tensor > 0, error_tensor, 0)
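if __name__ == "__main__":
    # Hedged self-check of the layer above; the input values are made up for illustration.
    relu = ReLU()
    print(relu.forward(np.array([[-1.0, 2.0]])))   # -> [[0. 2.]]
    print(relu.backward(np.array([[5.0, 7.0]])))   # -> [[0. 7.]] (gradient passes only where input > 0)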
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_basic_agents.ipynb (unless otherwise specified).
__all__ = ['ActionSelector', 'ArgmaxActionSelector', 'EpsilonGreedyActionSelector', 'ProbabilityActionSelector',
'default_states_preprocessor', 'float32_preprocessor', 'BaseAgent', 'TestAgent', 'DiscreteAgent', 'DQNAgent',
'TargetNet', 'PolicyAgent', 'ActorCriticAgent']
# Cell
import torch, torch.nn.functional as F
from torch import ByteTensor, DoubleTensor, FloatTensor, HalfTensor, LongTensor, ShortTensor, Tensor
from torch import nn, optim, as_tensor
from torch.utils.data import BatchSampler, DataLoader, Dataset, Sampler, TensorDataset
from torch.nn.utils import weight_norm, spectral_norm
from dataclasses import asdict,dataclass
from typing import Callable,Tuple,Union
# from fastai.torch_core import *
# from fastai.basic_data import *
# from fastai.basic_train import *
from fastai.basics import *
import textwrap
import numpy as np
import logging
"Note these are modified versions of 'Shmuma/Ptan'. Github, 2020, https://github.com/Shmuma/ptan/blob/master/ptan/agent.py. Accessed 13 June 2020."
# Cell
class ActionSelector:
"Abstract class which converts scores to the actions."
def __call__(self,scores):raise NotImplementedError
class ArgmaxActionSelector(ActionSelector):
"Selects actions using argmax."
def __call__(self,scores):
assert isinstance(scores,np.ndarray)
return np.argmax(scores,axis=1)
@dataclass
class EpsilonGreedyActionSelector(ActionSelector):
epsilon:float=0.05
selector:ActionSelector=ArgmaxActionSelector()
def __call__(self,scores):
assert isinstance(scores,np.ndarray)
bs,n_a=scores.shape
a=self.selector(scores)
mask=np.random.random(size=bs)<self.epsilon
rand_a=np.random.choice(n_a, sum(mask))
a[mask]=rand_a
return a
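# A hedged usage sketch (the scores below are invented for illustration):
#     selector = EpsilonGreedyActionSelector(epsilon=0.1)
#     actions = selector(np.array([[0.1, 0.9], [0.8, 0.2]]))
#     # each row keeps its argmax ([1, 0]) with probability 1 - epsilon, otherwise a random action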
class ProbabilityActionSelector(ActionSelector):
"Converts probabilities of actions into action by sampling them."
def __call__(self,probs):
assert isinstance(probs,np.ndarray)
actions=[np.random.choice(len(prob),p=prob) for prob in probs]
return np.array(actions)
# Cell
def default_states_preprocessor(s,dtype=np.float32):
"Convert list of states into the form suitable for model. By default we assume Variable."
    np_s=np.expand_dims(s,0) if len(np.array(s).shape)==1 else np.array(s, copy=False)
import stk
import numpy as np
import stko
import os
def main():
examples_output = 'aligner_directory'
if not os.path.exists(examples_output):
os.mkdir(examples_output)
bb1 = stk.BuildingBlock('NCCN', [stk.PrimaryAminoFactory()])
bb2 = stk.BuildingBlock(
smiles='O=CC(C=O)C=O',
functional_groups=[stk.AldehydeFactory()],
)
cage = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
(bb1, bb2), optimizer=stk.MCHammer(num_steps=2000),
),
)
mol_list = [
(stk.BuildingBlock('NCCNCCN'), (('N', 'N'), ), True),
(
stk.BuildingBlock('CN1C=NC2=C1C(=O)N(C(=O)N2C)C'),
(('N', 'N'), ('O', 'O'), ),
True,
),
(stk.BuildingBlock('C1=CN=CN=C1'), (('N', 'N'), ), True),
(stk.BuildingBlock('c1ccccc1'), (('C', 'C'), ), True),
(stk.BuildingBlock('C1CCCCC1'), (('C', 'C'), ), True),
(cage, (('N', 'N'), ), True),
]
_opt = stko.UFF()
for i, (mol, pairs, uff_opt) in enumerate(mol_list):
initial = mol.with_rotation_about_axis(
            1.34, np.array((0, 0, 1)), np.array((0, 0, 0)),
import numpy as np
from astropy.io import fits
from astropy.table import Table, vstack
from astropy.wcs import WCS
import os
import argparse
import logging, traceback
import time
import pandas as pd
from copy import copy, deepcopy
from config import solid_angle_dpi_fname, rt_dir
from sqlite_funcs import get_conn, make_timeIDs
from wcs_funcs import world2val
from event2dpi_funcs import det2dpis, mask_detxy
from models import Bkg_Model_wFlatA, CompoundModel,\
im_dist, Point_Source_Model_Binned_Rates, Source_Model_InOutFoV
from flux_models import Plaw_Flux, Cutoff_Plaw_Flux
from gti_funcs import add_bti2gti, bti2gti, gti2bti, union_gtis
from ray_trace_funcs import RayTraces
from coord_conv_funcs import convert_radec2imxy, convert_imxy2radec, imxy2theta_phi, theta_phi2imxy
from LLH import LLH_webins
from minimizers import NLLH_ScipyMinimize_Wjacob, NLLH_ScipyMinimize
from do_llh_inFoV4realtime2 import do_scan_around_peak, find_peaks2scan, parse_bkg_csv
def cli():
parser = argparse.ArgumentParser()
parser.add_argument('--evfname', type=str,\
help="Event data file",
default='filter_evdata.fits')
parser.add_argument('--dmask', type=str,\
help="Detmask fname",
default='detmask.fits')
parser.add_argument('--job_id', type=int,\
help="ID to tell it what seeds to do",\
default=-1)
parser.add_argument('--Njobs', type=int,\
help="Total number of jobs submitted",\
default=64)
parser.add_argument('--Ntrials', type=int,\
help="Number of trials to run",\
default=8)
parser.add_argument('--Afact', type=float,\
help="A factor to use",\
default=1.0)
parser.add_argument('--theta', type=float,\
help="Theta to sim at",\
default=0.0)
parser.add_argument('--phi', type=float,\
help="phi to sim at",\
default=0.0)
parser.add_argument('--trig_time', type=float,\
help="trigger time to center sims at",\
default=0.0)
parser.add_argument('--dbfname', type=str,\
help="Name to save the database to",\
default=None)
parser.add_argument('--pcfname', type=str,\
help="partial coding file name",\
default='pc_2.img')
parser.add_argument('--bkg_fname', type=str,\
help="Name of the file with the bkg fits",\
default='bkg_estimation.csv')
parser.add_argument('--log_fname', type=str,\
help="Name for the log file",\
default='sim_and_min')
args = parser.parse_args()
return args
def mk_sim_evdata(sim_mod, sim_params, dur, tstart):
# first make sim DPIs
# then make an event for each count
# so each event will have the DETX, DETY and
# it can just be given the energy of middle of the ebin
# then can assign times with just a uniform distribution
# from tstart to tstop
# then sort the events and return the Table
col_names = ['TIME', 'DET_ID', 'EVENT_FLAGS', 'PHA', 'DETX', 'DETY',\
'PI', 'ENERGY']
# only need to assign, TIME, DETX, and DETY
# make all flags 0, and the rest don't matter
tab = Table(names=['DETX', 'DETY', 'ENERGY'], dtype=(np.int, np.int, np.float))
ebins0 = sim_mod.ebins0
ebins1 = sim_mod.ebins1
rate_dpis = sim_mod.get_rate_dpis(sim_params)
sim_dpis = np.random.poisson(lam=(rate_dpis*dur))
for ebin, sim_dpi in enumerate(sim_dpis):
simdpi = np.zeros_like(sim_mod.bl_dmask, dtype=np.int)
simdpi[sim_mod.bl_dmask] = sim_dpi
detys, detxs = np.where(simdpi>0)
emid = (ebins1[ebin]+ebins0[ebin])/2.
for jj in range(len(detys)):
dety = detys[jj]
detx = detxs[jj]
for ii in range(simdpi[dety,detx]):
row = (detx, dety, emid)
tab.add_row(row)
tab['TIME'] = dur*np.random.random(size=len(tab)) + tstart
tab['DET_ID'] = np.zeros(len(tab), dtype=np.int)
tab['PHA'] = np.ones(len(tab), dtype=np.int)
tab['EVENT_FLAGS'] = np.zeros(len(tab), dtype=np.int)
tab['PI'] = np.rint(tab['ENERGY']*10).astype(np.int)
tab.sort(keys='TIME')
return tab
def analysis_for_imxy_square(imx0, imx1, imy0, imy1, bkg_bf_params_list,\
bkg_mod, sig_mod, ev_data,\
ebins0, ebins1, tbins0, tbins1,\
timeIDs, TS2keep=4.5,\
max_frac2keep=0.75, minTS2scan=6.0):
bl_dmask = bkg_mod.bl_dmask
# dimxy = 0.0025
dimxy = np.round(imx1 - imx0, decimals=4)
imstep = 0.003
imxstep = 0.004
# imx_ax = np.arange(imx0, imx1+dimxy/2., dimxy)
# imy_ax = np.arange(imy0, imy1+dimxy/2., dimxy)
# imxg,imyg = np.meshgrid(imx_ax, imy_ax)
# imx_ax = np.arange(imx0, imx1, imxstep)
# imy_ax = np.arange(imy0, imy1, imstep)
imx_ax = np.arange(0, dimxy, imxstep)
imy_ax = np.arange(0, dimxy, imstep)
imxg,imyg = np.meshgrid(imx_ax, imy_ax)
bl = np.isclose((imyg*1e4).astype(np.int)%int(imstep*2*1e4),0)
imxg[bl] += imxstep/2.
imxs = np.ravel(imxg) + imx0
imys = np.ravel(imyg) + imy0
Npnts = len(imxs)
print(Npnts)
logging.info("%d imxy points to do" %(Npnts))
thetas, phis = imxy2theta_phi(imxs, imys)
gamma_ax = np.linspace(-0.4, 1.6, 8+1)
gamma_ax = np.linspace(-0.4, 1.6, 4+1)[1:-1]
# gamma_ax = np.array([0.4, 0.9])
# gamma_ax = np.linspace(-0.4, 1.6, 3+1)
Epeak_ax = np.logspace(np.log10(45.0), 3, 10+1)
Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[1:-1]
Epeak_ax = np.logspace(np.log10(45.0), 3, 4+1)[1:-1]
# Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[3:]
logging.info("Epeak_ax: ")
logging.info(Epeak_ax)
logging.info("gammas_ax: ")
logging.info(gamma_ax)
# Epeak_ax = np.logspace(np.log10(25.0), 3, 3+1)
gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
gammas = gammas.ravel()
Epeaks = Epeaks.ravel()
Nspec_pnts = len(Epeaks)
ntbins = len(tbins0)
# rt_obj = RayTraces(rt_dir)
# fp_obj = FootPrints(fp_dir)
# sig_mod = Source_Model_InOutFoV(flux_mod, [ebins0,ebins1], bl_dmask,\
# rt_obj, use_deriv=True)
# sig_mod.set_theta_phi(np.mean(thetas), np.mean(phis))
comp_mod = CompoundModel([bkg_mod, sig_mod])
sig_miner = NLLH_ScipyMinimize_Wjacob('')
tmin = np.min(tbins0)
tmax = np.max(tbins1)
if (tmax - tmin) > 40.0:
logging.debug("tmax - tmin > 40.0s, using twinds for tbl")
gti_dict = {'START':tbins0,'STOP':tbins1}
gti_twinds = Table(data=gti_dict)
gtis = union_gtis([gti_twinds])
tbl = mk_gti_bl(ev_data['TIME'], gtis, time_pad=0.1)
logging.debug("np.sum(tbl): %d"%(np.sum(tbl)))
else:
tbl = (ev_data['TIME']>=(tmin-1.0))&(ev_data['TIME']<(tmax+1.0))
logging.debug("np.sum(tbl): %d"%(np.sum(tbl)))
sig_llh_obj = LLH_webins(ev_data[tbl], ebins0, ebins1, bl_dmask, has_err=True)
sig_llh_obj.set_model(comp_mod)
flux_params = {'A':1.0, 'gamma':0.5, 'Epeak':1e2}
bkg_name = bkg_mod.name
pars_ = {}
pars_['Signal_theta'] = np.mean(thetas)
pars_['Signal_phi'] = np.mean(phis)
for pname,val in bkg_bf_params_list[0].items():
# pars_['Background_'+pname] = val
pars_[bkg_name+'_'+pname] = val
for pname,val in flux_params.items():
pars_['Signal_'+pname] = val
sig_miner.set_llh(sig_llh_obj)
fixed_pnames = list(pars_.keys())
fixed_vals = list(pars_.values())
trans = [None for i in range(len(fixed_pnames))]
sig_miner.set_trans(fixed_pnames, trans)
sig_miner.set_fixed_params(fixed_pnames, values=fixed_vals)
sig_miner.set_fixed_params(['Signal_A'], fixed=False)
res_dfs_ = []
for ii in range(Npnts):
print(imxs[ii], imys[ii])
print(thetas[ii], phis[ii])
sig_miner.set_fixed_params(['Signal_theta', 'Signal_phi'],\
values=[thetas[ii],phis[ii]])
res_dfs = []
for j in range(Nspec_pnts):
flux_params['gamma'] = gammas[j]
flux_params['Epeak'] = Epeaks[j]
sig_mod.set_flux_params(flux_params)
res_dict = {}
res_dict['Epeak'] = Epeaks[j]
res_dict['gamma'] = gammas[j]
nllhs = np.zeros(ntbins)
As = np.zeros(ntbins)
for i in range(ntbins):
parss_ = {}
for pname,val in bkg_bf_params_list[i].items():
# pars_['Background_'+pname] = val
parss_[bkg_name+'_'+pname] = val
sig_miner.set_fixed_params(list(parss_.keys()), values=list(parss_.values()))
t0 = tbins0[i]
t1 = tbins1[i]
dt = t1 - t0
sig_llh_obj.set_time(tbins0[i], tbins1[i])
try:
pars, nllh, res = sig_miner.minimize()
As[i] = pars[0][0]
nllhs[i] = nllh[0]
except Exception as E:
logging.error(E)
logging.error(traceback.format_exc())
logging.error("Failed to minimize seed: ")
logging.error((imxs[ii],imys[ii]))
logging.error((timeIDs[i]))
nllhs[i] = np.nan
# print "res: "
# print res
res_dict['nllh'] = nllhs
res_dict['A'] = As
res_dict['time'] = np.array(tbins0)
res_dict['dur'] = np.array(tbins1)-np.array(tbins0)
res_dict['timeID'] = np.array(timeIDs)
res_dict['theta'] = thetas[ii]
res_dict['phi'] = phis[ii]
res_dict['imx'] = imxs[ii]
res_dict['imy'] = imys[ii]
res_dfs.append(pd.DataFrame(res_dict))
# logging.info("Done with spec %d of %d" %(j+1,Nspec_pnts))
res_df = pd.concat(res_dfs, ignore_index=True)
bkg_nllhs = np.zeros(len(res_df))
bkg_bf_param_dict = {}
for i in range(ntbins):
t0 = tbins0[i]
t1 = tbins1[i]
dt = t1 - t0
sig_llh_obj.set_time(tbins0[i], tbins1[i])
for pname,val in bkg_bf_params_list[i].items():
pars_[bkg_name+'_'+pname] = val
bkg_bf_param_dict[timeIDs[i]] = bkg_bf_params_list[i]
pars_['Signal_theta'] = thetas[ii]
pars_['Signal_phi'] = phis[ii]
pars_['Signal_A'] = 1e-10
bkg_nllh = -sig_llh_obj.get_logprob(pars_)
        bl = np.isclose(res_df['time']-t0,t0-t0)&np.isclose(res_df['dur'],dt)
from db_query import DBQuery
import numpy as np
from utils import convert_list_to_dict
from dialogue_config import all_intents, all_slots, usersim_default_key
import copy
class StateTracker:
"""Tracks the state of the episode/conversation and prepares the state representation for the agent.
Theo dõi state của tập / hội thoại và chuẩn bị biểu diễn trạng thái cho agent."""
def __init__(self, database, constants):
"""
The constructor of StateTracker.
The constructor of StateTracker which creates a DB query object, creates necessary state rep. dicts, etc. and
calls reset.
Parameters:
database (dict): The database with format dict(long: dict)
constants (dict): Loaded constants in dict
"""
self.db_helper = DBQuery(database)
self.match_key = usersim_default_key
self.intents_dict = convert_list_to_dict(all_intents)
self.num_intents = len(all_intents)
self.slots_dict = convert_list_to_dict(all_slots)
self.num_slots = len(all_slots)
self.max_round_num = constants['run']['max_round_num']
self.none_state = np.zeros(self.get_state_size())
self.reset()
def get_state_size(self):
"""Returns the state size of the state representation used by the agent."""
return 2 * self.num_intents + 7 * self.num_slots + 3 + self.max_round_num
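    # For example (hypothetical sizes, not values from the actual config): with
    # num_intents=10, num_slots=8 and max_round_num=20 the state vector would have
    # 2*10 + 7*8 + 3 + 20 = 99 entries.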
def reset(self):
"""Resets current_informs, history and round_num."""
self.current_informs = {}
# A list of the dialogues (dicts) by the agent and user so far in the conversation
self.history = []
self.round_num = 0
def print_history(self):
"""Helper function if you want to see the current history action by action."""
"""Hàm trợ giúp nếu bạn muốn xem lịch sử hiện tại của từng hành động."""
for action in self.history:
print(action)
def get_state(self, done=False):
"""
        Returns the state representation as a numpy array which is fed into the agent's neural network.
        The state representation contains useful information for the agent about the current state of the conversation.
        Processed by the agent to be fed into the neural network. Ripe for experimentation and optimization.
        Parameters:
            done (bool): Indicates whether this is the last dialogue in the episode/conversation. Default: False
Returns:
numpy.array: A numpy array of shape (state size,)
"""
        # If done then fill state with zeros
        if done:
            return self.none_state
        user_action = self.history[-1]  # the user action is the last action in the history
        # Get database info that is useful for the agent
        db_results_dict = self.db_helper.get_db_results_for_slots(self.current_informs)
        # The last agent action is the second-to-last action in the history, if the history is longer than 1
        last_agent_action = self.history[-2] if len(self.history) > 1 else None
        # Create one-hot of intents to represent the current user action
        user_act_rep = np.zeros((self.num_intents,))  # self.num_intents is the total number of distinct intents defined in config.py
        # self.intents_dict is a dict whose keys are the intents and whose values are their indexes in the list of intents
        user_act_rep[self.intents_dict[user_action['intent']]] = 1.0
        # the entry at the index of the user action's intent in the user action representation is set to 1
        # Create bag of inform slots representation to represent the current user action
        user_inform_slots_rep = np.zeros((self.num_slots,))
        for key in user_action['inform_slots'].keys():
            user_inform_slots_rep[self.slots_dict[key]] = 1.0  # self.slots_dict is like self.intents_dict, except it holds slots
        # the entry at the index of each key of the user's inform action in the user inform representation is set to 1
        # e.g. key = city => value of 'city' in slots_dict = 3 (see config.py) => user_inform_slots_rep[3] = 1.0
        # Create bag of request slots representation to represent the current user action
        user_request_slots_rep = np.zeros((self.num_slots,))
        for key in user_action['request_slots'].keys():
            user_request_slots_rep[self.slots_dict[key]] = 1.0
        # the entry at the index of each key of the user's request action in the user request representation is set to 1
        # Create bag of filled_in slots based on the current_slots
        current_slots_rep = np.zeros((self.num_slots,))
        for key in self.current_informs:
            current_slots_rep[self.slots_dict[key]] = 1.0
        # the entry at the index of each key of the current informs in the current slots representation is set to 1
        # Encode last agent intent
agent_act_rep = np.zeros((self.num_intents,))
if last_agent_action:
agent_act_rep[self.intents_dict[last_agent_action['intent']]] = 1.0
# Encode last agent inform slots
        agent_inform_slots_rep = np.zeros((self.num_slots,))
import tensorflow as tf
import numpy as np
np.random.seed(1234)
import os
import time
import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from builddata import *
from model_R_MeN_TripleCls_v1 import RMeN
# Parameters
# ==================================================
parser = ArgumentParser("RMeN", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--data", default="./data/", help="Data sources.")
parser.add_argument("--run_folder", default="../", help="Data sources.")
parser.add_argument("--name", default="WN11", help="Name of the dataset.")
parser.add_argument("--embedding_dim", default=50, type=int, help="Dimensionality of character embedding")
parser.add_argument("--learning_rate", default=0.0001, type=float, help="Learning rate")
parser.add_argument("--batch_size", default=8, type=int, help="Batch Size")
parser.add_argument("--num_epochs", default=50, type=int, help="Number of training epochs")
parser.add_argument("--saveStep", default=1, type=int, help="")
parser.add_argument("--neg_ratio", default=1.0, type=float, help="Number of negative triples generated by positive")
parser.add_argument("--allow_soft_placement", default=True, type=bool, help="Allow device soft device placement")
parser.add_argument("--log_device_placement", default=False, type=bool, help="Log placement of ops on devices")
parser.add_argument("--model_name", default='cora_trans', help="")
parser.add_argument("--dropout_keep_prob", default=0.5, type=float, help="Dropout keep probability")
parser.add_argument("--num_heads", default=2, type=int, help="Number of attention heads. 1 2 4")
parser.add_argument("--memory_slots", default=1, type=int, help="Number of memory slots. 1 2 4")
parser.add_argument("--head_size", default=32, type=int, help="")
parser.add_argument("--gate_style", default='memory', help="unit,memory")
parser.add_argument("--attention_mlp_layers", default=2, type=int, help="2 3 4")
parser.add_argument("--use_pos", default=1, type=int, help="1 when using positional embeddings. Otherwise.")
parser.add_argument("--score_function", default="time", help="time,last")
args = parser.parse_args()
print(args)
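# Example invocation (hypothetical script name and paths; adjust to the actual entry point):
#   python train_RMeN_TripleCls.py --data ./data/ --name WN11 --embedding_dim 50 \
#          --batch_size 8 --num_epochs 50 --num_heads 2 --memory_slots 1 --score_function time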
# Load data
print("Loading data...")
train, valid, test, words_indexes, indexes_words, \
headTailSelector, entity2id, id2entity, relation2id, id2relation = build_data(path=args.data, name=args.name)
data_size = len(train)
train_batch = Batch_Loader(train, words_indexes, indexes_words, headTailSelector, \
entity2id, id2entity, relation2id, id2relation, batch_size=args.batch_size,
neg_ratio=args.neg_ratio)
entity_array = np.array(list(train_batch.indexes_ents.keys()))
print("Using pre-trained model.")
lstEmbed = np.empty([len(words_indexes), args.embedding_dim]).astype(np.float32)
initEnt, initRel = init_norm_Vector(args.data + args.name + '/relation2vec' + str(args.embedding_dim) + '.init',
args.data + args.name + '/entity2vec' + str(args.embedding_dim) + '.init', args.embedding_dim)
for _word in words_indexes:
if _word in relation2id:
index = relation2id[_word]
_ind = words_indexes[_word]
lstEmbed[_ind] = initRel[index]
elif _word in entity2id:
index = entity2id[_word]
_ind = words_indexes[_word]
lstEmbed[_ind] = initEnt[index]
else:
print('*****************Error********************!')
break
lstEmbed = np.array(lstEmbed, dtype=np.float32)
assert len(words_indexes) % (len(entity2id) + len(relation2id)) == 0
#######################
x_valid = []
y_valid = []
with open(args.data + '/' + args.name + '/valid.txt') as f:
lines = f.readlines()
for _, line in enumerate(lines):
sub, obj, rel, val = parse_line(line)
x_valid.append([words_indexes[sub], words_indexes[rel], words_indexes[obj]])
y_valid.append(val)
x_valid = np.array(x_valid).astype(np.int32)
y_valid = np.array(y_valid).astype(np.float32)
x_test = []
y_test = []
with open(args.data + '/' + args.name + '/test.txt') as f:
lines = f.readlines()
for _, line in enumerate(lines):
sub, obj, rel, val = parse_line(line)
x_test.append([words_indexes[sub], words_indexes[rel], words_indexes[obj]])
y_test.append(val)
x_test = np.array(x_test).astype(np.int32)
y_test = | np.array(y_test) | numpy.array |
#!/usr/bin/env python3
#FAST ROLLING HOUGH TRANSFORM
#<NAME>, <NAME>
#-----------------------------------------------------------------------------------------
#Imports
#-----------------------------------------------------------------------------------------
from __future__ import division #Must be first line of code in the file
from __future__ import print_function
from builtins import filter, input, zip, range
from functools import reduce
from astropy.io import fits
import argparse
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
import scipy.ndimage
import math
import os
import sys
import string
import tempfile
import shutil
import time
import fnmatch
import matplotlib.pyplot as plt
import numpy as np
#-----------------------------------------------------------------------------------------
# Initialization 1 of 3: Calculation Parameters
#-----------------------------------------------------------------------------------------
# Diameter of a 'window' to be evaluated at one time
WLEN = 55
# Fraction (percent) of one angle that must be 'lit up' to be counted
FRAC = 0.70
# Smoothing radius of unsharp mask function
SMR = 15
# Compute the standard RHT (False sets the dRHT)
ORIGINAL = True
#-----------------------------------------------------------------------------------------
# Initialization 2 of 3: Runtime Variable
#-----------------------------------------------------------------------------------------
# Optional Local Files
# Name of Readme file included with this software
README = 'README'
# Output Formatting
# Directory for RHT output
OUTPUT = '.'
if not os.path.isdir(OUTPUT):
os.mkdir(OUTPUT)
# Output format is standard fits. In the future we may support saved numpy arrays.
xyt_format = '.fits'
# String that will be added to the input filename to denote RHT output.
xyt_suffix = '_xyt'
# Limits ouput to 10^DIGITS files per input (_xyt00.fits to _xyt99.fits)
DIGITS = 2
# User Interface
# Width of some displayed text objects
TEXTWIDTH = 70
# Displays a progress bar, at a minor cost to speed.
PROGRESS = True
# Displays information that would be helpful to developers and advanced users.
DEBUG = True
# Allows processing of larger files than RAM alone would allow
# Give the program permission to create a temporary directory for RHT data.
BUFFER = True
# Single precision
DTYPE = np.float32
# Maximum number of bytes allowed for a single buffer file. There can be multiple buffer files.
FILECAP = int(5e8)
# Excluded Data Types
BAD_0 = False
BAD_INF = True
BAD_Neg = False
# Timers #______________Should be put into a timer object
start_time = None
stop_time = None
#-----------------------------------------------------------------------------------------
# Utility Functions
#-----------------------------------------------------------------------------------------
def announcement(strings):
result = ''
if type(strings) == list:
strings.append('*'*TEXTWIDTH)
strings.insert(0, '*'*TEXTWIDTH)
result = '\n'.join(str.center(str(s), TEXTWIDTH, ' ') for s in strings)
elif type(strings) == str:
result = announcement(strings.split('\n'))
else:
result = announcement(str(strings))
return result
def announce(strings):
print(announcement(strings))
def update_progress(progress, message='Progress:', final_message='Finished:'):
# Create progress meter that looks like:
# message + ' ' + '[' + '#'*p + ' '*(length-p) + ']' + time_message
if not PROGRESS:
# Allows time-sensitive jobs to be completed without timing overhead
return
if not 0.0 <= progress <= 1.0:
# Fast fail for values outside the allowed range
raise ValueError('Progress value outside allowed value in update_progress')
#TODO_________________Slow Global Implementation
global start_time
global stop_time
# First call
if 0.0 == progress:
start_time = time.time()
stop_time = None
return
# Second call
elif stop_time is None:
stop_time = start_time + (time.time() - start_time)/progress
# Randomly callable re-calibration
elif np.random.rand() > 0.98:
stop_time = start_time + (time.time() - start_time)/progress
# Normal Call with Progress
sec_remaining = int(stop_time - time.time())
if sec_remaining >= 60:
time_message = ' < ' + str(sec_remaining//60 +1) + 'min'
else:
time_message = ' < ' + str(sec_remaining +1) + 'sec'
length = int(0.55 * TEXTWIDTH)
messlen = TEXTWIDTH-(length+3)-len(time_message)
message = str.ljust(message, messlen)[:messlen]
p = int(length*progress/1.0)
sys.stdout.write('\r{2} [{0}{1}]{3}'.format('#'*p, ' '*(length-p), message, time_message))
sys.stdout.flush()
# Final call
if p == length:
total = int(time.time()-start_time)
if total > 60:
time_message = ' ' + str(total//60) + 'min'
else:
time_message = ' ' + str(total) + 'sec'
final_offset = TEXTWIDTH-len(time_message)
final_message = str.ljust(final_message, final_offset)[:final_offset]
sys.stdout.write('\r{0}{1}'.format(final_message, time_message))
sys.stdout.flush()
start_time = None
stop_time = None
print('')
#-----------------------------------------------------------------------------------------
# Naming Conventions and Conversions
#-----------------------------------------------------------------------------------------
def filename_from_path(filepath):
# Maintains all characters in path except for those after and including the last period
return os.path.basename('.'.join( filepath.split('.')[ 0:filepath.count('.') ] ) )
def xyt_name_factory(filepath, wlen, smr, frac, original):
# Returns the filename that _xyt output should have.
# Will have the general behavior: filename_xyt00.format
# filepath ~ dirname/name.fits
# filename ~ dirname/name
# fnmatch_string ~ name + xyt_suffix + ?? + xyt_format
# Remove RHT-specific endings
filename = filename_from_path(filepath)
if OUTPUT == '.':
dirname = os.path.dirname(os.path.abspath(filepath))
else:
dirname = OUTPUT
fnmatch_string = filename + xyt_suffix + '?'*DIGITS + xyt_format
xyt_files = fnmatch.filter(os.listdir(dirname), fnmatch_string)
xyt_array = [None]*(10**DIGITS)
# Try to find a parameter match among existing files
left = str.find(fnmatch_string, '?')
for x in xyt_files:
abs_x = os.path.join(dirname, x)
if getXYT(abs_x, match_only={'WLEN':wlen, 'SMR':smr, 'FRAC':frac, 'ORIGINAL':original} ): #TODO ______________________________________#print 'Found _xyt file matching your input parameters!'
return os.path.normpath(abs_x)
else:
xyt_array[int( x[left:(left+DIGITS)] )] = x
# Try to find the lowest-numbered name that is unoccupied
for i, y in enumerate(xyt_array):
if y is None:
# Print 'Found _xyt available for these parameters!'
int_string = str.zfill(str(i), DIGITS)[:DIGITS]
xyt_filename = filename+ xyt_suffix+ int_string+ xyt_format
return os.path.normpath(os.path.join(dirname, xyt_filename))
# Failure: No match and no available output slots
xyt_filename = str.replace(fnmatch_string, '?', '0')
    print('In xyt_filename(): No existing output matches the input parameters and no namespace is available')
print('Overwrite ' + xyt_filename + '?..')
choice = input(' [y]/n/'+'0'*(DIGITS-1)+'x')
if len(choice) == 0 or choice == 'y':
return os.path.normpath(os.path.join(dirname, xyt_filename))
elif choice != 'n':
int_string = str.zfill(str(int(choice)), DIGITS)[:DIGITS]
xyt_filename = filename+ xyt_suffix+ int_string+ xyt_format
return os.path.normpath(os.path.join(dirname, xyt_filename))
else:
        raise RuntimeError('In xyt_filename(): No existing output matches the input parameters and no namespace is available')
#-----------------------------------------------------------------------------------------
# Image Processing Functions
#-----------------------------------------------------------------------------------------
def is_valid_file(filepath):
'''
filepath: Potentially a string path to a source file for the RHT
return: Boolean, True ONLY when the data might have rht() applied successfully
'''
excluded_file_endings = [] #TODO___More Endings
if any([filepath.endswith(e) for e in excluded_file_endings]):
return False
excluded_file_content = ['_xyt', '_backproj', '_spectrum', '_plot', '_result']
if any([e in filepath for e in excluded_file_content]):
return False
return True
def ntheta_w(w=WLEN):
# Returns the number of theta bins in each Hthet array
# Linearly proportional to wlen
return int(math.ceil( np.pi*(w-1)/np.sqrt(2.0) ))
# Saves the data into the given xyt_filename, depending upon filetype. Supports .fits and .npz currently
def putXYT(filepath, xyt_filename, hi, hj, hthets, wlen, smr, frac, original, backproj=None, compressed=True):
if xyt_filename.endswith('.npz'):
# IMPLEMENTATION1: Zipped Numpy arrays of Data #TODO _______________________________________ALWAYS BE CAREFUL WITH HEADER VARS
if compressed:
save = np.savez_compressed
else:
save = np.savez
if backproj is None:
save(xyt_filename, hi=hi, hj=hj, hthets=hthets, wlen=wlen, smr=smr, frac=frac, original=original, ntheta=hthets.shape[1])
else:
save(xyt_filename, hi=hi, hj=hj, hthets=hthets, wlen=wlen, smr=smr, frac=frac, original=original, ntheta=hthets.shape[1], backproj=backproj)
elif xyt_filename.endswith('.fits'):
# IMPLEMENTATION2: FITS Table File
Hi = fits.Column(name='hi', format='1I', array=hi)
Hj = fits.Column(name='hj', format='1I', array=hj)
ntheta = hthets.shape[1]
Hthets = fits.Column(name='hthets', format=str(int(ntheta))+'E', array=hthets)
cols = fits.ColDefs([Hi, Hj, Hthets])
tbhdu = fits.BinTableHDU.from_columns(cols)
# Header Values for RHT Parameters
prihdr = fits.Header()
prihdr['WLEN'] = wlen
prihdr['SMR'] = smr
prihdr['FRAC'] = frac
prihdr['ORIGINAL'] = original
# Other Header Values
prihdr['NTHETA'] = ntheta
"""
Adding RA, DEC and other possible header values to your new header
First, the old header is loaded in from filepath.
You can then overwrite your desired header information by
adding/removing the keywords below.
"""
# Old header
my_header = fits.getheader(filepath)
# If you do not want header keywords from your old header, make this an empty list.
# If you do, just input them as strings: ['CTYPE1', 'CRVAL1'] etc.
header_keywords = []
if len(header_keywords) > 0:
for keyword in header_keywords:
if keyword not in my_header:
print("Provided header keyword not in your old header. Please adjust variable header_keywords in function putXYT. Exiting...")
sys.exit()
prihdr[keyword] = my_header[keyword]
# Whole FITS File
prihdu = fits.PrimaryHDU(data=backproj, header=prihdr)
thdulist = fits.HDUList([prihdu, tbhdu])
thdulist.writeto(xyt_filename, output_verify='silentfix', overwrite=True, checksum=True)
#TODO__________________Compress Fits Files After Saving
else:
raise ValueError('Supported output filetypes in putXYT include: .npz and .fits only')
def getXYT(xyt_filename, match_only=False):
# Read in a .fits or .npz file containing the output of the RHT.
# If match_only is given, and a dictionary of Keys:
# This will return whether ALL keys are found in the data of the given file
# Else:
# This will return the image coordinates of significant linearity, and the theta power spectrum at those coords.
# This will return as two integer arrays of some_length, and an ntheta*some_length array of theta power
if not os.path.isfile(xyt_filename):
# Fast Failure Case - This file does not exist.
if match_only:
return False
else:
raise ValueError('Input xyt_filename in getXYT matches no existing file')
else:
# Attempts to extract header information for Matching, or else the data itself
if xyt_filename.endswith('.npz'):
# Allows very large files to be read in.
data = np.load(xyt_filename, mmap_mode='r')
if match_only:
try:
return all([ match_only[x] == data[str.lower(x)] for x in list(match_only.keys()) ])
except KeyError:
return False
Hi = data['hi']
Hj = data['hj']
Hthets = data['hthets']
elif xyt_filename.endswith('.fits'):
hdu_list = fits.open(xyt_filename, mode='readonly', memmap=True, save_backup=False, checksum=True) #Allows for reading in very large files!
header = hdu_list[0].header
if match_only:
try:
return all([ match_only[x] == header[str.upper(x)] for x in list(match_only.keys()) ])
except KeyError:
return False
data = hdu_list[1].data
Hi = data['hi']
Hj = data['hj']
Hthets = data['hthets']
else:
raise ValueError('Supported input types in getXYT include .npz and .fits only')
rebuild = None
# Formats output properly
if rebuild and filepath is not None:
# Can recreate an entire 3D array of mostly 0s.
data = getData(filepath)
datay, datax = data.shape
ntheta = Hthets[0].shape
if BUFFER:
xyt = np.memmap(tempfile.TemporaryFile(), dtype=DTYPE, mode='w+', shape=(datay, datax, ntheta))
xyt.fill(0.0)
else:
print('Warning: Reconstructing very large array in memory! Set BUFFER to True!')
xyt = np.zeros((datay, datax, ntheta))
coords = list(zip(Hj, Hi))
for c in range(len(coords)):
j,i = coords[c]
xyt[j,i,:] = Hthets[c]
return xyt
else:
# Returns the sparse, memory mapped form only.
return Hi, Hj, Hthets
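# Illustrative round-trip sketch for putXYT/getXYT (never called here; file names are hypothetical).
# Note that the .fits branch of putXYT re-reads the header of `filepath`, so it must point to an
# existing FITS file when writing .fits output.
def _example_xyt_roundtrip(filepath='example.fits', xyt_filename='example_xyt00.fits'):
    ntheta = ntheta_w(WLEN)
    hi = np.array([10, 11])                             # x-coordinates of significant pixels
    hj = np.array([20, 21])                             # y-coordinates of significant pixels
    hthets = np.random.rand(2, ntheta).astype(DTYPE)    # per-pixel theta power spectra
    putXYT(filepath, xyt_filename, hi, hj, hthets, WLEN, SMR, FRAC, ORIGINAL)
    # Header-only check: do the stored RHT parameters match these values?
    params_match = getXYT(xyt_filename, match_only={'WLEN': WLEN, 'SMR': SMR,
                                                    'FRAC': FRAC, 'ORIGINAL': ORIGINAL})
    # Full read: sparse coordinates and their theta power spectra
    hi2, hj2, hthets2 = getXYT(xyt_filename)
    return params_match, hi2, hj2, hthets2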
def bad_pixels(data):
# Returns an array of the same shape as data
# NaN values MUST ALWAYS be considered bad.
# Bad values become 1, all else become 0
data = np.array(data, np.float) #TODO________________________Double Check This?
# IMPLEMENTATION1: Do Comparisons which are VERY different depending on boolean choices .
try:
if BAD_INF:
if BAD_0:
if BAD_Neg:
return np.logical_or(np.logical_not(np.isfinite(data)), np.less_equal(data, 0.0))
else:
return np.logical_or(np.logical_not(np.isfinite(data)), np.equal(data, 0.0))
else:
if BAD_Neg:
return np.logical_or(np.logical_not(np.isfinite(data)), np.less(data, 0.0))
else:
return np.logical_not(np.isfinite(data))
else:
if BAD_0:
if BAD_Neg:
return np.logical_or(np.isnan(data), np.less_equal(data, 0.0))
else:
return np.logical_not(np.nan_to_num(data)) #(Nans or 0) ---> (0) ---> (1)
else:
if BAD_Neg:
return np.logical_or(np.isnan(data), np.less(data, 0.0))
else:
return np.isnan(data)
'''
#IMPLEMENTATION2: Map values determined by flags into the data array
not_that = np.zeros_like(data)
infs = np.empty_like(data).fill(BAD_INF)
zeros = np.empty_like(data).fill(BAD_0)
negs = np.empty_like(data).fill(BAD_Neg)
isinf = np.where(np.isinf(data), infs, not_that)
iszero = np.where(np.logical_not(data), zeros, not_that)
isneg = np.where(np.less(0.0), negs, not_that)
return np.logical_or(np.isnan(data), np.logical_or(isinf, np.logical_or(iszero, isneg)))
'''
except:
# IMPLEMENTATION3: Give up?
print('Unable to properly mask data in bad_pixels()...')
return data.astype(np.bool)
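# Comment-only illustration of the default flags (BAD_INF=True, BAD_0=False, BAD_Neg=False):
#   >>> bad_pixels(np.array([1.0, 0.0, -1.0, np.nan, np.inf]))
#   array([False, False, False,  True,  True])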
def all_within_diameter_are_good(data, diameter):
assert diameter%2
r = int(np.int(diameter/2))
# Base case, 'assume all pixels are bad'
mask = np.zeros_like(data)
# Edge case, 'any pixel not within r of the edge might be ok'
datay, datax = data.shape
mask[r:datay-r, r:datax-r] = 1
# Identifiably bad case, 'all pixels within r of me are not bad'
circle = circ_kern(diameter)
y_arr, x_arr = np.nonzero(circle)
y_arr = y_arr - r
x_arr = x_arr - r
# IMPLEMENTATION1: Zero any mask pixel within r of a bad pixel
update_progress(0.0)
coords = list(zip(*np.nonzero(bad_pixels(data))))
N = len(coords)
for c in range(N):
j,i = coords[c]
x = (x_arr + i).astype(np.int).clip(0, datax-1)
y = (y_arr + j).astype(np.int).clip(0, datay-1)
mask[y, x] = 0
update_progress((c+1)/float(N), message='Masking:', final_message='Finished Masking:')
'''
#IMPLEMENTATION2: For each good pixel, 'Not Any Bad pixels near me'
update_progress(0.0)
coords = zip(*np.nonzero(mask))
for c in range(len(coords)):
j,i = coords[c]
x = (x_arr + i).astype(np.int).clip(0, datax-1)
y = (y_arr + j).astype(np.int).clip(0, datay-1)
mask[j][i] = np.logical_not(np.any(bad_pixels( data[y, x] )))
update_progress((c+1)/float(N), message='Masking:', final_message='Finished Masking:')
'''
return mask
def getData(filepath):
# Reads in data for images from various sources
# Supports .fits, .npy, and PIL formats
try:
# Reading Data
if filepath.endswith('.fits'):
# Fits file handling
hdu = fits.open(filepath, memmap=True)[0] #TODO___________________Assumes all data is in first HDU
data = hdu.data
elif filepath.endswith('.npy'):
# Numpy file handling
data = np.load(filepath, mmap_mode='r')
elif filepath.endswith('.npz'):
data = np.load(filepath, mmap_mode='r')[0] #TODO___________________Assumes data in first ndarray is 2D
else:
data = scipy.ndimage.imread(filepath, flatten=True)[::-1] #Makes B/W array, reversing y-coords
except:
# Failure Reading Data
print('Failure in getData({})... Returning'.format(filepath))
return None
return data
def getMask(data, smr=SMR, wlen=WLEN):
# Makes proper masks for images from data
# smr_mask masks any pixel within smr of any bad pixels, and the edge
# wlen_mask masks any pixel within wlen of any bad pixels, and the edge
# Cuts away smr radius from bads, then wlen from bads
smr_mask = all_within_diameter_are_good(data, 2*smr+1)
    nans = np.full(data.shape, np.nan)  # np.empty(...).fill(...) returns None, so build the NaN array explicitly
wlen_mask = all_within_diameter_are_good( np.where(smr_mask, data,
nans), wlen)
return smr_mask, wlen_mask
# Performs a circle-cut of given diameter on inkernel.
# Outkernel is 0 anywhere outside the window.
def circ_kern(diameter):
assert diameter%2
r = diameter//2 #int(np.floor(diameter/2))
mnvals = np.indices((diameter, diameter)) - r
rads = np.hypot(mnvals[0], mnvals[1])
return np.less_equal(rads, r).astype(np.int)
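# Comment-only illustration: circ_kern(5) keeps the radius-2 disk inside a 5x5 window.
#   >>> circ_kern(5)
#   array([[0, 0, 1, 0, 0],
#          [0, 1, 1, 1, 0],
#          [1, 1, 1, 1, 1],
#          [0, 1, 1, 1, 0],
#          [0, 0, 1, 0, 0]])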
# Unsharp mask. Returns binary data.
def umask(data, radius, smr_mask=None):
assert data.ndim == 2
kernel = circ_kern(2*radius+1)
outdata = scipy.ndimage.filters.correlate(data, kernel)
# Correlation is the same as convolution here because kernel is symmetric
# Our convolution has scaled outdata by sum(kernel), so we will divide out these weights.
kernweight = np.sum(kernel)
subtr_data = data - outdata/kernweight
# Convert to binary data
bindata = np.greater(subtr_data, 0.0)
if smr_mask is None:
return bindata
else:
return np.logical_and(smr_mask, bindata)
def fast_hough(in_arr, xyt):
assert in_arr.ndim == 2
assert xyt.ndim == 3
assert in_arr.shape[0] == xyt.shape[0]
assert in_arr.shape[1] == xyt.shape[1]
# IMPLEMENTATION0: Let python figure out the implementation. (FASTEST)
return np.einsum('ijk,ij', xyt, in_arr)
'''
if hout == None:
return np.einsum('ijk,ij', xyt, in_arr) #, dtype=np.int)
else:
assert hout.ndim == 1
assert hout.shape[0] == xyt.shape[2]
np.einsum('ijk,ij', xyt, in_arr, out=hout)
'''
# IMPLEMENTATION1: Copy 2D array into 3D stack, and multiply by other stack (SLOW)
# cube = np.repeat(in_arr[:,:,np.newaxis], repeats=ntheta, axis=2)*xyt
# IMPLEMENTATION2: Broadcast 2D array against 3D stack and multiply (FAST)
# cube = np.multiply( in_arr.reshape((in_arr.shape[0],in_arr.shape[1],1)), xyt).astype(np.float, copy=False)
# IMPLEMENTATION3: Broadcast 2D array against 3D stack and AND them together (VERY FAST)
# assert in_arr.dtype == np.bool_
# assert xyt.dtype == np.bool_
# cube = np.logical_and( in_arr.reshape((in_arr.shape[0],in_arr.shape[1],1)), xyt)
# return np.sum(np.sum( cube , axis=0, dtype=np.int), axis=0, dtype=np.float) #WORKS FAST AND DIVIDES PROPERLY
# return np.sum(cube, axis=(1,0), dtype=np.int)
def houghnew(image, cos_theta, sin_theta):
assert image.ndim == 2
assert cos_theta.ndim == 1
assert sin_theta.ndim == 1
assert len(cos_theta) == len(sin_theta)
assert image.shape[0] == image.shape[1]
# Midpoint is wlen/2
wmid = image.shape[0]//2
# Compute the distance from each cell.
nr_bins = np.ceil(np.hypot(*image.shape))
# Allocate the output data.
out = np.zeros((int(nr_bins), len(cos_theta)), dtype=np.int)
# Find the indices of the non-zero values in the input data.
y, x = np.nonzero(image)
# x and y can be large, so we can't just broadcast to 2D arrays as we may run out of memory.
# Instead we process one vertical slice at a time.
for i, (cT, sT) in enumerate(zip(cos_theta, sin_theta)):
# Compute the base distances
distances = (x - wmid) * cT + (y - wmid) * sT
# Round the distances to the nearest integer and shift them to a nonzero bin.
shifted = np.round(distances) + nr_bins/2
# Cast the shifted values to ints to use as indices
indices = shifted.astype(np.int)
# Use bin count to accumulate the HT coefficients
bincount = np.bincount(indices)
# Assign the proper values to the out array
out[:len(bincount), i] = bincount
return out[np.int(nr_bins/2), :]
def all_thetas(wlen, theta, original):
assert theta.ndim == 1
assert wlen%2
# Initialize a circular window of ones
window = circ_kern(wlen)
assert window.shape[0] == window.shape[1]
if not original:
window[:,:wlen//2] = 0
# Precompute the sin and cos of the angles.
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
# Makes prism; output has dimensions (y, x, theta).
ntheta = len(theta)
#outshape = (wlen, wlen, ntheta)
out = np.zeros(window.shape+(ntheta,), np.int)
coords = list(zip( *np.nonzero(window)))
for (j, i) in coords:
# At each x/y value, create new single-pixel data.
w_1 = np.zeros_like(window)
w_1[j,i] = 1
out[j, i, :] = houghnew(w_1, cos_theta, sin_theta)
if not original:
out[:,:,ntheta//2:] = out[::-1,::-1,ntheta//2:]
out[:wlen//2+1,:,ntheta//2] = 0
out[wlen//2:,:,0] = 0
return out
def theta_rht(theta_array, original, uv=False):
# Maps an XYT cube into a 2D Array of angles- weighted by their significance.
if original:
thetas = np.linspace(0.0, np.pi, len(theta_array), endpoint=False, retstep=False)
ys = theta_array*np.sin(2.0*thetas)
xs = theta_array*np.cos(2.0*thetas)
# Clark, Peek, & Putman: Equation (7)
rough_angle = 0.5*np.arctan2(np.sum(ys), np.sum(xs))
# Clark, Peek, & Putman: Equation (8)
angle = np.pi-math.fmod(rough_angle+np.pi, np.pi)
else:
thetas = np.linspace(0.0, 2*np.pi, len(theta_array), endpoint=False, retstep=False)
ys = theta_array*np.sin(thetas)
xs = theta_array*np.cos(thetas)
angle = np.arctan2(np.sum(ys), np.sum(xs))
if not uv:
# Returns the <theta>_rht as described by Clark, Peek, & Putman for a given array.
return angle
else:
# Maps an array of theta power to an angle, and the components of one unit vector.
# Designed for use with plt.quiver()
return angle, np.cos(angle), np.sin(angle)
def buffershape(ntheta, filesize=FILECAP):
# Shape of maximum sized array that can fit into a single buffer file.
ntheta = int(ntheta)
filesize = int(filesize)
if not 0 < filesize <= FILECAP:
print('Chosen buffer size exceeds existing limit. Reset to', str(FILECAP), 'Bytes')
filesize = FILECAP
    bits_per_element_in_bits = np.dtype(DTYPE).itemsize * 8  # itemsize is in bytes, so convert to bits
    bits_per_file_in_bits = filesize*8
    elements_per_file_in_elements = int(bits_per_file_in_bits // bits_per_element_in_bits)
length_in_elements = int(elements_per_file_in_elements // ntheta)
if length_in_elements <= 0:
print('In buffershape, ntheta has forced your buffer size to become larger than', filesize, 'Bytes')
length_in_elements = 1
return (length_in_elements, ntheta)
def concat_along_axis_0(memmap_list):
# Combines memmap objects of the same shape, except along axis 0,
# by leaving them all on disk and appending them sequentially.
if len(memmap_list) == 0:
raise ValueError('Failed to buffer any data!')
elif len(memmap_list) == 1:
return memmap_list[0]
else:
'''
#IMPLEMENTATION1: Make a new large memmapped file and sequentially dump data in
lengths = [memmap.shape[0] for memmap in memmap_list]
shapes = [memmap.shape[1:] for memmap in memmap_list]
assert all([x==shapes[0] for x in shapes[1:]])
big_memmap = np.memmap(os.path.join(temp_dir, +'rht.dat'), dtype=DTYPE, mode='r+', shape=(sum(lengths), *shapes[0]) )
lengths.insert(0, sum(lengths))
for i in range(len(memmap_list)):
temp_file = memmap_list[i]
big_memmap[ sum(lengths[0:i]) : sum(lengths[0:i+1])-1, ...] = tempfile[:, ...]
temp_file.flush()
temp_file.close()
del temp_file
return big_memmap
'''
# IMPLEMENTATION2: Append data to first given memmaped file, then delete and repeat
seed = memmap_list[0]
others = memmap_list[1:]
lengths = [memmap.shape[0] for memmap in others]
shapes = [memmap.shape[1:] for memmap in others]
assert all([x==seed.shape[1:] for x in shapes])
        bytes_per_element_in_bytes = np.dtype(DTYPE).itemsize  # itemsize is already in bytes
        elements_per_shape_in_elements = np.multiply.reduce(seed.shape[1:])
        bytes_per_shape_in_bytes = elements_per_shape_in_elements * bytes_per_element_in_bytes
def append_memmaps(a, b):
a.flush()
a.close()
c = np.memmap(a.filename, dtype=DTYPE, mode='r+', offset=bytes_per_shape_in_bytes*a.shape[0], shape=b.shape )
# Depends on offset correctly allocating new space at end of file
c[:,...] = b[:,...]
b.flush()
b.close()
del b
return c
        return reduce(append_memmaps, others, seed)  # functools.reduce takes the initial value positionally
def window_step(data, wlen, frac, smr, original, smr_mask, wlen_mask,
xyt_filename, message, filepath):
assert frac == float(frac)
assert 0 <= frac <= 1
assert wlen == int(wlen)
assert wlen > 0
# wlen must be an odd number to ensure the circle has a single-pixel center.
assert wlen%2
assert smr == int(smr)
assert smr > 0
# Needed values
r = wlen//2
ntheta = ntheta_w(wlen)
if original:
theta, dtheta = np.linspace(0.0, np.pi, ntheta, endpoint=False,
retstep=True)
else:
# For the dRHT, we maintain ntheta by doubling dtheta.
theta, dtheta = np.linspace(0.0, 2*np.pi, ntheta, endpoint=False,
retstep=True)
# Cylinder of all lit pixels along a theta value
xyt = all_thetas(wlen=wlen, theta=theta, original=original)
xyt.setflags(write=0)
# Unsharp masks the whole data set
masked_udata = umask(data=data, radius=smr, smr_mask=smr_mask)
masked_udata.setflags(write=0)
# Hough transform of same-sized circular window of 1's
h1 = fast_hough(circ_kern(wlen), xyt)
h1.setflags(write=0)
# Local function calls are faster than globals
Hthets = []
Hi = []
Hj = []
htapp = Hthets.append
hiapp = Hi.append
hjapp = Hj.append
nptruediv = np.true_divide
npge = np.greater_equal
# Bonus Backprojection Creation
backproj = np.zeros_like(data)
if BUFFER:
# Preparing to write hout to file during operation so it does not over-fill RAM.
temp_dir = tempfile.mkdtemp()
# List of memmap objects.
temp_files = []
buffer_shape = buffershape(ntheta)
def next_temp_filename():
return os.path.join(temp_dir, 'rht'+ str(len(temp_files)) + '.dat')
#print 'Temporary files in:', temp_dir
# Number of RHT operations that will be performed, and their coordinates
update_progress(0.0)
coords = list(zip( *np.nonzero( wlen_mask)))
N = len(coords)
for c in range(N):
j,i = coords[c]
h = fast_hough(masked_udata[j-r:j+r+1, i-r:i+r+1], xyt)
# Original RHT Implementation Subtracts Threshold From All Theta-Power Spectrums
hout = nptruediv(h, h1) - frac
hout *= npge(hout, 0.0)
# Deprecated Implementation Leaves Theta-Power Spectrum AS IS
#hout = nptruediv(h, h1)
#hout *= npge(hout, frac)
if np.any(hout):
htapp(hout)
hiapp(i)
hjapp(j)
backproj[j][i] = np.sum(hout)
if BUFFER and len(Hthets) == buffer_shape[0]:
# Creates full memmap object
temp_files.append( np.memmap( next_temp_filename(), dtype=DTYPE, mode='w+', shape=buffer_shape ))
# Convert list to array
theta_array = np.array(Hthets, dtype=DTYPE)
# Write array to last memmapped object in list
temp_files[-1][:] = theta_array[:]
# Reset Hthets
Hthets = []
update_progress((c+1)/float(N), message=message, final_message=message)
#End
if not BUFFER:
# Save data
putXYT(filepath, xyt_filename, np.array(Hi), np.array(Hj), np.array(Hthets), wlen, smr, frac, original=original, backproj=np.divide(backproj, np.amax(backproj)) )
return True
else:
if len(Hthets) > 0:
# Create small memmap object
temp_files.append( np.memmap( next_temp_filename(), dtype=DTYPE, mode='w+', shape=(len(Hthets), ntheta) ))
# Write array to last memmapped object in list
theta_array = np.array(Hthets, dtype=DTYPE)
temp_files[-1][:] = theta_array[:]
#print 'Converting list of buffered hthet arrays into final XYT cube'
# Combine memmap objects sequentially
converted_hthets = concat_along_axis_0(temp_files)
converted_hthets.flush()
putXYT(filepath, xyt_filename, np.array(Hi), np.array(Hj), converted_hthets, wlen, smr, frac, original=original, backproj=np.divide(backproj, np.amax(backproj)) ) #Saves data
del converted_hthets
def rmtree_failue(function, path, excinfo):
try:
#os.listdir(temp_dir):
for obj in temp_files:
#q = file(obj)
#q.close()
#del q
obj.close()
os.remove(obj)
os.removedirs(temp_dir)
except:
print('Failed to delete temporary files:', path)
shutil.rmtree(temp_dir, ignore_errors=False, onerror=rmtree_failue)
return True
#-----------------------------------------------------------------------------------------
# Interactive Functions
#-----------------------------------------------------------------------------------------
def rht(filepath, force=False, original=ORIGINAL, wlen=WLEN, frac=FRAC,
smr=SMR, data=None):
"""
filepath: String path to source data, which will have the Rolling Hough
Transform applied - if data is input (see below) then filepath is not
read but is just used to construct the name of the output filename
force: Boolean indicating if rht() should still be run, even when output
exists for these inputs
original: Boolean if one should use the original Rolling Hough Transform
wlen: Diameter of a 'window' to be evaluated at one time
frac: Fraction in [0.0, 1.0] of pixels along one angle that must be 'lit
up' to be counted
smr: Integer radius of gaussian smoothing kernel to be applied to an data
data: Input data array (image) - alternative to giving filepath as source
of the data
Saves:
X-Y-Theta Power Arrays
Backprojection
return: Boolean, if the function succeeded
"""
assert frac == float(frac)
assert 0 <= frac <= 1
assert wlen == int(wlen)
assert wlen > 0
assert wlen%2
assert smr == int(smr)
assert smr > 0
if not is_valid_file(filepath):
# Check to see if a file should have the rht applied to it.
print('Invalid filepath encountered in rht('+filepath+')...')
return False
try:
xyt_filename = xyt_name_factory(filepath, wlen=wlen, smr=smr,
frac=frac, original=original)
if (not force) and os.path.isfile(xyt_filename):
# If the program recognizes that the RHT has already been
# completed, it will not rerun. This can overridden by setting
# the 'force' flag.
return True
if data is None:
print('1/4:: Retrieving Data from:', filepath)
data = getData(filepath)
else:
print('1/4:: Getting Mask for Data')
smr_mask, wlen_mask = getMask(data, smr=smr, wlen=wlen)
datay, datax = data.shape
print('2/4:: Size: {} x {}, Wlen: {}, Smr: {}, Frac: {},'.format(
datax,datay,wlen,smr,frac),
            'Original (half-polar) RHT: {}'.format(original))
message = '3/4:: Running RHT...'
success = window_step(data=data, wlen=wlen, frac=frac, smr=smr,
original=original, smr_mask=smr_mask, wlen_mask=wlen_mask,
xyt_filename=xyt_filename, message=message,
filepath = filepath)
print('4/4:: Successfully Saved Data As', xyt_filename)
return success
except:
raise #__________________________________________________________________________________________________________ Raise
return False
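# Minimal usage sketch for rht() (hypothetical input file; the helper is never called at import time).
def _example_rht_run(filepath='example.fits'):
    # Default parameters: writes e.g. example_xyt00.fits next to the input
    ok = rht(filepath)
    # Force a re-run with a wider window and stronger smoothing (wlen must stay odd)
    ok_wide = rht(filepath, force=True, wlen=101, smr=25, frac=0.70)
    return ok, ok_wide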
def interpret(filepath, force=False, wlen=WLEN, frac=FRAC, smr=SMR, original=ORIGINAL):
'''
filepath: String path to source data, which will have the Rolling Hough Transform applied
force: Boolean indicating if rht() should still be run, even when output exists for these inputs
original: Boolean if one should use the original Rolling Hough Transform
wlen: Diameter of a 'window' to be evaluated at one time
frac: Fraction in [0.0, 1.0] of pixels along one angle that must be 'lit up' to be counted
    smr: Integer radius of gaussian smoothing kernel to be applied to the data
Displays:
?
Saves:
?
return: Boolean, if the function succeeded
'''
    print('interpret() is currently in disrepair! Exiting to avoid unpleasant results!')
return False
# Make sure relevant files are present.
filename = filename_from_path(filepath)
xyt_filename = filename + '_xyt.fits'
backproj_filename = filename + '_backproj.npy'
spectrum_filename = filename + '_spectrum.npy'
required_files = [backproj_filename, spectrum_filename, xyt_filename]
any_missing = any([not os.path.isfile(f) for f in required_files])
if any_missing or force:
# Interprets file, after clearing old output, since that needs to be done.
for f in required_files:
try:
# Try deleting obsolete output.
os.remove(f)
except:
# Assume it's not there.
continue
interpret(filepath, force=force, wlen=wlen, frac=frac, smr=smr, original=original)
# Load in relevant files and data
data = getData(filepath)
backproj = np.load(backproj_filename).astype(np.float)
spectrum = np.load(spectrum_filename).astype(np.float)
hi, hj, hthets = getXYT(xyt_filename)
# Gather parameters from that data
ntheta = len(spectrum)
datay, datax = data.shape
# Produce Specific Plots
masked_udata = umask(data, smr)
log = np.log(np.where( np.isfinite(data), data, np.ones_like( data) ))
U = np.zeros_like(hi)
V = np.zeros_like(hj)
C = np.zeros((len(U)), dtype=np.float)
coords = list(zip(hi, hj))
for c in range(len(coords)):
C[c], U[c], V[c] = theta_rht(hthets[c], original, uv=True)
C *= | np.isfinite(C) | numpy.isfinite |
import copy
import time
from collections import OrderedDict
import torch
from data.dataloader import local_client_dataset, test_dataset
from models.utils import *
from utils.train_helper import validate_one_model
from utils.sampling import *
import numpy as np
from multiprocessing import Process
import time
def return_state_dict(network):
"""
save model to state_dict
"""
feat_model = {k: v.cpu() for k, v in network["feat_model"].state_dict().items()}
classifier = {k: v.cpu() for k, v in network["classifier"].state_dict().items()}
return {"feat_model": feat_model, "classifier": classifier}
def load_state_dict(network, state_dict):
"""
restore model from state_dict
"""
network["feat_model"].load_state_dict(state_dict["feat_model"])
network["classifier"].load_state_dict(state_dict["classifier"])
# for name, param in state_dict["feat_model"].items():
# print(name, "\t", param.size())
return network
def check_status(status_list, selected_idx, target_status):
"""
0. original status (1st FL round)
1. server finished sending: server_network --> mp_list
2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list
3. server received: mp_list --> networks[i]
--> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status change to 1
---
Return True: when all clients meet conditions, else False
"""
tmp = np.array(status_list)
if (tmp[selected_idx] == target_status).all() == True:
return True
else:
return False
def set_status(status_list, selected_idx, target_status):
"""
see function: check_status
"""
if type(selected_idx) is int:
selected_idx = [selected_idx]
for i in selected_idx:
status_list[i] = target_status
# print(f"set_status {target_status}")
def difference_models_norm_2(model_1, model_2):
"""
Return the norm 2 difference between the two model parameters. Used in FedProx.
"""
tensor_1_backbone = list(model_1["feat_model"].parameters())
tensor_1_classifier = list(model_1["classifier"].parameters())
tensor_2_backbone = list(model_2["feat_model"].parameters())
tensor_2_classifier = list(model_2["classifier"].parameters())
diff_list = [torch.sum((tensor_1_backbone[i] - tensor_2_backbone[i])**2) for i in range(len(tensor_1_backbone))]
diff_list.extend([torch.sum((tensor_1_classifier[i] - tensor_2_classifier[i])**2) for i in range(len(tensor_1_classifier))])
norm = sum(diff_list)
return norm
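# Sketch of how the proximal term above would typically enter a client's local objective
# when self.fedprox is True (hypothetical helper; the concrete usage inside train_lt is an assumption).
def fedprox_objective(task_loss, local_model, global_model, mu=0.05):
    # FedProx: task loss plus (mu / 2) * ||w_local - w_global||^2
    return task_loss + (mu / 2.0) * difference_models_norm_2(local_model, global_model)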
class Fed_server(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self, init_network, criterion, config, per_client_data,
per_client_label, idx_per_client_train,
test_data, test_label, state_list=None, state_dict_list=None, idx=None
):
super(Fed_server, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = [], [], [], []
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights/self.client_weights.sum()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.server_network = copy.deepcopy(init_network)
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Server (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
""" Server does not need
# list of optimizer_dict. One optimizer for one network
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
"""
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
def local_train(self, selected_idx):
"""
server-side code
"""
# self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(self.server_network) # model transfer
set_status(self.status_list, selected_idx, 1)
if self.local_ep > 10: # is local training
print("Waiting")
# wait until all clients returning the model
while check_status(self.status_list, selected_idx, 2) is False:
time.sleep(0.1)
# mp_list --> self.networks (copys of client models on the server). Prepare for aggregation.
for i in selected_idx:
load_state_dict(self.networks[i], self.state_dict_list[i]) # model transfer
print("===> Local training finished")
def aggregation(self, selected_idx, mode):
"""
server-side code: aggregation
"""
if mode in ["fedavg", "fedavgm", "fedbn", "fedprox"]:
self.aggregate_layers(selected_idx, mode, backbone_only=False)
elif mode == "fedavg_fs":
opt = self.config["fl_opt"]
backbone_only, imprint, spread_out = opt["backbone_only"], opt["imprint"], opt["spread_out"]
self.aggregate_layers(selected_idx, "fedavg", backbone_only=backbone_only)
if imprint:
self.imprint(selected_idx)
if spread_out:
self.spread_out()
# model: self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(self.server_network) # model transfer
set_status(self.status_list, selected_idx, 0) # back to original
print("===> Aggregation finished")
def aggregate_layers(self, selected_idx, mode, backbone_only):
"""
backbone_only: choose to only aggregate backbone
"""
weights_sum = self.client_weights[selected_idx].sum()
with torch.no_grad():
if mode in ["fedavg", "fedprox"]:
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if 'num_batches_tracked' in key:
# num_batches_tracked is a non trainable LongTensor
# and num_batches_tracked are the same for
# all clients for the given datasets
layer.data.copy_(self.networks[0][net_name].state_dict()[key])
else:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx]/weights_sum
temp += weight * self.networks[idx][net_name].state_dict()[key]
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedbn": # https://openreview.net/pdf?id=6YEQUn0QICG
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if 'bn' not in key:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx]/weights_sum
temp += weight * self.networks[idx][net_name].state_dict()[key]
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedavgm":
raise NotImplementedError
def evaluate_global(self, train_dataset=None, test_dataset=None):
"""
Accuracy of the global model and all classes
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.server_network, train_dataset, self.device, per_cls_acc=True)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.server_network, test_dataset, self.device, per_cls_acc=True)
print("===> Evaluation finished\n")
return train_loss_per_cls, train_acc_per_cls, test_loss_per_cls, test_acc_per_cls
def evaluate_global_all(self, train_dataset=None, test_dataset=None):
"""
Accuracy of models of all nodes and all classes
Return: all_results
shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc)
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
all_results = [None for i in range(self.num_clients)]
for idx in range(self.num_clients):
# evaluate on test set: per-class loss/acc
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.networks[idx], train_dataset, self.device, per_cls_acc=True)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.networks[idx], test_dataset, self.device, per_cls_acc=True)
all_results[idx] = train_loss_per_cls, train_acc_per_cls, test_loss_per_cls, test_acc_per_cls
print(f"===> Evaluation finished{idx}\n")
all_results = np.array(all_results).transpose(1,0,2)
return all_results
class Fed_client(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self, init_network, criterion, config, per_client_data,
per_client_label, idx_per_client_train,
test_data, test_label, state_list=None, state_dict_list=None, idx=None
):
super(Fed_client, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = [], [], [], []
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.device = config["device_client"][idx]
self.server_network = copy.deepcopy(init_network)
self.balanced_loader = config["fl_opt"]["balanced_loader"]
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
if config["fl_opt"]["aggregation"] == "fedprox":
self.fedprox = True
else:
self.fedprox = False
self.mu = 0.05
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights/self.client_weights.sum()
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Client {idx} (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
# list of network and optimizer_dict. One optimizer for one network.
if client_i != self.client_idx:
self.networks.append(None)
self.optimizers.append(None)
self.optimizers_stage2.append(None)
else:
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
""" clients do not need
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
"""
def run(self):
"""
client-side code
"""
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
self.networks[self.client_idx]["feat_model"].to(self.device)
self.networks[self.client_idx]["classifier"].to(self.device)
while(1):
while check_status(self.status_list, self.client_idx, 1) is False:
time.sleep(0.1)
# model: mp_list --> server_network
load_state_dict(self.server_network, self.state_dict_list[self.client_idx]) # model transfer
self.train_lt(self.client_idx) # local model updating
# self.networks[i] --> mp_list
self.state_dict_list[self.client_idx] = return_state_dict(self.networks[self.client_idx]) # model transfer
set_status(self.status_list, self.client_idx, 2)
def train_lt(self, idx):
"""
client-side code
---
Argus:
- idx: the index in all clients (e.g., 50) or selected clients (e.g., 10).
If self.prefetch is true: the index in selected clients,
            If self.prefetch is false: the index in all clients
"""
idx_in_all = idx
# server broadcast the model to clients
"""
# optimizer will not work if use this, because optimizer needs the params from the model
# self.networks[idx_in_all] = copy.deepcopy(self.server_network)
"""
for net_name, net in self.server_network.items(): # feat_model, classifier
state_dict = self.networks[idx_in_all][net_name].state_dict()
for key, layer in net.state_dict().items():
state_dict[key].data.copy_(layer.data)
for net in self.networks[idx_in_all].values():
net.train()
for net in self.server_network.values():
net.train()
teacher = self.server_network
# torch.cuda.empty_cache()
"""
(Per-cls) Covariance Calculation
"""
if self.feat_aug:
# probability for augmentation for every class
max_num = max(self.local_num_per_cls[idx])
prob = torch.tensor([1.0-i/max_num for i in self.local_num_per_cls[idx]])
# obtain features and labels under eval mode
feat_list, label_list = [], []
# self.networks[idx_in_all]['feat_model'].eval()
for (imgs, labels, indexs) in self.train_loaders[idx]:
with torch.no_grad():
imgs = imgs.to(self.device)
feat_list.append(teacher['feat_model'](imgs).cpu())
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
# self.networks[idx_in_all]['feat_model'].train()
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor([unique_labels.index(i) for i in label_list]) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feats, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feats)
# calculate the variance
sampled_data, sample_label = [], []
per_cls_cov = []
for feats in feats_per_cls:
if len(feats) > 1:
per_cls_cov.append(np.cov(torch.stack(feats, 1).numpy()))
else:
per_cls_cov.append(np.zeros((feats[0].shape[0], feats[0].shape[0])))
per_cls_cov = | np.array(per_cls_cov) | numpy.array |
"""Class to perform duple-balanced hybrid-sampling."""
# Authors: Anon.
# License: MIT
# %%
import numbers
import numpy as np
from sklearn.utils import check_random_state
from sklearn.utils import _safe_indexing
from .base import BaseSampler
from ..utils._validation_param import check_pred_proba, check_type
from ..utils._validation import _deprecate_positional_args, check_target_type
# # For local test
# import sys
# sys.path.append("../..")
# from sampler.base import BaseSampler
# from utils._validation_param import check_pred_proba, check_type
# from utils._validation import _deprecate_positional_args, check_target_type
class DupleBalanceHybridSampler(BaseSampler):
_sampling_type = "hybrid-sampling"
@_deprecate_positional_args
def __init__(
self, *,
how='shem',
sampling_strategy="auto",
k_bins=5,
replacement=True,
random_state=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.k_bins = k_bins
self.replacement = replacement
self.random_state = random_state
self.how = how
def _check_X_y(self, X, y):
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X, y = self._validate_data(
X,
y,
reset=True,
accept_sparse=["csr", "csc"],
dtype=None,
force_all_finite=False,
)
return X, y, binarize_y
@_deprecate_positional_args
def fit_resample(self, X, y, *, sample_weight, **kwargs):
return super().fit_resample(X, y, sample_weight=sample_weight, **kwargs)
@_deprecate_positional_args
def _fit_resample(self, X, y, *,
y_pred_proba,
i_estimator:int,
total_estimator:int,
classes_,
sample_weight=None,):
# Check parameters
k_bins_ = check_type(self.k_bins, 'k_bins', numbers.Integral)
if k_bins_ <= 0:
raise ValueError(
f"'k_bins' must be an integer and > 0, got {k_bins_}."
)
self.k_bins_ = k_bins_
self.replacement_ = check_type(self.replacement, 'replacement', bool)
self.how_ = check_type(self.how, 'how', str)
n_samples, n_classes = X.shape[0], classes_.shape[0]
how = self.how_
# Check random_state and predict probabilities
random_state = check_random_state(self.random_state)
y_pred_proba = check_pred_proba(y_pred_proba, n_samples, n_classes, dtype=np.float64)
indexes = | np.arange(n_samples) | numpy.arange |
import numpy as np
import torch
from collections import Counter
import ipdb as pdb
from abc import ABC, abstractmethod
from src.utils.crl_generator import DFA
class TomitaLanguage(ABC):
def __init__(self, p, q):
self.p = p
self.q = q
self.sigma = ['0', '1']
self.n_letters = len(self.sigma)
@abstractmethod
def belongs_to_lang(self, seq):
pass
def generate_string(self,min_length, max_length):
string = ''
symbols = self.sigma + ['T']
while len(string) < max_length:
symbol = | np.random.choice(symbols, p = [self.p, self.q, 1-(self.p + self.q)]) | numpy.random.choice |
import shutil
import tempfile
from numpy import array, vstack
from numpy.testing import assert_array_almost_equal
from scipy.stats import ttest_ind
from thunder.decoding.uniclassify import MassUnivariateClassifier
from test_utils import PySparkTestCase
from thunder.rdds.series import Series
class ClassificationTestCase(PySparkTestCase):
def setUp(self):
super(ClassificationTestCase, self).setUp()
self.outputdir = tempfile.mkdtemp()
def tearDown(self):
super(ClassificationTestCase, self).tearDown()
shutil.rmtree(self.outputdir)
class TestMassUnivariateClassification(ClassificationTestCase):
"""Test accuracy of mass univariate classification on small
test data sets with either 1 or 2 features
"""
def test_massUnivariateClassificationTTest_1d(self):
"""Simple classification problem, 1d features"""
X = array([-1, -0.1, -0.1, 1, 1, 1.1])
labels = array([1, 1, 1, 2, 2, 2])
params = dict([('labels', labels)])
clf = MassUnivariateClassifier.load(params, "ttest")
# should match direct calculation using scipy
data = Series(self.sc.parallelize(zip([1], [X])))
result = clf.fit(data).values().collect()
groundTruth = ttest_ind(X[labels == 1], X[labels == 2])
assert_array_almost_equal(result[0], groundTruth[0])
def test_massUnivariateClassificationTTest_2d(self):
"""Simple classification problem, 2d features"""
X = array([-1, -2, -0.1, -2, -0.1, -2.1, 1, 1.1, 1, 1, 1.1, 2])
features = array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2])
samples = array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
labels = array([1, 1, 1, 2, 2, 2])
params = dict([('labels', labels), ('features', features), ('samples', samples)])
clf = MassUnivariateClassifier.load(params, "ttest")
# should match direct calculation using scipy
# test first feature only
data = Series(self.sc.parallelize(zip([1], [X])))
result = clf.fit(data, [[1]]).values().collect()
groundTruth = ttest_ind(X[features == 1][:3], X[features == 1][3:])
assert_array_almost_equal(result[0], groundTruth[0])
# test both features
result = clf.fit(data, [[1, 2]]).values().collect()
groundTruth = ttest_ind(vstack((X[features == 1][:3], X[features == 2][:3])).T,
vstack((X[features == 1][3:], X[features == 2][3:])).T)
assert_array_almost_equal(result[0][0], groundTruth[0])
def test_massUnivariateClassificationGNB_1d(self):
"""Simple classification problem, 1d features"""
X1 = array([-1, -1, -1.2, 1, 1, 1.2])
X2 = array([-1, -1, 1.2, 1, 1, 1.2])
labels = array([1, 1, 1, 2, 2, 2])
params = dict([('labels', labels)])
clf = MassUnivariateClassifier.load(params, "gaussnaivebayes", cv=0)
# should predict perfectly
data = Series(self.sc.parallelize(zip([1], [X1])))
result = clf.fit(data).values().collect()
assert_array_almost_equal(result[0], [1.0])
# should predict all but one correctly
data = Series(self.sc.parallelize(zip([1], [X2])))
result = clf.fit(data).values().collect()
assert_array_almost_equal(result[0], [5.0/6.0])
def test_massUnivariateClassificationGNB_2d(self):
"""Simple classification problem, 2d features"""
X = array([-1, 1, -2, -1, -3, -2, 1, 1, 2, 1, 3, 2])
features = array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2])
samples = array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
from typing import Any, Dict, List
import numpy as np
from scipy.optimize import linear_sum_assignment
from .tracklet import Tracklet
class Tracker:
def __init__(self, distance_threshold: float = 0.05) -> None:
"""
An object to maintain and update tracklets
distance_threshold
A cost score beyond which potential pairs are eliminated
"""
self.active_tracklets: List[Tracklet] = []
self.finished_tracklets: List[Tracklet] = []
self.previous_boxes: np.ndarray = np.array([])
self.distance_threshold: float = distance_threshold
self.frame_index: int = 0
def track(self, boxes: np.ndarray) -> None:
"""
Update tracklets with boxes from a new frame
boxes
A `(n_boxes, 4)` array of bounding boxes
"""
prev_indices = boxes_indices = []
if len(boxes) > 0 and len(self.previous_boxes) > 0:
# Pairwise cost: euclidean distance between boxes
cost = np.linalg.norm(self.previous_boxes[:, None] - boxes[None], axis=-1)
# Object matching
prev_indices, boxes_indices = linear_sum_assignment(cost)
mask = cost[prev_indices, boxes_indices] < self.distance_threshold
prev_indices = prev_indices[mask]
boxes_indices = boxes_indices[mask]
# Add matches to active tracklets
for prev_idx, box_idx in zip(prev_indices, boxes_indices):
self.active_tracklets[prev_idx].add_box(boxes[box_idx])
# Finalize lost tracklets
lost_indices = set(range(len(self.active_tracklets))) - set(prev_indices)
for lost_idx in sorted(lost_indices, reverse=True):
self.finished_tracklets.append(self.active_tracklets.pop(lost_idx))
# Activate new tracklets
new_indices = set(range(len(boxes))) - set(boxes_indices)
for new_idx in new_indices:
self.active_tracklets.append(Tracklet(self.frame_index, boxes[new_idx]))
# "Predict" next frame for comparison
if len(self.active_tracklets):
self.previous_boxes = np.stack(
[tracklet.previous_box for tracklet in self.active_tracklets]
)
else:
self.previous_boxes = np.array([])
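# Illustrative usage sketch: feed two frames of detections to the tracker.
# The box values below are made up and assume normalized (x1, y1, x2, y2)
# coordinates, so the default distance_threshold of 0.05 is a sensible scale;
# the Tracklet interface is assumed to be the one used by Tracker above.
if __name__ == "__main__":
    tracker = Tracker(distance_threshold=0.05)
    frames = [
        np.array([[0.10, 0.10, 0.20, 0.20], [0.70, 0.70, 0.80, 0.80]]),
        np.array([[0.11, 0.11, 0.21, 0.21], [0.71, 0.71, 0.81, 0.81]]),
    ]
    for boxes in frames:
        tracker.track(boxes)
    print(f"{len(tracker.active_tracklets)} active tracklets")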
import argparse
import numpy
import common_tools as ct
import pdb
# Example usage: run randomization experiments and store the stats for North America, large mammals, 1000 data copies randomized with the Curveball algorithm, computing the number of genera with co-occurring species
# python run_rnd.py --continent NA --group L --rnd_nb 1000 --null_model CB --measure gen
SEED_MAX = 2**32
params = {"continent": ["NA", "EU"],
"group": ["L", "S", "C"],
"null_model": ["CB", "UG", "shuffle"],
"measure": ["gen", "pairs"]}
hlp = {"continent": "continent data subset",
"group": "ecological group data subset",
"null_model": "null-model randomization algorithm",
"measure": "co-occurrence measure type"}
parser = argparse.ArgumentParser(description='Perform co-occurrence randomization experiments.')
parser.add_argument("-r", "--rnd_nb", type=int, help="number of randomized data copies to generate", default=10)
parser.add_argument("-k", "--keep_nb", type=int, help="number of randomized data copies to generate to store", default=2)
parser.add_argument("-d", "--data_folder", type=str, help="folder containing the data subsets", default="../prepared_data/")
parser.add_argument("-t", "--time_folder", type=str, help="folder containing the time bins specifications", default="../times/")
parser.add_argument("-x", "--xps_folder", type=str, help="folder to store the raw results", default="../xps_rnd/")
parser.add_argument("-z", "--xps_suffix", type=str, help="series suffix for the raw results", default="")
for p in params.keys():
parser.add_argument("-"+p[0], "--"+p, type=str, choices=list(params[p]), action='append', default=argparse.SUPPRESS, help=hlp[p])
pargs = vars(parser.parse_args())
for k, v in pargs.items():
params[k] = v
RND_FLD = params["xps_folder"]
ct.make_fld(RND_FLD)
for WHICH in params["continent"]:
for GROUP in params["group"]:
print("### %s-%s" % (WHICH, GROUP))
#######################################
# OUTPUT FILES
SSUFF = "%s-%s%s" % (WHICH, GROUP, params["xps_suffix"])
DATA_STATB_FILE = RND_FLD+"statB-%s_" + SSUFF+"_%s.csv"
DATA_RND_FILE = RND_FLD+"fossils_"+SSUFF+"_%s_%d.csv"
DATA_PROBAS_FILE = None # RND_FLD+"pairs_"+SSUFF+"_%s.csv"
SEED_STORE_FILE = RND_FLD+"seeds_"+SSUFF+".csv"
#######################################
# LOADING THE DATA
DATA_FLD = params["data_folder"]
TIMES_FLD = params["time_folder"]
if WHICH == "NA":
LIST_FILE = DATA_FLD+"fossils_NorthAmerica-%s.csv" % GROUP
SLICES_FILE = TIMES_FLD+"time-slices_america_filter.csv"
BOUNDARIES = {"LAT_MIN": 19, "LAT_MAX": 84,
"LONG_MIN": -140, "LONG_MAX": -70,
"MIN_AGE_MIN": 2.5, "MAX_AGE_MAX": 34}
MAX_RATIO_OUT = 0.1
else:
WHICH = "EU"
LIST_FILE = DATA_FLD+"fossils_Europe-wide-%s.csv" % GROUP
SLICES_FILE = TIMES_FLD+"time-slices_europe_filter.csv"
BOUNDARIES = {"LAT_MIN": 14, "LAT_MAX": 82,
"LONG_MIN": -24, "LONG_MAX": 75,
"MIN_AGE_MIN": 2.5, "MAX_AGE_MAX": 23}
MAX_RATIO_OUT = 0.1
FIELDS_SITES = ["LIDNUM", "NAME", "LAT", "LONG", "MAX_AGE", "MIN_AGE", "SLICE_ID", "SLICE_NAME", "MEAN_HYPSODONTY"]
# FIELDS_SITES = ["LAT","LONG","MAX_AGE","MIN_AGE","SLICE_ID","SLICE_NAME"]
MAP_FIELDS_SITES = dict([(v, k) for (k, v) in enumerate(FIELDS_SITES)])
# FIELDS_SPECIES = ["ORDER","FAMILY","GENUS","SPECIES"] #, "UNIQUE"]
FIELDS_SPECIES = ["ORDER", "SUBORDERORSUPERFAMILY", "FAMILY", "SUBFAMILY", "GENUS", "SPECIES"]
MAP_FIELDS_SPECIES = dict([(v, k) for (k, v) in enumerate(FIELDS_SPECIES)])
FIELDS_FORMAT = {"LAT": float, "LONG": float, "MAX_AGE": float, "MIN_AGE": float, "SLICE_ID": int}
fossils_data = ct.read_fossils(LIST_FILE, SLICES_FILE, FIELDS_FORMAT, FIELDS_SITES, FIELDS_SPECIES)
marg_species = numpy.sum(fossils_data["occurrences"], axis=0)
sort_species = numpy.argsort(marg_species)
marg_sites = numpy.sum(fossils_data["occurrences"], axis=1)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A script that builds boosted trees over higgs data.
If you haven't, please run data_download.py beforehand to prepare the data.
For some more details on this example, please refer to README.md as well.
Note that the model_dir is cleaned up before starting the training.
Usage:
$ python train_higgs.py --n_trees=100 --max_depth=6 --learning_rate=0.1 \
--model_dir=/tmp/higgs_model
Note that BoostedTreesClassifier is available since Tensorflow 1.8.0.
So you need to install recent enough version of Tensorflow to use this example.
The training data is by default the first million examples out of 11M examples,
and eval data is by default the last million examples.
They are controlled by --train_start, --train_count, --eval_start, --eval_count.
e.g. to train over the first 10 million examples instead of 1 million:
$ python train_higgs.py --n_trees=100 --max_depth=6 --learning_rate=0.1 \
--model_dir=/tmp/higgs_model --train_count=10000000
Training history and metrics can be inspected using tensorboard.
Set --logdir as the --model_dir set by flag when training
(or the default /tmp/higgs_model).
$ tensorboard --logdir=/tmp/higgs_model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# pylint: disable=g-bad-import-order
import numpy as np
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.utils.flags import core as flags_core
from official.utils.flags._conventions import help_wrap
from official.utils.logs import logger
NPZ_FILE = "HIGGS.csv.gz.npz" # numpy compressed file containing "data" array
def read_higgs_data(data_dir, train_start, train_count, eval_start, eval_count):
"""Reads higgs data from csv and returns train and eval data.
Args:
data_dir: A string, the directory of higgs dataset.
train_start: An integer, the start index of train examples within the data.
train_count: An integer, the number of train examples within the data.
eval_start: An integer, the start index of eval examples within the data.
eval_count: An integer, the number of eval examples within the data.
Returns:
Numpy array of train data and eval data.
"""
npz_filename = os.path.join(data_dir, NPZ_FILE)
try:
# gfile allows numpy to read data from network data sources as well.
with tf.gfile.Open(npz_filename, "rb") as npz_file:
with np.load(npz_file) as npz:
data = npz["data"]
except tf.errors.NotFoundError as e:
raise RuntimeError(
"Error loading data; use data_download.py to prepare the data.\n{}: {}"
.format(type(e).__name__, e))
return (data[train_start:train_start + train_count],
data[eval_start:eval_start + eval_count])
# This showcases how to make input_fn when the input data is available in the
# form of numpy arrays.
def make_inputs_from_np_arrays(features_np, label_np):
"""Makes and returns input_fn and feature_columns from numpy arrays.
The generated input_fn will return tf.data.Dataset of feature dictionary and a
label, and feature_columns will consist of the list of
tf.feature_column.BucketizedColumn.
Note, for in-memory training, tf.data.Dataset should contain the whole data
as a single tensor. Don't use batch.
Args:
features_np: A numpy ndarray (shape=[batch_size, num_features]) for
float32 features.
label_np: A numpy ndarray (shape=[batch_size, 1]) for labels.
Returns:
input_fn: A function returning a Dataset of feature dict and label.
feature_names: A list of feature names.
feature_column: A list of tf.feature_column.BucketizedColumn.
"""
num_features = features_np.shape[1]
features_np_list = np.split(features_np, num_features, axis=1)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import lag_plot
from pandas import datetime
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
df = pd.read_csv("corona2.csv")
df.head(6)
plt.figure()
lag_plot(df['InfectadosDia'], lag = 1)
plt.title('Daily COVID-19 infections - Autocorrelation plot with lag = 1')
plt.show()
plt.plot(df["Date"], df["InfectadosDia"])
plt.xticks(np.arange(0,200,5), df['Date'][0:200:5], rotation="vertical")
plt.title("TESLA stock price over time")
plt.xlabel("time")
plt.ylabel("deaths")
plt.show()
train_data, test_data = df[0:int(len(df)*0.7)], df[int(len(df)*0.7):]
training_data = train_data['InfectadosDia'].values
test_data = test_data['InfectadosDia'].values
history = [x for x in training_data]
model_predictions = []
N_test_observations = len(test_data)
for time_point in range(N_test_observations):
model = ARIMA(history, order=(4,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
model_predictions.append(yhat)
true_test_value = test_data[time_point]
history.append(true_test_value)
MSE_error = mean_squared_error(test_data, model_predictions)
print('Testing Mean Squared Error is {}'.format(MSE_error))
test_set_range = df[int(len(df)*0.7):].index
plt.plot(test_set_range, model_predictions, color='blue', marker='o', linestyle='dashed', label='Predicted daily infections (ARIMA)')
plt.plot(test_set_range, test_data, color='red', label='Actual daily infections')
plt.title('COVID-19 daily infections: ARIMA forecast vs. actual')
plt.xlabel('Date')
plt.ylabel('Daily infections')
plt.xticks(np.arange(125,200,1))
'''
TSDF fusion.
'''
import numpy as np
from skimage import measure
try:
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
TSDF_GPU_MODE = 1
except Exception as err:
print('Warning: %s'%(str(err)))
print('Failed to import PyCUDA. Running tsdf fusion in CPU mode.')
TSDF_GPU_MODE = 0
class TSDFVolume(object):
def __init__(self, vol_bnds, voxel_size):
# Define voxel volume parameters.
self._vol_bnds = vol_bnds # 3x2, rows: (x, y, z), columns: (min, max) in world coordinates in meters
self._voxel_size = voxel_size # in meters (determines volume discretization and resolution)
self._trunc_margin = self._voxel_size * 5 # truncation on SDF
# Adjust volume bounds.
self._vol_dim = np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / self._voxel_size).copy(order='C').astype(int) # ensure C-order contiguous
self._vol_bnds[:,1] = self._vol_bnds[:, 0] + self._vol_dim * self._voxel_size
self._vol_origin = self._vol_bnds[:, 0].copy(order='C').astype(np.float32) # ensure C-order contiguous
print("Voxel volume size: {:d} x {:d} x {:d}".format(self._vol_dim[0], self._vol_dim[1], self._vol_dim[2]))
# Initialize pointers to voxel volume in CPU memory.
self._tsdf_vol_cpu = np.ones(self._vol_dim).astype(np.float32)
self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32) # for computing the cumulative moving average of observations per voxel
self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
# Copy voxel volumes to GPU.
if TSDF_GPU_MODE:
self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes)
cuda.memcpy_htod(self._tsdf_vol_gpu,self._tsdf_vol_cpu)
self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes)
cuda.memcpy_htod(self._weight_vol_gpu,self._weight_vol_cpu)
self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes)
cuda.memcpy_htod(self._color_vol_gpu,self._color_vol_cpu)
# Cuda kernel function (C++)
self._cuda_src_mod = SourceModule("""
__global__ void integrate(float * tsdf_vol,
float * weight_vol,
float * color_vol,
float * vol_dim,
float * vol_origin,
float * cam_intr,
float * cam_pose,
float * other_params,
float * color_im,
float * depth_im) {
// Get voxel index.
int gpu_loop_idx = (int) other_params[0];
int max_threads_per_block = blockDim.x;
int block_idx = blockIdx.z * gridDim.y * gridDim.x + blockIdx.y * gridDim.x + blockIdx.x;
int voxel_idx = gpu_loop_idx * gridDim.x * gridDim.y * gridDim.z * max_threads_per_block + block_idx * max_threads_per_block + threadIdx.x;
int vol_dim_x = (int)vol_dim[0];
int vol_dim_y = (int)vol_dim[1];
int vol_dim_z = (int)vol_dim[2];
if (voxel_idx > vol_dim_x * vol_dim_y * vol_dim_z)
return;
// Get voxel grid coordinates.
float voxel_x = floorf(((float)voxel_idx) / ((float)(vol_dim_y * vol_dim_z)));
float voxel_y = floorf(((float)(voxel_idx - ((int)voxel_x) * vol_dim_y * vol_dim_z)) / ((float)vol_dim_z));
float voxel_z = (float)(voxel_idx - ((int)voxel_x) * vol_dim_y * vol_dim_z - ((int)voxel_y) * vol_dim_z);
// Voxel grid coordinates to world coordinates.
float voxel_size = other_params[1];
float pt_x = vol_origin[0] + voxel_x * voxel_size;
float pt_y = vol_origin[1] + voxel_y * voxel_size;
float pt_z = vol_origin[2] + voxel_z * voxel_size;
// World coordinates to camera coordinates.
float tmp_pt_x = pt_x - cam_pose[0*4+3];
float tmp_pt_y = pt_y - cam_pose[1*4+3];
float tmp_pt_z = pt_z - cam_pose[2*4+3];
float cam_pt_x = cam_pose[0*4+0] * tmp_pt_x + cam_pose[1*4+0] * tmp_pt_y + cam_pose[2*4+0] * tmp_pt_z;
float cam_pt_y = cam_pose[0*4+1] * tmp_pt_x + cam_pose[1*4+1] * tmp_pt_y + cam_pose[2*4+1] * tmp_pt_z;
float cam_pt_z = cam_pose[0*4+2] * tmp_pt_x + cam_pose[1*4+2] * tmp_pt_y + cam_pose[2*4+2] * tmp_pt_z;
// Camera coordinates to image pixels.
int pixel_x = (int) roundf(cam_intr[0*3+0] * (cam_pt_x / cam_pt_z) + cam_intr[0*3+2]);
int pixel_y = (int) roundf(cam_intr[1*3+1] * (cam_pt_y / cam_pt_z) + cam_intr[1*3+2]);
// Skip if outside view frustum.
int im_h = (int) other_params[2];
int im_w = (int) other_params[3];
if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z < 0)
return;
// Skip invalid depth.
float depth_value = depth_im[pixel_y*im_w+pixel_x];
if (depth_value == 0)
return;
// Integrate TSDF.
float trunc_margin = other_params[4];
float depth_diff = depth_value-cam_pt_z;
if (depth_diff < -trunc_margin)
return;
float dist = fmin(1.0f, depth_diff / trunc_margin);
float w_old = weight_vol[voxel_idx];
float obs_weight = other_params[5];
float w_new = w_old + obs_weight;
weight_vol[voxel_idx] = w_new;
tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx] * w_old + dist) / w_new;
// Integrate color.
float old_color = color_vol[voxel_idx];
float old_b = floorf(old_color / (256 * 256));
float old_g = floorf((old_color - old_b * 256 * 256) / 256);
float old_r = old_color - old_b * 256 * 256 - old_g * 256;
float new_color = color_im[pixel_y*im_w+pixel_x];
float new_b = floorf(new_color / (256 * 256));
float new_g = floorf((new_color - new_b * 256 * 256) / 256);
float new_r = new_color - new_b * 256 * 256 - new_g * 256;
new_b = fmin(roundf((old_b*w_old + new_b) / w_new), 255.0f);
new_g = fmin(roundf((old_g*w_old + new_g) / w_new), 255.0f);
new_r = fmin(roundf((old_r*w_old + new_r) / w_new), 255.0f);
color_vol[voxel_idx] = new_b * 256 * 256 + new_g * 256 + new_r;
}""")
self._cuda_integrate = self._cuda_src_mod.get_function("integrate")
# Determine block/grid size on GPU.
gpu_dev = cuda.Device(0)
self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK
n_blocks = int(np.ceil(float(np.prod(self._vol_dim)) / float(self._max_gpu_threads_per_block)))
grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X, int(np.floor(np.cbrt(n_blocks))))
grid_dim_y = min(gpu_dev.MAX_GRID_DIM_Y, int(np.floor(np.sqrt(n_blocks / grid_dim_x))))
grid_dim_z = min(gpu_dev.MAX_GRID_DIM_Z, int(np.ceil(float(n_blocks) / float(grid_dim_x*grid_dim_y))))
self._max_gpu_grid_dim = np.array([grid_dim_x, grid_dim_y, grid_dim_z]).astype(int)
self._n_gpu_loops = int(np.ceil(float(np.prod(self._vol_dim)) / float(np.prod(self._max_gpu_grid_dim) * self._max_gpu_threads_per_block)))
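# Worked example (assuming MAX_THREADS_PER_BLOCK == 1024): a 512**3 voxel
# volume needs ceil(512**3 / 1024) = 131072 thread blocks; the grid above then
# works out to (50, 51, 52) = 132600 blocks, so a single GPU loop suffices.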
def integrate(self,color_im,depth_im,cam_intr,cam_pose,obs_weight=1.):
im_h = depth_im.shape[0]
im_w = depth_im.shape[1]
# Fold RGB color image into a single channel image.
color_im = color_im.astype(np.float32)
color_im = np.floor(color_im[:, :, 2] * 256 * 256 + color_im[:, :, 1] * 256 + color_im[:, :, 0])
# GPU mode: integrate voxel volume (calls CUDA kernel).
if TSDF_GPU_MODE:
for gpu_loop_idx in range(self._n_gpu_loops):
self._cuda_integrate(self._tsdf_vol_gpu,
self._weight_vol_gpu,
self._color_vol_gpu,
cuda.InOut(self._vol_dim.astype(np.float32)),
cuda.InOut(self._vol_origin.astype(np.float32)),
cuda.InOut(cam_intr.reshape(-1).astype(np.float32)),
cuda.InOut(cam_pose.reshape(-1).astype(np.float32)),
cuda.InOut(np.asarray([gpu_loop_idx, self._voxel_size, im_h, im_w, self._trunc_margin, obs_weight], np.float32)),
cuda.InOut(color_im.reshape(-1).astype(np.float32)),
cuda.InOut(depth_im.reshape(-1).astype(np.float32)),
block=(self._max_gpu_threads_per_block, 1, 1), grid=(int(self._max_gpu_grid_dim[0]), int(self._max_gpu_grid_dim[1]), int(self._max_gpu_grid_dim[2])))
# CPU mode: integrate voxel volume (vectorized implementation).
else:
# Get voxel grid coordinates.
xv, yv, zv = np.meshgrid(range(self._vol_dim[0]), range(self._vol_dim[1]), range(self._vol_dim[2]), indexing='ij')
vox_coords = np.concatenate((xv.reshape(1, -1), yv.reshape(1, -1), zv.reshape(1, -1)), axis=0).astype(int)
# Voxel coordinates to world coordinates.
world_pts = self._vol_origin.reshape(-1, 1) + vox_coords.astype(float) * self._voxel_size
# World coordinates to camera coordinates.
world2cam = np.linalg.inv(cam_pose)
cam_pts = np.dot(world2cam[:3, :3], world_pts) + np.tile(world2cam[:3, 3].reshape(3, 1), (1, world_pts.shape[1]))
# Camera coordinates to image pixels.
pix_x = np.round(cam_intr[0, 0] * (cam_pts[0, :] / cam_pts[2, :]) + cam_intr[0, 2]).astype(int)
pix_y = np.round(cam_intr[1, 1] * (cam_pts[1, :] / cam_pts[2, :]) + cam_intr[1, 2]).astype(int)
# Skip if outside view frustum.
valid_pix = np.logical_and(pix_x >= 0,
np.logical_and(pix_x < im_w,
np.logical_and(pix_y >= 0,
np.logical_and(pix_y < im_h,
cam_pts[2,:] > 0))))
depth_val = np.zeros(pix_x.shape)
depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]]
# Integrate TSDF.
depth_diff = depth_val - cam_pts[2,:]
valid_pts = np.logical_and(depth_val > 0, depth_diff >= -self._trunc_margin)
dist = np.minimum(1., np.divide(depth_diff, self._trunc_margin))
w_old = self._weight_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]]
w_new = w_old + obs_weight
self._weight_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]] = w_new
tsdf_vals = self._tsdf_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]]
self._tsdf_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]] = np.divide(np.multiply(tsdf_vals, w_old) + dist[valid_pts], w_new)
"""
Collection of function to pre-process the master curve and perform the Prony
series parameter identification.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize, nnls
from . import shift
"""
--------------------------------------------------------------------------------
Prony series - Domain independent functions
--------------------------------------------------------------------------------
"""
def discretize(df_master, window='round', nprony=0):
"""
Discretizes relaxation times over time or frequency axis.
Discrete relaxation times are required for Prony parameter curve fitting
routine. This function spaces the relaxation times over the experimental characterization window.
Parameters
----------
df_master : pandas.DataFrame
Contains the master curve data.
window : {'round', 'exact', 'min'}
Defines the location of the discretization of the relaxation times.
- 'exact' : Use the whole window of the experimental data and logarithmically
space the relaxation times in between.
- 'round' : Round the minimum and maximum values of the experimental data
to the nearest base 10 number and logarithmically space the
remaining relaxation times in between the rounded numbers
- 'min' : Position of relaxation times is optimized during minimization
routine to reduce the number of Prony terms.
nprony : numeric, default = 0
Number of Prony terms to be used for the discretization. The number
of Prony terms and the number of relaxation times is equal. If no number
or 0 is specified, the default behavior of one Prony term per decade is
used to automatically calculate the number of Prony terms.
Returns
-------
df_dis : pandas.DataFrame
Contains discrete point, equal to the relaxation times, of the
master curve data (df_master).
References
----------
Kraus, <NAME>., and <NAME>. "Generalized collocation method using
Stiffness matrices in the context of the Theory of Linear viscoelasticity
(GUSTL)." Technische Mechanik-European Journal of Engineering Mechanics
37.1 (2017): 82-106.
"""
modul = df_master.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
relax = '{}_relax'.format(modul)
stor_filt = '{}_stor_filt'.format(modul)
loss_filt = '{}_loss_filt'.format(modul)
relax_filt = '{}_relax_filt'.format(modul)
#Get relaxation times
a = 1 #[Tschoegl 1989]
#omega = (1/(a*tau)) #[Kraus 2017, Eq. 25]
_tau = 1/(a*df_master['omega'])
#Window Time Domain
if df_master.domain == 'freq':
exp_inf = int(np.floor(np.log10(_tau.iloc[0]))) #highest time domain exponent
exp_0 = int(np.ceil(np.log10(_tau.iloc[-1]))) #lowest time domain exponent
val_inf = _tau.iloc[0]
val_0 = _tau.iloc[-1]
elif df_master.domain == 'time':
exp_inf = int(np.floor(np.log10(_tau.iloc[-1]))) #highest time domain exponent
exp_0 = int(np.ceil(np.log10(_tau.iloc[0]))) #lowest time domain exponent
val_inf = _tau.iloc[-1]
val_0 = _tau.iloc[0]
decades = exp_inf - exp_0
#Space evenly on a log scale in time domain
if nprony == 0:
nprony = exp_inf - exp_0 + 1 #One prony term per decade
if window == 'round':
tau = np.flip(np.geomspace(float(10**exp_0), float(10**exp_inf), nprony))
elif window == 'exact':
tau = np.flip(np.geomspace(val_0, val_inf, nprony))
elif window == 'min':
tau = np.flip(np.geomspace(val_0, val_inf, nprony+2))[1:-1]
#Get dataframe with discretized values
omega_dis = (1/(a*tau)) #[Kraus 2017, Eq. 25]
freq_dis = omega_dis/(2*np.pi) #Convert to cycles per second [Hz]
t_dis = 1/freq_dis
if df_master.domain == 'freq':
#Interpolate E_stor and E_loss at discretization points
E_stor_dis = np.interp(freq_dis, df_master['f'], df_master[stor_filt])
E_loss_dis = np.interp(freq_dis, df_master['f'], df_master[loss_filt])
#Estimate instantaneous (E_0) and equilibrium (E_inf) modulus
E_0 = df_master[stor_filt].iloc[-1]
E_inf = df_master[stor_filt].iloc[0]
#Assemble data frame
df_dis = pd.DataFrame([freq_dis, E_stor_dis, E_loss_dis, omega_dis, tau]).T
df_dis.columns = ['f', stor, loss, 'omega', 'tau_i']
elif df_master.domain == 'time':
#Interpolate E_relax at discretization points
E_relax_dis = np.interp(t_dis, df_master['t'], df_master[relax_filt])
#Estimate instantaneous (E_0) and equilibrium (E_inf) modulus
E_0 = df_master[relax_filt].iloc[0]
E_inf = df_master[relax_filt].iloc[-1]
#Assemble data frame
df_dis = pd.DataFrame([tau, t_dis, E_relax_dis, omega_dis, freq_dis]).T
df_dis.columns = ['tau_i', 't', relax, 'omega', 'f']
#Add df attributes
df_dis.index += 1
df_dis.nprony = nprony
df_dis.E_0 = E_0
df_dis.E_inf = E_inf
df_dis.RefT = df_master.RefT
df_dis.f_min = df_master['f'].min()
df_dis.f_max = df_master['f'].max()
df_dis.decades = decades
df_dis.domain = df_master.domain
df_dis.modul = df_master.modul
return df_dis
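# Illustrative sketch: build a small synthetic frequency-domain master curve
# and discretize it. The column names and DataFrame attributes (modul, domain,
# RefT) follow the conventions assumed above; the modulus values themselves
# are made up for demonstration only.
if __name__ == "__main__":
    _f = np.geomspace(1e-2, 1e2, 50)
    _df = pd.DataFrame({'f': _f, 'omega': 2*np.pi*_f})
    _df['E_stor'] = 10 + 90*_f/(1 + _f)
    _df['E_loss'] = 30*_f/(1 + _f**2)
    _df['E_stor_filt'] = _df['E_stor']
    _df['E_loss_filt'] = _df['E_loss']
    _df.modul = 'E'
    _df.domain = 'freq'
    _df.RefT = 20.0
    _df_dis = discretize(_df, window='round')
    print(_df_dis[['f', 'tau_i']])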
def plot_dis(df_master, df_dis, units):
"""
Plot relaxation times on top of master curve.
Parameters
----------
df_master : pandas.DataFrame
Contains the master curve data.
df_dis : pandas.DataFrame
Contains the discrete relaxation times and corresponding data.
units : dict of {str : str}
Contains the names of the physical quantities as key and
the corresponding names of the units as item.
Returns
-------
fig : matplotlib.pyplot.figure
Plot showing the relaxation times on top of the master curve.
"""
modul = df_master.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
relax = '{}_relax'.format(modul)
if df_master.domain == 'freq':
fig, ax1 = plt.subplots()
df_master.plot(x='f', y=[stor, loss],
ax=ax1, logx=True, color=['C0', 'C1'], alpha=0.5)
df_dis.plot(x='f', y=[stor, loss], label=['tau_i', 'tau_i'], ax=ax1,
logx=True, ls='', marker='o', color=['C0', 'C1'])
ax1.set_xlabel('Frequency ({})'.format(units['f']))
ax1.set_ylabel('Storage and loss modulus ({})'.format(units[stor]))
ax1.legend()
fig.show()
return fig
elif df_master.domain == 'time':
fig, ax1 = plt.subplots()
df_master.plot(x='t', y=[relax], ax=ax1, logx=True, color=['k'])
df_dis.plot(x='t', y=[relax], label = ['tau_i'],
ax=ax1, logx=True, ls='', marker='o', color=['red'])
ax1.set_xlabel('Time ({})'.format(units['t']))
ax1.set_ylabel('Relaxation modulus ({})'.format(units[relax]))
ax1.legend()
fig.show()
return fig
def ls_res(func):
"""
Wrapper function that calculates the least squares residual.
Parameters
----------
func : function
Time domain: prony.E_relax_norm
Frequency domain: prony.E_freq_norm
Returns
-------
residual : function
Calculates least squares residual for specified domain.
"""
def residual(alpha_i, tau_i, E_meas_norm, tf_meas):
"""
Calculate least squares residual.
Parameters
----------
alpha_i : array-like
Normalized relaxation moduli (unitless).
tau_i : array-like
relaxation times in s.
E_meas_norm : array-like
Normalized modulus from experimental measurement data.
tf_meas : array-like
Time domain: time data of measurements in s.
Frequency domain: frequency data of measurements in Hz.
Returns
-------
numeric
Least squares residual of measurement data and curve fit data.
"""
return np.sum((E_meas_norm - func(tf_meas, alpha_i, tau_i))**2)
return residual
def split_x0(func):
"""
Wrapper that splits array x0 of the minimization routine into two arrays.
Splits the the first argument x0 into two arrays alpha_i and tau_i and
forwards both arrays to the called function. A single array x0 is necessary
to optimize both alpha_i and tau_i at the same time. However, typically,
only alpha_i is optimized and tau_i is kept constant. This wrapper allows
to use the same function in both scenarios.
Parameters
----------
func : function
Function that calculates least squares residual.
Returns
-------
split : function
See also
--------
prony.ls_res : Function to be wrapped during minimization of Prony terms.
"""
def split(*args):
alpha_i = args[0][0:int(args[0].shape[0]/2)]
tau_i = args[0][int(args[0].shape[0]/2):]
return func(alpha_i, tau_i, args[1], args[2])
return split
"""
--------------------------------------------------------------------------------
Prony series - Time domain
--------------------------------------------------------------------------------
"""
def E_relax_norm(time, alpha_i, tau_i):
"""
Calculate normalized relaxation modulus values.
Parameters
----------
time : array-like
Time in s.
alpha_i : array-like
Normalized relaxation moduli (unitless).
tau_i : array-like
relaxation times in s.
Returns
-------
numpy.ndarray
Relaxation modulus values.
"""
#Loop implementation
#-------------------
#y = np.zeros(time.shape[0])
#for i, t in enumerate(time):
# y[i] = E_0 * (1 - np.sum(alpha_i*(1-np.exp(-t/tau_i))))
#return y
#-----------------------------
#Linear algebra implementation
return 1-np.sum(alpha_i) + np.dot(alpha_i, np.exp(-time/tau_i[:,None]))
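# Quick illustrative self-check: the vectorized form above matches the
# commented-out loop implementation; the input values are arbitrary.
if __name__ == "__main__":
    _t = np.array([0.1, 1.0, 10.0, 100.0])
    _alpha = np.array([0.3, 0.4])
    _tau = np.array([1.0, 50.0])
    _loop = np.array([1 - np.sum(_alpha*(1 - np.exp(-ti/_tau))) for ti in _t])
    assert np.allclose(E_relax_norm(_t, _alpha, _tau), _loop)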
def fit_time(df_dis, df_master, opt=False):
"""
Fit Prony series parameter in time domain.
A least-squares minimization is performed using the L-BFGS-B method from
the scipy package. The implementation is similar to the optimization problem described by [1] for a homogeneous distribution of discrete times.
Parameters
----------
df_dis : pandas.DataFrame
Contains the discrete relaxation times and corresponding data.
df_master : pandas.DataFrame
Contains the master curve data.
opt : bool, default = False
Flag indicating whether the Prony term minimization routine should be
executed or not.
Returns
-------
prony : dict
Contains the Prony series parameters of the fit.
References
----------
[1] <NAME>., <NAME>., <NAME>. et al. Optimal discrete-time
Prony series fitting method for viscoelastic materials. Mech Time-Depend
Mater 23, 193-206 (2019). https://doi.org/10.1007/s11043-018-9394-z
"""
m = df_dis.modul
#Initial guess: alpha_i = 1
alpha_i = np.ones(df_dis['tau_i'].values.shape)
tau_i = df_dis['tau_i'].values
#Get measurement data and normalize modul
E_meas_norm = df_master['{}_relax_filt'.format(m)].values / df_dis.E_0
time_meas = df_master['t'].values
#Define bounds
bnd_a = ((0,1),)*alpha_i.shape[0]
#Perform minimization to obtain alpha_i
res = minimize(ls_res(E_relax_norm), alpha_i,
args=(tau_i, E_meas_norm, time_meas), method='L-BFGS-B', bounds=bnd_a)
alpha_i = res.x
#Use initial fit and try to optimize both alpha_i and tau_i
if opt:
#Stack alpha_i and tau_i into single array
x0 = np.hstack((alpha_i, tau_i))
#Define bounds
tau_max = 1/(2*np.pi*df_dis.f_min)
tau_min = 1/(2*np.pi*df_dis.f_max)
bnd_t = ((tau_min, tau_max),)*alpha_i.shape[0]
bnd = bnd_a + bnd_t
#Find optimal Prony terms
res = minimize(split_x0(ls_res(E_relax_norm)), x0,
args=(E_meas_norm, time_meas), method='L-BFGS-B' , bounds=bnd)
#Print success of optimization
if res.success:
msg = 'Prony series fit N = {:02d}: Convergence criterion reached!'
print(msg.format(alpha_i.shape[0]))
else:
msg = 'Prony series fit N = {:02d}: Convergence criterion not reached!'
print(msg.format(alpha_i.shape[0]))
#Store Prony terms in dataframe
alpha_i = res.x[0:int(res.x.shape[0]/2)]
df_dis['tau_i'] = res.x[int(res.x.shape[0]/2):]
#Ensure that Sum(alpha_i) < 1 (otherwise can lead to numerical difficulties in FEM)
if alpha_i.sum() >= 1:
df_dis['alpha_i'] = 0.999/alpha_i.sum()*alpha_i #normalize to 0.999
else:
df_dis['alpha_i'] = alpha_i
#Store Prony terms in dataframe
df_prony = df_dis[['tau_i', 'alpha_i']].copy()
df_prony = df_prony.iloc[::-1].reset_index(drop=True)
df_prony.index += 1
df_prony['{}_0'.format(m)] = df_dis.E_0
df_prony['{}_i'.format(m)] = df_dis.E_0 * df_prony['alpha_i']
df_prony.RefT = df_dis.RefT
#Store Prony parameters in dictionary
prony = {'E_0':df_dis.E_0, 'df_terms':df_prony, 'f_min':df_dis.f_min,
'f_max':df_dis.f_max, 'label':'equi.', 'err' : res.fun,
'decades':df_dis.decades, 'modul':m}
return prony
"""
--------------------------------------------------------------------------------
Prony series - Frequency domain
--------------------------------------------------------------------------------
"""
def E_freq_norm(omega, alpha_i, tau_i):
"""
Calculate normalized storage and loss modulus values.
Parameters
----------
omega : array-like
Angular frequency in rad/s.
alpha_i : array-like
Normalized relaxation moduli (unitless).
tau_i : array-like
relaxation times in s.
Returns
-------
numpy.ndarray
Concatenated array of normalized storage and loss modulus values.
"""
A = (omega*tau_i[:,None])
A2 = A**2
E_stor = 1-np.sum(alpha_i) + np.dot(alpha_i, A2/(A2+1))
E_loss = np.dot(alpha_i, A/(A2+1))
return np.concatenate((E_stor, E_loss))
import numpy as np
import os
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
from .networks import BC, Embedding, AutoEncoder
from .training import update, evaluate
from .datasets import BCSet, StateActionEmbeddingSet, AESet
from .utils import entropy, BColors
from hyperloglog import HyperLogLog
import matplotlib.pyplot as plt
from math import sqrt
import copy
from .plotting import plot_histograms, plot_states
from .latex import create_latex_table
class Evaluator():
def __init__(self,
environment: str,
buffer_type: str,
states:np.ndarray,
actions:np.ndarray,
rewards:np.ndarray,
dones:np.ndarray,
workers=0,
seed=42,
num_actions=None):
assert len(states.shape) == 2, f"States must be of dimension (ds_size, feature_size), were ({states.shape})"
if len(actions.shape) == 1:
actions = actions.reshape(-1, 1)
assert len(actions.shape) == 2, f"Actions must be of dimension (ds_size, 1), were ({actions.shape})"
if len(rewards.shape) == 1:
rewards = rewards.reshape(-1, 1)
assert len(rewards.shape) == 2, f"Rewards must be of dimension (ds_size, 1), were ({rewards.shape})"
if len(dones.shape) == 1:
dones = dones.reshape(-1, 1)
assert len(dones.shape) == 2, f"Dones must be of dimension (ds_size, 1), were ({dones.shape})"
# task information
self.environment = environment
self.buffer_type = buffer_type
# Dataset, last state and actions are meaningless
self.states = states
self.actions = actions
self.rewards = rewards
self.dones = dones
# auxiliary parameters
self.workers = workers
self.seed = seed
# could be that dataset contains not every action, then one can pass the correct number of actions
self.num_actions = num_actions if num_actions is not None else np.max(self.actions) + 1
device = "cuda" if torch.cuda.is_available() else "cpu"
# behavioral cloning network
self.behavioral_trained = False
self.behavioral = BC(num_state=self.states.shape[1], num_actions=self.num_actions, seed=self.seed).to(device)
# state embedding network
self.state_embedding_trained = False
self.state_embedding = Embedding(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# state-action embedding network
self.state_action_embedding_trained = False
self.state_action_embedding = Embedding(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# state ae network
self.state_ae_trained = False
self.state_ae = AutoEncoder(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# state-action ae network
self.state_action_ae_trained = False
self.state_action_ae = AutoEncoder(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# copies that stay random
self.random_state_embedding = copy.deepcopy(self.state_embedding)
self.random_state_action_embedding = copy.deepcopy(self.state_action_embedding)
# limits for estimation
self.limits = [None] * 8
def evaluate(self, state_limits=None, action_limits=None,
epochs=10, batch_size=64, lr=1e-3,
subsample=1., verbose=False):
assert 0 <= subsample <= 1, f"subsample must be in [0;1] but is {subsample}."
self.train_behavior_policy(epochs, batch_size, lr, verbose)
returns = self.get_returns()
sparsities = self.get_sparsities()
ep_lengths = self.get_episode_lengths()
entropies = self.get_bc_entropy()
unique_states = self.get_unique_states(limits=state_limits)
unique_state_actions = self.get_unique_state_actions(limits=action_limits)
print("-"*50)
print("Min / Mean / Max Return: \t\t", f"{round(np.min(returns), 2)} / {round(np.mean(returns), 2)} "
f"/ {round(np.max(returns), 2)}")
print("Unique States: \t", f"{unique_states}")
print("Unique State-Actions: \t", f"{unique_state_actions}")
print("Min / Mean / Max Entropy: \t", f"{round(np.min(entropies), 2)} / {round(np.mean(entropies), 2)} "
f"/ {round(np.max(entropies), 2)}")
print("Min / Mean / Max Sparsity: \t", f"{round(np.min(sparsities), 2)} / "
f"{round(np.mean(sparsities), 2)} "
f"/ {round(np.max(sparsities), 2)}")
print("Min / Mean / Max Episode Length: \t", f"{round(np.min(ep_lengths), 2)} / "
f"{round(np.mean(ep_lengths), 2)} "
f"/ {round(np.max(ep_lengths), 2)}")
print("-" * 50)
return returns, unique_states, unique_state_actions, entropies, sparsities, ep_lengths
def get_returns(self):
rewards, ep_reward = list(), 0
for i, done in enumerate(self.dones):
ep_reward += self.rewards[i].item()
if done:
rewards.append(ep_reward)
ep_reward = 0
return rewards
@staticmethod
def get_normalized_rewards(rewards, random_reward, optimal_reward):
normalized_reward = []
for reward in rewards:
normalized_reward.append((reward - random_reward) / (optimal_reward - random_reward))
return normalized_reward
def get_sparsities(self):
sparsity, num_not_obtained = list(), list()
for i, done in enumerate(self.dones):
num_not_obtained.append(self.rewards[i].item() == 0)
if done:
sparsity.append(np.mean(num_not_obtained))
num_not_obtained = list()
return sparsity
def get_episode_lengths(self):
lengths, ep_length = list(), 0
for i, done in enumerate(self.dones):
ep_length += 1
if done:
lengths.append(ep_length)
ep_length = 0
return lengths
def get_bc_entropy(self):
if not self.behavioral_trained:
print(BColors.WARNING + "Attention, behavioral policy was not trained before calling get_bc_entropy!" + BColors.ENDC)
entropies = []
dl = DataLoader(BCSet(states=self.states, actions=self.actions), batch_size=512, drop_last=False,
shuffle=False, num_workers=self.workers)
for x, _ in dl:
x = x.to(next(self.behavioral.parameters()).device)
entropies.extend(entropy(self.behavioral(x)))
# calculate entropy
entropies = np.asarray(entropies)
return entropies
def get_similarity_distance(self):
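# Returns the ratio of (a) the mean embedded distance between randomly paired
# states drawn from the whole dataset to (b) the mean distance between randomly
# paired states drawn from the same episode; values above 1 indicate that
# individual episodes stay within a localized region of the embedded state space.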
states = torch.FloatTensor(self.states)[:len(self.dones)]
with torch.no_grad():
states = states.to(next(self.behavioral.parameters()).device)
states = self.state_embedding.embed(states).cpu().numpy()
rng = np.random.default_rng(self.seed)
ep_distances = []
general_distances = []
dones = []
for d, done in enumerate(self.dones):
if done:
dones.append(d + 1)
start = 0
for end in dones:
ep_states = states[start:end]
for s, state in enumerate(ep_states):
idx = rng.integers(len(ep_states))
# in case the same state is sampled by chance
while idx == s or np.allclose(state, ep_states[idx]):
idx = rng.integers(len(ep_states))
if len(ep_states) == 1:
break
if np.allclose(state, ep_states[idx]):
continue
distance = (state - ep_states[idx]).reshape(-1,)
ep_distances.append(np.linalg.norm(distance))
start = end
for s, state in enumerate(states):
idx = rng.integers(len(states))
# in case the same state is sampled by chance
while idx == s:
idx = rng.integers(len(states))
distance = (state - states[idx]).reshape(-1, )
general_distances.append(np.linalg.norm(distance))
return np.mean(general_distances) / np.mean(ep_distances)
def get_state_pseudo_coverage(self, no_cells=100, use_random=False):
states = torch.FloatTensor(self.states)[:len(self.dones)]
with torch.no_grad():
if use_random:
states = states.to(next(self.random_state_embedding.parameters()).device)
states = self.random_state_embedding.embed(states).cpu().numpy()
if self.limits[4] is None:
self.limits[4] = (np.min(states[:, 0]), np.max(states[:, 0]),
np.min(states[:, 1]), np.max(states[:, 1]))
limits = self.limits[4]
else:
states = states.to(next(self.state_embedding.parameters()).device)
states = self.state_embedding.embed(states).cpu().numpy()
if self.limits[0] is None:
self.limits[0] = (np.min(states[:, 0]), np.max(states[:, 0]),
np.min(states[:, 1]), np.max(states[:, 1]))
limits = self.limits[0]
return self.calc_coverage(states, limits, no_cells)
def get_state_action_pseudo_coverage(self, no_cells=100, use_random=False):
states = torch.FloatTensor(np.concatenate((self.states, self.actions), axis=1))[:len(self.dones)]
with torch.no_grad():
if use_random:
states = states.to(next(self.random_state_action_embedding.parameters()).device)
states = self.random_state_action_embedding.embed(states).cpu().numpy()
if self.limits[5] is None:
self.limits[5] = (np.min(states[:, 0]), np.max(states[:, 0]),
np.min(states[:, 1]), np.max(states[:, 1]))
limits = self.limits[5]
else:
states = states.to(next(self.state_action_embedding.parameters()).device)
states = self.state_action_embedding.embed(states).cpu().numpy()
if self.limits[1] is None:
self.limits[1] = (np.min(states[:, 0]), np.max(states[:, 0]),
np.min(states[:, 1]), np.max(states[:, 1]))
limits = self.limits[1]
return self.calc_coverage(states, limits, no_cells)
def get_state_ae_pseudo_coverage(self, no_cells=100):
states = torch.FloatTensor(self.states)
with torch.no_grad():
states = states.to(next(self.state_ae.parameters()).device)[:len(self.dones)]
states = self.state_ae.embed(states).cpu().numpy()
if self.limits[2] is None:
self.limits[2] = (np.min(states[:, 0]), np.max(states[:, 0]),
np.min(states[:, 1]), np.max(states[:, 1]))
limits = self.limits[2]
return self.calc_coverage(states, limits, no_cells)
# Copyright 2017 <NAME> Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""
Wasserstein Auto-Encoder models
"""
import sys
import time
import os
import logging
from math import sqrt, cos, sin, pi
import numpy as np
import tensorflow as tf
import ops
import utils
from priors import init_gaussian_prior, init_cat_prior
from sampling_functions import sample_mixtures, sample_pz, generate_linespace
from loss_functions import matching_penalty, reconstruction_loss, moments_loss
from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot
from plot_functions import save_train, save_vizu
from model_nn import label_encoder, cat_encoder, gaussian_encoder
from model_nn import continuous_decoder, discrete_decoder
from datahandler import datashapes
import pdb
class WAE(object):
def __init__(self, opts):
logging.error('Building the Tensorflow Graph')
# --- Create session
self.sess = tf.Session()
self.opts = opts
# --- Some of the parameters for future use
assert opts['dataset'] in datashapes, 'Unknown dataset.'
self.data_shape = datashapes[opts['dataset']]
# --- Placeholders
self.add_model_placeholders()
self.add_training_placeholders()
sample_size = tf.shape(self.u_points,out_type=tf.int64)[0]
range = tf.range(sample_size)
zero = tf.zeros([tf.cast(sample_size,dtype=tf.int32)],dtype=tf.int64)
# --- Initialize prior parameters
self.pz_mean, self.pz_sigma = init_gaussian_prior(opts)
self.pi0 = init_cat_prior(opts)
# --- Encoding inputs
probs_logit = label_encoder(self.opts, self.u_points, False,
self.is_training)
self.probs = ops.softmax(probs_logit,axis=-1)
logit_pi, self.u_enc_mean, self.u_enc_logSigma = self.encoder(
self.u_points,
False)
log_Zpi = ops.log_sum_exp(logit_pi,axis=-1,keepdims=True)
logit = logit_pi - log_Zpi \
+ tf.expand_dims(probs_logit,axis=-1)
u_logit = ops.log_sum_exp(logit,axis=1,keepdims=False)
#self.u_pi = ops.softmax(u_logit,axis=-1)
u_pi = tf.multiply(ops.softmax(logit_pi,axis=-1),tf.expand_dims(self.probs,axis=-1))
self.u_pi = tf.reduce_sum(u_pi,axis=1,keepdims=False)
logit_pi, self.l_enc_mean, self.l_enc_logSigma = self.encoder(
self.l_points,
True)
idx_label = tf.stack([range,self.l_labels], axis=-1)
logit = tf.gather_nd(logit_pi,idx_label)
self.l_pi = ops.softmax(logit,axis=-1)
# --- Sampling from encoded MoG prior
self.u_mixtures_encoded = sample_mixtures(opts, self.u_enc_mean,
tf.exp(self.u_enc_logSigma),
sample_size,'tensorflow')
self.l_mixtures_encoded = sample_mixtures(opts, self.l_enc_mean,
tf.exp(self.l_enc_logSigma),
sample_size,'tensorflow')
# --- Decoding encoded points (i.e. reconstruct)
self.u_reconstructed, self.u_reconstructed_logits = self.decoder(
self.u_mixtures_encoded,
False)
self.l_reconstructed, self.l_reconstructed_logits = self.decoder(
self.l_mixtures_encoded,
True)
self.labels_reconstructed, self.labels_reconstructed_logits = discrete_decoder(
opts,
self.label_noise,
False,
self.is_training)
# --- Reconstructing inputs (only for visualization)
idx = tf.reshape(tf.multinomial(tf.nn.log_softmax(u_logit),1),[-1])
mix_idx = tf.stack([range,idx],axis=-1)
self.encoded_point = tf.gather_nd(self.u_mixtures_encoded,mix_idx)
self.reconstructed_point = tf.gather_nd(self.u_reconstructed,mix_idx)
self.reconstructed_logit = tf.gather_nd(self.u_reconstructed_logits,mix_idx)
# --- Sampling from model (only for generation)
self.decoded, self.decoded_logits = self.decoder(self.sample_noise,
True)
# --- Objectives, losses, penalties, pretraining
# Compute reconstruction cost
self.l_loss_reconstruct = reconstruction_loss(opts, self.l_pi,
self.l_points,
self.l_reconstructed,
self.l_labels,
tf.argmax(self.labels_reconstructed,axis=-1))
self.u_loss_reconstruct = reconstruction_loss(opts, self.u_pi,
self.u_points,
self.u_reconstructed)
# Compute matching penalty cost
self.kl_g, self.kl_d, self.l_cont_penalty, self.l_disc_penalty = matching_penalty(opts,
self.pi0, self.l_pi,
self.l_enc_mean, self.l_enc_logSigma,
self.pz_mean, self.pz_sigma,
self.l_sample_mix_noise, self.l_mixtures_encoded)
self.kl_g, self.kl_d, self.u_cont_penalty, self.u_disc_penalty = matching_penalty(opts,
self.pi0, self.u_pi,
self.u_enc_mean, self.u_enc_logSigma,
self.pz_mean, self.pz_sigma,
self.u_sample_mix_noise, self.u_mixtures_encoded)
# Compute Labeled obj
self.l_loss = self.l_loss_reconstruct\
+ self.l_lmbd * self.l_cont_penalty\
+ self.l_beta * self.l_disc_penalty
# Compute Unlabeled obj
self.u_loss = self.u_loss_reconstruct\
+ self.u_lmbd * self.u_cont_penalty\
+ self.u_beta * self.u_disc_penalty
# Compute wae obj
self.objective = self.alpha*self.alpha_decay * self.l_loss + self.u_loss
# Pre Training
self.pretrain_loss()
# --- Optimizers, savers, etc
self.add_optimizers()
self.add_savers()
self.init = tf.global_variables_initializer()
def add_model_placeholders(self):
opts = self.opts
shape = self.data_shape
self.l_points = tf.placeholder(tf.float32,
[None] + shape,
name='l_points_ph')
self.l_labels = tf.placeholder(tf.int64,
[None,],
name='l_labels_ph')
self.l_sample_mix_noise = tf.placeholder(tf.float32,
[None] + [opts['nmixtures'],opts['zdim']],
name='l_mix_noise_ph')
self.u_points = tf.placeholder(tf.float32,
[None] + shape,
name='u_points_ph')
self.u_sample_mix_noise = tf.placeholder(tf.float32,
[None] + [opts['nmixtures'],opts['zdim']],
name='u_mix_noise_ph')
self.sample_noise = tf.placeholder(tf.float32,
[None] + [opts['nmixtures'],opts['zdim']],
name='noise_ph')
# self.l_points = l_data
# self.l_labels = l_label
# self.l_sample_mix_noise = l_mix_noise
# self.u_points = u_data
# self.u_sample_mix_noise = u_mix_noise
# self.sample_noise = noise
# self.label_noise = tf.placeholder(tf.float32,
# [None,1],
# name='noise_ph')
label_noise = tf.range(opts['nmixtures'],
dtype=tf.float32,
name='label_noise_ph')
self.label_noise = tf.expand_dims(label_noise, axis=0)
# placeholders for logistic regression
self.preds = tf.placeholder(tf.float32, [None, 10], name='predictions') # discrete probabilities
self.y = tf.placeholder(tf.float32, [None, 10],name='labels') # 0-9 digits recognition => 10 classes
# self.preds = preds
# self.y = y
def add_training_placeholders(self):
opts = self.opts
decay = tf.placeholder(tf.float32, name='rate_decay_ph')
is_training = tf.placeholder(tf.bool, name='is_training_ph')
alpha = tf.placeholder(tf.float32, name='alpha')
alpha_decay = tf.placeholder(tf.float32, name='alpha')
l_lmbda = tf.placeholder(tf.float32, name='lambda')
l_beta = tf.placeholder(tf.float32, name='beta')
u_lmbda = tf.placeholder(tf.float32, name='lambda')
u_beta = tf.placeholder(tf.float32, name='beta')
self.lr_decay = decay
self.is_training = is_training
self.alpha = alpha
self.alpha_decay = alpha_decay
self.l_lmbd = l_lmbda
self.l_beta = l_beta
self.u_lmbd = u_lmbda
self.u_beta = u_beta
def add_savers(self):
opts = self.opts
saver = tf.train.Saver(max_to_keep=10)
# tf.add_to_collection('real_points_ph', self.sample_points)
# tf.add_to_collection('noise_ph', self.sample_noise)
# tf.add_to_collection('is_training_ph', self.is_training)
# if self.enc_mean is not None:
# tf.add_to_collection('encoder_mean', self.enc_mean)
# tf.add_to_collection('encoder_var', self.enc_logsigma)
# tf.add_to_collection('encoder', self.encoded_point)
# tf.add_to_collection('decoder', self.decoded)
#tf.add_to_collection('lambda', self.lmbd)
self.saver = saver
def optimizer(self, lr, decay=1.):
opts = self.opts
lr *= decay
if opts['optimizer'] == 'sgd':
return tf.train.GradientDescentOptimizer(lr)
elif opts['optimizer'] == 'adam':
return tf.train.AdamOptimizer(lr, beta1=opts['adam_beta1'])
else:
assert False, 'Unknown optimizer.'
def add_optimizers(self):
opts = self.opts
# SWAE optimizer
lr = opts['lr']
opt = self.optimizer(lr, self.lr_decay)
encoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
decoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
prior_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='prior')
ae_vars = encoder_vars + decoder_vars
#ae_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if opts['clip_grad']:
grad, var = zip(*opt.compute_gradients(loss=self.objective, var_list=ae_vars))
clip_grad, _ = tf.clip_by_global_norm(grad, opts['clip_norm'])
self.swae_opt = opt.apply_gradients(zip(clip_grad, var))
else:
self.swae_opt = opt.minimize(loss=self.objective, var_list=ae_vars)
# Pretraining optimizer
pre_opt = self.optimizer(lr)
self.pre_opt = pre_opt.minimize(loss=self.pre_loss, var_list=encoder_vars+prior_vars)
def encoder(self, input_points, reuse=False):
## Categorical encoding
logit = cat_encoder(self.opts, inputs=input_points, reuse=reuse,
is_training=self.is_training)
## Gaussian encoding
sample_size = tf.shape(input_points, out_type=tf.int64)[0] # batch size, used by the 'fixed' and 'mean' branches below
if self.opts['e_means']=='fixed':
eps = tf.zeros([tf.cast(sample_size, dtype=tf.int32), self.opts['nmixtures'],
self.opts['zdim']],dtype=tf.float32)
enc_mean = self.pz_mean + eps
enc_logSigma = self.opts['init_e_std']*tf.ones([
tf.cast(sample_size,dtype=tf.int32),
self.opts['nmixtures'],
self.opts['zdim']],dtype=tf.float32)
elif self.opts['e_means']=='mean':
enc_mean, _ = gaussian_encoder(self.opts, inputs=input_points, reuse=reuse,
is_training=self.is_training)
enc_logSigma = tf.exp(self.opts['init_e_std'])*tf.ones([
tf.cast(sample_size,dtype=tf.int32),
self.opts['nmixtures'],
self.opts['zdim']],dtype=tf.float32)
elif self.opts['e_means']=='learnable':
enc_mean, enc_logSigma = gaussian_encoder(self.opts,
inputs=input_points,
reuse=reuse,
is_training=self.is_training)
return logit, enc_mean, enc_logSigma
def decoder(self, encoded, reuse=False):
noise = tf.reshape(encoded,[-1,self.opts['zdim']])
recon, log = continuous_decoder(self.opts, noise=noise,
reuse=reuse,
is_training=self.is_training)
reconstructed = tf.reshape(recon,
[-1,self.opts['nmixtures']]+self.data_shape)
logits = tf.reshape(log,
[-1,self.opts['nmixtures']]+self.data_shape)
return reconstructed, logits
def pretrain_loss(self):
# Adding ops to pretrain the encoder so that mean and covariance
# of Qz will try to match those of Pz
l_pre_loss = moments_loss(self.l_sample_mix_noise, self.l_mixtures_encoded)
u_pre_loss = moments_loss(self.u_sample_mix_noise, self.u_mixtures_encoded)
# Loss
self.pre_loss = l_pre_loss + u_pre_loss
def pretrain_encoder(self, data):
opts=self.opts
steps_max = 500
batch_size = opts['e_pretrain_sample_size']
full_train_size = data.num_points
l_train_size = max(int(full_train_size*opts['lu_split']),5)
u_train_size = full_train_size-l_train_size
for step in range(steps_max):
data_ids = np.random.choice(l_train_size,
batch_size,
replace=True)
l_batch_images = data.data[data_ids].astype(np.float32)
l_batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_sigma,
batch_size,
sampling_mode='all_mixtures')
data_ids = l_train_size + np.random.choice(u_train_size,
batch_size,
replace=False)
u_batch_images = data.data[data_ids].astype(np.float32)
u_batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_sigma,
batch_size,
sampling_mode='all_mixtures')
[_, pre_loss] = self.sess.run(
[self.pre_opt, self.pre_loss],
feed_dict={self.l_points: l_batch_images,
self.l_sample_mix_noise: l_batch_mix_noise,
self.u_points: u_batch_images,
self.u_sample_mix_noise: u_batch_mix_noise,
self.is_training: True})
logging.error('Pretraining the encoder done.')
logging.error ('Loss after %d iterations: %.3f' % (steps_max,pre_loss))
def train(self, data, MODEL_DIR, WEIGHTS_FILE):
"""
Train MoG model with chosen method
"""
opts = self.opts
if opts['method']=='swae':
logging.error('Training WAE')
elif opts['method']=='vae':
logging.error('Training VAE')
print('')
# Create work_dir
utils.create_dir(opts['method'])
work_dir = os.path.join(opts['method'],opts['work_dir'])
# Split data set
full_train_size = data.num_points
l_train_size = max(int(full_train_size*opts['lu_split']),opts['min_u_size'])
u_train_size = full_train_size-l_train_size
debug_str = 'Total:%d, Unlabelled:%d, Labelled:%d' % (
full_train_size, u_train_size, l_train_size)
logging.error(debug_str)
print('')
# Init sess and load trained weights if needed
if opts['use_trained']:
if not tf.gfile.Exists(WEIGHTS_FILE+".meta"):
raise Exception("weights file doesn't exist")
self.saver.restore(self.sess, WEIGHTS_FILE)
else:
self.sess.run(self.init)
if opts['e_pretrain']:
logging.error('Pretraining the encoder')
self.pretrain_encoder(data)
print('')
batches_num = int(max(l_train_size,u_train_size)/opts['batch_size'])
npics = opts['plot_num_pics']
fixed_noise = sample_pz(opts, self.pz_mean, self.pz_sigma,
opts['plot_num_pics'],
sampling_mode = 'per_mixture')
self.start_time = time.time()
losses, losses_rec, losses_match, losses_xent = [], [], [], []
kl_gau, kl_dis = [], []
decay, alpha_decay = 1., 1.
counter = 0
if opts['method']=='swae':
alpha = opts['alpha']
l_lmbda = opts['l_lambda']
l_beta = opts['l_beta']
u_lmbda = opts['u_lambda']
u_beta = opts['u_beta']
else:
assert False, 'to implement VAE'
wae_lmbda = 1
wait = 0
for epoch in range(opts['epoch_num']):
# Update learning rate if necessary
if epoch == 30:
decay = decay / 2.
if epoch == 50:
decay = decay / 5.
if epoch == 100:
decay = decay / 10.
# Update alpha
if (epoch+1)%5 == 0:
alpha_decay = alpha_decay / 2.
# Save the model
if epoch > 0 and epoch % opts['save_every_epoch'] == 0:
self.saver.save(self.sess, os.path.join(
work_dir,'checkpoints',
'trained-wae'),
global_step=counter)
##### TRAINING LOOP #####
for it in range(batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(l_train_size,
opts['batch_size'],
replace=True)
l_batch_images = data.data[data_ids].astype(np.float32)
l_batch_labels = data.labels[data_ids].astype(np.float32)
l_batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_sigma,
opts['batch_size'],
sampling_mode='all_mixtures')
data_ids = l_train_size + np.random.choice(u_train_size,
opts['batch_size'],
replace=False)
u_batch_images = data.data[data_ids].astype(np.float32)
u_batch_labels = data.labels[data_ids].astype(np.float32)
u_batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_sigma,
opts['batch_size'],
sampling_mode='all_mixtures')
# Feeding dictionary
feed_dict={self.l_points: l_batch_images,
self.l_labels: l_batch_labels,
self.l_sample_mix_noise: l_batch_mix_noise,
self.u_points: u_batch_images,
self.u_sample_mix_noise: u_batch_mix_noise,
self.lr_decay: decay,
self.alpha: alpha,
self.alpha_decay: alpha_decay,
self.l_lmbd: l_lmbda,
self.l_beta: l_beta,
self.u_lmbd: u_lmbda,
self.u_beta: u_beta,
self.is_training: True}
# Update encoder and decoder
if opts['method']=='swae':
outputs = self.sess.run([self.swae_opt, self.objective,
self.l_loss_reconstruct,
self.l_cont_penalty,
self.l_disc_penalty,
self.u_loss_reconstruct,
self.u_cont_penalty,
self.u_disc_penalty,
self.probs],
feed_dict=feed_dict)
loss = outputs[1]
l_loss_rec, l_loss_match, l_loss_xent = outputs[2:5]
u_loss_rec, u_loss_match, u_loss_xent = outputs[5:8]
probs_labels = outputs[-1]
elif opts['method']=='vae':
assert False, 'to implement VAE'
[_, loss, loss_rec, loss_match, enc_mw, kl_g, kl_d] = self.sess.run(
[self.swae_opt,
self.objective,
self.loss_reconstruct,
self.penalty,
self.enc_mixweight,
self.kl_g,
self.kl_d],
feed_dict=feed_dict)
kl_gau.append(kl_g)
kl_dis.append(kl_d)
losses.append(loss)
losses_rec.append([l_loss_rec,u_loss_rec])
losses_match.append([l_loss_match,u_loss_match])
losses_xent.append([l_loss_xent,u_loss_xent])
#mean_probs += get_mean_probs(u_batch_labels,probs_labels) / batches_num
##### TESTING LOOP #####
if counter % opts['print_every'] == 0:
now = time.time()
test_size = np.shape(data.test_data)[0]
te_size = max(int(test_size*0.1),opts['batch_size'])
te_batches_num = int(te_size/opts['batch_size'])
tr_size = test_size - te_size
tr_batches_num = int(tr_size/opts['batch_size'])
# Determine clusters ID
mean_probs = np.zeros((10,10))
for it_ in range(tr_batches_num):
# Sample batches of data points
data_ids = te_size + np.random.choice(tr_size,
opts['batch_size'],
replace=False)
batch_images = data.test_data[data_ids].astype(np.float32)
batch_labels = data.test_labels[data_ids].astype(np.float32)
probs_train = self.sess.run(self.probs,
feed_dict={self.u_points:batch_images,
self.is_training:False})
mean_prob = get_mean_probs(batch_labels,probs_train)
mean_probs += mean_prob / tr_batches_num
# Determine clusters given mean probs
labelled_clusters = relabelling_mask_from_probs(mean_probs)
# Test accuracy & loss
u_loss_rec_test, l_loss_rec_test = 0., 0.
u_acc_test = 0.
for it_ in range(te_batches_num):
# Sample batches of data points
data_ids = np.random.choice(te_size,
opts['batch_size'],
replace=False)
batch_images = data.test_data[data_ids].astype(np.float32)
batch_labels = data.test_labels[data_ids].astype(np.float32)
[ulr, llr, probs_test] = self.sess.run(
[self.u_loss_reconstruct,
self.l_loss_reconstruct,
self.probs],
feed_dict={self.l_points:batch_images,
self.l_labels:batch_labels,
self.u_points:batch_images,
self.is_training:False})
# Computing accuracy
u_acc = accuracy(batch_labels, probs_test, labelled_clusters)
u_acc_test += u_acc / te_batches_num
u_loss_rec_test += ulr / te_batches_num
l_loss_rec_test += llr / te_batches_num
# Auto-encoding unlabeled test images
[rec_pics_test, encoded, labeling, probs_pics_test] = self.sess.run(
[self.reconstructed_point,
self.encoded_point,
self.labels_reconstructed,
self.probs],
feed_dict={self.l_points:data.test_data[:npics],
self.u_points:data.test_data[:npics],
self.is_training:False})
pi0 = self.sess.run(self.pi0,feed_dict={})
# Auto-encoding training images
[rec_pics_train, probs_pics_train] = self.sess.run(
[self.reconstructed_point,
self.probs],
feed_dict={self.u_points:data.data[l_train_size:l_train_size+npics],
self.is_training:False})
# Random samples generated by the model
sample_gen = self.sess.run(self.decoded,
feed_dict={self.u_points:data.data[l_train_size:l_train_size+npics],
self.sample_noise: fixed_noise,
self.is_training: False})
# Printing various loss values
debug_str = 'EPOCH: %d/%d, BATCH:%d/%d' % (
epoch + 1, opts['epoch_num'],
it + 1, batches_num)
logging.error(debug_str)
debug_str = 'TRAIN LOSS=%.3f, TEST ACC=%.2f' % (
losses[-1],
100*u_acc_test)
logging.error(debug_str)
debug_str = 'TEST REC(L/U)=%.3f/%.3f, TRAIN REC(L/U)=%.3f/%.3f' % (
l_loss_rec_test,
#opts['alpha']*alpha_decay*l_loss_rec_test,
u_loss_rec_test,
losses_rec[-1][0],
#opts['alpha']*losses_rec[-1][0],
losses_rec[-1][1])
logging.error(debug_str)
debug_str = 'MATCH(L/U)=%.3f/%.3f, XENT(L/U)=%.3f/%.3f' % (
opts['l_lambda']*losses_match[-1][0],
#opts['l_lambda']*opts['alpha']*losses_match[-1][0],
opts['u_lambda']*losses_match[-1][1],
opts['l_beta']*losses_xent[-1][0],
#opts['l_beta']*opts['alpha']*alpha_decay*losses_xent[-1][0],
opts['u_beta']*losses_xent[-1][1])
logging.error(debug_str)
debug_str = 'Clusters ID: %s' % (str(labelled_clusters))
logging.error(debug_str)
labs = np.argmax(labeling,axis=-1)
debug_str = 'Labelling: %s' % (str(labs))
logging.error(debug_str)
debug_str = 'Priors: %s' % (np.array2string(pi0,precision=3))
logging.error(debug_str)
print('')
# Making plots
#logging.error('Saving images..')
save_train(opts, data.data[:npics], data.test_data[:npics], # images
data.test_labels[:npics], # labels
rec_pics_train[:npics], rec_pics_test[:npics], # reconstructions
probs_pics_train, probs_pics_test, # mixweights
encoded, # encoded points
fixed_noise, # prior samples
sample_gen, # samples
losses, losses_rec, losses_match, losses_xent, # loses
kl_gau, kl_dis, # KL terms
work_dir, # working directory
'res_e%04d_mb%05d.png' % (epoch, it)) # filename
# Update learning rate if necessary and counter
# First 30 epochs do nothing
if epoch >= 30:
# If no significant progress was made in last 10 epochs
# then decrease the learning rate.
if loss < min(losses[-20 * batches_num:]):
wait = 0
else:
wait += 1
if wait > 10 * batches_num:
decay = max(decay / 1.4, 1e-6)
logging.error('Reduction in lr: %f' % decay)
wait = 0
counter += 1
# # Save the final model
# if epoch > 0:
# self.saver.save(self.sess,
# os.path.join(work_dir,
# 'checkpoints',
# 'trained-wae-final'),
# global_step=counter)
def test(self, data, MODEL_DIR, WEIGHTS_FILE):
"""
Test trained MoG model with chosen method
"""
opts = self.opts
# Load trained weights
MODEL_PATH = os.path.join(opts['method'],MODEL_DIR)
if not tf.gfile.IsDirectory(MODEL_PATH):
raise Exception("model doesn't exist")
WEIGHTS_PATH = os.path.join(MODEL_PATH,'checkpoints',WEIGHTS_FILE)
if not tf.gfile.Exists(WEIGHTS_PATH+".meta"):
raise Exception("weights file doesn't exist")
self.saver.restore(self.sess, WEIGHTS_PATH)
# Set up
batch_size = 100
tr_batches_num = int(data.num_points / batch_size)
train_size = data.num_points
te_batches_num = int(np.shape(data.test_data)[0] / batch_size)
test_size = np.shape(data.test_data)[0]
debug_str = 'test data size: %d' % (np.shape(data.test_data)[0])
logging.error(debug_str)
### Compute probs
# Iterate over batches
logging.error('Determining clusters ID using training..')
mean_probs = np.zeros((10,10))
for it in range(tr_batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(train_size,
opts['batch_size'],
replace=False)
batch_images = data.data[data_ids].astype(np.float32)
batch_labels = data.labels[data_ids].astype(np.float32)
prob = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
mean_prob = get_mean_probs(batch_labels,prob)
mean_probs += mean_prob / tr_batches_num
# Determine clusters given mean probs
labelled_clusters = relabelling_mask_from_probs(mean_probs)
logging.error('Clusters ID:')
print(labelled_clusters)
### Accuracy
logging.error('Computing losses & accuracy..')
# Training accuracy & loss
acc_tr = 0.
loss_rec_tr, loss_match_tr = 0., 0.
for it in range(tr_batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(train_size,
batch_size,
replace=False)
batch_images = data.data[data_ids].astype(np.float32)
batch_labels = data.labels[data_ids].astype(np.float32)
# Accuracy
probs = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
acc = accuracy(batch_labels,probs,labelled_clusters)
acc_tr += acc / tr_batches_num
# loss
batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_cov,
opts['batch_size'],
sampling_mode='all_mixtures')
[loss_rec, loss_match] = self.sess.run(
[self.loss_reconstruct,
self.penalty],
feed_dict={self.sample_points: batch_images,
self.sample_mix_noise: batch_mix_noise,
self.is_training: False})
loss_rec_tr += loss_rec / tr_batches_num
loss_match_tr += loss_match / tr_batches_num
# Testing acc
acc_te = 0.
loss_rec_te, loss_match_te = 0., 0.
for it in range(te_batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(test_size,
batch_size,
replace=False)
batch_images = data.test_data[data_ids].astype(np.float32)
batch_labels = data.test_labels[data_ids].astype(np.float32)
# Accuracy
probs = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
acc = accuracy(batch_labels,probs,labelled_clusters)
acc_te += acc / te_batches_num
# Testing loss
batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_cov,
batch_size,
sampling_mode='all_mixtures')
[loss_rec, loss_match] = self.sess.run(
[self.loss_reconstruct,
self.penalty],
feed_dict={self.sample_points: batch_images,
self.sample_mix_noise: batch_mix_noise,
self.is_training: False})
loss_rec_te += loss_rec / te_batches_num
loss_match_te += loss_match / te_batches_num
### Logs
debug_str = 'rec train: %.4f, rec test: %.4f' % (loss_rec_tr,
loss_rec_te)
logging.error(debug_str)
debug_str = 'match train: %.4f, match test: %.4f' % (loss_match_tr,
loss_match_te)
logging.error(debug_str)
debug_str = 'acc train: %.2f, acc test: %.2f' % (100.*acc_tr,
100.*acc_te)
logging.error(debug_str)
### Saving
filename = 'res_test'
res_test = np.array((loss_rec_tr, loss_rec_te,
loss_match_tr, loss_match_te,
acc_tr, acc_te))
np.save(os.path.join(MODEL_PATH,filename),res_test)
def reg(self, data, MODEL_DIR, WEIGHTS_FILE):
"""
Train a logistic regression on top of the trained MoG model
"""
opts = self.opts
# Load trained weights
MODEL_PATH = os.path.join(opts['method'],MODEL_DIR)
if not tf.gfile.IsDirectory(MODEL_PATH):
raise Exception("model doesn't exist")
WEIGHTS_PATH = os.path.join(MODEL_PATH,'checkpoints',WEIGHTS_FILE)
if not tf.gfile.Exists(WEIGHTS_PATH+".meta"):
raise Exception("weights file doesn't exist")
self.saver.restore(self.sess, WEIGHTS_PATH)
# set up
epoch_num = 20
print_every = 2
batch_size = 100
tr_batches_num = int(data.num_points / batch_size)
train_size = data.num_points
te_batches_num = int(np.shape(data.test_data)[0] / batch_size)
test_size = np.shape(data.test_data)[0]
lr = 0.001
### Logistic regression model
# Construct model
linear_layer = ops.linear(opts, self.preds, 10, scope='log_reg')
logreg_preds = tf.nn.softmax(linear_layer) # Softmax
# Minimize error using cross entropy
cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y*tf.log(logreg_preds), reduction_indices=1))
# Accuracy
correct_prediction = tf.equal(tf.argmax(logreg_preds, 1),tf.argmax(self.y, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
### Optimizer
opt = tf.train.GradientDescentOptimizer(lr)
logreg_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='log_reg')
logreg_opt = opt.minimize(loss=cross_entropy, var_list=logreg_vars)
for var in logreg_vars:
self.sess.run(var.initializer)
### Training loop
costs, acc_train, acc_test = [], [], []
counter = 0
logging.error('Start training..')
self.start_time = time.time()
for epoch in range(epoch_num):
cost = 0.
# Iterate over batches
for it_ in range(tr_batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(train_size,
batch_size,
replace=False)
batch_images = data.data[data_ids].astype(np.float32)
# Get preds
preds = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
# linear reg
batch_labels = one_hot(data.labels[data_ids])
[_ , c] = self.sess.run([logreg_opt,cross_entropy],
feed_dict={self.preds: preds,
self.y: batch_labels})
cost += c / tr_batches_num
costs.append(cost)
counter += 1
if counter==1 or counter % print_every == 0:
# Testing and logging info
acc_tr, acc_te = 0., 0.
# Training Acc
for it in range(tr_batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(train_size,
batch_size,
replace=False)
batch_images = data.data[data_ids].astype(np.float32)
preds = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
batch_labels = one_hot(data.labels[data_ids])
a = self.sess.run(acc,
feed_dict={self.preds: preds,
self.y: batch_labels})
acc_tr += a/ tr_batches_num
# Testing Acc
for it in range(te_batches_num):
data_ids = np.random.choice(test_size,
batch_size,
replace=False)
batch_images = data.test_data[data_ids].astype(np.float32)
preds = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
batch_labels = one_hot(data.test_labels[data_ids])
a = self.sess.run(acc,
feed_dict={self.preds: preds,
self.y: batch_labels})
acc_te += a/ te_batches_num
acc_train.append(acc_tr)
acc_test.append(acc_te)
# logs
debug_str = 'EPOCH: %d/%d, BATCH:%d/%d' % (
epoch + 1, epoch_num,
it_ + 1, tr_batches_num)
logging.error(debug_str)
debug_str = 'cost=%.3f, TRAIN ACC=%.2f, TEST ACC=%.2f' % (
costs[-1], 100*acc_tr, 100*acc_te)
logging.error(debug_str)
### Saving
filename = 'logreg'
xstep = int(len(costs)/100)
np.savez(os.path.join(MODEL_PATH,filename),
costs=np.array(costs[::xstep]),
acc_tr=np.array(acc_train),
acc_te=np.array(acc_test))
from collections import namedtuple
from scipy.stats import norm
import numpy as np
import pysubgroup as ps
beta_tuple = namedtuple('beta_tuple',['beta','size'])
class EMM_Likelihood(ps.AbstractInterestingnessMeasure):
tpl=namedtuple('EMM_Likelihood', ['model_params','subgroup_likelihood','inverse_likelihood','size'])
def __init__(self, model):
self.model = model
self.has_constant_statistics = False
self.required_stat_attrs = EMM_Likelihood.tpl._fields
def calculate_constant_statistics(self, task):
self.model.calculate_constant_statistics(task)
self.data_size = len(task.data)
self.has_constant_statistics = True
def calculate_statistics(self, subgroup, data=None):
if hasattr(subgroup, "__array_interface__"):
cover_arr = subgroup
else:
cover_arr = subgroup.covers(data)
sg_size = np.count_nonzero(cover_arr)
params = self.model.fit(cover_arr, data)
return self.get_tuple(sg_size, params, cover_arr)
def get_tuple(self, sg_size, params, cover_arr):
#numeric stability?
all_likelihood = self.model.likelihood(params, np.ones(self.data_size,dtype=bool))
sg_likelihood_sum = np.sum(all_likelihood[cover_arr])
total_likelihood_sum = np.sum(all_likelihood)
import numpy as np
from skimage.transform import pyramid_gaussian
from lv import get_contour_points, area2cont, cont2area, interpolate_contour
def window_image(img, cent_point, window):
y0 = int(np.round(cent_point[0]) - window // 2)
y1 = int(np.round(cent_point[0]) + window // 2 + 1)
x0 = int(np.round(cent_point[1]) - window // 2)
x1 = int(np.round(cent_point[1]) + window // 2 + 1)
if x0 < 0:
x0 = 0
if y0 < 0:
y0 = 0
if y1 > img.shape[0]:
y1 = img.shape[0]
if x1 > img.shape[1]:
x1 = img.shape[1]
img = img[y0:y1, x0:x1]
if img.shape[0] != window:
if y0 == 0:
img = np.concatenate((np.zeros((window - img.shape[0], img.shape[1])), img), axis=0)
elif y1 == img.shape[0]:
img = np.concatenate((img, np.zeros((window - img.shape[0], img.shape[1]))), axis=0)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 07 12:20:10 2018
@author: sarth
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import segmentation
import cv2
def convert_to_grayscale(image):
"""
Convert the image to a grayscale representation whose minimum pixel
value is 0.0 and maximum pixel value is 1.0.
Args:
image: image to be processed, numpy array
Returns:
numpy array
Raises:
Errors when input type is wrong
"""
# Checking the right data type for the input image
assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')
# converting to grayscale
dst = np.zeros(image.shape)
image_gray = cv2.normalize(image, dst, 0.0, 1.0, cv2.NORM_MINMAX)
return image_gray
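# Illustrative usage sketch, not part of the original script: the synthetic
# random image below is an assumption used only to show the call pattern.
def example_convert_to_grayscale():
    """Rescale a synthetic float image into the [0.0, 1.0] gray range."""
    image = np.random.rand(64, 64)
    return convert_to_grayscale(image)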
def seg_random_walker(image, marker_threshold):
"""
Segment an image into two regions using the skimage random walker
segmentation algorithm. The input image must be grayscale.
This function returns the segmented image and the marker positions.
Args:
image: image to be processed, numpy array
marker_threshold: threshold for segmentation, float
Returns:
numpy array
Raises:
Errors when input type is wrong
Error when marker_threshold is out of the range of 0 and 1
"""
# Checking the right data type for the input image
assert type(image) == np.ndarray, ('Wrong image data type', 'image must be a numpy array')
# Checking that the input image is grayscale and not RGB or something else
assert np.max(image) == 1.0 or 1, ('Wrong input image type', 'image must be grayscale')
assert np.min(image) == 0.0 or 0, ('Wrong input image type', 'image must be grayscale')
"""
========================================================================
PYTHON SCRIPT ACCOMPANYING:
W.EDELING, <NAME>,
"Reducing data-driven dynamical subgrid scale models
by physical constraints"
COMPUTERS & FLUIDS, 2019.
========================================================================
"""
######################
# SOLVER SUBROUTINES #
######################
#pseudo-spectral technique to solve for Fourier coefs of Jacobian
def compute_VgradW_hat(w_hat_n, P, kx, ky, k_squared_no_zero):
#compute streamfunction
psi_hat_n = w_hat_n/k_squared_no_zero
psi_hat_n[0,0] = 0.0
#compute jacobian in physical space
u_n = np.fft.ifft2(-ky*psi_hat_n).real
w_x_n = np.fft.ifft2(kx*w_hat_n).real
v_n = np.fft.ifft2(kx*psi_hat_n).real
w_y_n = np.fft.ifft2(ky*w_hat_n).real
VgradW_n = u_n*w_x_n + v_n*w_y_n
#return to spectral space
VgradW_hat_n = np.fft.fft2(VgradW_n)
VgradW_hat_n *= P
return VgradW_hat_n
#get Fourier coefficient of the vorticity at next (n+1) time step
def get_w_hat_np1(w_hat_n, w_hat_nm1, VgradW_hat_nm1, P, norm_factor, kx, ky, k_squared_no_zero, F_hat, sgs_hat = 0.0):
#compute jacobian
VgradW_hat_n = compute_VgradW_hat(w_hat_n, P, kx, ky, k_squared_no_zero)
#solve for next time step according to AB/BDI2 scheme
w_hat_np1 = norm_factor*P*(2.0/dt*w_hat_n - 1.0/(2.0*dt)*w_hat_nm1 - \
2.0*VgradW_hat_n + VgradW_hat_nm1 + mu*F_hat - sgs_hat)
return w_hat_np1, VgradW_hat_n
#compute spectral filter
def get_P(cutoff, N):
#frequencies of fft2
k = np.fft.fftfreq(N)*N
kx = np.zeros([N, N]) + 0.0j
ky = np.zeros([N, N]) + 0.0j
for i in range(N):
for j in range(N):
kx[i, j] = 1j*k[j]
ky[i, j] = 1j*k[i]
P = np.ones([N, N])
for i in range(N):
for j in range(N):
if np.abs(kx[i, j]) > cutoff or np.abs(ky[i, j]) > cutoff:
P[i, j] = 0.0
return P, k, kx, ky
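# Illustrative sketch, not part of the original solver: apply the spectral
# filter returned by get_P to a real vorticity field. The grid size N and the
# cutoff wavenumber below are assumed values chosen only for demonstration.
def example_apply_filter(w, N=64, cutoff=21):
    """Low-pass filter a real N x N field w in spectral space."""
    P, k, kx, ky = get_P(cutoff, N)
    w_hat = np.fft.fft2(w)
    return np.fft.ifft2(P*w_hat).real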
def get_P_k(k_min, k_max, N, binnumbers):
P_k = np.zeros([N, N])
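    # mark the wavenumber pairs whose bin index falls inside [k_min, k_max]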
idx0, idx1 = np.where((binnumbers >= k_min) & (binnumbers <= k_max))
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=line-too-long
"""Build UVFA with state observation for state input."""
from __future__ import absolute_import
from __future__ import division
import random
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from hal.agent.tf2_utils import soft_variables_update
from hal.agent.tf2_utils import stack_conv_layer
from hal.agent.tf2_utils import stack_dense_layer
from hal.agent.tf2_utils import vector_tensor_product
import hal.utils.word_vectorization as wv
embedding_group_1 = ('swivel', 'nnlm', 'word2vec')
class StateUVFA2:
"""UVFA that uses the ground truth states.
Attributes:
cfg: configuration file
tokenizer: tokenizer for the agent
saver: saver in charge of creating checkpoints
manager: tf2 checkpoint manager
global_step: current learning step the agent is at
vocab_list: a list of vocabulary the agent knows
vocab_to_int: a table from vocabulary to integer
int_to_vocab: a table from integer to vocabulary
decode_fn: a function that converts tokens to text
online_encoder: online model of the instruction encoder
target_encoder: target model of the instruction encoder
online_embedding_layer: online embedding matrix
target_embedding_layer: target embedding matrix
online_models: online model of the policy network
target_models: target model of the policy network
optimizer: optimizer the optimizes the agent
"""
def __init__(self, cfg):
self.cfg = cfg
self.tokenizer = None # some settings do not have a tokenizer
self._build()
if 'model_dir' in cfg.as_dict():
self.saver = tf.train.Checkpoint(
optimizer=self.optimizer,
online_encoder=self.online_encoder,
online_model=self.online_models,
target_encoder=self.target_encoder,
target_model=self.target_models,
step=self.global_step)
self.manager = tf.train.CheckpointManager(
self.saver, cfg.model_dir, max_to_keep=5, checkpoint_name='model')
def _build(self):
self.global_step = tf.Variable(0, name='global_step')
self.create_models(self.cfg)
self.vocab_list = self.cfg.vocab_list
self.vocab_to_int, self.int_to_vocab = wv.create_look_up_table(
self.vocab_list)
self.decode_fn = wv.decode_with_lookup_table(self.int_to_vocab)
def create_models(self, cfg):
"""Build the computation graph for the agent."""
online_encoder_out = create_instruction_encoder(cfg, name='online_encoder')
target_encoder_out = create_instruction_encoder(cfg, name='target_encoder')
self.online_encoder = online_encoder_out['encoder']
self.target_encoder = target_encoder_out['encoder']
self.online_embedding_layer = online_encoder_out['token_embedding']
self.target_embedding_layer = target_encoder_out['token_embedding']
embedding_length = online_encoder_out['instruction_embedding_length']
if self.cfg.action_type == 'perfect':
self.online_models = self.build_q_perfect(cfg, 'online_network',
embedding_length)
self.target_models = self.build_q_perfect(cfg, 'target_network',
embedding_length)
elif self.cfg.action_type == 'discrete':
self.online_models = self.build_q_discrete(cfg, 'online_network',
embedding_length)
self.target_models = self.build_q_discrete(cfg, 'target_network',
embedding_length)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.learning_rate)
def _preprocess_instruction(self, g):
"""Pre-process instructions for agent consumption."""
if isinstance(g, str):
return tf.convert_to_tensor(np.array(g.split()))
if len(g.shape) < 2: # should expand 0th axis
g = np.expand_dims(np.array(g), 0)
if self.cfg.instruction_repr != 'language':
return tf.convert_to_tensor(g)
if self.cfg.embedding_type in embedding_group_1:
original_shape = g.shape
g = np.reshape(g, -1)
tokens = []
for i in g:
tokens.append(self.int_to_vocab[i])
return tf.convert_to_tensor(np.array(tokens))
import gym
import numpy as np
import time, math, random, bisect, copy
class NeuralNetwork:
def __init__(self, structure):
self.structure = structure
self.weights = []
self.biases = []
self.fitness = 0.0
for i in range(len(structure) - 1):
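            # one weight matrix (and matching bias vector) per pair of consecutive layers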
self.weights.append(np.random.uniform(low=-1, high=1, size=(structure[i], structure[i+1])))
import numpy as np
class EigenModes(object):
def __init__(self, A):
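        # note: np.linalg.eig returns (eigenvalues, eigenvectors), in that order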
self.vec, self.val = np.linalg.eig(A)
# Standard Libraries
import numpy as np
import random
# PyXtal imports
from pyxtal.msg import VolumeError
from pyxtal.operations import angle, create_matrix
from pyxtal.constants import deg, rad, ltype_keywords
class Lattice:
"""
Class for storing and generating crystal lattices. Allows for
specification of constraint values. Lattice types include triclinic,
monoclinic, orthorhombic, tetragonal, trigonal, hexagonal, cubic,
spherical, and ellipsoidal. The last two are used for generating point
group structures, and do not actually represent a parallelepiped lattice.
Args:
ltype: a string representing the type of lattice (from the above list)
volume: the volume, in Angstroms cubed, of the lattice
matrix: matrix in 3*3 form
PBC: A periodic boundary condition list, where 1 is periodic,
Ex: [1, 1, 1] -> 3d periodicity, [0, 0, 1] -> periodic at z axis
kwargs: various values which may be defined. If none are defined,
random ones will be generated. Values will be passed to
generate_lattice. Options include:
area: The cross-sectional area (in Ang^2). Only for 1D crystals
thickness: The cell's thickness (in Angstroms) for 2D crystals
unique_axis: The unique axis for certain symmetry (and especially
layer) groups. Because the symmetry operations are not also
transformed, you should use the default values for random
crystal generation
random: If False, keeps the stored values for the lattice geometry
even upon applying reset_matrix. To alter the matrix, use
set_matrix() or set_para
'unique_axis': the axis ('a', 'b', or 'c') which is not symmetrically
equivalent to the other two
'min_l': the smallest allowed cell vector. The smallest vector must
be larger than this.
'mid_l': the second smallest allowed cell vector. The second
smallest vector must be larger than this.
'max_l': the third smallest allowed cell vector. The largest cell
vector must be larger than this.
'allow_volume_reset': a bool stating whether or not the volume
should be reset during each crystal generation attempt
"""
def __init__(self, ltype, volume=None, matrix=None, PBC=[1, 1, 1], **kwargs):
# Set required parameters
if ltype in ltype_keywords:
self.ltype = ltype.lower()
elif ltype == None:
self.ltype = "triclinic"
else:
msg = "Invalid lattice type: " + ltype
raise ValueError(msg)
self.volume = float(volume)
self.PBC = PBC
self.dim = sum(PBC)
self.kwargs = {}
self.random = True
# Set optional values
self.allow_volume_reset = True
for key, value in kwargs.items():
if key in [
"area",
"thickness",
"unique_axis",
"random",
"min_l",
"mid_l",
"max_l",
]:
setattr(self, key, value)
self.kwargs[key] = value
if key == "allow_volume_reset":
if value == False:
self.allow_volume_reset = False
if not hasattr(self, 'unique_axis'):
self.unique_axis = "c"
# Set stress normalization info
if self.ltype == "triclinic":
norm_matrix = np.ones([3, 3])
elif self.ltype == "monoclinic":
if self.PBC == [1, 1, 1]:
norm_matrix = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 1]])
else:
if self.unique_axis == "a":
norm_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 1, 1]])
elif self.unique_axis == "b":
norm_matrix = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 1]])
elif self.unique_axis == "c":
norm_matrix = np.array([[1, 0, 0], [1, 1, 0], [0, 0, 1]])
elif self.ltype in ["orthorhombic", "tetragonal", "trigonal", "hexagonal", "cubic"]:
norm_matrix = np.eye(3)
elif self.ltype in ["spherical", "ellipsoidal"]:
norm_matrix = np.zeros([3, 3])
self.stress_normalization_matrix = norm_matrix
# Set info for on-diagonal stress symmetrization
if self.ltype in ["tetragonal", "trigonal", "hexagonal"]:
self.stress_indices = [(0, 0), (1, 1)]
elif self.ltype in ["cubic"]:
self.stress_indices = [(0, 0), (1, 1), (2, 2)]
else:
self.stress_indices = []
# Set values for the matrix
if matrix is None:
self.reset_matrix()
else:
self.set_matrix(matrix)
# Set tolerance
if self.ltype in ["triclinic"]:
self.a_tol = 15.0
else:
self.a_tol = 9.9
self._get_dof()
def _get_dof(self):
"""
get the number of degree of freedom
"""
if self.ltype in ["triclinic"]:
self.dof = 6
elif self.ltype in ["monoclinic"]:
self.dof = 4
elif self.ltype in ['orthorhombic']:
self.dof = 3
elif self.ltype in ['tetragonal', 'hexagonal', 'trigonal']:
self.dof = 2
else:
self.dof = 1
def copy(self):
"""
simply copy the structure
"""
from copy import deepcopy
return deepcopy(self)
def get_lengths(self):
mat = create_matrix(self.PBC, True)
mat = np.dot(mat, self.matrix)
return mat, np.linalg.norm(mat, axis=1)
def get_permutation_matrices(self):
"""
Return the possible permutation matrices that do not violate the symmetry
"""
if self.ltype in ["monoclinic"]: #permutation between a and c
return np.array([
[[1,0,0],[0,1,0],[0,0,1]], #self
[[0,0,1],[0,1,0],[1,0,0]], #a-c
])
elif self.ltype in ["triclinic"]:
return np.array([
[[1,0,0],[0,1,0],[0,0,1]], #self
[[1,0,0],[0,0,1],[0,1,0]], #b-c
[[0,0,1],[0,1,0],[1,0,0]], #a-c
[[0,1,0],[1,0,0],[0,0,1]], #a-b
])
else:
return [np.eye(3)]
def get_transformation_matrices(self):
"""
Return the possible transformation matrices that do not violate the symmetry
"""
if self.ltype in ["monoclinic"]:
return np.array([
[[1,0,0],[0,1,0],[0,0,1]],
[[1,0,0],[0,1,0],[1,0,1]],
[[1,0,0],[0,1,0],[-1,0,1]],
[[1,0,1],[0,1,0],[0,0,1]],
[[1,0,-1],[0,1,0],[0,0,1]],
[[1,0,0],[0,-1,0],[0,0,-1]], #change angle
#[[-1,0,0],[0,1,0],[0,0,1]], #change angle
])
elif self.ltype in ["triclinic"]:
return np.array([
[[1,0,0],[0,1,0],[0,0,1]],
[[1,0,0],[0,1,0],[1,0,1]],
[[1,0,0],[0,1,0],[-1,0,1]],
[[1,0,1],[0,1,0],[0,0,1]],
[[1,0,-1],[0,1,0],[0,0,1]],
[[1,0,0],[0,1,0],[0,1,1]],
[[1,0,0],[0,1,1],[0,0,1]],
[[1,0,0],[0,1,0],[0,-1,1]],
[[1,0,0],[0,1,-1],[0,0,1]],
[[1,1,0],[0,1,0],[0,0,1]],
[[1,-1,0],[0,1,0],[0,0,1]],
[[1,0,0],[1,1,0],[0,0,1]],
[[1,0,0],[-1,1,0],[0,0,1]],
#[[-1,0,0],[0,-1,0],[0,0,1]],
#[[1,0,0],[0,-1,0],[0,0,-1]],
#[[-1,0,0],[0,1,0],[0,0,-1]],
[[-1,0,0],[0,1,0],[0,0,1]],
[[1,0,0],[0,-1,0],[0,0,1]],
[[1,0,0],[0,1,0],[0,0,-1]],
])
else:
return [np.eye(3)]
def search_transformations(self, lat_ref, d_tol=1.0, f_tol=0.1):
"""
search all transformations that closely match the reference lattice object
Args:
    lat_ref: reference lattice object
    d_tol: absolute tolerance on the cell lengths (in Angstroms)
    f_tol: fractional tolerance on the cell lengths
Returns:
    the list of candidate transformation-matrix sequences and the
    corresponding (length, fractional length, angle) differences
"""
#Find all possible permutation and transformation matrices
trans1 = self.get_permutation_matrices()
trans2 = self.get_transformation_matrices()
tols = np.zeros([len(trans2)*len(trans1), 3])
trans = []
switchs = []
count = 0
for i, tran1 in enumerate(trans1):
lat0 = self.transform(tran1)
for j, tran2 in enumerate(trans2):
tmp = np.dot(tran2, lat0.matrix)
try:
#print("Start", np.linalg.det(tmp))
lat2 = Lattice.from_matrix(tmp, ltype=self.ltype)
#print("End", np.linalg.det(lat2.matrix))
d_tol1, f_tol1, a_tol1, switch = lat2.get_diff(lat_ref)
#print(d_tol1, f_tol1, a_tol1, switch)
except:
d_tol1, f_tol1, a_tol1, switch = 10, 1.0, 90, None
tols[count] = [d_tol1, f_tol1, a_tol1]
trans.append([tran1, tran2])
switchs.append(switch)
count += 1
trans_good = []
tols_good = []
for id in range(len(tols)):
if (tols[id, 0] < d_tol or tols[id, 1] < f_tol) and tols[id, 2] < self.a_tol:
if switchs[id]:
trans[id].extend([[[1,0,0],[0,-1,0],[0,0,-1]]])
#print(tols[id], len(trans[id]))
trans_good.append(trans[id])
tols_good.append(tols[id])
return trans_good, tols_good
def search_transformation(self, lat_ref, d_tol=1.0, f_tol=0.1):
"""
search the closest match to the reference lattice object
Args:
    lat_ref: reference lattice object
    d_tol: absolute tolerance on the cell lengths (in Angstroms)
    f_tol: fractional tolerance on the cell lengths
Returns:
    the sequence of transformation matrices and the corresponding
    differences, or (None, None) if no acceptable match is found
"""
#Find all possible permutation and transformation matrices
trans1 = self.get_permutation_matrices()
trans2 = self.get_transformation_matrices()
tols = np.zeros([len(trans2)*len(trans1)+1, 3])
trans = []
switchs = []
#Check it self
d_tol1, f_tol1, a_tol1, switch = self.get_diff(lat_ref)
tols[0] = [d_tol1, f_tol1, a_tol1]
switchs.append(switch)
trans.append([np.eye(3)])
count = 0
for i, tran1 in enumerate(trans1):
lat0 = self.transform(tran1)
for j, tran2 in enumerate(trans2):
count += 1
tmp = np.dot(tran2, lat0.matrix)
try:
#print(i, j, self.ltype)
lat2 = Lattice.from_matrix(tmp, ltype=self.ltype)
d_tol1, f_tol1, a_tol1, switch = lat2.get_diff(lat_ref)
#print(d_tol1, f_tol1, a_tol1, switch)
except:
d_tol1, f_tol1, a_tol1, switch = 10, 1.0, 90, None
tols[count] = [d_tol1, f_tol1, a_tol1]
trans.append([tran1, tran2])
switchs.append(switch)
# QZ: needs to figure out a better way to select the best
rms = tols.sum(axis=1)
ids = np.argsort(rms)
id = ids[0]
#print(tols, rms)
#print(id, switchs[id])
if abs(rms[ids[0]] - rms[ids[1]]) < 1e-3:
if switchs[ids[0]] and not switchs[ids[1]]:
id = ids[1]
#print("change id 1", id)
if id != 0:
if abs(rms[0] - rms[id]) < 1.0:
#print("change id 2", id, rms[0], rms[id])
id = 0
if (tols[id, 0] < d_tol or tols[id, 1] < f_tol) and tols[id, 2] < self.a_tol:
if switchs[id]:
trans[id].append([[1,0,0],[0,-1,0],[0,0,-1]])
return trans[id], tols[id]
else:
return trans[id], tols[id]
else:
#print("=============================================Cannot match:", tols[id])
return None, None
def optimize_once(self, reset=False):
"""
Optimize the lattice's inclination angles
"""
opt = False
trans = self.get_transformation_matrices()
if len(trans) > 1:
diffs = []
for tran in trans:
cell_new = np.dot(tran, self.matrix)
try:
lat_new = Lattice.from_matrix(cell_new, ltype=self.ltype)
diffs.append(lat_new.get_worst_angle())
except:
diffs.append(100)
id = np.array(diffs).argmin()
if id > 0 and diffs[id] < diffs[0] - 0.01:
opt = True
tran = trans[id]
cell = np.dot(tran, self.matrix)
lat = Lattice.from_matrix(cell, ltype=self.ltype, reset=reset)
return lat, tran, opt
return self, np.eye(3), opt
def get_worst_angle(self):
"""
return the worst inclination angle difference w.r.t 90 degree
"""
return np.max(abs(np.array([self.alpha, self.beta, self.gamma])-np.pi/2))
def optimize_multi(self, iterations=5):
"""
Optimize the lattice if the cell has a bad inclination angles
Args:
    iterations: maximum number of iterations
Returns:
    the optimized lattice and the list of transformation matrices applied
"""
lattice = self
trans_matrices = []
for i in range(iterations):
lattice, trans, opt = lattice.optimize_once(reset=True)
if opt:
trans_matrices.append(trans)
else:
break
return lattice, trans_matrices
def standardize(self):
"""
Force the cell angles to be smaller than 90 degrees
"""
change = False
if self.ltype in ["monoclinic"]:
if self.beta > np.pi/2:
self.beta = np.pi - self.beta
change = True
elif self.ltype in ["triclinic"]:
if self.alpha > np.pi/2:
self.alpha = np.pi - self.alpha
change = True
if self.beta > np.pi/2:
self.beta = np.pi - self.beta
change = True
if self.gamma > np.pi/2:
self.gamma = np.pi - self.gamma
change = True
if change:
para = (self.a, self.b, self.c, self.alpha, self.beta, self.gamma)
self.matrix = para2matrix(para)
def transform(self, trans_mat=np.eye(3), reset=False):
"""
Apply a transformation matrix to the lattice.
If reset is False, the cell is not re-symmetrized and a negative lattice may be returned.
"""
if type(trans_mat) == list:
trans_mat = np.array(trans_mat)
cell = np.dot(trans_mat, self.matrix)
return Lattice.from_matrix(cell, ltype=self.ltype, reset=reset)
def transform_multi(self, trans, reset=True):
"""
Apply a sequence of transformation matrices to the lattice
"""
lat = self
for tran in trans:
lat = lat.transform(tran, reset)
return lat
def encode(self):
a, b, c, alpha, beta, gamma = self.get_para(degree=True)
if self.ltype in ['cubic']:
return [a]
elif self.ltype in ['hexagonal', 'trigonal', 'tetragonal']:
return [a, c]
elif self.ltype in ['orthorhombic']:
return [a, b, c]
elif self.ltype in ['monoclinic']:
return [a, b, c, beta]
else:
return [a, b, c, alpha, beta, gamma]
def mutate(self, degree=0.20, frozen=False):
"""
mutate the lattice object
"""
rand = 1 + degree*(np.random.sample(6)-0.5)
a0, b0, c0, alpha0, beta0, gamma0 = self.get_para()
a = a0*rand[0]
b = b0*rand[1]
c = c0*rand[2]
alpha = np.degrees(alpha0*rand[3])
beta = np.degrees(beta0*rand[4])
gamma = np.degrees(gamma0*rand[5])
ltype = self.ltype
if self.ltype in ['cubic']:
if frozen:
lat = Lattice.from_para(a0, a0, a0, 90, 90, 90, ltype=ltype)
else:
lat = Lattice.from_para(a, a, a, 90, 90, 90, ltype=ltype)
elif ltype in ['hexagonal', 'trigonal']:
if frozen:
lat = Lattice.from_para(a0, a0, c, 90, 90, 120, ltype=ltype)
else:
lat = Lattice.from_para(a, a, c, 90, 90, 120, ltype=ltype)
elif ltype in ['tetragonal']:
if frozen:
lat = Lattice.from_para(a0, a0, c, 90, 90, 90, ltype=ltype)
else:
lat = Lattice.from_para(a, a, c, 90, 90, 90, ltype=ltype)
elif ltype in ['orthorhombic']:
lat = Lattice.from_para(a, b, c, 90, 90, 90, ltype=ltype)
elif ltype in ['monoclinic']:
lat = Lattice.from_para(a, b, c, 90, beta, 90, ltype=ltype)
elif ltype in ['triclinic']:
lat = Lattice.from_para(a, b, c, alpha, beta, gamma, ltype=ltype)
else:
raise ValueError("ltype {:s} is not supported".format(ltype))
return lat
def generate_para(self):
if self.dim == 3:
return generate_lattice(self.ltype, self.volume, **self.kwargs)
elif self.dim == 2:
return generate_lattice_2D(self.ltype, self.volume, **self.kwargs)
elif self.dim == 1:
return generate_lattice_1D(self.ltype, self.volume, **self.kwargs)
elif self.dim == 0:
return generate_lattice_0D(self.ltype, self.volume, **self.kwargs)
def generate_matrix(self):
"""
Generates a 3x3 matrix for the lattice based on the lattice type and volume
"""
# Try multiple times in case of failure
for i in range(10):
para = self.generate_para()
if para is not None:
return para2matrix(para)
def get_matrix(self, shape='upper'):
"""
Returns a 3x3 numpy array representing the lattice vectors.
"""
return self.matrix
def get_para(self, degree=False):
"""
Returns a tuple of lattice parameters.
"""
if degree:
return (self.a, self.b, self.c, deg*self.alpha, deg*self.beta, deg*self.gamma)
else:
return (self.a, self.b, self.c, self.alpha, self.beta, self.gamma)
def set_matrix(self, matrix=None):
if matrix is not None:
m = np.array(matrix)
if np.shape(m) == (3, 3):
self.matrix = m
self.inv_matrix = np.linalg.inv(m)
else:
print(matrix)
msg = "Error: matrix must be a 3x3 numpy array or list"
raise ValueError(msg)
else:
self.reset_matrix()
para = matrix2para(self.matrix)
self.a, self.b, self.c, self.alpha, self.beta, self.gamma = para
self.volume = np.linalg.det(self.matrix)
def set_para(self, para=None, radians=False):
if para is not None:
if radians is False:
para[3] *= rad
para[4] *= rad
para[5] *= rad
self.set_matrix(para2matrix(para))
else:
self.set_matrix()
def reset_matrix(self, shape='upper'):
if self.random:
success = False
for i in range(5):
m = self.generate_matrix()
if m is not None:
self.matrix = m
self.inv_matrix = np.linalg.inv(m)
[a, b, c, alpha, beta, gamma] = matrix2para(self.matrix)
self.a = a
self.b = b
self.c = c
self.alpha = alpha
self.beta = beta
self.gamma = gamma
success = True
break
if not success:
msg = "Cannot generate a good matrix"
raise ValueError(msg)
else:
# a small utility to convert the cell shape
para = matrix2para(self.matrix)
self.matrix = para2matrix(para, format=shape)
def set_volume(self, volume):
if self.allow_volume_reset:
self.volume = volume
def swap_axis(self, random=False, ids=None):
"""
Swap the axes of the lattice (only applied to triclinic/monoclinic/orthorhombic cells)
"""
# only applied to triclinic/monoclinic/orthorhombic
if self.ltype in ["triclinic", "orthorhombic", "Orthorhombic"]:
allowed_ids = [[0,1,2], [1,0,2], [0,2,1], [2,1,0], [1,2,0], [2,0,1]]
elif self.ltype in ["monoclinic"]:
if abs(self.beta - 90*rad) > 1e-3:
allowed_ids = [[0,1,2],[2,1,0]]
else:
allowed_ids = [[0,1,2],[1,0,2],[0,2,1],
[2,1,0],[1,2,0],[2,0,1]]
else:
allowed_ids = [[0,1,2]]
if random:
from random import choice
ids = choice(allowed_ids)
else:
if ids not in allowed_ids:
print(ids)
raise ValueError("the above swap is not allowed in "+self.ltype)
(a,b,c,alpha,beta,gamma) = self.get_para()
alpha, beta, gamma = alpha*deg, beta*deg, gamma*deg
if ids is None:
return self
elif ids == [1,0,2]: #a->b
return self.from_para(b, a, c, beta, alpha, gamma, self.ltype)
elif ids == [2,1,0]: #a->c
return self.from_para(c, b, a, gamma, beta, alpha, self.ltype)
elif ids == [0,2,1]: #b-c
return self.from_para(a, c, b, alpha, gamma, beta, self.ltype)
elif ids == [2,0,1]:
return self.from_para(c, a, b, gamma, alpha, beta, self.ltype)
elif ids == [1,2,0]:
return self.from_para(b, c, a, beta, gamma, alpha, self.ltype)
else:
return self
def swap_angle(self, random=True, ids=None):
# only applied to triclinic/monoclinic #/hexagonal
"""
If an angle is not 90 degrees, there are two equivalent settings
(e.g., 80 and 100 degrees); this swaps between them.
"""
if self.ltype in ["monoclinic"]:
allowed_ids = ["beta", "No"]
elif self.ltype in ["triclinic"]:
allowed_ids = ["alpha", "beta", "gamma", "No"]
else:
allowed_ids = ["No"]
if random:
from random import choice
ids = choice(allowed_ids)
else:
if ids not in allowed_ids:
print(ids)
raise ValueError("the above swap is not allowed in "+self.ltype)
(a,b,c,alpha,beta,gamma) = self.get_para()
alpha, beta, gamma = alpha*deg, beta*deg, gamma*deg
if ids is None:
return self
elif ids == "alpha":
return self.from_para(a, b, c, 180-alpha, beta, gamma, self.ltype)
elif ids == "beta":
return self.from_para(a, b, c, alpha, 180-beta, gamma, self.ltype)
elif ids == "gamma":
return self.from_para(a, b, c, alpha, beta, 180-gamma, self.ltype)
else:
return self
def add_vacuum(self, coor, frac=True, vacuum=15, PBC=[0, 0, 0]):
"""
Adds space above and below a 2D or 1D crystal.
Args:
coor: the coordinates of the crystal (fractional if frac is True)
frac: whether coor is given in fractional coordinates
vacuum: the amount of space, in Angstroms, to add above and below
PBC: A periodic boundary condition list,
Ex: [1,1,1] -> full 3d periodicity, [0,0,1] -> periodicity
along the z axis
Returns:
The transformed lattice and coordinates after the vacuum is added
"""
matrix = self.matrix
if frac:
absolute_coords = np.dot(coor, matrix)
else:
absolute_coords = coor
for i, a in enumerate(PBC):
if not a:
ratio = 1 + vacuum/np.linalg.norm(matrix[i])
matrix[i] *= ratio
absolute_coords[:, i] += vacuum/2
if frac:
coor = np.dot(absolute_coords, np.linalg.inv(matrix))
else:
coor = absolute_coords
return matrix, coor
def generate_point(self):
# point = np.random.RandomState().rand(3)
# QZ: it was here because of multiprocess issue
# https://github.com/numpy/numpy/issues/9650
# now just fix it
point = np.random.rand(3)
if self.ltype in ["spherical", "ellipsoidal"]:
# Choose a point within an octant of the unit sphere
while point.dot(point) > 1: # squared
point = np.random.random(3)
# Randomly flip some coordinates
for index in range(len(point)):
# Scale the point by the max radius
if random.uniform(0, 1) < 0.5:
point[index] *= -1
else:
for i, a in enumerate(self.PBC):
if not a:
if self.ltype in ["hexagonal", "trigonal"]:
point[i] *= 1.0 / np.sqrt(3.0)
else:
point[i] -= 0.5
return point
@classmethod
def from_para(
self,
a,
b,
c,
alpha,
beta,
gamma,
ltype="triclinic",
radians=False,
PBC=[1, 1, 1],
factor=1.0,
**kwargs
):
"""
Creates a Lattice object from 6 lattice parameters. Additional keyword
arguments are available. Unless specified by the keyword random=True,
does not create a new matrix upon calling reset_matrix. This allows
for generation of random crystals with a specific choice of unit cell.
Args:
a: The length (in Angstroms) of the unit cell vectors
b: The length (in Angstroms) of the unit cell vectors
c: The length (in Angstroms) of the unit cell vectors
alpha: the angle (in degrees) between the b and c vectors
beta: the angle (in degrees) between the a and c vectors
gamma: the angle (in degrees) between the a and b vectors
ltype: the lattice type ("cubic, tetragonal, etc."). Also available
are "spherical", which confines generated points to lie within a
sphere, and "ellipsoidal", which confines generated points to lie
within an ellipse (oriented about the z axis)
radians: whether or not to use radians (instead of degrees) for the
lattice angles
PBC: A periodic boundary condition list, where 1 means periodic,
0 means not periodic.
Ex: [1,1,1] -> full 3d periodicity, [0,0,1] -> periodicity along
the z axis
kwargs: various values which may be defined. If none are defined,
random ones will be generated. Values will be passed to generate_lattice.
Options include:
area: The cross-sectional area (in Angstroms squared). Only used
to generate 1D crystals
thickness: The unit cell's non-periodic thickness (in Angstroms).
Only used to generate 2D crystals
unique_axis: The unique axis for certain symmetry (and especially
layer) groups. Because the symmetry operations are not also
transformed, you should use the default values for random
crystal generation
random: If False, keeps the stored values for the lattice geometry
even upon applying reset_matrix. To alter the matrix,
use set_matrix() or set_para
'unique_axis': the axis ('a', 'b', or 'c') which is not symmetrically
equivalent to the other two
'min_l': the smallest allowed cell vector. The smallest vector must
be larger than this.
'mid_l': the second smallest allowed cell vector. The second
smallest vector must be larger than this.
'max_l': the third smallest allowed cell vector. The largest cell
vector must be larger than this.
Returns:
a Lattice object with the specified parameters
"""
try:
cell_matrix = factor*para2matrix((a, b, c, alpha, beta, gamma), radians=radians)
except:
msg = "Error: invalid cell parameters for lattice."
raise ValueError(msg)
volume = np.linalg.det(cell_matrix)
# Initialize a Lattice instance
l = Lattice(ltype, volume, PBC=PBC, **kwargs)
l.a, l.b, l.c = factor*a, factor*b, factor*c
l.alpha, l.beta, l.gamma = alpha * rad, beta * rad, gamma * rad
l.matrix = cell_matrix
l.inv_matrix = np.linalg.inv(cell_matrix)
l.ltype = ltype
l.volume = volume
l.random = False
l.allow_volume_reset = False
return l
@classmethod
def from_matrix(self, matrix, reset=True, shape='upper', ltype="triclinic", PBC=[1, 1, 1], **kwargs):
"""
Creates a Lattice object from a 3x3 cell matrix. Additional keyword arguments
are available. Unless specified by the keyword random=True, does not create a
new matrix upon calling reset_matrix. This allows for generation of random
crystals with a specific choice of unit cell.
Args:
matrix: a 3x3 real matrix (numpy array or nested list) for the cell
ltype: the lattice type ("cubic, tetragonal, etc."). Also available are
- "spherical", confines points to lie within a sphere,
- "ellipsoidal", points to lie within an ellipsoid (about the z axis)
PBC: A periodic boundary condition list, where 1 is periodic
Ex: [1,1,1] -> full 3d periodicity, [0,0,1] -> periodicity at z axis
kwargs: various values which may be defined. Random ones if None
Values will be passed to generate_lattice. Options include:
`area: The cross-sectional area (in Ang^2) for 1D crystals
`thickness`: The cell's thickness (in Ang) for 2D crystals
`unique_axis`: The unique axis for layer groups.
`random`: If False, keeps the stored values for the lattice geometry
even applying reset_matrix. To alter the matrix, use `set_matrix()`
or `set_para`
'unique_axis': the axis ('a', 'b', or 'c') which is not symmetrically
equivalent to the other two
'min_l': the smallest allowed cell vector.
'mid_l': the second smallest allowed cell vector.
'max_l': the third smallest allowed cell vector.
Returns:
a Lattice object with the specified parameters
"""
m = np.array(matrix)
if np.shape(m) != (3, 3):
print(matrix)
msg = "Error: matrix must be a 3x3 numpy array or list"
raise ValueError(msg)
[a, b, c, alpha, beta, gamma] = matrix2para(m)
# symmetrize the lattice
if reset:
if ltype in ['cubic', 'Cubic']:
a = b = c = (a+b+c)/3
alpha = beta = gamma = np.pi/2
elif ltype in ['hexagonal', 'trigonal', 'Hexagonal', 'Trigonal']:
a = b = (a+b)/2
alpha = beta = np.pi/2
gamma = np.pi*2/3
elif ltype in ['tetragonal', 'Tetragonal']:
a = b = (a+b)/2
alpha = beta = gamma = np.pi/2
elif ltype in ['orthorhombic', 'Orthorhombic']:
alpha = beta = gamma = np.pi/2
elif ltype in ['monoclinic', 'Monoclinic']:
alpha = gamma = np.pi/2
# reset matrix according to the symmetry
m = para2matrix([a, b, c, alpha, beta, gamma], format=shape)
# Initialize a Lattice instance
volume = np.linalg.det(m)
l = Lattice(ltype, volume, m, PBC=PBC, **kwargs)
l.a, l.b, l.c = a, b, c
l.alpha, l.beta, l.gamma = alpha, beta, gamma
l.matrix = m
l.inv_matrix = np.linalg.inv(m)
l.ltype = ltype
l.volume = volume
l.random = False
l.allow_volume_reset = False
return l
def is_valid_matrix(self):
"""
check if the cell parameter is reasonable or not
"""
try:
paras = [self.a, self.b, self.c, self.alpha, self.beta, self.gamma]
matrix = para2matrix(paras)
return True
except:
return False
def check_mismatch(self, trans, l_type, tol=1.0, a_tol=10):
"""
check if the lattice mismatch is big after a transformation
This is mostly used in supergroup function
QZ: to fix ===============
Args:
trans: 3*3 matrix
l_type: lattice_type like orthrhombic
tol: tolerance in a, b, c
a_tol: tolerance in alpha, beta, gamma
Returns:
True or False
"""
matrix = np.dot(trans.T, self.matrix)
l1 = Lattice.from_matrix(matrix)
l2 = Lattice.from_matrix(matrix, ltype=l_type)
(a1, b1, c1, alpha1, beta1, gamma1) = l1.get_para(degree=True)
(a2, b2, c2, alpha2, beta2, gamma2) = l2.get_para(degree=True)
abc_diff = np.abs(np.array([a2-a1, b2-b1, c2-c1])).max()
ang_diff = np.abs(np.array([alpha2-alpha1, beta2-beta1, gamma2-gamma1])).max()
if abc_diff > tol or ang_diff > a_tol:
return False
else:
return True
def get_diff(self, l_ref):
"""
get the difference in length, angle, and check if switch is needed
"""
(a1, b1, c1, alpha1, beta1, gamma1) = self.get_para(degree=True)
(a2, b2, c2, alpha2, beta2, gamma2) = l_ref.get_para(degree=True)
abc_diff = np.abs(np.array([a2-a1, b2-b1, c2-c1])).max()
abc_f_diff = np.abs(np.array([(a2-a1)/a1, (b2-b1)/b1, (c2-c1)/c1])).max()
ang_diff1 = abs(alpha1 - alpha2) + abs(beta1 - beta2) + abs(gamma1 - gamma2)
ang_diff2 = abs(alpha1-alpha2)
ang_diff2 += abs(abs(beta1-90) - abs(beta2-90))
ang_diff2 += abs(gamma1-gamma2)
#print(abc_diff, abc_f_diff, ang_diff1, ang_diff2, self.ltype)
if ang_diff1 < ang_diff2 + 0.01:
return abc_diff, abc_f_diff, ang_diff1, False
else:
if self.ltype == 'monoclinic':
return abc_diff, abc_f_diff, ang_diff2, True
else:
return abc_diff, abc_f_diff, ang_diff2, False
def __str__(self):
s = "{:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}, {:s}".format(
self.a,
self.b,
self.c,
self.alpha * deg,
self.beta * deg,
self.gamma * deg,
str(self.ltype),
)
return s
def __repr__(self):
return str(self)
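# Illustrative usage sketch, not part of the original module: the cell
# parameters below are arbitrary example values.
def example_lattice_usage():
    """Build a tetragonal Lattice, randomly perturb it, and encode both cells."""
    lat = Lattice.from_para(4.0, 4.0, 6.0, 90, 90, 90, ltype="tetragonal")
    lat2 = lat.mutate(degree=0.1)  # perturb lengths/angles by up to +/- 5%
    return lat.encode(), lat2.encode()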
def generate_lattice(
ltype,
volume,
minvec=1.2,
minangle=np.pi / 6,
max_ratio=10.0,
maxattempts=100,
**kwargs
):
"""
Generates a lattice (3x3 matrix) according to the space group symmetry and
number of atoms. If the spacegroup has centering, we will transform to
conventional cell setting. If the generated lattice does not meet the
minimum angle and vector requirements, we try to generate a new one, up to
maxattempts times.
Args:
volume: volume of the conventional unit cell
minvec: minimum allowed lattice vector length (among a, b, and c)
minangle: minimum allowed lattice angle (among alpha, beta, and gamma)
max_ratio: largest allowed ratio of two lattice vector lengths
maxattempts: the maximum number of attempts for generating a lattice
kwargs: a dictionary of optional values. These include:
'unique_axis': the axis ('a', 'b', or 'c') which is not symmetrically
equivalent to the other two
'min_l': the smallest allowed cell vector.
'mid_l': the second smallest allowed cell vector.
'max_l': the third smallest allowed cell vector.
Returns:
a 3x3 matrix representing the lattice vectors of the unit cell. If
generation fails, outputs a warning message and returns empty
"""
maxangle = np.pi - minangle
for n in range(maxattempts):
# Triclinic
# if sg <= 2:
if ltype == "triclinic":
# Derive lattice constants from a random matrix
mat = random_shear_matrix(width=0.2)
a, b, c, alpha, beta, gamma = matrix2para(mat)
x = np.sqrt(
1
- np.cos(alpha) ** 2
- np.cos(beta) ** 2
- np.cos(gamma) ** 2
+ 2 * (np.cos(alpha) * np.cos(beta) * np.cos(gamma))
)
vec = random_vector()
abc = volume / x
xyz = vec[0] * vec[1] * vec[2]
a = vec[0] * np.cbrt(abc) / np.cbrt(xyz)
b = vec[1] * np.cbrt(abc) / np.cbrt(xyz)
c = vec[2] * np.cbrt(abc) / np.cbrt(xyz)
# Monoclinic
elif ltype in ["monoclinic"]:
alpha, gamma = np.pi / 2, np.pi / 2
beta = gaussian(minangle, maxangle)
x = np.sin(beta)
vec = random_vector()
xyz = vec[0] * vec[1] * vec[2]
abc = volume / x
a = vec[0] * np.cbrt(abc) / np.cbrt(xyz)
b = vec[1] * np.cbrt(abc) / np.cbrt(xyz)
c = vec[2] * np.cbrt(abc) / np.cbrt(xyz)
# Orthorhombic
# elif sg <= 74:
elif ltype in ["orthorhombic"]:
alpha, beta, gamma = np.pi / 2, np.pi / 2, np.pi / 2
x = 1
vec = random_vector()
xyz = vec[0] * vec[1] * vec[2]
abc = volume / x
a = vec[0] * np.cbrt(abc) / np.cbrt(xyz)
b = vec[1] * np.cbrt(abc) / np.cbrt(xyz)
c = vec[2] * np.cbrt(abc) / np.cbrt(xyz)
# Tetragonal
# elif sg <= 142:
elif ltype in ["tetragonal"]:
alpha, beta, gamma = np.pi / 2, np.pi / 2, np.pi / 2
x = 1
vec = random_vector()
c = vec[2] / (vec[0] * vec[1]) * np.cbrt(volume / x)
a = b = np.sqrt((volume / x) / c)
# Trigonal/Rhombohedral/Hexagonal
# elif sg <= 194:
elif ltype in ["hexagonal", "trigonal"]:
alpha, beta, gamma = np.pi / 2, np.pi / 2, np.pi / 3 * 2
x = np.sqrt(3.0) / 2.0
vec = random_vector()
c = vec[2] / (vec[0] * vec[1]) * np.cbrt(volume / x)
from __future__ import division
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize, bisect
import sklearn.gaussian_process as gp
from sklearn.gaussian_process import GaussianProcessClassifier as GPC
from pyDOE import lhs
from gp import GPR
def normalize(y, return_mean_std=False):
y_mean = np.mean(y)
y_std = np.std(y)
y = (y-y_mean)/y_std
if return_mean_std:
return y, y_mean, y_std
return y
def inv_normalize(y, y_mean, y_std):
return y*y_std + y_mean
def proba_of_improvement(samples, gp_model, f_best):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
PI = 1 - norm.cdf(f_best, loc=mu, scale=sigma)
return np.squeeze(PI)
def expected_improvement(samples, gp_model, f_best):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
with np.errstate(divide='ignore'):
Z = (mu - f_best)/sigma
EI = (mu - f_best) * norm.cdf(Z) + sigma * norm.pdf(Z)
EI[sigma==0.0] = 0.0
return np.squeeze(EI)
def upper_confidence_bound(samples, gp_model, beta):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
UCB = mu + beta * sigma
return np.squeeze(UCB)
def lower_confidence_bound(samples, gp_model, beta):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
LCB = mu - beta * sigma
return np.squeeze(LCB)
def regularized_ei_quadratic(samples, gp_model, f_best, center, w):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
epsilon = np.diag(np.matmul(np.matmul(samples-center, np.diag(w**(-2))), (samples-center).T)).reshape(-1,1)
f_tilde = f_best * (1. + np.sign(f_best)*epsilon)
with np.errstate(divide='ignore'):
Z = (mu - f_tilde)/sigma
EIQ = (mu - f_tilde) * norm.cdf(Z) + sigma * norm.pdf(Z)
EIQ[sigma==0.0] = 0.0
return np.squeeze(EIQ)
def regularized_ei_hinge_quadratic(samples, gp_model, f_best, center, R, beta):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
dists = np.linalg.norm(samples-center, axis=1, keepdims=True)
epsilon = (dists-R)/beta/R
epsilon[dists < R] = 0.0
f_tilde = f_best * (1 + np.sign(f_best)*epsilon)
with np.errstate(divide='ignore'):
Z = (mu - f_tilde)/sigma
EIQ = (mu - f_tilde) * norm.cdf(Z) + sigma * norm.pdf(Z)
EIQ[sigma==0.0] = 0.0
return np.squeeze(EIQ)
def var_constrained_pi(samples, gp_model, f_best, tau):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
PI = 1 - norm.cdf(f_best, loc=mu, scale=sigma)
PI[sigma > (tau*gp_model.kernel_.diag(samples).reshape(-1,1))**.5] = 0.0
return np.squeeze(PI)
def var_constrained_ei(samples, gp_model, f_best, tau):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
with np.errstate(divide='ignore'):
Z = (mu - f_best)/sigma
EI = (mu - f_best) * norm.cdf(Z) + sigma * norm.pdf(Z)
EI[sigma==0.0] = 0.0
EI[sigma > (tau*gp_model.kernel_.diag(samples).reshape(-1,1))**.5] = 0.0
return np.squeeze(EI)
def compute_tau(f_best, gp_model, xi=0.01, kappa=0.1):
delta = 0.01
sigma_plus = (xi+delta)/norm.ppf(1-kappa)
_ = np.zeros((1, gp_model.X_train_.shape[1]))
k0 = np.asscalar(gp_model.kernel_(_, _))
def func(x):
mu_tau = 0.#xi - np.sqrt(x*k0)*norm.ppf(1-kappa)
u_tau = (mu_tau-f_best)/np.sqrt(x*k0)
EI_tau = np.sqrt(x*k0) * (u_tau*norm.cdf(u_tau) + norm.pdf(u_tau))
u_plus = -delta/sigma_plus
EI_plus = sigma_plus * (u_plus*norm.cdf(u_plus) + norm.pdf(u_plus))
return EI_tau - EI_plus
# import matplotlib.pyplot as plt
# xx = np.linspace(0.1, 1., 100)
# plt.plot(xx, func(xx))
try:
tau = bisect(func, 0.01, 1.)
tau = np.clip(tau, 0.0001, 0.99)
except ValueError:
tau = 0.99
return tau
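# Illustrative pairing of compute_tau with the variance-constrained EI above
# (a sketch only; `gp_model`, `f_best`, `d` and `bounds` are placeholders for an
# already-fitted scikit-learn GaussianProcessRegressor, the best observed value,
# the input dimension and box bounds):
#
#   tau = compute_tau(f_best, gp_model, xi=0.01, kappa=0.1)
#   acq = lambda x: var_constrained_ei(x, gp_model, f_best, tau)
#   x_next = sample_next_point(d, acq, bounds=bounds, strict_bounds=True)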
def constraint_proba(samples, gpc_model):
samples = np.array(samples).reshape(-1, gpc_model.base_estimator_.X_train_.shape[1])
pr = gpc_model.predict_proba(samples)[:,1]
return np.squeeze(pr)
def constraint_weighted_acquisition(samples, acquisition_func, constraint_proba_func, delta=0.5):
afv = acquisition_func(samples)
pr = constraint_proba_func(samples)
afv *= pr
afv[pr<1-delta] = 0.0
return afv
def grad_ei(samples, gp_model, f_best):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
u = (mu - f_best) / sigma
dmu_dx, dsigma_dx = gp_model.grad_predict(samples, compute_std=True)
du_dx = (dmu_dx - u*dsigma_dx)/sigma
dEI_dx = (u*norm.cdf(u)+norm.pdf(u))*dsigma_dx + sigma*norm.cdf(u)*du_dx
return dEI_dx
def generate_candidates(d, n_candidates, bounds=None, gaussian=None, ball=None):
if bounds is not None:
bounds = np.array(bounds, ndmin=2)
candidates = np.random.uniform(bounds[:,0], bounds[:,1], size=(n_candidates, d))
elif gaussian is not None:
mean = np.array(gaussian[0], ndmin=1)
cov = np.array(gaussian[1], ndmin=2)
candidates = np.random.multivariate_normal(mean, cov, size=n_candidates)
elif ball is not None:
def sample_sphere(center, radius, num):
count = 0
samples = []
while count < num:
sample = np.random.uniform(-radius, radius, d)
if np.linalg.norm(sample) <= radius:
samples.append(sample + center)
count += 1
samples = np.array(samples)
return samples
center = ball[0]
radius = ball[1]
candidates = sample_sphere(center, radius, n_candidates)
else:
candidates = np.random.rand(n_candidates, d)
return candidates
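# Example (sketch) of the three candidate-generation modes above; the sizes and
# values are illustrative only:
#
#   cands_box   = generate_candidates(2, 200, bounds=[[0., 1.], [0., 1.]])
#   cands_gauss = generate_candidates(2, 200, gaussian=(np.zeros(2), np.eye(2)))
#   cands_ball  = generate_candidates(2, 200, ball=(np.zeros(2), 0.5))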
def sample_next_point(d, acquisition_func, candidates=None, bounds=None, strict_bounds=False, gaussian=None, ball=None,
n_candidates=1000, n_restarts=1, random_search=False, return_opt_f=False):
opt_x = None
f = lambda x: -acquisition_func(x)
if candidates is None:
candidates = generate_candidates(d, n_candidates, bounds, gaussian, ball)
# Random search
if random_search:
afv = np.squeeze(f(candidates))
opt_x = candidates[np.argmin(afv)]
# L-BFGS-B
else:
f_candidates = f(candidates).flatten()
x0s = candidates[np.argsort(f_candidates)[:n_restarts]]
opt_f = np.inf
if strict_bounds and bounds is not None:
bs = np.array(bounds, ndmin=2)
else:
bs = None
for x0 in x0s:
res = minimize(fun=f,
x0=x0,
bounds=bs,
method='L-BFGS-B')
if res.fun < opt_f:
opt_f = res.fun
opt_x = res.x
if return_opt_f:
return opt_x, -opt_f
return opt_x
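# Minimal end-to-end usage sketch for the acquisition machinery above. It is
# illustrative only: `_demo_ei_step` and `toy_objective` are hypothetical names,
# and the sketch assumes scikit-learn's GaussianProcessRegressor, which exposes
# the `X_train_` attribute and the `predict(..., return_std=True)` interface
# that expected_improvement() relies on.
def _demo_ei_step():
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, ConstantKernel
    def toy_objective(x):
        # simple 1-D objective to be maximized
        return -np.sum((np.atleast_2d(x) - 0.3)**2, axis=1)
    rng = np.random.RandomState(0)
    X = rng.uniform(0., 1., size=(8, 1))  # initial design
    y = toy_objective(X)
    gp_model = GaussianProcessRegressor(kernel=ConstantKernel(1.0) * RBF(0.2),
                                        alpha=1e-6, normalize_y=True).fit(X, y)
    f_best = y.max()
    acq = lambda x: expected_improvement(x, gp_model, f_best)
    # propose the next point inside the unit interval
    return sample_next_point(d=1, acquisition_func=acq, bounds=[[0., 1.]],
                             strict_bounds=True, n_candidates=500, n_restarts=3)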
def bo_c(func, n_eval, n_init_eval, n_candidates, bounds, alpha=1e-4, save_dir=None):
# kernel = gp.kernels.Matern()
kernel = gp.kernels.ConstantKernel(1.0, (1., 1.)) * gp.kernels.RBF(1.0, (1e-5, 1e5))
gp_model = GPR(kernel=kernel, alpha=alpha, n_restarts_optimizer=100, normalize_y=False)
gpc_model = GPC(kernel=kernel, n_restarts_optimizer=100)
dim = func.dim
# Initial evaluations
xs = lhs(dim, samples=n_init_eval, criterion='cm')
xs = xs * (bounds[:,1] - bounds[:,0]) + bounds[:,0]
ys = func(xs)
vs = func.is_feasible(xs)
opt_idx = np.argmax(ys[vs])
opt_x = xs[vs][opt_idx]
opt_y = ys[vs][opt_idx]
opt_ys = [opt_y]
for i in range(n_init_eval, n_eval):
ys_normalized = normalize(ys[vs])
gp_model.fit(xs[vs], ys_normalized)
f_prime = ys_normalized[opt_idx]
acquisition_func = lambda x: expected_improvement(x, gp_model, f_prime)
if np.any(vs) and np.any(np.logical_not(vs)):
gpc_model.fit(xs, vs)
constraint_proba_func = lambda x: constraint_proba(x, gpc_model)
constraint_weighted_acquisition_func = lambda x: constraint_weighted_acquisition(x, acquisition_func, constraint_proba_func)
else:
constraint_weighted_acquisition_func = acquisition_func
# Decide point to evaluate next
n_candidates = 1000*dim
x = sample_next_point(dim, constraint_weighted_acquisition_func, bounds=bounds, strict_bounds=True, n_candidates=n_candidates)
y = func(x)
v = func.is_feasible(x)
        xs = np.append(xs, np.array(x, ndmin=2), axis=0)
import os
import tempfile
import unittest
import numpy as np
from ensembler import samplers
from ensembler import potentials
from ensembler import system
from ensembler.util import dataStructure as data
class test_System(unittest.TestCase):
system_class = system.system
tmp_test_dir: str = None
def setUp(self) -> None:
test_dir = os.getcwd()+"/tests_out"
if(not os.path.exists(test_dir)):
os.mkdir(test_dir)
if(__class__.tmp_test_dir is None):
__class__.tmp_test_dir = tempfile.mkdtemp(dir=test_dir, prefix="tmp_test_system")
_, self.tmp_out_path = tempfile.mkstemp(prefix="test_" + self.system_class.name, suffix=".obj", dir=__class__.tmp_test_dir)
self.sampler = samplers.stochastic.metropolisMonteCarloIntegrator()
self.pot = potentials.OneD.harmonicOscillatorPotential()
def test_system_constructor(self):
self.system_class(potential=self.pot, sampler=self.sampler)
def test_system_constructor_detail(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
conditions = []
temperature = 300
position = [0.1]
mass = [1]
expected_state = data.basicState(position=[0.1], temperature=temperature,
total_system_energy=0.005000000000000001, total_potential_energy=0.005000000000000001,
total_kinetic_energy=np.nan, dhdpos=[[np.nan]],
velocity=np.nan) # Monte carlo does not use dhdpos or velocity
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
curState = sys.current_state
# check attributes
self.assertEqual(self.pot.constants[self.pot.nDimensions], sys.nDimensions,
msg="Dimensionality was not the same for system and potential!")
self.assertEqual([], sys.conditions, msg="Conditions were not empty!")
# print(curState)
        # check current state initialisation
self.assertEqual(expected_state.position, curState.position, msg="The initialised Position is not correct!")
self.assertEqual(expected_state.temperature, curState.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(expected_state.total_system_energy, curState.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(expected_state.total_potential_energy, curState.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertEqual(np.isnan(expected_state.total_kinetic_energy), np.isnan(curState.total_kinetic_energy),
msg="The initialised total_kinetic_energy is not correct!")
self.assertEqual(np.isnan(expected_state.velocity), np.isnan(curState.velocity),
msg="The initialised velocity is not correct!")
def test_append_state(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
conditions = []
temperature = 300
position = [0.1]
mass = [1]
newPosition = 10
newVelocity = -5
newForces = 3
expected_state = data.basicState(position=newPosition, temperature=temperature,
total_system_energy=62.5, total_potential_energy=50.0, total_kinetic_energy=12.5,
dhdpos=3, velocity=newVelocity)
# potential: _perturbedPotentialCls, samplers: _samplerCls, conditions: Iterable[Condition] = [],
# temperature: float = 298.0, position:(Iterable[Number] or float
sys = self.system_class(potential=self.pot, sampler=self.sampler, conditions=[], temperature=temperature,
start_position=position)
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces)
curState = sys.current_state
        # check current state initialisation
self.assertEqual(curState.position, expected_state.position, msg="The initialised Position is not correct!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertAlmostEqual(curState.total_kinetic_energy, expected_state.total_kinetic_energy,
msg="The initialised total_kinetic_energy is not correct!")
# self.assertEqual(curState.dhdpos, expected_state.dhdpos, msg="The initialised dhdpos is not correct!")
self.assertEqual(curState.velocity, expected_state.velocity, msg="The initialised velocity is not correct!")
def test_revertStep(self):
conditions = []
temperature = 300
position = [0.1]
mass = [1]
newPosition = 10
newVelocity = -5
newForces = 3
newPosition2 = 13
newVelocity2 = -4
newForces2 = 8
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces)
expected_state = sys.current_state
sys.append_state(new_position=newPosition2, new_velocity=newVelocity2, new_forces=newForces2)
not_expected_state = sys.current_state
sys.revert_step()
curState = sys.current_state
        # check current state initialisation
self.assertEqual(curState.position, expected_state.position,
msg="The current Position is not equal to the one two steps before!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The current temperature is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The current total_system_energy is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The current total_potential_energy is not equal to the one two steps before!")
self.assertEqual(np.isnan(curState.total_kinetic_energy), np.isnan(expected_state.total_kinetic_energy),
msg="The current total_kinetic_energy is not equal to the one two steps before!")
self.assertEqual(curState.dhdpos, expected_state.dhdpos,
msg="The current dhdpos is not equal to the one two steps before!")
self.assertEqual(curState.velocity, expected_state.velocity,
msg="The current velocity is not equal to the one two steps before!")
        # check that the middle step is not the same
self.assertNotEqual(curState.position, not_expected_state.position,
msg="The not expected Position equals the current one!")
self.assertEqual(curState.temperature, not_expected_state.temperature,
msg="The not expected temperature equals the current one")
self.assertNotAlmostEqual(curState.total_system_energy, not_expected_state.total_system_energy,
msg="The not expected total_system_energy equals the current one")
self.assertNotAlmostEqual(curState.total_potential_energy, not_expected_state.total_potential_energy,
msg="The not expected total_potential_energy equals the current one")
self.assertEqual(np.isnan(curState.total_kinetic_energy), np.isnan(not_expected_state.total_kinetic_energy),
msg="The not expected total_kinetic_energy equals the current one")
self.assertNotEqual(curState.dhdpos, not_expected_state.dhdpos,
msg="The not expected dhdpos, equals the current one")
self.assertNotEqual(curState.velocity, not_expected_state.velocity,
msg="The not expected velocity equals the current one")
def test_propergate(self):
conditions = []
temperature = 300
position = [0.1]
mass = [1]
expected_state = data.basicState(position=position, temperature=temperature,
total_system_energy=0.005000000000000001, total_potential_energy=0.005000000000000001,
total_kinetic_energy=np.nan,
dhdpos=np.nan, velocity=np.nan)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
initialState = sys.current_state
sys.propagate()
        # check that the middle step is not the same
self.assertNotEqual(sys._currentPosition, initialState.position,
msg="The initialState equals the currentState after propergating in attribute: Position!")
self.assertEqual(sys._currentTemperature, initialState.temperature,
msg="The initialState does not equal the currentState after propergating in attribute: temperature!")
self.assertEqual(sys._currentTotPot, initialState.total_potential_energy,
msg="The initialState does not equal the currentState after propergating in attribute: total_potential_energy!")
self.assertEqual(np.isnan(sys._currentTotKin), np.isnan(initialState.total_kinetic_energy),
msg="The initialState does not equal the currentState after propergating in attribute: total_kinetic_energy!")
self.assertNotEqual(sys._currentForce, initialState.dhdpos,
msg="The initialState equals the currentState after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(sys._currentVelocities), np.isnan(initialState.velocity),
msg="The initialState does not equal the currentState after propergating in attribute: velocity!")
def test_simulate(self):
conditions = []
temperature = 300
position = [0.1]
mass = [1]
steps = 100
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
init_state = sys.current_state
sys.simulate(steps=steps, init_system=False,
withdraw_traj=True) # withdrawTraj is needed in the context because of the interaction between different Tests
trajectory = sys.trajectory
old_frame = trajectory.iloc[0]
# print(old_frame)
# print(init_state)
# Check that the first frame is the initial state!
self.assertEqual(init_state.position, old_frame.position,
msg="The initial state does not equal the frame 0 after propergating in attribute: Position!")
self.assertEqual(init_state.temperature, old_frame.temperature,
msg="The initial state does not equal the frame 0 after propergating in attribute: temperature!")
self.assertAlmostEqual(init_state.total_potential_energy, old_frame.total_potential_energy,
msg="The initial state does not equal the frame 0 after propergating in attribute: total_potential_energy!")
self.assertAlmostEqual(np.isnan(init_state.total_kinetic_energy), np.isnan(old_frame.total_kinetic_energy),
msg="The initial state does not equal the frame 0 after propergating in attribute: total_kinetic_energy!")
self.assertEqual(np.isnan(init_state.dhdpos), np.isnan(old_frame.dhdpos),
msg="The initial state does not equal the frame 0 after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(init_state.velocity), np.isnan(old_frame.velocity),
msg="The initial state does not equal the frame 0 after propergating in attribute: velocity!")
# check that the frames are all different from each other.
for ind, frame in list(trajectory.iterrows())[1:]:
# print()
# print(ind, frame)
            # check that the middle step is not the same
self.assertNotEqual(old_frame.position, frame.position,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: Position!")
self.assertEqual(old_frame.temperature, frame.temperature,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: temperature!") # due to samplers
self.assertNotAlmostEqual(old_frame.total_potential_energy, frame.total_potential_energy,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: total_potential_energy!")
self.assertEqual(np.isnan(old_frame.total_kinetic_energy), np.isnan(frame.total_kinetic_energy),
msg="The frame " + str(ind) + " equals not the frame " + str(
ind + 1) + " after propergating in attribute: total_kinetic_energy!") # due to samplers
self.assertNotEqual(old_frame.dhdpos, frame.dhdpos,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(old_frame.velocity), np.isnan(frame.velocity),
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: velocity!") # due to samplers
old_frame = frame
def test_applyConditions(self):
"""
NOT IMPLEMENTED!
"""
pass
def test_initVel(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
conditions = []
temperature = 300
position = [0.1]
mass = [1]
newPosition = 10
newVelocity = -5
newForces = 3
expected_state = data.basicState(position=newPosition, temperature=temperature,
total_system_energy=62.5, total_potential_energy=50.0, total_kinetic_energy=12.5,
dhdpos=3, velocity=newVelocity)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
sys._init_velocities()
cur_velocity = sys._currentVelocities
# print(cur_velocity)
        self.assertIsInstance(cur_velocity, float, msg="Velocity does not have the correct type!")
def test_updateTemp(self):
"""
NOT IMPLEMENTED
"""
pass
def test_updateEne(self):
conditions = []
temperature = 300
position = [0.1]
mass = [1]
expected_state = data.basicState(position=position, temperature=temperature,
total_system_energy=0.005000000000000001, total_potential_energy=0.005000000000000001,
total_kinetic_energy=np.nan, dhdpos=np.nan, velocity=np.nan)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
initialState = sys.current_state
sys.propagate()
sys._update_energies()
        # check that the middle step is not the same
self.assertNotEqual(sys._currentPosition, initialState.position,
msg="The initialState equals the currentState after propergating in attribute: Position!")
self.assertEqual(sys._currentTemperature, initialState.temperature,
msg="The initialState does not equal the currentState after propergating in attribute: temperature!")
self.assertNotAlmostEqual(sys._currentTotPot, initialState.total_potential_energy,
msg="The initialState does equal the currentState after propergating in attribute: total_potential_energy!")
self.assertAlmostEqual(np.isnan(sys._currentTotKin), np.isnan(initialState.total_kinetic_energy),
msg="The initialState does equal the currentState after propergating in attribute: total_kinetic_energy!")
self.assertNotEqual(sys._currentForce, initialState.dhdpos,
msg="The initialState equals the currentState after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(sys._currentVelocities), np.isnan(initialState.velocity),
msg="The initialState does not equal the currentState after propergating in attribute: velocity!")
def test_totPot(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
temperature = 300
position = [1]
mass = [1]
expected_state = data.basicState(position=position, temperature=temperature,
total_system_energy=0.005000000000000001, total_potential_energy=0.005000000000000001,
total_kinetic_energy=0,
dhdpos=None, velocity=None)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
self.assertAlmostEqual(sys.calculate_total_potential_energy(), 0.5, msg="The initialised total_potential_energy is not correct!")
def test_totKin(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
conditions = []
temperature = 300
position = [1]
mass = [1]
expected_state = data.basicState(position=position, temperature=temperature,
total_system_energy=0.005000000000000001, total_potential_energy=0.005000000000000001,
total_kinetic_energy=np.nan, dhdpos=np.nan, velocity=np.nan)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
self.assertEqual(np.isnan(sys.calculate_total_kinetic_energy()), np.isnan(np.nan), msg="The initialised total_kinetic_energy is not correct!")
newPosition = 10
newVelocity = -5
newForces = 3
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces)
self.assertAlmostEqual(sys.calculate_total_potential_energy(), 50.0, msg="The initialised total_potential_energy is not correct!")
def test_setTemperature(self):
conditions = []
temperature = 300
temperature2 = 600
position = [0.1]
mass = [1]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
sys._currentVelocities = 100
sys.update_current_state()
initialState = sys.current_state
sys.set_temperature(temperature2)
        # check that the middle step is not the same
self.assertEqual(sys._currentPosition, initialState.position,
msg="The initialState equals the currentState after propergating in attribute: Position!")
self.assertNotEqual(sys._currentTemperature, initialState.temperature,
msg="The initialState does equal the currentState after propergating in attribute: temperature!")
self.assertAlmostEqual(sys._currentTotPot, initialState.total_potential_energy,
msg="The initialState does equal the currentState after propergating in attribute: total_potential_energy!")
# self.assertNotAlmostEqual(sys._currentTotKin, initialState.total_kinetic_energy,
# msg="The initialState does not equal the currentState after propergating in attribute: total_kinetic_energy!")
self.assertEqual(np.isnan(sys._currentForce), np.isnan(initialState.dhdpos),
msg="The initialState equals the currentState after propergating in attribute: dhdpos!")
self.assertEqual(sys._currentVelocities, initialState.velocity,
msg="The initialState does equal the currentState after propergating in attribute: velocity!")
def test_get_Pot(self):
conditions = []
temperature = 300
position = 0.1
mass = [1]
expected_state = data.basicState(position=position, temperature=temperature,
total_system_energy=0.005, total_potential_energy=0.005, total_kinetic_energy=0,
dhdpos=None, velocity=None)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
self.assertEqual(0.005000000000000001, sys.total_potential_energy, msg="Could not get the correct Pot Energy!")
def test_get_Trajectory(self):
conditions = []
temperature = 300
position = [0.1]
mass = [1]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
sys.simulate(steps=10)
traj_pd = sys.trajectory
def test_save_obj_str(self):
path = self.tmp_out_path
out_path = self.system_class(potential=self.pot, sampler=self.sampler).save(path=path)
print(out_path)
def test_load_str_path(self):
path = self.tmp_out_path
out_path = self.system_class(potential=self.pot, sampler=self.sampler).save(path=path)
cls = self.system_class.load(path=out_path)
print(cls)
class test_perturbedSystem1D(test_System):
system_class = system.perturbed_system.perturbedSystem
tmp_test_dir: str = None
def setUp(self) -> None:
test_dir = os.getcwd()+"/tests_out"
if(not os.path.exists(test_dir)):
os.mkdir(test_dir)
if (__class__.tmp_test_dir is None):
__class__.tmp_test_dir = tempfile.mkdtemp(dir=test_dir, prefix="tmp_test_perturbedSystem")
_, self.tmp_out_path = tempfile.mkstemp(prefix="test_" + self.system_class.name, suffix=".obj",
dir=__class__.tmp_test_dir)
self.sampler = samplers.stochastic.metropolisMonteCarloIntegrator()
ha = potentials.OneD.harmonicOscillatorPotential(x_shift=-5)
hb = potentials.OneD.harmonicOscillatorPotential(x_shift=5)
self.pot = potentials.OneD.linearCoupledPotentials(Va=ha, Vb=hb, lam=1.0)
def test_system_constructor(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
lam = 0
conditions = []
temperature = 300
position = 0
mass = [1]
expected_state = data.lambdaState(position=0, temperature=temperature, lam=0.0,
total_system_energy=12.5, total_potential_energy=12.5, total_kinetic_energy=np.nan,
dhdpos=np.nan, velocity=np.nan, dhdlam=np.nan)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
curState = sys.current_state
# check attributes
self.assertEqual(self.pot.constants[self.pot.nDimensions], sys.nDimensions,
msg="Dimensionality was not the same for system and potential!")
self.assertEqual([], sys.conditions, msg="Conditions were not empty!")
# print(curState)
        # check current state initialisation
self.assertEqual(curState.position, expected_state.position, msg="The initialised Position is not correct!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertEqual(np.isnan(curState.total_kinetic_energy), np.isnan(expected_state.total_kinetic_energy),
msg="The initialised total_kinetic_energy is not correct!")
# self.assertEqual(np.isnan(curState.dhdpos), np.isnan(expected_state.dhdpos), msg="The initialised dhdpos is not correct!")
self.assertEqual(np.isnan(curState.velocity), np.isnan(expected_state.velocity),
msg="The initialised velocity is not correct!")
self.assertEqual(np.isnan(curState.lam), np.isnan(expected_state.lam),
msg="The initialised lam is not correct!")
# self.assertEqual(np.isnan(curState.dhdlam), np.isnan(expected_state.dhdlam), msg="The initialised dHdlam is not correct!")
def test_system_constructor_detail(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
conditions = []
temperature = 300
position = [0.1]
mass = [1]
expected_state = data.basicState(position=[0.1], temperature=temperature,
total_system_energy=12.005, total_potential_energy=12.005,
total_kinetic_energy=np.nan, dhdpos=[[np.nan]],
velocity=np.nan) # Monte carlo does not use dhdpos or velocity
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
curState = sys.current_state
# check attributes
self.assertEqual(self.pot.constants[self.pot.nDimensions], sys.nDimensions,
msg="Dimensionality was not the same for system and potential!")
self.assertEqual([], sys.conditions, msg="Conditions were not empty!")
# print(curState)
        # check current state initialisation
self.assertEqual(expected_state.position, curState.position, msg="The initialised Position is not correct!")
self.assertEqual(expected_state.temperature, curState.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(expected_state.total_system_energy, curState.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(expected_state.total_potential_energy, curState.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertEqual(np.isnan(expected_state.total_kinetic_energy), np.isnan(curState.total_kinetic_energy),
msg="The initialised total_kinetic_energy is not correct!")
self.assertEqual(np.isnan(expected_state.velocity), np.isnan(curState.velocity),
msg="The initialised velocity is not correct!")
def test_append_state(self):
lam = 0
temperature = 300
position = 0
newPosition = 10
newVelocity = -5
newForces = 3
newLam = 1.0
expected_state = data.lambdaState(position=newPosition, temperature=temperature, lam=newLam,
total_system_energy=125.0, total_potential_energy=112.5, total_kinetic_energy=12.5,
dhdpos=newForces, velocity=newVelocity, dhdlam=np.nan)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature)
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces, new_lambda=newLam)
curState = sys.current_state
        # check current state initialisation
self.assertEqual(sys._currentPosition, expected_state.position, msg="The initialised Position is not correct!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertAlmostEqual(curState.total_kinetic_energy, expected_state.total_kinetic_energy,
msg="The initialised total_kinetic_energy is not correct!")
# self.assertEqual(curState.dhdpos, expected_state.dhdpos, msg="The initialised dhdpos is not correct!")
self.assertEqual(np.isnan(curState.velocity), np.isnan(expected_state.velocity),
msg="The initialised velocity is not correct!")
self.assertEqual(curState.lam, expected_state.lam, msg="The initialised lam is not correct!")
# self.assertEqual(np.isnan(curState.dhdlam), np.isnan(expected_state.dhdlam), msg="The initialised dHdlam is not correct!")
def test_revertStep(self):
newPosition = 10
newVelocity = -5
newForces = 3
newLam = 1.0
newPosition2 = 13
newVelocity2 = -4
newForces2 = 8
newLam2 = 0.5
lam = 0
conditions = []
temperature = 300
position = [0]
mass = [1]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces, new_lambda=newLam)
expected_state = sys.current_state
sys.append_state(new_position=newPosition2, new_velocity=newVelocity2, new_forces=newForces2, new_lambda=newLam2)
not_expected_state = sys.current_state
sys.revert_step()
curState = sys.current_state
        # check current state initialisation
self.assertEqual(curState.position, expected_state.position,
msg="The current Position is not equal to the one two steps before!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The current temperature is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The current total_system_energy is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The current total_potential_energy is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_kinetic_energy, expected_state.total_kinetic_energy,
msg="The current total_kinetic_energy is not equal to the one two steps before!")
# self.assertEqual(curState.dhdpos, expected_state.dhdpos, msg="The current dhdpos is not equal to the one two steps before!")
self.assertEqual(curState.velocity, expected_state.velocity,
msg="The current velocity is not equal to the one two steps before!")
self.assertEqual(curState.lam, expected_state.lam,
msg="The current lam is not equal to the one two steps before!")
self.assertEqual(np.isnan(curState.dhdlam), np.isnan(expected_state.dhdlam),
msg="The initialised dHdlam is not correct!")
        # check that the middle step is not the same
self.assertNotEqual(curState.position, not_expected_state.position,
msg="The not expected Position equals the current one!")
self.assertEqual(curState.temperature, not_expected_state.temperature,
msg="The not expected temperature equals the current one")
self.assertNotAlmostEqual(curState.total_system_energy, not_expected_state.total_system_energy,
msg="The not expected total_system_energy equals the current one")
self.assertNotAlmostEqual(curState.total_potential_energy, not_expected_state.total_potential_energy,
msg="The not expected total_potential_energy equals the current one")
self.assertNotAlmostEqual(curState.total_kinetic_energy, not_expected_state.total_kinetic_energy,
msg="The not expected total_kinetic_energy equals the current one")
# self.assertNotEqual(curState.dhdpos, not_expected_state.dhdpos, msg="The not expected dhdpos, equals the current one")
self.assertNotEqual(curState.velocity, not_expected_state.velocity,
msg="The not expected velocity equals the current one")
self.assertNotEqual(curState.lam, not_expected_state.lam, msg="The not expected lam equals the current one")
self.assertEqual(np.isnan(curState.dhdlam), np.isnan(expected_state.dhdlam),
msg="The initialised dHdlam is not correct!")
def test_propergate(self):
lam = 0
temperature = 300
position = [0]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
initialState = sys.current_state
sys.propagate()
        # check that the middle step is not the same
self.assertNotEqual(sys._currentPosition, initialState.position,
msg="The initialState equals the currentState after propagating in attribute: Position!")
self.assertEqual(sys._currentTemperature, initialState.temperature,
msg="The initialState does not equal the currentState after propergating in attribute: temperature!")
self.assertAlmostEqual(sys._currentTotPot, initialState.total_potential_energy,
msg="The initialState does not equal the currentState after propergating in attribute: total_potential_energy!")
self.assertEqual(np.isnan(sys._currentTotKin), np.isnan(initialState.total_kinetic_energy),
msg="The initialState does not equal the currentState after propergating in attribute: total_kinetic_energy!")
self.assertNotEqual(sys._currentForce, initialState.dhdpos,
msg="The initialState equals the currentState after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(sys._currentVelocities), np.isnan(initialState.velocity),
msg="The initialState does not equal the currentState after propergating in attribute: velocity!")
self.assertEqual(sys._currentLambda, initialState.lam,
msg="The initialState does not equal the currentState after propergating in attribute: lam!")
self.assertEqual(np.isnan(sys._currentdHdLambda), np.isnan(initialState.dhdlam),
msg="The initialState does not equal the currentState after propergating in attribute: dHdLam!")
def test_simulate(self):
lam = 0
steps = 100
temperature = 300
position = [0]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
init_state = sys.current_state
sys.simulate(steps=steps, init_system=False,
withdraw_traj=True) # withdrawTraj is needed in the context because of the interaction between different Tests
trajectory = sys.trajectory
old_frame = trajectory.iloc[0]
# Check that the first frame is the initial state!
self.assertListEqual(list(init_state.position), list(old_frame.position),
msg="The initial state does not equal the frame 0 after propergating in attribute: Position!")
self.assertEqual(init_state.temperature, old_frame.temperature,
msg="The initial state does not equal the frame 0 after propergating in attribute: temperature!")
self.assertAlmostEqual(init_state.total_potential_energy, old_frame.total_potential_energy,
msg="The initial state does not equal the frame 0 after propergating in attribute: total_potential_energy!")
self.assertAlmostEqual(np.isnan(init_state.total_kinetic_energy), np.isnan(old_frame.total_kinetic_energy),
msg="The initial state does not equal the frame 0 after propergating in attribute: total_kinetic_energy!")
self.assertEqual(np.isnan(init_state.dhdpos), np.isnan(old_frame.dhdpos),
msg="The initial state does not equal the frame 0 after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(init_state.velocity), np.isnan(old_frame.velocity),
msg="The initial state does not equal the frame 0 after propergating in attribute: velocity!")
self.assertEqual(init_state.lam, old_frame.lam,
msg="The initial state does not equal the frame 0 after propergating in attribute: lam!")
self.assertEqual(np.isnan(init_state.dhdlam), np.isnan(old_frame.dhdlam),
msg="The initial state does not equal the frame 0 after propergating in attribute: dhdLam!")
# check that the frames are all different from each other.
for ind, frame in list(trajectory.iterrows())[1:]:
            # check that the middle step is not the same
self.assertNotEqual(old_frame.position, frame.position,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: Position!")
self.assertEqual(old_frame.temperature, frame.temperature,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: temperature!") # due to samplers
self.assertNotAlmostEqual(old_frame.total_potential_energy, frame.total_potential_energy,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: total_potential_energy!")
self.assertAlmostEqual(np.isnan(old_frame.total_kinetic_energy), np.isnan(frame.total_kinetic_energy),
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: total_kinetic_energy!") # due to samplers
self.assertNotEqual(old_frame.dhdpos, frame.dhdpos,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(old_frame.velocity), np.isnan(frame.velocity),
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: velocity!") # due to samplers
self.assertEqual(init_state.lam, old_frame.lam,
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: lam!")
self.assertEqual(np.isnan(init_state.dhdlam), np.isnan(old_frame.dhdlam),
msg="The frame " + str(ind) + " equals the frame " + str(
ind + 1) + " after propergating in attribute: dhdLam!")
old_frame = frame
def test_applyConditions(self):
"""
NOT IMPLEMENTED!
"""
pass
def test_initVel(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
lam = 0
temperature = 300
position = [0]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
sys._init_velocities()
cur_velocity = sys._currentVelocities
# print(cur_velocity)
expected_vel = np.float64(-2.8014573319669176)
        self.assertEqual(type(cur_velocity), type(expected_vel), msg="Velocity does not have the correct type!")
def test_updateTemp(self):
"""
NOT IMPLEMENTED
"""
pass
def test_updateEne(self):
lam = 0
temperature = 300
position = [0]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
initialState = sys.current_state
sys.propagate()
sys._update_energies()
        # check that the middle step is not the same
self.assertNotEqual(sys._currentPosition, initialState.position,
msg="The initialState equals the currentState after propergating in attribute: Position!")
self.assertEqual(sys._currentTemperature, initialState.temperature,
msg="The initialState does not equal the currentState after propergating in attribute: temperature!")
self.assertNotAlmostEqual(sys._currentTotPot, initialState.total_potential_energy,
msg="The initialState does equal the currentState after propergating in attribute: total_potential_energy!")
self.assertEqual(np.isnan(sys._currentTotKin), np.isnan(initialState.total_kinetic_energy),
msg="The initialState does equal the currentState after propergating in attribute: total_kinetic_energy!")
self.assertNotEqual(sys._currentForce, initialState.dhdpos,
msg="The initialState equals the currentState after propergating in attribute: dhdpos!")
self.assertEqual(np.isnan(sys._currentVelocities), np.isnan(initialState.velocity),
msg="The initialState does not equal the currentState after propergating in attribute: velocity!")
def test_totPot(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
lam=0
temperature = 300
position = [0]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
self.assertAlmostEqual(sys.calculate_total_potential_energy(), 12.5, msg="The initialised total_potential_energy is not correct!")
def test_totKin(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
lam = 0
temperature = 300
position = [0]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
self.assertTrue(np.isnan(sys.calculate_total_kinetic_energy()), msg="The initialised total_potential_energy is not correct!")
newPosition = 10
newVelocity = -5
newForces = 3
newLam = 1
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces, new_lambda=newLam)
self.assertAlmostEqual(sys.calculate_total_kinetic_energy(), 12.5, msg="The initialised total_potential_energy is not correct!")
def test_setTemperature(self):
lam = 0
temperature = 300
temperature2 = 600
position = [0]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
sys._currentVelocities = 100
sys.update_current_state()
initialState = sys.current_state
sys.set_temperature(temperature2)
        # check that the middle step is not the same
self.assertListEqual(list(sys._currentPosition), list(initialState.position),
msg="The initialState equals the currentState after propergating in attribute: Position!")
self.assertNotEqual(sys._currentTemperature, initialState.temperature,
msg="The initialState does equal the currentState after propergating in attribute: temperature!")
self.assertAlmostEqual(sys._currentTotPot, initialState.total_potential_energy,
msg="The initialState does equal the currentState after propergating in attribute: total_potential_energy!")
self.assertNotAlmostEqual(sys._currentTotKin, initialState.total_kinetic_energy,
msg="The initialState does not equal the currentState after propergating in attribute: total_kinetic_energy!")
self.assertEqual(np.isnan(sys._currentForce), np.isnan(initialState.dhdpos),
msg="The initialState equals the currentState after propergating in attribute: dhdpos!")
self.assertEqual(sys._currentVelocities, initialState.velocity,
msg="The initialState does equal the currentState after propergating in attribute: velocity!")
def test_get_Pot(self):
lam = 0
temperature = 300
position = [5]
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, lam=lam)
self.assertEqual(0.0, sys.total_potential_energy, msg="Could not get the correct Pot Energy!")
class test_edsSystem1D(test_System):
system_class = system.eds_system.edsSystem
tmp_test_dir: str = None
def setUp(self) -> None:
test_dir = os.getcwd()+"/tests_out"
if(not os.path.exists(test_dir)):
os.mkdir(test_dir)
if (__class__.tmp_test_dir is None):
__class__.tmp_test_dir = tempfile.mkdtemp(dir=test_dir, prefix="tmp_test_eds_system")
_, self.tmp_out_path = tempfile.mkstemp(prefix="test_" + self.system_class.name, suffix=".obj",
dir=__class__.tmp_test_dir)
self.sampler = samplers.stochastic.metropolisMonteCarloIntegrator()
self.pot = potentials.OneD.envelopedPotential()
def test_system_constructor(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
s = 1
conditions = []
temperature = 300
position = 0
mass = [1]
expected_state = data.envelopedPStstate(position=0, temperature=temperature, s=1.0, eoff=[0,0],
total_system_energy=-0.011047744848593777, total_potential_energy=-0.011047744848593777, total_kinetic_energy=np.nan,
dhdpos=np.nan, velocity=np.nan)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, eds_s=s)
curState = sys.current_state
# check attributes
self.assertEqual(self.pot.constants[self.pot.nDimensions], sys.nDimensions,
msg="Dimensionality was not the same for system and potential!")
self.assertEqual([], sys.conditions, msg="Conditions were not empty!")
# print(curState)
        # check current state initialisation
self.assertEqual(curState.position, expected_state.position, msg="The initialised Position is not correct!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertEqual(np.isnan(curState.total_kinetic_energy), np.isnan(expected_state.total_kinetic_energy),
msg="The initialised total_kinetic_energy is not correct!")
# self.assertEqual(np.isnan(curState.dhdpos), np.isnan(expected_state.dhdpos), msg="The initialised dhdpos is not correct!")
self.assertEqual(np.isnan(curState.velocity), np.isnan(expected_state.velocity),
msg="The initialised velocity is not correct!")
self.assertEqual(curState.s, expected_state.s,
msg="The initialised s is not correct!")
self.assertEqual(curState.eoff, expected_state.eoff,
msg="The initialised Eoff is not correct!")
def test_system_constructor_detail(self):
"""
uses init_state, updateEne, randomPos, self.state
:return:
"""
conditions = []
temperature = 300
position = 0.1
mass = [1]
expected_state = data.basicState(position=position, temperature=temperature,
total_system_energy=-0.009884254671918117, total_potential_energy=-0.009884254671918117,
total_kinetic_energy=np.nan, dhdpos=np.array(-0.0556779),
velocity=np.nan) # Monte carlo does not use dhdpos or velocity
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position, temperature=temperature)
curState = sys.current_state
print(curState)
# check attributes
self.assertEqual(self.pot.constants[self.pot.nDimensions], sys.nDimensions,
msg="Dimensionality was not the same for system and potential!")
self.assertEqual([], sys.conditions, msg="Conditions were not empty!")
# print(curState)
        # check current state initialisation
self.assertEqual(expected_state.position, curState.position, msg="The initialised Position is not correct!")
self.assertEqual(expected_state.temperature, curState.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(expected_state.total_system_energy, curState.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(expected_state.total_potential_energy, curState.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertEqual(np.isnan(expected_state.total_kinetic_energy), np.isnan(curState.total_kinetic_energy),
msg="The initialised total_kinetic_energy is not correct!")
self.assertEqual(np.isnan(expected_state.velocity), np.isnan(curState.velocity),
msg="The initialised velocity is not correct!")
def test_append_state(self):
temperature = 300
position = 0
s = 1.0
Eoff=[0,0]
newPosition = 10
newVelocity = -5
newForces = 3
newEoff = [1,1]
newS = [2]
expected_state = data.envelopedPStstate(position=newPosition, temperature=temperature, s=newS, eoff=newEoff,
total_system_energy=36.99999999999157, total_potential_energy=24.499999999991577, total_kinetic_energy=12.5,
dhdpos=newForces, velocity=newVelocity)
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, eds_s=s)
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces, new_s=newS, new_eoff=newEoff)
curState = sys.current_state
        # check current state initialisation
self.assertEqual(sys._currentPosition, expected_state.position, msg="The initialised Position is not correct!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The initialised temperature is not correct!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The initialised total_system_energy is not correct!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The initialised total_potential_energy is not correct!")
self.assertAlmostEqual(curState.total_kinetic_energy, expected_state.total_kinetic_energy,
msg="The initialised total_kinetic_energy is not correct!")
# self.assertEqual(curState.dhdpos, expected_state.dhdpos, msg="The initialised dhdpos is not correct!")
self.assertEqual(np.isnan(curState.velocity), np.isnan(expected_state.velocity),
msg="The initialised velocity is not correct!")
self.assertEqual(curState.s, expected_state.s, msg="The initialised s is not correct!")
self.assertEqual(curState.eoff, expected_state.eoff, msg="The initialised Eoff is not correct!")
def test_revertStep(self):
newPosition = 10
newVelocity = -5
newForces = 3
newS = 1.0
newEoff = [1,1]
newPosition2 = 13
newVelocity2 = -4
newForces2 = 8
newS2 = 0.5
newEoff2 = [2,2]
integ = samplers.stochastic.metropolisMonteCarloIntegrator()
ha = potentials.OneD.harmonicOscillatorPotential(x_shift=-5)
hb = potentials.OneD.harmonicOscillatorPotential(x_shift=5)
s = 1
pot = potentials.OneD.exponentialCoupledPotentials(Va=ha, Vb=hb, s=1)
conditions = []
temperature = 300
position = [0]
mass = [1]
sys = self.system_class(potential=pot, sampler=integ, start_position=position,
temperature=temperature, eds_s=s)
sys.append_state(new_position=newPosition, new_velocity=newVelocity, new_forces=newForces, new_s=newS, new_eoff=newEoff)
expected_state = sys.current_state
sys.append_state(new_position=newPosition2, new_velocity=newVelocity2, new_forces=newForces2, new_s=newS2, new_eoff=newEoff2)
not_expected_state = sys.current_state
print(len(sys._trajectory), sys._trajectory)
sys.revert_step()
curState = sys.current_state
print(curState)
print(not_expected_state)
        # check current state initialisation
self.assertEqual(curState.position, expected_state.position,
msg="The current Position is not equal to the one two steps before!")
self.assertEqual(curState.temperature, expected_state.temperature,
msg="The current temperature is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_system_energy, expected_state.total_system_energy,
msg="The current total_system_energy is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_potential_energy, expected_state.total_potential_energy,
msg="The current total_potential_energy is not equal to the one two steps before!")
self.assertAlmostEqual(curState.total_kinetic_energy, expected_state.total_kinetic_energy,
msg="The current total_kinetic_energy is not equal to the one two steps before!")
# self.assertEqual(curState.dhdpos, expected_state.dhdpos, msg="The current dhdpos is not equal to the one two steps before!")
self.assertEqual(curState.velocity, expected_state.velocity,
msg="The current velocity is not equal to the one two steps before!")
self.assertEqual(curState.s, expected_state.s,
msg="The current s is not equal to the one two steps before!")
np.testing.assert_almost_equal(curState.eoff, expected_state.eoff,
err_msg="The initialised Eoff is not correct as not equal to two steps before!")
        # check that the middle step is not the same
self.assertNotEqual(curState.position, not_expected_state.position,
msg="The not expected Position equals the current one!")
self.assertEqual(curState.temperature, not_expected_state.temperature,
msg="The not expected temperature equals the current one")
self.assertNotAlmostEqual(curState.total_system_energy, not_expected_state.total_system_energy,
msg="The not expected total_system_energy equals the current one")
self.assertNotAlmostEqual(curState.total_potential_energy, not_expected_state.total_potential_energy,
msg="The not expected total_potential_energy equals the current one")
self.assertNotAlmostEqual(curState.total_kinetic_energy, not_expected_state.total_kinetic_energy,
msg="The not expected total_kinetic_energy equals the current one")
# self.assertNotEqual(curState.dhdpos, not_expected_state.dhdpos, msg="The not expected dhdpos, equals the current one")
self.assertNotEqual(curState.velocity, not_expected_state.velocity,
msg="The not expected velocity equals the current one")
self.assertNotEqual(curState.s, not_expected_state.s, msg="The not expected lam equals the current one")
self.assertNotEqual(curState.eoff, not_expected_state.eoff, msg="The initialised Eoff is not correct!")
def test_propergate(self):
temperature = 300
position = [0]
s=1
sys = self.system_class(potential=self.pot, sampler=self.sampler, start_position=position,
temperature=temperature, eds_s=s)
initialState = sys.current_state
sys.propagate()
        # check that the middle step is not the same
self.assertNotEqual(sys._currentPosition, initialState.position,
msg="The initialState equals the currentState after propagating in attribute: Position!")
self.assertEqual(sys._currentTemperature, initialState.temperature,
msg="The initialState does not equal the currentState after propergating in attribute: temperature!")
self.assertAlmostEqual(sys._currentTotPot, initialState.total_potential_energy,
msg="The initialState does not equal the currentState after propergating in attribute: total_potential_energy!")
        self.assertEqual(np.isnan(sys._currentTotKin), np.isnan(initialState.total_kinetic_energy),
                         msg="The initialState does not equal the currentState after propergating in attribute: total_kinetic_energy!")
# -*- coding:utf-8 -*-
"""
@file name : loss_acc_weights_grad.py
# @author : TingsongYu https://github.com/TingsongYu
@date : 2019-10-24
@brief : monitor loss, accuracy, weights, gradients
"""
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from matplotlib import pyplot as plt
from model.lenet import LeNet
from tools.my_dataset import RMBDataset
from tools.common_tools import set_seed
set_seed()  # set the random seed
rmb_label = {"1": 0, "100": 1}
# parameter settings
MAX_EPOCH = 10
BATCH_SIZE = 16
LR = 0.01
log_interval = 10
val_interval = 1
# ============================ step 1/5 data ============================
split_dir = os.path.join("..", "..", "data", "rmb_split")
train_dir = os.path.join(split_dir, "train")
valid_dir = os.path.join(split_dir, "valid")
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.RandomCrop(32, padding=4),
transforms.RandomGrayscale(p=0.8),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std),
])
valid_transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std),
])
# build MyDataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)
# build DataLoader
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)
# ============================ step 2/5 model ============================
net = LeNet(classes=2)
net.initialize_weights()
# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss()  # choose the loss function
# ============================ step 4/5 optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)  # choose the optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)  # learning-rate decay schedule
# ============================ step 5/5 training ============================
train_curve = list()
valid_curve = list()
iter_count = 0
# build the SummaryWriter
writer = SummaryWriter(comment='test_your_comment', filename_suffix="_test_your_filename_suffix")
for epoch in range(MAX_EPOCH):
loss_mean = 0.
correct = 0.
total = 0.
net.train()
for i, data in enumerate(train_loader):
iter_count += 1
# forward
inputs, labels = data
outputs = net(inputs)
# backward
optimizer.zero_grad()
loss = criterion(outputs, labels)
loss.backward()
# update weights
optimizer.step()
        # classification statistics
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).squeeze().sum().numpy()
        # print training info
loss_mean += loss.item()
train_curve.append(loss.item())
if (i+1) % log_interval == 0:
loss_mean = loss_mean / log_interval
print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
loss_mean = 0.
            # log data to the event file
writer.add_scalars("Loss", {"Train": loss.item()}, iter_count)
writer.add_scalars("Accuracy", {"Train": correct / total}, iter_count)
    # at every epoch, log gradients and weights
for name, param in net.named_parameters():
writer.add_histogram(name + '_grad', param.grad, epoch)
writer.add_histogram(name + '_data', param, epoch)
    scheduler.step()  # update the learning rate
# validate the model
if (epoch+1) % val_interval == 0:
correct_val = 0.
total_val = 0.
loss_val = 0.
net.eval()
with torch.no_grad():
for j, data in enumerate(valid_loader):
inputs, labels = data
outputs = net(inputs)
loss = criterion(outputs, labels)
_, predicted = torch.max(outputs.data, 1)
total_val += labels.size(0)
correct_val += (predicted == labels).squeeze().sum().numpy()
loss_val += loss.item()
valid_curve.append(loss.item())
print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, correct / total))
        # log data to the event file
        writer.add_scalars("Loss", {"Valid": np.mean(valid_curve)}, iter_count)
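# To inspect the scalars and histograms logged above, point TensorBoard at the
# log directory created by SummaryWriter (by default a "runs/..." folder next
# to this script, suffixed with the comment/filename_suffix set above), e.g.:
#
#   tensorboard --logdir=runs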
from __future__ import print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
from optparse import OptionParser
from math import sqrt, floor, ceil, pi
from glob import glob
import numpy as np
import pylab as plt
from scipy import linalg
from astrom_common import *
from astrom_intra import Intra
from astrom_merge import mergeBrick
#from astrom_merge2 import mergeBrick2
from astrometry.util.plotutils import antigray
def loadBrickCached(cam, brick, mergedfn=None, ps=None, **kwargs):
if cam in ['CFHT', 'CFHT2']:
return loadBrick(cam, **kwargs)
T = mergeBrick(cam, brick, mergedfn, ps, **kwargs)
if 'primary' in T.columns():
T = T[T.primary]
print('After cutting on primary:', len(T))
return T
def main():
import sys
parser = OptionParser(usage='%prog [options] <gst.fits filenames>')
parser.add_option('-b', '--brick', dest='brick', type='int', help='Brick')
parser.add_option('-c', '--cam', dest='cam', help='Camera -- ACS, IR or UV', action=None)
parser.add_option('--ref', dest='ref', help='Reference "camera" -- CFHT, ACS, IR or UV', action=None)
parser.add_option('--refmerged', dest='refmergedfn', help='File to read/write merged reference sources from/into')
#parser.add_option('--refitab', dest='refitab', help='Reference source table')
parser.add_option('--refmagcut', dest='refmagcut', type='float', help='Reference mag cut')
parser.add_option('-p', '--path', dest='path', help='Path to .gst.fits files (default: "data/pipe/*/proc")')
parser.add_option('-r', '--radius', dest='radius', type='float', help='Search radius (default 1")', default=1.)
parser.add_option('-m', '--magcut', dest='magcut', type='float', help='mag cut (default: 22 for ACS, 21 for IR)')
parser.add_option('-R', '--rotation', dest='rotation', type='float', help='Apply this rotation correction (default=0 deg)', default=0.)
parser.add_option('-s', '--smallrad', dest='smallrad', type='float', help='Small search radius (default 0.1")', default=0.1)
parser.add_option('-E', '--emrad', dest='emrad', type='float', help='Radius for EM (default: searchrad)')
parser.add_option('--merged', dest='mergedfn', help='File to read/write merged sources from/into')
#parser.add_option('--itab', dest='itab', help='Target source table')
parser.add_option('-G', '--grid', dest='grid', action='store_true', default=False,
help='Show a grid of the 18 fields in this brick.')
parser.add_option('-B', '--basefn', dest='basefn',
help='Base output filename for plots')
parser.add_option('--rot-lo', dest='rotlo', type='float',
help='Search rotations from --rot-lo to --rot-hi in steps of --rot-step')
parser.add_option('--rot-hi', dest='rothi', type='float')
parser.add_option('--rot-step', dest='rotstep', type='float', default=0.01)
parser.add_option('--output', '-o', dest='outfn', help='Output filename (affine FITS)', default=None)
opt,args = parser.parse_args()
if opt.brick is None or opt.cam is None:
parser.print_help()
print('Need --brick and --cam')
sys.exit(-1)
if opt.emrad is None:
opt.emrad = opt.radius
#if opt.itab is not None:
# opt.itab = fits_table(opt.itab)
#if opt.refitab is not None:
# opt.refitab = fits_table(opt.refitab)
if opt.basefn is None:
basefn = 'inter-%02i-%s-%s' % (opt.brick, opt.cam, opt.ref)
else:
basefn = opt.basefn
ps = PlotSequence(basefn+'-', format='%02i')
Tme = loadBrickCached(opt.cam, opt.brick, path=opt.path, mergedfn=opt.mergedfn,
#itab=opt.itab,
ps=ps)
me = describeFilters(opt.cam, Tme)
Tref = loadBrickCached(opt.ref, opt.brick, path=opt.path, mergedfn=opt.refmergedfn,
#itab=opt.refitab,
ps=ps)
ref = describeFilters(opt.ref, Tref)
i,j = getNearMags(me, ref)
Tme.cam = opt.cam
Tme.mag = Tme.get('mag%i' % (i+1))
Tme.filter = me.fnames[i]
Tref.cam = opt.ref
Tref.mag = Tref.get('mag%i' % (j+1))
Tref.filter = ref.fnames[j]
if opt.magcut is not None:
I = (Tme.mag < opt.magcut)
Tme = Tme[I]
print('Got', len(Tme), 'after mag cut (at', opt.magcut, ')')
if opt.refmagcut is not None:
I = (Tref.mag < opt.refmagcut)
Tref = Tref[I]
print('Got', len(Tref), 'reference after mag cut (at %g)' % opt.refmagcut)
rl,rh = Tme.ra.min(), Tme.ra.max()
dl,dh = Tme.dec.min(), Tme.dec.max()
dmid = (dl+dh)/2.
rmid = (rl+rh)/2.
def rotate_radec(rot, ra, dec, refra, refdec):
trans = Affine()
trans.setRotation(rot, smallangle=False)
trans.setReferenceRadec(refra, refdec)
newra,newdec = trans.apply(ra, dec)
return newra, newdec, trans
rot = 0
trans0 = None
if opt.rotation != 0.:
rot = opt.rotation
# rotate.
print('Applying rotation correction of', rot, 'deg')
Tme.ra, Tme.dec, trans0 = rotate_radec(rot, Tme.ra, Tme.dec, rmid, dmid)
elif opt.rotlo is not None and opt.rothi is not None:
lo = opt.rotlo
hi = opt.rothi
step = opt.rotstep
print('Trying rotations between', lo, 'and', hi, 'in steps of', step)
variances = []
rots = np.arange(lo, hi+step/2., step)
for rot in rots:
print('Rotation', rot)
Tm = Tme.copy()
Tm.ra, Tm.dec, nil = rotate_radec(rot, Tm.ra, Tm.dec, rmid, dmid)
print('Matching...')
M = Match(Tm, Tref, opt.radius)
print('Got %i matches' % len(M.I))
nbins = 200
H,xe,ye = plothist(M.dra_arcsec, M.ddec_arcsec, nbins)
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDec (arcsec)')
plt.title('Rotated by %g deg' % rot)
ps.savefig()
plotresids(Tm, M, 'Rotated by %g deg' % rot, bins=100)
ps.savefig()
# Trim the circle to avoid edge effects, and then measure the variance.
X,Y = np.meshgrid(np.arange(nbins), np.arange(nbins))
R2 = (X - nbins/2.)**2 + (Y - nbins/2.)**2
I = (R2 < (0.95 * (nbins/2)**2))
v = np.var(H[I])
print('Variance:', v)
variances.append(v)
plt.clf()
plt.plot(rots, variances, 'r-')
plt.xlabel('Rotation (deg)')
plt.ylabel('Variance in dRA,dDec histogram')
ps.savefig()
I = np.argmax(variances)
rot = rots[I]
print('Applying rotation correction of', rot, 'deg')
Tme.ra, Tme.dec, trans0 = rotate_radec(rot, Tme.ra, Tme.dec, rmid, dmid)
if trans0 is not None:
print('Setting initial rotation affine transformation:')
print(trans0)
A = alignAndPlot(Tme, Tref, opt.radius, ps, emrad=opt.emrad, doweighted=False)
#print 'Cov:', A.C
trans = findAffine(Tme, Tref, A, (rmid,dmid))
RR,DD = np.meshgrid(np.linspace(rl, rh, 20),
np.linspace(dl, dh, 20))
RR = RR.ravel()
DD = DD.ravel()
plotaffine(trans, RR, DD, exag=1.)
setRadecAxes(rl,rh,dl,dh)
ps.savefig()
plotaffine(trans, RR, DD, exag=100.)
setRadecAxes(rl,rh,dl,dh)
ps.savefig()
exag = 1000.
plotaffine(trans, RR, DD, exag, affineOnly=True)
ps.savefig()
Tme.ra,Tme.dec = trans.apply(Tme.ra, Tme.dec)
# Do it again!
A2 = alignAndPlot(Tme, Tref, opt.smallrad, ps, doweighted=False, emrad=opt.smallrad)
trans2 = findAffine(Tme, Tref, A2, (rmid,dmid))
Tme.ra,Tme.dec = trans2.apply(Tme.ra, Tme.dec)
# For the 'after' plots
A3 = alignAndPlot(Tme, Tref, opt.smallrad, ps, doweighted=False, emrad=opt.smallrad)
# Save
if opt.outfn:
if trans0 is None:
trans.add(trans2)
else:
trans0.add(trans)
trans0.add(trans2)
trans = trans0
T = Affine.toTable([trans])
T.writeto(opt.outfn)
def findAffine(Tme, Tref, A, refradec, affine=True, order=1):
'''
Computes an Affine transformation between two aligned catalogs.
*Tme*: catalog to align
*Tref*: reference catalog
*A*: an Alignment object matching these two catalogs
*refradec*: tuple (refra, refdec) of the reference point about which to
rotate.
*affine*: if True, produce an affine transformation; otherwise, just a shift
*order*: polynomial distortion order.
Returns:
*Affine* object.
'''
refra,refdec = refradec
rascale = np.cos(np.deg2rad(refdec))
srdeg,sddeg = A.getshift()
if not affine:
affine = Affine(dra = -srdeg, ddec = -sddeg,
refra = refra, refdec = refdec)
return affine
assert(order >= 1)
sr,sd = A.arcsecshift()
w = np.sqrt(A.fore)
M = A.match
dra = M.dra_arcsec [A.subset] - sr
ddec = M.ddec_arcsec[A.subset] - sd
ra = Tme.ra [M.I[A.subset]]
dec = Tme.dec[M.I[A.subset]]
comps = [np.ones_like(ra) * w]
for o in range(1, order+1):
for deco in range(o+1):
rao = o - deco
rr = (ra - refra )*rascale
dd = (dec - refdec)
# rr and dd are in isotropic degrees
comps.append((rr ** rao) * (dd ** deco) * w)
print('ra order', rao, 'dec order', deco)
# In the linear case (order=1), the terms are listed as rao=1 then deco=1
Amat = np.vstack(comps).T
Amat = np.matrix(Amat)
# dra,ddec are in isotropic degrees
b1 = -dra / 3600. * w
b2 = -ddec / 3600. * w
X1 = linalg.lstsq(Amat, b1)
X2 = linalg.lstsq(Amat, b2)
X1 = X1[0]
X2 = X2[0]
e,a,b = X1[:3]
f,c,d = X2[:3]
#print 'a,b,c,d', a,b,c,d
#print 'e,f', e,f
if order >= 2:
rapoly = X1[3:]
decpoly = X2[3:]
else:
rapoly = decpoly = None
affine = Affine(dra = e/rascale - srdeg, ddec = f - sddeg,
T = [ a, b, c, d ],
refra = refra, refdec = refdec,
rapoly=rapoly, decpoly=decpoly)
return affine
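# Usage sketch (hypothetical, mirroring the calls in main() above): fit a
# first-order affine correction about the field centre and apply it in place:
#
#   A = alignAndPlot(Tme, Tref, radius, ps, doweighted=False, emrad=radius)
#   trans = findAffine(Tme, Tref, A, (rmid, dmid), order=1)
#   Tme.ra, Tme.dec = trans.apply(Tme.ra, Tme.dec)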
def alignAndPlot(Tme, Tref, rad, ps, doweighted=True, emrad=None, nearest=False, **kwargs):
'''
Returns the Alignment object A.
'''
aliargs = dict(cutrange=emrad)
aliargs.update(kwargs)
A = Alignment(Tme, Tref, searchradius=rad, **aliargs)
if nearest:
# There is something badly wrong with spherematch.nearest().
assert(False)
A.findMatches(nearest=True)
M = A.match
print('dra,ddec arcsec:', M.dra_arcsec[:100], M.ddec_arcsec[:100])
if A.shift() is None:
print('Shift not found!')
return None
M = A.match
print('Shift:', A.arcsecshift())
sr,sd = A.arcsecshift()
sumd2 = np.sum(A.fore * ((M.dra_arcsec [A.subset] - sr)**2 +
(M.ddec_arcsec[A.subset] - sd)**2))
sumw = np.sum(A.fore)
# / 2. to get std per coord.
std = sqrt(sumd2 / (sumw * 2.))
angles = np.linspace(0, 2.*pi, 100)
modstr = ''
if A.cov:
eigs = A.getEllipseSize() * 1000.
if eigs[0] > 100:
modstr = '%.0fx%.0f' % (eigs[0], eigs[1])
else:
modstr = '%.1fx%.1f' % (eigs[0], eigs[1])
else:
modstr = '%.1f' % (1000. * A.sigma)
W = np.zeros_like(A.subset).astype(float)
W[A.subset] = A.fore
rl,rh = Tme.ra.min(), Tme.ra.max()
dl,dh = Tme.dec.min(), Tme.dec.max()
if doweighted:
rounds = [ {}, { 'weights': W } ]
else:
rounds = [ {} ]
for i,args in enumerate(rounds):
tsuf = '' if i == 0 else ' (weighted)'
N = len(M.dra_arcsec) if i == 0 else sumw
plotresids(Tme, M, '%s-%s match residuals%s' % (Tme.cam, Tref.cam, tsuf),
bins=100, **args)
ps.savefig()
dst = 1000. * np.sqrt(M.dra_arcsec ** 2 + M.ddec_arcsec ** 2)
loghist(Tme.mag[M.I], dst, 100, **args)
plt.xlabel(Tme.filter)
plt.ylabel('Match residual (mas)')
ps.savefig()
loghist(Tref.mag[M.J], dst, 100, **args)
plt.xlabel(Tref.filter)
plt.ylabel('Match residual (mas)')
ps.savefig()
H,xe,ye = plotalignment(A)
# show EM circle
ax = plt.axis()
angles = np.linspace(0, 2.*pi, 100)
c = A.cutcenter
r = A.cutrange
plt.plot(c[0] + r * np.cos(angles), c[1] + r * np.sin(angles), 'g--')
plt.axis(ax)
plt.title('%s-%s (%i matches, std %.1f mas, model %s)%s' %
(Tme.cam, Tref.cam, int(sumw), std*1000., modstr, tsuf))
ps.savefig()
bins = 200
edges = np.linspace(-rad, rad, bins)
DR,DD = np.meshgrid(edges, edges)
"""
Module for data interaction tools for the LIM package.
Author: <NAME>
"""
import tables as tb
import dask.array as da
import numpy as np
import os.path as path
import netCDF4 as ncf
import numexpr as ne
import pickle as cpk
import logging
from datetime import datetime
from copy import copy, deepcopy
from .Stats import run_mean, calc_anomaly, detrend_data, is_dask_array, \
dask_detrend_data, calc_eofs
# Prevents any nodes in HDF5 file from being cached, saving space
# tb.parameters.NODE_CACHE_SLOTS = 0
# Set the overflow cache for Dask operations
# CACHE = chest.Chest(available_memory=16e9,
# path='/home/katabatic/wperkins/scratch')
# dask.set_options(cache=CACHE)
# Initialize logging client for this module
logger = logging.getLogger(__name__)
class BaseDataObject(object):
"""Data Input Object
This class is for handling data which may be in a masked format. This
class can also be used to expand previously compressed data if an
original mask is provided.
Notes
-----
Right now it is writen to work with 2D spatial data. It assumes that
the leading dimension is temporal. In the future it might change to
incorporate 3D spatial fields or just general data.
"""
# Static names
TIME = 'time'
LEVEL = 'level'
LAT = 'lat'
LON = 'lon'
# Static databin keys
_COMPRESSED = 'compressed_data'
_ORIGDATA = 'orig'
_DETRENDED = 'detrended'
_AWGHT = 'area_weighted'
_RUNMEAN = 'running_mean'
_ANOMALY = 'anomaly'
_CLIMO = 'climo'
_STD = 'standardized'
_EOFPROJ = 'eof_proj'
@staticmethod
def _match_dims(shape, dim_coords):
"""
Match each dimension key in dim_coords dict to the correct index of the
shape.
"""
return {key: value[0] for key, value in list(dim_coords.items())
if shape[value[0]] == len(value[1])}
def __init__(self, data, dim_coords=None, coord_grids=None,
valid_data=None, force_flat=False, cell_area=None,
irregular_grid=False,
save_none=False, time_units=None, time_cal=None,
fill_value=None):
"""
Construction of a DataObject from input data. If nan or
infinite values are present, a compressed version of the data
is stored.
Parameters
----------
data: ndarray
Input dataset to be used.
dim_coords: dict(str:(int, ndarray)
Dimension position and oordinate vector dictionary for supplied
data. Please use DataObject attributes (e.g. DataObject.TIME)
for dictionary keys.
coord_grids: dict(str:ndarray), optional
Full grids of each dimensions coordinates. If not provided these
can be created from dim_coords as long as the grid is regular.
If grid is irregular these should be provided for easier plotting.
valid_data: ndarray (np.bool), optional
Array corresponding to valid data in the of the input dataset
or uncompressed version of the input dataset. Should have the same
number of dimensions as the data and each dimension should be
greater than or equal to the spatial dimensions of data.
force_flat: bool, optional
Force spatial dimensions to be flattened (1D array)
cell_area: ndarray, optional
Grid cell areas used for area weighting the data.
irregular_grid: bool, optional
Whether or not the source grid is regular. Default: False
save_none: bool, optional
If true, data object will not save any of the intermediate
calculation data.
time_units: str, optional
Units string to be used by netcdf.date2num function for storing
datetime objects as a numeric value for output.
time_cal: str, optional
Calendar string to be used by netcdf.date2num function for storing
datetime objects as a numeric value for output
fill_value: float
Value to be considered invalid data during the mask and
compression. Only considered when data is not masked.
"""
logger.info('Initializing data object from {}'.format(self.__class__))
assert data.ndim <= 4, 'Maximum of 4 dimensions are allowed.'
self._full_shp = data.shape
self.forced_flat = force_flat
self.time_units = time_units
self.time_cal = time_cal
self.cell_area = cell_area
self.irregular_grid = irregular_grid
self._coord_grids = coord_grids
self._fill_value = fill_value
self._save_none = save_none
self._data_bins = {}
self._curr_data_key = None
self._ops_performed = {}
self._altered_time_coords = {}
self._start_time_edge = None
self._end_time_edge = None
self._eofs = None
self._svals = None
self._eof_stats = {}
self._tb_file_args = None
self._std_scaling = None
# Future possible data manipulation functionality
self.anomaly = None
self.climo = None
self.compressed_data = None
self.running_mean = None
self.detrended = None
self.area_weighted = None
self.eof_proj = None
self.standardized = None
# Match dimension coordinate vectors
if dim_coords is not None:
if self.TIME in list(dim_coords.keys()):
time_idx, time_coord = dim_coords[self.TIME]
if time_idx != 0:
logger.error('Non-leading time dimension encountered in '
'dim_coords.')
raise ValueError('Sampling dimension must always be the '
'leading dimension if provided.')
self._leading_time = True
self._time_shp = [data.shape[0]]
self._spatial_shp = data.shape[1:]
self._altered_time_coords[self._ORIGDATA] = time_coord
else:
self._leading_time = False
self._time_shp = []
self._spatial_shp = self._full_shp
self._dim_idx = self._match_dims(data.shape, dim_coords)
self._dim_coords = dim_coords
else:
self._leading_time = False
self._time_shp = []
self._spatial_shp = self._full_shp
self._dim_idx = None
self._flat_spatial_shp = [np.product(self._spatial_shp)]
logger.info('Time shape: {}'.format(self._time_shp))
logger.info('Spatial shape: {}\n'.format(self._spatial_shp))
logger.info('Flattened spatial length: '
'{}'.format(self._flat_spatial_shp))
# Check to see if data input is a compressed version
compressed = False
if valid_data is not None:
dim_lim = valid_data.ndim
if dim_lim > 3:
logger.error('Valid data has more than 3 dimensions: '
'ndim={}'.format(dim_lim))
raise ValueError('Valid data mask should not have more than 3 '
'dimensions')
elif dim_lim != len(self._spatial_shp):
logger.error('Valid data dimensions not equivalent to the '
'shape of the spatial field: \n'
'valid_data.ndim={}\n'
'_spatial_shp.ndim={}'.format(dim_lim,
self._spatial_shp))
# Check the dimensions of the mask and data to se if compressed
for dat_dim, mask_dim in zip(self._spatial_shp, valid_data.shape):
if dat_dim > mask_dim:
logger.error('Data dimension greater than mask dimension:'
'{} > {}'.format(dat_dim, mask_dim))
raise ValueError('Encountered data dimension larger than'
'equivalent masked dimension.')
compressed |= dat_dim < mask_dim
# Apply input mask if its spatial dimensions match data
if not compressed:
# multplication broadcasts across leading sampling dimension if
# applicable
full_valid = np.ones_like(data, dtype=np.bool) * valid_data
data[~full_valid] = np.nan
logger.debug('Mask applied (NaN) to non-compressed data.')
else:
if not np.all(np.isfinite(data)):
logger.error('Data determined to be compressed still '
'contains non-finite elements.')
raise ValueError('Non-finite value encountered in '
'compressed data.')
self._full_shp = self._time_shp + list(valid_data.shape)
logger.debug('Compressed data encountered. Full shape: '
'{}'.format(self._full_shp))
self.valid_data = valid_data.flatten()
self.is_masked = True
# Masked array valid handling
self.is_masked, self.valid_data = self._data_masking(data)
if self.valid_data is not None:
self.valid_data = self.valid_data.flatten()
self.data = data
# Flatten Spatial Dimension if applicable
if force_flat or self.is_masked:
self._flatten_curr_data()
logger.debug('Flattening data over spatial dimensions. New shp: '
'{}'.format(self.data.shape))
self.orig = self._new_databin(self.data, self._ORIGDATA)
self._add_to_operation_history(None, self._ORIGDATA)
self._set_curr_data_key(self._ORIGDATA)
# Compress the data if mask is present
if compressed:
self.compressed_data = self.data
elif self.is_masked:
if not save_none:
if self._leading_time:
new_shp = (self._time_shp[0], self.valid_data.sum())
else:
new_shp = (self.valid_data.sum(),)
self.compressed_data = self._new_empty_databin(new_shp,
self.data.dtype,
self._COMPRESSED)
self.data = self._compress_to_valid_data(self.data,
self.valid_data,
out_arr=self.compressed_data)
self._add_to_operation_history(self._curr_data_key, self._COMPRESSED)
self._set_curr_data_key(self._COMPRESSED)
else:
self.reset_data(self._ORIGDATA)
def _set_curr_data_key(self, new_key):
logger.debug('Setting current data key to: {}'.format(new_key))
self._curr_data_key = new_key
def _add_to_operation_history(self, curr_dkey, new_op_key):
if curr_dkey is None:
self._ops_performed[new_op_key] = [new_op_key]
else:
self._ops_performed[new_op_key] = list(self._ops_performed[curr_dkey])
self._ops_performed[new_op_key] += [new_op_key]
def _new_empty_databin(self, shape, dtype, name):
"""
Create an empty backend data container.
"""
logger.debug('Creating empty databin: \n'
'shape: {}\n'
'dtype: {}\n'
'name: {}'.format(shape, dtype, name))
new = np.empty(shape, dtype=dtype)
self._data_bins[name] = new
return new
def _new_databin(self, data, name):
"""
Create and copy data into a new backend data container.
"""
logger.debug('Copying data to databin: {}'.format(name))
new = np.empty_like(data)
new[:] = data
self._data_bins[name] = new
return new
def _gen_composite_mask(self, data):
"""
Generate a mask (over the time dimension if present) that masks all
locations that are missing data.
"""
logger.debug('Generating composite mask from data mask.')
if self._leading_time:
composite_mask = data.mask.sum(axis=0) > 0
else:
composite_mask = data.mask
return composite_mask
def _check_invalid_data(self, data):
"""
Check for invalid (inf or NaN) data in the data. Like
_gen_composite_mask it operates over the time dimension if present,
and only returns true for locations that have all valid data.
"""
logger.info('Checking data for invalid elements.')
full_valid = np.isfinite(data)
if self._fill_value is not None:
full_valid &= data != self._fill_value
if not np.all(full_valid):
masked = True
if self._leading_time:
valid_data = full_valid.sum(axis=0) == self._time_shp[0]
else:
valid_data = full_valid
logger.debug('Found invalid values. {:d} spatial elements masked.'
''.format(np.logical_not(valid_data).sum()))
else:
logger.debug('No invalid values encountered.')
masked = False
valid_data = None
return masked, valid_data
def _data_masking(self, data):
"""
Check and generate a valid data mask.
"""
logger.info('Performing masking and invalid data checks.')
if np.ma.is_masked(data[0]):
masked = True
composite_mask = self._gen_composite_mask(data)
valid_data = np.logical_not(composite_mask)
else:
masked, valid_data = self._check_invalid_data(data)
return masked, valid_data
def _compress_to_valid_data(self, data, valid_mask, out_arr=None):
"""
Compress data to only the valid locations.
"""
logger.info('Compressing data to valid spatial locations.')
if self._leading_time:
compress_axis = 1
else:
compress_axis = None
out_arr = np.compress(valid_mask, data, axis=compress_axis, out=out_arr)
if self.cell_area is not None:
self.cell_area = np.compress(valid_mask, self.cell_area)
return out_arr
def _flatten_curr_data(self):
"""
Flatten the spatial dimension of data pointed to by self.data
"""
if self._leading_time:
self.data = self.data.reshape(self._time_shp + self._flat_spatial_shp)
else:
self.data = self.data.reshape(self._flat_spatial_shp)
if self.cell_area is not None:
self.cell_area = self.cell_area.reshape(self._flat_spatial_shp)
def _set_time_coord(self, key, time_len_of_data):
"""
Sets the time coordinate according to the provided data key. Also
adjusts the object attribute of the time shape.
"""
if key in self._altered_time_coords:
time_coord = self._altered_time_coords[key]
else:
ops = self._ops_performed[key]
for past_op_key in ops[::-1]:
if past_op_key in self._altered_time_coords:
time_coord = self._altered_time_coords[past_op_key]
break
else:
raise IndexError('No suitable time coordinates found for '
'current key.')
if not len(time_coord) == time_len_of_data:
logger.error('Time coordinate length is different than the '
'sampling dimension of the data. coord_len = {:d}, '
'data_sample_len = {:d}'.format(len(time_coord),
time_len_of_data))
raise ValueError('Inconsistent sampling dimension and '
'corresponding coordinate length detected.')
time_idx = self._dim_coords[self.TIME][0]
self._dim_coords[self.TIME] = (time_idx, time_coord)
self._time_shp = [time_len_of_data]
@staticmethod
def _detrend_func(data, output_arr=None):
return detrend_data(data, output_arr=output_arr)
@staticmethod
def _avg_func(data, output_arr=None):
return np.mean(data, axis=1, out=output_arr)
def time_average_resample(self, key, nsamples_in_avg, shift=0):
"""
Resample by averaging over the sampling dimension.
:return:
"""
if not self._leading_time:
raise ValueError('Can only perform a resample operation when data '
'has a leading sampling dimension.')
if shift < 0:
logger.error('Invalid shift argument (shift = {:d})'.format(shift))
raise ValueError('Currently only positive shifts are supported '
'for resampling.')
nsamples = self._time_shp[0]
nsamples -= shift
new_nsamples = nsamples // nsamples_in_avg
end_cutoff = nsamples % nsamples_in_avg
if end_cutoff == 0:
end_cutoff = None
else:
end_cutoff = -end_cutoff
spatial_shp = self.data.shape[1:]
new_shape = [new_nsamples] + list(spatial_shp)
avg_shape = [new_nsamples, nsamples_in_avg] + list(spatial_shp)
new_bin = self._new_empty_databin(new_shape, self.data.dtype, key)
setattr(self, key, new_bin)
tmp_slice = slice(shift, end_cutoff)
self.data = self.data[tmp_slice]
self.data = self.data.reshape(avg_shape)
self.data = self._avg_func(self.data, output_arr=new_bin)
self._time_shp = [new_nsamples]
time_idx, time_coord = self._dim_coords[self.TIME]
tmp_slice = slice(shift, end_cutoff, nsamples_in_avg)
new_time_coord = time_coord[tmp_slice]
self._dim_coords[self.TIME] = (time_idx, new_time_coord)
self._altered_time_coords[key] = new_time_coord
self._add_to_operation_history(self._curr_data_key, key)
self._set_curr_data_key(key)
return self.data
def train_test_split_random(self, test_size=0.25, random_seed=None,
sample_lags=None):
if random_seed is not None:
np.random.seed(random_seed)
sample_len = self._time_shp[0]
if sample_lags is not None:
test_sample_len = sample_len - max(sample_lags)
else:
test_sample_len = sample_len
if isinstance(test_size, float):
if test_size >= 1 or test_size <= 0:
raise ValueError('Testing sample size must be between 0.0 and '
'1.0 if float is provided.')
if sample_lags is not None and (test_size * len(sample_lags)) > 0.75:
raise ValueError('Test size and number of lagged samples to'
'include could comprise more than 75% of data'
'. Please lower the test size or lower the '
'number of sample lags.')
test_samples = int(np.ceil(test_sample_len * test_size))
elif isinstance(test_size, int):
test_samples = test_size
else:
raise ValueError('Testing sample size must be of type int or '
'float.')
if test_samples <= 0:
logging.error('Invalid testing sample size encountered: '
'test_samples={:d}'.format(test_samples))
raise ValueError('Provided testing sample size is too small.')
test_indices = np.random.choice(test_sample_len, size=test_samples,
replace=False)
test_set = set(test_indices)
for lag in sample_lags:
test_set = test_set | set(test_indices+lag)
train_set = set(np.arange(sample_len)) - set(test_set)
train_indices = list(train_set)
train_in_test = []
for lag in sample_lags:
for idx in train_indices:
if idx+lag in test_set:
train_in_test.append(idx)
train_set = train_set - set(train_in_test)
train_indices = np.array(list(train_set))
train_indices = np.sort(train_indices)
if sample_lags is None:
sample_lags = []
else:
max_lag = max(sample_lags)
test_data = []
obj_data = getattr(self, self._curr_data_key)
train_dobj = self.copy(data_indices=train_indices,
data_group='/train_copy')
lag_idx_training = {}
for idx_adjust in sample_lags:
t0_idx_list = []
tlag_idx_list = []
for i, t0_idx in enumerate(train_indices):
for j, tlag_idx in enumerate(train_indices[i:]):
if t0_idx + idx_adjust == tlag_idx:
t0_idx_list.append(i)
tlag_idx_list.append(i+j)
# TODO: should I warn if number of samples is small?
if t0_idx_list:
lag_idx_training[idx_adjust] = (t0_idx_list, tlag_idx_list)
test_data.append(obj_data[test_indices, ...])
for idx_adjust in sample_lags:
test_data.append(obj_data[test_indices+idx_adjust, ...])
return test_data, train_dobj, lag_idx_training
def inflate_full_grid(self, data=None, expand_axis=-1, reshape_orig=False):
"""
Returns previously compressed data to its full grid filled with np.NaN
values.
Parameters
----------
data: ndarray like, optional
Data to inflate to its original grid size. If none specified this
operates on the current data pointed to by self.data.
expand_axis: int, optional
Which axis to expand along for the data. Defaults to -1 which is
the correct axis when operating on self.data.
reshape_orig: bool, optional
If true it will reshape data to the correct time shape (if
applicable) and spatial shape.
Returns
-------
ndarray
Full decompressed grid filled with NaN values in masked locations.
"""
if not self.is_masked:
logger.warning('Cannot inflate uncompressed data.')
return None
if data is not None:
# Check that this data was compressed from current object
elem_expand_axis = data.shape[expand_axis]
num_valid_points = self.valid_data.sum()
if elem_expand_axis != num_valid_points:
logger.error('Incorrect number of elements for compressed '
'data associated with this object.\n'
'data.shape=[{:d}]\n'
'nelem valid data={:d}'
''.format(elem_expand_axis, num_valid_points))
raise ValueError('Input data does not have same length as '
'number of valid data elements.')
shp = list(data.shape)
shp[expand_axis] = len(self.valid_data)
else:
data = self.data
shp = self._time_shp + [len(self.valid_data)]
full = np.empty(shp) * np.nan
valid_mask = self.valid_data
for dim_idx, dim_len in enumerate(shp):
if dim_len != self.valid_data.shape[0]:
valid_mask = np.expand_dims(valid_mask, dim_idx)
valid_mask = np.logical_and(np.ones(shp), valid_mask)
full[valid_mask] = data.flatten()
if reshape_orig:
new_shp = list(shp)
new_shp.pop(expand_axis)
for dim_len in self._spatial_shp[::-1]:
new_shp.insert(expand_axis, dim_len)
full = full.reshape(new_shp)
logger.debug('Inflated grid shape: {}'.format(full.shape))
return full
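# Usage sketch (hypothetical): recover the full spatial field from compressed
# data for plotting, e.g.
#   full = dobj.inflate_full_grid(reshape_orig=True)
# where `dobj` is a masked BaseDataObject; `full` then has shape
# time x (original spatial shape), with NaN filled into the masked locations.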
def calc_running_mean(self, window_size, year_len, save=True):
"""
Calculate a running mean over the sampling dimension.
Parameters
----------
window_size: int
Number of samples to include in the running mean window.
year_len: int
Number of samples in a year. If sampling frequency is longer
than 1 year this will default to 1.
save: bool, optional
Whether or not to save data in a new databin. (Default is True)
Returns
-------
ndarray-like
Data filtered using a running mean.
Notes
-----
This function will trim each end of the sample by removing
ceil(window_size//2 / year_len) * year_len.
"""
logger.info('Filtering data using running mean...')
logger.debug('window_size = {:d}, year_len = {:d}'.format(window_size,
year_len))
# TODO: year_len should eventually be a property determined during init
if not self._leading_time:
logger.error('Running mean requires leading time dimension.')
raise ValueError('Can only perform a running mean when data has a '
'leading sampling dimension.')
if year_len < 1:
year_len = 1
edge_pad = window_size // 2
edge_trim = np.ceil(edge_pad / float(year_len)) * year_len
edge_trim = int(edge_trim)
new_time_len = self.data.shape[0] - edge_trim * 2
time_idx, old_time_coord = self._dim_coords[self.TIME]
new_time_coord = old_time_coord[edge_trim:-edge_trim]
self._dim_coords[self.TIME] = (time_idx, new_time_coord)
if save and not self._save_none:
new_shape = list(self.data.shape)
new_shape[0] = new_time_len
new_shape = tuple(new_shape)
self.running_mean = self._new_empty_databin(new_shape,
self.data.dtype,
self._RUNMEAN)
self.data = run_mean(self.data, window_size, trim_edge=edge_trim,
output_arr=self.running_mean)
self._time_shp = [new_time_len]
self._start_time_edge = edge_trim
self._end_time_edge = -edge_trim
self._add_to_operation_history(self._curr_data_key, self._RUNMEAN)
self._set_curr_data_key(self._RUNMEAN)
self._altered_time_coords[self._RUNMEAN] = new_time_coord
return self.data
# TODO: Use provided time coordinates to determine year size
def calc_anomaly(self, year_len, save=True, climo=None):
"""
Center the data (anomaly) over the sampling dimension. If the there are
multiple samples within a year (yr_size>1) then the climatology is
calculated for each subannual quantity.
Parameters
----------
year_len: int
Number of samples in a year. If sampling frequency is longer
than 1 year this will default to 1.
save: bool, optional
Whether or not to save data in a new databin. (Default is True)
Returns
-------
ndarray-like
Centered data
"""
logger.info('Centering data and saving climatology...')
logger.debug('yr_size = {:d}'.format(year_len))
if not self._leading_time:
raise ValueError('Can only perform anomaly calculation with a '
'specified leading sampling dimension')
if save and not self._save_none:
self.anomaly = self._new_empty_databin(self.data.shape,
self.data.dtype,
self._ANOMALY)
if year_len < 1:
year_len = 1
self.data, self.climo = calc_anomaly(self.data, year_len,
climo=climo,
output_arr=self.anomaly)
self._add_to_operation_history(self._curr_data_key, self._ANOMALY)
self._set_curr_data_key(self._ANOMALY)
return self.data
def detrend_data(self, save=True):
"""
Remove linear trends from the data along the sampling dimension.
Parameters
----------
save: bool, optional
Whether or not to save data in a new databin. (Default is True)
Returns
-------
ndarray-like
Detrended data
"""
logger.info('Detrending data...')
if not self._leading_time:
raise ValueError('Can only perform detrending with a specified '
'leading sampling dimension')
if save and not self._save_none:
self.detrended = self._new_empty_databin(self.data.shape,
self.data.dtype,
self._DETRENDED)
self.data = self._detrend_func(self.data, output_arr=self.detrended)
self._add_to_operation_history(self._curr_data_key, self._DETRENDED)
self._set_curr_data_key(self._DETRENDED)
return self.data
def area_weight_data(self, use_sqrt=True, save=True):
"""
Perform a gridcell area weighting using provided areas or latitudes if
field is regularly grided and cell areas are not loaded.
Parameters
----------
use_sqrt: bool, optional
Use square root of weight matrix. Useful for when data will be
used in quadratic calculations. (E.g. PCA)
save: bool, optional
Whether or not to save data in a new databin. (Default is True)
Returns
-------
ndarray-like
Area-weighted data
"""
if self.cell_area is None and self.irregular_grid:
raise ValueError('Cell areas are required to area-weight a '
'non-regular grid.')
elif self.cell_area is None and not self.irregular_grid:
do_lat_based = True
if self.LAT not in list(self._dim_idx.keys()):
raise ValueError('Cell area or latitude dimension are not '
'specified. Required for grid cell area '
'weighting.')
logger.info('Area-weighting by latitude.')
else:
logger.info('Area-weighting using cell area')
do_lat_based = False
if save and not self._save_none:
self.area_weighted = self._new_empty_databin(self.data.shape,
self.data.dtype,
self._AWGHT)
if do_lat_based:
lat = self.get_coordinate_grids([self.LAT],
flat=self.forced_flat)[self.LAT]
scale = abs(np.cos(np.radians(lat)))
else:
scale = self.cell_area / self.cell_area.sum()
if use_sqrt:
scale = np.sqrt(scale)
if is_dask_array(self.data):
awgt = self.data * scale
da.store(awgt, self.area_weighted)
else:
awgt = self.data
result = ne.evaluate('awgt * scale')
if self.area_weighted is not None:
self.area_weighted[:] = result
self.data = self.area_weighted
else:
self.data = result
self._add_to_operation_history(self._curr_data_key, self._AWGHT)
self._set_curr_data_key(self._AWGHT)
return self.data
def standardize_data(self, std_factor=None, save=True):
"""
Perform a standardization by the total grid variance.
Parameters
----------
save: bool, optional
Whether or not to save data in a new databin. (Default is True)
Returns
-------
ndarray-like
Standardized data
"""
if save and not self._save_none:
self.standardized = self._new_empty_databin(self.data.shape,
self.data.dtype,
self._STD)
if std_factor is None:
grid_var = self.data.var(axis=0, ddof=1)
total_var = grid_var.sum()
std_scaling = 1 / np.sqrt(total_var)
else:
std_scaling = std_factor
grid_standardized = self.data * std_scaling
if is_dask_array(self.data):
if not is_dask_array(std_scaling):
inputs = [grid_standardized]
outputs = [self.standardized]
unpack_std_scaling = False
else:
inputs = [grid_standardized, std_scaling]
self._std_scaling = np.zeros(1)
outputs = [self.standardized, self._std_scaling]
unpack_std_scaling = True
da.store(inputs, outputs)
if unpack_std_scaling:
self._std_scaling = self._std_scaling[0]
else:
self._std_scaling = std_scaling
else:
self._std_scaling = std_scaling
if self.standardized is not None and save and not self._save_none:
self.standardized[:] = grid_standardized
self.data = self.standardized
else:
self.data = grid_standardized
self._add_to_operation_history(self._curr_data_key, self._STD)
self._set_curr_data_key(self._STD)
return self.data
def eof_proj_data(self, num_eofs=10, eof_in=None, save=True,
calc_on_key=None, proj_key=None):
"""
Calculate spatial EOFs on the data retaining a specified number of
modes.
Parameters
----------
num_eofs: int
How many modes to retain from the EOF decomposition. Ignored if
input_eofs is specified.
eof_in: ndarray, optional
A set of EOFs to project the data into. First dimension should
match the length of the data feature dimension. Overrides
num_eofs if provided.
save: bool, optional
Whether or not to save data in a new databin. (Default is True)
calc_on_key: str, optional
Field key to calculate the EOF basis on. Defaults to the
area-weighted data.
proj_key: str, optional
Field to project onto the EOF basis. Defaults to the current data
if no key is provided.
Returns
-------
ndarray-like
Data projected into EOF basis. Will have shape of (sampling dim
x num EOFs).
"""
if not self._leading_time:
raise ValueError('Can only perform eof calculation with a '
'specified leading sampling dimension')
if calc_on_key is None and self._curr_data_key != self._AWGHT:
self.reset_data(self._AWGHT)
calc_on_key = self._AWGHT
if len(self.data.shape) > 2:
logger.warning('Cannot perform EOF calculation on data with more '
'than 2 dimensions. Flattening data...')
self._flatten_curr_data()
if eof_in is not None:
if eof_in.shape[0] != self.data.shape[1]:
logger.error('Input EOFs feature dimension (length={}) does '
'not match data feature dimension (length={})'
''.format(eof_in.shape[0], self.data.shape[1]))
raise ValueError('Feature dimension mismatch for input EOFs')
num_eofs = eof_in.shape[1]
logger.info('Projecting data into leading {:d} EOFs'.format(num_eofs))
if eof_in is None:
self._eof_stats = {}
self._eof_stats['calc_on'] = calc_on_key
self._eofs, self._svals = calc_eofs(self.data, num_eofs,
var_stats_dict=self._eof_stats)
else:
self._eofs = eof_in
if proj_key is not None:
self.reset_data(proj_key)
if save and not self._save_none:
new_shp = (self.data.shape[0], num_eofs)
self.eof_proj = self._new_empty_databin(new_shp,
self.data.dtype,
self._EOFPROJ)
if is_dask_array(self.data):
proj = da.dot(self.data, self._eofs)
da.store(proj, self.eof_proj)
self.data = self.eof_proj
else:
proj = np.dot(self.data, self._eofs)
if self.eof_proj is not None:
self.eof_proj[:] = proj
self.data = self.eof_proj
else:
self.data = proj
self._add_to_operation_history(self._curr_data_key, self._EOFPROJ)
self._set_curr_data_key(self._EOFPROJ)
return self.data
def get_eof_stats(self):
return deepcopy(self._eof_stats)
# TODO: Make this return copies of dim_coord information
def get_dim_coords(self, keys):
"""
Return dim_coord key, value pairs for a specified group of keys.
Parameters
----------
keys: Iterable<str>
A list of keys specifying data to retrieve from the dim_coords
property
Returns
-------
dict
A dim_coord dictionary with specified keys. Values will be a tuple
of the dimension index and coordinate values.
"""
logger.info('Retrieving dim_coords for: {}'.format(keys))
dim_coords = {}
for key in keys:
if key in list(self._dim_coords.keys()):
dim_coords[key] = self._dim_coords[key]
return dim_coords
def get_coordinate_grids(self, keys, compressed=True, flat=False):
"""
Return coordinate grid for spatial dimensions in full, compressed, or
flattened form.
Parameters
----------
keys: Iterable<str>
A list of keys specifying spatial grids to create.
compressed: bool, optional
Whether or not to compress the grid when it contains masked values
flat: bool, optional
Whether or not to return a flattened 1D grid.
Returns
-------
dict
Requested coordinate grids as key/value pairs
"""
logger.info('Retrieving coordinate grids for: {}'.format(keys))
grids = {}
if self.TIME in keys:
logger.warning('Get_coordinate_grids currently only supports '
'retrieval of spatial fields.')
keys.remove(self.TIME)
for key in keys:
if key not in list(self._dim_idx.keys()):
raise KeyError('No matching dimension for key ({}) was found.'
''.format(key))
if self._coord_grids is not None and key in self._coord_grids:
grid = np.copy(self._coord_grids[key])
else:
idx = self._dim_idx[key]
# adjust field index for leading time dimension
if self._leading_time:
idx -= 1
# Get coordinates for current key and copy
coords = self._dim_coords[key][1]
grid = np.copy(coords)
# Expand dimensions for broadcasting
for dim, _ in enumerate(self._spatial_shp):
if dim != idx:
grid = np.expand_dims(grid, dim)
grid = np.ones(self._spatial_shp) * grid
if self.is_masked and compressed:
grid = grid.flatten()
grid = grid[self.valid_data]
elif flat:
grid = grid.flatten()
grids[key] = grid
return grids
def reset_data(self, key):
logger.info('Resetting data to: {}'.format(key))
try:
self.data = self._data_bins[key]
self._set_curr_data_key(key)
if self._leading_time:
self._set_time_coord(key, self.data.shape[0])
except KeyError:
logger.error('Could not find {} in initialized '
'databins.'.format(key))
raise KeyError('Key {} not saved. Could not reset self.data.'.format(key))
return self.data
def is_leading_time(self):
return self._leading_time
def save_dataobj_pckl(self, filename):
logger.info('Saving data object to file: {}'.format(filename))
tmp_dimcoord = self._dim_coords[self.TIME]
tmp_time = tmp_dimcoord[1]
kwargs = {}
if self.time_cal is not None:
kwargs['calendar'] = self.time_cal
topckl_time = ncf.date2num(tmp_time, units=self.time_units,
**kwargs)
self._dim_coords[self.TIME] = (tmp_dimcoord[0], topckl_time)
with open(filename, 'wb') as f:
cpk.dump(self, f)
self._dim_coords[self.TIME] = (tmp_dimcoord[0], tmp_time)
def copy(self, data_indices=None, **kwargs):
"""
Copies the current data object to a new object only retaining the
current data_bin and associated information. Allows for subsampling
of the current data.
Parameters
----------
data_indices: list of ints or slice object, optional
Indicies to subsample the current data object data for the copy
operation.
kwargs:
Other keyword arguments for _helper_copy_new_databin method
Returns
-------
DataObject
Copy of the current data object with or without a subsample
"""
if data_indices is not None and not self.is_leading_time():
raise ValueError('Cannot copy with specified indices for subsample'
'when data does not contain leading sampling dim.')
cls = self.__class__
new_obj = cls.__new__(cls)
curr_dict = copy(self.__dict__)
# attributes that need deep copy (arrays, lists, etc.)
attrs_to_deepcopy = ['_coord_grids', '_time_shp', '_spatial_shp',
'_dim_idx', '_dim_coords', '_flat_spatial_shp',
'valid_data']
# check if eof attributes are relevant
current_dkey = self._curr_data_key
if (current_dkey == self._EOFPROJ or
self._EOFPROJ in self._ops_performed[current_dkey]):
attrs_to_deepcopy.append('_eof_stats')
else:
curr_dict['_eofs'] = None
curr_dict['_svals'] = None
curr_dict['_eof_stats'] = {}
deepcopy_items = {key: curr_dict.pop(key) for key in attrs_to_deepcopy}
# Unset all attributes for other data bins
for key in list(self._data_bins.keys()):
if key != self._curr_data_key:
curr_dict[key] = None
curr_dict['data'] = None
curr_dict['_data_bins'] = {}
ops_performed = {current_dkey: curr_dict['_ops_performed'][current_dkey]}
curr_dict['_ops_performed'] = ops_performed
deepcopied_attrs = deepcopy(deepcopy_items)
data = self.data
time_idx, time_coord = deepcopied_attrs['_dim_coords'][self.TIME]
time_coord = np.array(time_coord)
# Adjust the time and data if resampling
if data_indices is not None:
try:
sample_len = len(data_indices)
except TypeError as e:
# Assume slice input
sample_len = data_indices.stop - data_indices.start
time_coord = time_coord[data_indices]
deepcopied_attrs['_dim_coords'][self.TIME] = (time_idx, time_coord)
deepcopied_attrs['_time_shp'] = [sample_len]
data = data[data_indices, ...]
curr_dict['_altered_time_coords'] = {current_dkey: time_coord}
# Update object with attributes
new_obj.__dict__.update(curr_dict)
new_obj.__dict__.update(deepcopied_attrs)
# Create a new databin for our data
new_obj._helper_copy_new_databin(current_dkey, data, **kwargs)
return new_obj
def _helper_copy_new_databin(self, data_key, data, **kwargs):
databin = self._new_databin(data, data_key)
setattr(self, data_key, databin)
self.data = databin
self._set_curr_data_key(data_key)
@staticmethod
def _load_cell_area(cell_area_path):
if cell_area_path is None:
return None
logger.info('Loading grid cell area from : {}'.format(cell_area_path))
ca_fname = path.split(cell_area_path)[-1]
ca_var = ca_fname.split('_')[0]
f = ncf.Dataset(cell_area_path, 'r')
cell_area = f.variables[ca_var][:]
return cell_area
@classmethod
def from_netcdf(cls, filename, var_name, cell_area_path=None, **kwargs):
logging.info('Loading data object from netcdf: \n'
'file = {}\n'
'var_name = {}'.format(filename, var_name))
cell_area = cls._load_cell_area(cell_area_path)
with ncf.Dataset(filename, 'r') as f:
data = f.variables[var_name]
lat = f.variables['lat']
lon = f.variables['lon']
if len(lat.shape) > 1:
irregular_grid = True
lat_grid = lat
lon_grid = lon
# TODO: Should I just fill these with dummy dimensions?
lat = lat[:, 0]
lon = lon[0]
grids = {BaseDataObject.LAT: lat_grid[:],
BaseDataObject.LON: lon_grid[:]}
else:
irregular_grid = False
grids = None
coords = {BaseDataObject.LAT: lat[:],
BaseDataObject.LON: lon[:]}
times = f.variables['time']
try:
cal = times.calendar
coords[BaseDataObject.TIME] = ncf.num2date(times[:], times.units,
calendar=cal)
except AttributeError:
logger.debug('No calendar attribute found in netCDF.')
coords[BaseDataObject.TIME] = ncf.num2date(times[:], times.units)
cal = None
for i, key in enumerate(data.dimensions):
if key in list(coords.keys()):
coords[key] = (i, coords[key])
force_flat = kwargs.pop('force_flat', True)
return cls(data[:], dim_coords=coords, force_flat=force_flat,
time_units=times.units, time_cal=cal, coord_grids=grids,
cell_area=cell_area, irregular_grid=irregular_grid,
**kwargs)
@classmethod
def from_hdf5(cls, filename, var_name, data_dir='/',
cell_area_path=None, **kwargs):
logging.info('Loading data object from HDF5: \n'
'file = {}\n'
'var_name = {}'.format(filename, var_name))
cell_area = cls._load_cell_area(cell_area_path)
with tb.open_file(filename, 'r') as f:
data = f.get_node(data_dir, name=var_name)
try:
fill_val = data.attrs.fill_value
except AttributeError:
fill_val = None
lat = f.get_node(data_dir+'lat')
lon = f.get_node(data_dir+'lon')
lat_idx = lat.attrs.index
lon_idx = lon.attrs.index
if len(lat.shape) > 1:
irregular_grid = True
lat_grid = lat
lon_grid = lon
# TODO: Should I just fill these with dummy dimensions?
lat = lat[:, 0]
lon = lon[0]
grids = {BaseDataObject.LAT: lat_grid[:],
BaseDataObject.LON: lon_grid[:]}
else:
irregular_grid = False
grids = None
coords = {BaseDataObject.LAT: (lat_idx, lat[:]),
BaseDataObject.LON: (lon_idx, lon[:])}
times = f.get_node(data_dir + 'time')
time_idx = times.attrs.index
if hasattr(times.attrs, 'calendar'):
time_cal = times.attrs.calendar
else:
time_cal = None
try:
time_units = times.attrs.units
times_list = ncf.num2date(times[:], time_units)
coords[BaseDataObject.TIME] = (times.attrs.index,
ncf.num2date(times[:],
times.attrs.units))
except ValueError as e:
logger.error('Problem converting netCDF time units: ' + str(e))
[times_list,
time_units] = _handle_year_zero_units(times[:],
times.attrs.units,
calendar=time_cal)
coords[BaseDataObject.TIME] = (time_idx, times_list)
force_flat = kwargs.pop('force_flat', True)
return cls(data, dim_coords=coords, force_flat=force_flat,
coord_grids=grids, fill_value=fill_val,
time_units=time_units, time_cal=time_cal,
cell_area=cell_area, irregular_grid=irregular_grid,
**kwargs)
@classmethod
def from_pickle(cls, filename):
logging.info('Loading data object from pickle.\n'
'file = {}'.format(filename))
with open(filename, 'rb') as f:
dobj = cpk.load(f)
tmp_dimcoord = dobj._dim_coords[dobj.TIME]
tmp_time = tmp_dimcoord[1]
kwargs = {}
if dobj.time_cal is not None:
kwargs['calendar'] = dobj.time_cal
topckl_time = ncf.num2date(tmp_time, units=dobj.time_units,
**kwargs)
dobj._dim_coords[dobj.TIME] = (tmp_dimcoord[0], topckl_time)
return dobj
@classmethod
def from_posterior_ncf(cls, filename, var_name, **kwargs):
with ncf.Dataset(filename, 'r') as f:
data = f.variables[var_name][:]
coords = {BaseDataObject.LAT: f.variables['lat'][:],
BaseDataObject.LON: f.variables['lon'][:]}
times = (0, f.variables['time'][:])
coords['time'] = times
coords['lat'] = (1, coords['lat'])
coords['lon'] = (1, coords['lon'])
return cls(data, dim_coords=coords, **kwargs)
@classmethod
def from_posterior_npz(cls, filename, **kwargs):
with np.load(filename) as f:
data = f['values'][:]
lat = f['lat'][:, 0]
lon = f['lon'][0, :]
coords = {BaseDataObject.LAT: (1, lat),
BaseDataObject.LON: (1, lon),
BaseDataObject.TIME: (0, f['years'])}
force_flat = kwargs.pop('force_flat', True)
return cls(data, dim_coords=coords, force_flat=force_flat,
**kwargs)
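# Typical usage sketch (hypothetical file and variable names): load a field and
# run the standard preparation chain before a truncated-EOF calculation:
#
#   dobj = BaseDataObject.from_netcdf('tas_Amon_model.nc', 'tas')
#   dobj.calc_anomaly(year_len=12)
#   dobj.area_weight_data(use_sqrt=True)
#   dobj.eof_proj_data(num_eofs=10)
#   reduced = dobj.data    # shape: (num samples, num_eofs)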
class Hdf5DataObject(BaseDataObject):
def __init__(self, data, h5file, dim_coords=None, valid_data=None,
force_flat=False, fill_value=None, chunk_shape=None,
default_grp='/data', coord_grids=None, cell_area=None,
time_units=None, time_cal=None, irregular_grid=False):
"""
Construction of a Hdf5DataObject from input data. If nan or
infinite values are present, a compressed version of the data
is also stored.
Parameters
----------
data: ndarray
Input dataset to be used.
h5file: tables.File
HDF5 Pytables file to use as a data storage backend
dim_coords: dict(str:ndarray), optional
Coordinate vector dictionary for supplied data. Please use
DataObject attributes (e.g. DataObject.TIME) for dictionary
keys.
valid_data: ndarray (np.bool), optional
Array corresponding to valid data in the of the input dataset
or uncompressed version of the input dataset. Should have the same
number of dimensions as the data and each dimension should be
greater than or equal to the spatial dimensions of data.
force_flat: bool
Force spatial dimensions to be flattened (1D array)
fill_value: float
Value to be considered invalid data during the mask and
compression. Only considered when data is not masked.
default_grp: tables.Group or str, optional
Group to store all created databins under in the hdf5 file.
Notes
-----
If NaN values are present I do not suggest
using the orig_data variable when reloading from a file. Currently
PyTables Carrays have no method of storing np.NaN so the values in those
locations will be random. Please only read the compressed data or make
sure you apply the mask on the data if you think self.orig_data is being
read from disk.
"""
if type(h5file) != tb.File:
logger.error('Invalid HDF5 file encountered: '
'type={}'.format(type(h5file)))
raise ValueError('Input HDF5 file must be opened using pytables.')
self.h5f = h5file
self._default_grp = None
self.set_databin_grp(default_grp)
if chunk_shape is None:
leading_time = BaseDataObject.TIME in dim_coords
self._chunk_shape = self._determine_chunk(leading_time,
data.shape,
data.dtype)
else:
self._chunk_shape = chunk_shape
logger.debug('Dask array chunk shape: {}'.format(self._chunk_shape))
data = da.from_array(data, chunks=self._chunk_shape)
super(Hdf5DataObject, self).__init__(data,
dim_coords=dim_coords,
valid_data=valid_data,
force_flat=force_flat,
fill_value=fill_value,
cell_area=cell_area,
irregular_grid=irregular_grid,
coord_grids=coord_grids,
time_cal=time_cal,
time_units=time_units)
self._eof_stats = None
def _set_curr_data_key(self, new_key):
if not hasattr(self.data, 'dask'):
chunk_shp = self._determine_chunk(self._leading_time,
self.data.shape,
self.data.dtype)
self._chunk_shape = chunk_shp
logger.debug('Current chunk shape: {}'.format(chunk_shp))
self.data = da.from_array(self.data, chunks=self._chunk_shape)
super(Hdf5DataObject, self)._set_curr_data_key(new_key)
# Create backend data container
def _new_empty_databin(self, shape, dtype, name):
logger.debug('Creating empty HDF5 databin:\n'
'shape: {}\n'
'dtype: {}\n'
'name: {}'.format(shape, dtype, name))
new = empty_hdf5_carray(self.h5f,
self._default_grp,
name,
tb.Atom.from_dtype(dtype),
shape
)
self._data_bins[name] = new
return new
def _new_databin(self, data, name):
logger.debug('Copying data to HDF5 databin: {}'.format(name))
new = self._new_empty_databin(data.shape, data.dtype, name)
da.store(data, new)
self._data_bins[name] = new
return new
@staticmethod
def _determine_chunk(leading_time, shape, dtype, size=32):
"""
Determine default chunk size for dask array operations.
Parameters
----------
shape: tuple<int>
Shape of the data to be chunked.
dtype: numpy.dtype
Datatype of the data to be chunked
size: int
Size (in MB) of the desired chunk
Returns
-------
tuple
Chunk shape for data and given size.
"""
if leading_time:
sptl_size = np.product(shape[1:]) * dtype.itemsize
rows_in_chunk = size*1024**2 // sptl_size
rows_in_chunk = int(rows_in_chunk)
rows_in_chunk = min((rows_in_chunk, shape[0]))
chunk = tuple([rows_in_chunk] + list(shape[1:]))
else:
nelem = np.product(shape)
# coding: utf-8
# # Test pyIAST for match with competitive Langmuir model
# In the case that the pure-component isotherms $N_{i,pure}(P)$ follow the Langmuir model with the same saturation loading $M$:
#
# $N_{i,pure} = M \frac{K_iP}{1+K_iP},$
#
# The mixed gas adsorption isotherm follows the competitive Langmuir isotherm:
#
# $N_i = M \frac{K_i p_i}{1 + \sum_j K_jp_j},$
#
# where $p_i$ is the partial pressure of component $i$. Here, we generate synthetic pure-component adsorption isotherm data and confirm that pyIAST agrees with the competitive Langmuir isotherm for 3 components.
# In[1]:
from __future__ import absolute_import
import numpy as np
import pyiast
import pandas as pd
import matplotlib.pyplot as plt
from six.moves import range
plt.style.use('fivethirtyeight')
colors = ['b', 'g', 'r'] # for representing each component
component_names = {0: 'A', 1: 'B', 2: 'C'}
# ## Generate synthetic pure-component isotherm data, fit Langmuir models to them.
# Model parameters ($M$, $\{K_i\}$)
# In[2]:
M = 1.0
langmuirKs = [2.0, 10.0, 20.0] # K_i
# Generate data according to Langmuir model, store in list of Pandas DataFrames
# In[3]:
pressure = np.logspace(-3, np.log10(10), 20)
dfs = [
pd.DataFrame({
'P':
pressure,
'L':
M * langmuirKs[i] * pressure / (1.0 + langmuirKs[i] * pressure)
}) for i in range(3)
]
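# A small helper (a sketch for later reference, not part of pyIAST itself) that
# evaluates the competitive Langmuir loading of component i for an array of
# partial pressures, using the M and langmuirKs defined above:
def competitive_langmuir_loading(partial_pressures, i):
    """Mixed-gas loading of component i under the competitive Langmuir model."""
    return M * langmuirKs[i] * partial_pressures[i] / (
        1.0 + np.dot(langmuirKs, partial_pressures))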
# Use pyIAST to fit Langmuir models to the data, then plot fits
# In[4]:
isotherms = [
pyiast.ModelIsotherm(
dfs[i], pressure_key='P', loading_key='L', model='Langmuir')
for i in range(3)
]
for i in range(len(isotherms)):
isotherms[i].print_params()
pyiast.plot_isotherm(isotherms[i])
# Plot synthetic data all in one plot for paper
# In[5]:
p_plot = np.logspace(-3, np.log10(11))
"""
ecam Apr18
"""
import numpy as np #Numerical methods
from scipy.integrate import ode #ODE solver
from param import param_dict,param_array
from data import read_data_file
from random import choice
from scipy.signal import argrelextrema
from scipy.stats import pearsonr
import scipy.optimize
T = read_data_file("time") #read time for time points
index_of_0 = np.where( T == 0.)[0][0]
GG = np.linspace(0.001,20,500)
def F(t,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
"""
Right hand side of ODE y'(t) = f(t,y,...)
It receives parameters as f_args, as given py param_array (see param.py)
G,M (...)
"""
G=y[0]
M=y[1]
if len(y) > 2:
Gp=y[2] # GEF perturbation (what's given in the data)
Gpvis=y[3] # GEF perturbation (what's given in the data)
else:
Gp = 0.
Gpvis = 0.
k0=k0m*Kr0 # kmi =ki/Kri or ki/Kmi
k2=k2m*Kr2
k5=k5m*Km5
k6=k6m*Km6
J=Kr0/Rt
K=Kr2/Rt
u=k0*G+k1+k0*Gpt*Gp
v=k2p*M+k2
Q=v-u+v*J+u*K
Z=Q**2-4*(v-u)*u*K
#if np.isnan(Z) or Z == np.inf:
# print k3,R,
# print k0,G,k1,k0,Gpt,Gp
# print v,u,J
A=Q+np.sqrt(Z)
Ep=(2*u*K)/A
R=Rt*Ep
try:
return np.array( [ k3*R*(Gt-G) - k4*M*G, k5*R*(Mt-M)**n/(Km5**n+(Mt-M)**n) - k6*M/(Km6+M) + k7*(Mt-M)/(Km7+(Mt-M)), k_Gp-k_Gp*Gp-k4*Gp*M, k_Gp-k_Gp*Gpvis] )
except ValueError:
return np.array([0.,0.,0.,0.])
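# Usage sketch (hypothetical; the exact parameter handling lives in param.py):
# F is written for scipy.integrate.ode, so a run over the data time grid T
# would look roughly like
#
#   solver = ode(F).set_integrator('lsoda')
#   solver.set_initial_value(y0, T[0]).set_f_params(*f_args)
#   while solver.successful() and solver.t < T[-1]:
#       solver.integrate(solver.t + 1.0)
#
# where f_args is the parameter tuple in the order of F's signature (e.g. as
# produced by param_array) and y0 a 4-component initial state, e.g. the (G, M)
# steady state from initial_condition extended with Gp = Gpvis = 0.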
def Fdict(dp,y):
return F(0,y,dp["k0m"],dp["k1"],dp["k2m"],dp["k2p"],dp["k3"],dp["k4"],dp["k5m"],dp["k6m"],dp["k7"],dp["Kr0"],dp["Kr1"],dp["Kr2"],dp["Kr2p"],dp["Km5"],dp["Km6"],dp["Km7"],dp["Gt"],dp["Rt"],dp["Mt"],dp["k_Gp"],dp["Gpt"],dp["n"])
def Fosc(t,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
"""
like F, but gives only G,M.
"""
return F(t,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n)[:2]
def Fosc_complete(t,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
"""
like F, but gives only G,M.
"""
return Fcomplete(t,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n)[:3]
def linearF_complete(p,y):
f = lambda z,p=p: Fosc_complete(0,z,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
f0 = lambda y,f=f: f(y)[0]
f1 = lambda y,f=f: f(y)[1]
f2 = lambda y,f=f: f(y)[2]
return np.array([scipy.optimize.approx_fprime(y,f0,1.e-8),scipy.optimize.approx_fprime(y,f1,1.e-8),scipy.optimize.approx_fprime(y,f2,1.e-8)])
def F01(y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
return np.linalg.norm(F(0,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n)[:2])
def DF01(y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
p = {}
p["k0m"] = k0m
p["k1"] = k1
p["k2m"] = k2m
p["k2p"] = k2p
p["k3"] = k3
p["k4"] = k4
p["k5m"] = k5m
p["k6m"] = k6m
p["k7"] = k7
p["Kr0"] = Kr0
p["Kr1"] = Kr1
p["Kr2"] = Kr2
p["Kr2p"] = Kr2p
p["Km5"] = Km5
p["Km6"] = Km6
p["Km7"] = Km7
p["Gt"] = Gt
p["Rt"] = Rt
p["Mt"] = Mt
p["k_Gp"] = k_Gp
p["Gpt"] = Gpt
p["n"] = n
df = linearF(p,y)
f = G(y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n)
return 2*f[0] * df[0] + 2*f[1]*df[1]
def G(y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
return F(0,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n)[:2]
def G_complete(y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
return Fcomplete(0,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n)[:3]
def root(p,y0 = np.array([1.,1.])):
try:
return scipy.optimize.least_squares(G,y0,jac=DG,args = (p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]),bounds = ( (0.,0.),(np.inf,np.inf)))
except ValueError:
return np.array([0.,0])
def root_complete(p,y0 = np.array([1.,1.,1.])):
try:
return scipy.optimize.least_squares(G_complete,y0,jac=DG_complete,args = (p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]),bounds = ( (0.,0.,0.),(np.inf,np.inf,np.inf)))
except ValueError:
return np.array([0.,0.,0.])
def compute_rho(r,p):
"""
Takes a solution to the system (4 components) and computes RhoA
We also need to calculate value of RhoA from this formula
"""
k2=p["k2m"]*p["Kr2"]
k5=p["k5m"]*p["Km5"]
k6=p["k6m"]*p["Km6"]
Qr=k2-(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"]+p["k0m"]*p["Kr0"]*p["Gpt"]*r[:,2])+k2*p["Kr0"]/p["Rt"]+(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"]+p["k0m"]*p["Kr0"]*p["Gpt"]*r[:,2])*p["Kr2"]/p["Rt"]
Zr=Qr**2-4*(k2-(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"]+p["k0m"]*p["Kr0"]*p["Gpt"]*r[:,2]))*(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"]+p["k0m"]*p["Kr0"]*p["Gpt"]*r[:,2])*p["Kr2"]/p["Rt"]
Ar=Qr+np.sqrt(Zr)
RhoA=2*(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"]+p["k0m"]*p["Kr0"]*p["Gpt"]*r[:,2])*p["Kr2"]/Ar
RhoA = RhoA.reshape( (len(RhoA),1) )
return RhoA
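# Note: compute_rho evaluates the same quasi-steady-state root used for R inside F:
# with u = k0*G + k1 + k0*Gpt*Gp and v = k2 (the k2p*M term is omitted here),
# RhoA = 2*u*Kr2 / (Q + sqrt(Q**2 - 4*(v-u)*u*Kr2/Rt)),
# where Q = v - u + v*Kr0/Rt + u*Kr2/Rt.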
def initial_condition(p,y0 = np.array([1.,1.])):
"""
Finds steady state of first two components of the solution
"""
return root(p,y0)["x"]
def initial_condition_complete(p,y0 = np.array([1.,1.,1.])):
"""
Finds steady state of the first three components of the solution
"""
r = root_complete(p,y0)
if not isinstance(r,np.ndarray):
return root_complete(p,y0)["x"]
return r
#return root(p,y0)
def check(p):
yy = initial_condition(p)
return F01(yy,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
def f11(y,p):
k0=p["k0m"]*p["Kr0"]
k2=p["k2m"]*p["Kr2"]
k5=p["k5m"]*p["Km5"]
k6=p["k6m"]*p["Km6"]
Q=k2-(k0*y[0]+p["k1"])+k2*p["Kr0"]/p["Rt"]+(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Qg=-k0+k0*p["Kr2"]/p["Rt"]
Qm=0
Z=Q**2-4*(k2-(k0*y[0]+p["k1"]))*(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Zg=2*Q*Qg-4*(-k0*(k0*y[0]+p["k1"])+k0*(k2-(k0*y[0]+p["k1"])))*p["Kr2"]/p["Rt"]
Zm=0
A=Q+np.sqrt(Z)
Ag=Qg+Zg/(2*np.sqrt(Z))
Am=0
R=2*(k0*y[0]+p["k1"])*p["Kr2"]/A
Rg=2*(k0*A-Ag*(k0*y[0]+p["k1"]))*p["Kr2"]/A**2
Rm=0
return p["k3"]*(Rg*(p["Gt"]-y[0])-R)-p["k4"]*y[1] # corrected bracket here
def f12(y,p):
k0=p["k0m"]*p["Kr0"]
k2=p["k2m"]*p["Kr2"]
k5=p["k5m"]*p["Km5"]
k6=p["k6m"]*p["Km6"]
Q=k2-(k0*y[0]+p["k1"])+k2*p["Kr0"]/p["Rt"]+(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Qg=-k0+k0*p["Kr2"]/p["Rt"]
Qm=0
Z=Q**2-4*(k2-(k0*y[0]+p["k1"]))*(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Zg=2*Q*Qg-4*(-k0*(k0*y[0]+p["k1"])+k0*(k2-(k0*y[0]+p["k1"])))*p["Kr2"]/p["Rt"]
Zm=0
A=Q+np.sqrt(Z)
Ag=Qg+Zg/(2*np.sqrt(Z))
Am=0
R=2*(k0*y[0]+p["k1"])*p["Kr2"]/A
Rg=2*(k0*A-Ag*(k0*y[0]+p["k1"]))*p["Kr2"]/A**2
Rm=0
return -p["k4"]*y[0]
def f21(y,p):
k0=p["k0m"]*p["Kr0"]
k2=p["k2m"]*p["Kr2"]
k5=p["k5m"]*p["Km5"]
k6=p["k6m"]*p["Km6"]
Q=k2-(k0*y[0]+p["k1"])+k2*p["Kr0"]/p["Rt"]+(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Qg=-k0+k0*p["Kr2"]/p["Rt"]
Qm=0
Z=Q**2-4*(k2-(k0*y[0]+p["k1"]))*(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Zg=2*Q*Qg-4*(-k0*(k0*y[0]+p["k1"])+k0*(k2-(k0*y[0]+p["k1"])))*p["Kr2"]/p["Rt"]
Zm=0
A=Q+np.sqrt(Z)
Ag=Qg+Zg/(2*np.sqrt(Z))
Am=0
R=2*(k0*y[0]+p["k1"])*p["Kr2"]/A
Rg=2*(k0*A-Ag*(k0*y[0]+p["k1"]))*p["Kr2"]/A**2
Rm=0
return k5*Rg*(p["Mt"]-y[1])**p["n"]/(p["Km5"]**p["n"]+(p["Mt"]-y[1])**p["n"])
def f22(y,p):
k0=p["k0m"]*p["Kr0"]
k2=p["k2m"]*p["Kr2"]
k5=p["k5m"]*p["Km5"]
k6=p["k6m"]*p["Km6"]
Q=k2-(k0*y[0]+p["k1"])+k2*p["Kr0"]/p["Rt"]+(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Qg=-k0+k0*p["Kr2"]/p["Rt"]
Qm=0
Z=Q**2-4*(k2-(k0*y[0]+p["k1"]))*(k0*y[0]+p["k1"])*p["Kr2"]/p["Rt"]
Zg=2*Q*Qg-4*(-k0*(k0*y[0]+p["k1"])+k0*(k2-(k0*y[0]+p["k1"])))*p["Kr2"]/p["Rt"]
Zm=0
A=Q+np.sqrt(Z)
Ag=Qg+Zg/(2*np.sqrt(Z))
Am=0
R=2*(k0*y[0]+p["k1"])*p["Kr2"]/A
Rg=2*(k0*A-Ag*(k0*y[0]+p["k1"]))*p["Kr2"]/A**2
Rm=0
return -k5*R*p["n"]*((p["Mt"]-y[1])**(p["n"]-1))*p["Km5"]**p["n"]/(p["Km5"]**p["n"]+(p["Mt"]-y[1])**p["n"])**2 - k6*p["Km6"]/(p["Km6"]+y[1])**2 - p["k7"]*p["Km7"]/(p["Km7"]+p["Mt"]-y[1])**2
def linearF(p,y):
return np.array([[f11(y,p), f12(y,p)], [f21(y,p),f22(y,p)]])
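# Sanity-check sketch (not called anywhere in this script): compare the analytic
# Jacobian assembled by linearF against a finite-difference estimate, mirroring
# the approx_fprime construction used in linearF_complete above. y should be a
# length-2 numpy array (G, M).
def _check_linearF(p, y, eps=1.e-8):
    f = lambda z, p=p: Fosc(0, z, p["k0m"], p["k1"], p["k2m"], p["k2p"], p["k3"], p["k4"],
        p["k5m"], p["k6m"], p["k7"], p["Kr0"], p["Kr1"], p["Kr2"], p["Kr2p"],
        p["Km5"], p["Km6"], p["Km7"], p["Gt"], p["Rt"], p["Mt"], p["k_Gp"], p["Gpt"], p["n"])
    num = np.array([scipy.optimize.approx_fprime(y, lambda z: f(z)[i], eps) for i in range(2)])
    return np.max(np.abs(linearF(p, y) - num))  # max abs deviation between the two Jacobians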
def DG(y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
p = {}
p["k0m"] = k0m
p["k1"] = k1
p["k2m"] = k2m
p["k2p"] = k2p
p["k3"] = k3
p["k4"] = k4
p["k5m"] = k5m
p["k6m"] = k6m
p["k7"] = k7
p["Kr0"] = Kr0
p["Kr1"] = Kr1
p["Kr2"] = Kr2
p["Kr2p"] = Kr2p
p["Km5"] = Km5
p["Km6"] = Km6
p["Km7"] = Km7
p["Gt"] = Gt
p["Rt"] = Rt
p["Mt"] = Mt
p["k_Gp"] = k_Gp
p["Gpt"] = Gpt
p["n"] = n
return linearF(p,y)
def DG_complete(y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
p = {}
p["k0m"] = k0m
p["k1"] = k1
p["k2m"] = k2m
p["k2p"] = k2p
p["k3"] = k3
p["k4"] = k4
p["k5m"] = k5m
p["k6m"] = k6m
p["k7"] = k7
p["Kr0"] = Kr0
p["Kr1"] = Kr1
p["Kr2"] = Kr2
p["Kr2p"] = Kr2p
p["Km5"] = Km5
p["Km6"] = Km6
p["Km7"] = Km7
p["Gt"] = Gt
p["Rt"] = Rt
p["Mt"] = Mt
p["k_Gp"] = k_Gp
p["Gpt"] = Gpt
p["n"] = n
return linearF_complete(p,y)
def eig(p,y):
M = linearF(p,y)
u = np.linalg.eigvals(M)
return u
def eig_complete(p,y):
M = linearF_complete(p,y)
u = np.linalg.eigvals(M)
return u
def eig_sign(p,y):
u = eig(p,y)
if np.real(u[0])<0 and np.real(u[1]) < 0:
return -1 #both negative
if np.real(u[0])>0 and np.real(u[1]) > 0:
return 1 #both positive
return 0 #One of each
def Fc(y,Gt,p):
return G(y,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],Gt,p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
def Fc_complete(y,Gt,p):
return G_complete(y,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],Gt,p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
def DFc(y,Gt,p):
return DG(y,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],Gt,p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
def DFc_complete(y,Gt,p):
return DG_complete(y,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],Gt,p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
def FFc1(Gt,y,p):
return Fc(y,Gt[0],p)[0]
def FFc1_complete(Gt,y,p):
return Fc_complete(y,Gt[0],p)[0]
def FFc2(Gt,y,p):
return Fc(y,Gt[0],p)[1]
def FFc2_complete(Gt,y,p):
return Fc_complete(y,Gt[0],p)[1]
def FFc3_complete(Gt,y,p):
return Fc_complete(y,Gt[0],p)[2]
def D_GtFFc(Gt,y,p):
return np.array([scipy.optimize.approx_fprime([Gt],FFc1,1.e-8,y,p),scipy.optimize.approx_fprime([Gt],FFc2,1.e-8,y,p)])
def D_GtFFc_complete(Gt,y,p):
return np.array([scipy.optimize.approx_fprime([Gt],FFc1_complete,1.e-8,y,p),scipy.optimize.approx_fprime([Gt],FFc2_complete,1.e-8,y,p),scipy.optimize.approx_fprime([Gt],FFc3_complete,1.e-8,y,p)])
def nFc(y,Gt,p):
return np.linalg.norm(Fc(y,Gt,p))
def nFc_complete(y,Gt,p):
return np.linalg.norm(Fc_complete(y,Gt,p))
def rootFc(y0,Gt,p):
try:
return scipy.optimize.least_squares(Fc,y0,args = (Gt,p),jac=DFc,bounds = ( (0.,0.),(np.inf,np.inf) ) )["x"]
except ValueError:
return np.array([0,0])
def rootFc_complete(y0,Gt,p):
try:
return scipy.optimize.least_squares(Fc_complete,y0,args = (Gt,p),jac=DFc_complete,bounds = ( (0.,0.,0.),(np.inf,np.inf,np.inf) ) )["x"]
except ValueError:
return np.array([0,0,0])
def predictor(y,Gt,p,delta_Gt=0.06):
A = DG(y,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],Gt,p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
b = -D_GtFFc(Gt,y,p)*delta_Gt
return y+np.linalg.solve(A,b).ravel()
def corrector(y_pred,Gt,p,eps=1.e-8):
return rootFc(y_pred,Gt,p)
def predictor_complete(y,Gt,p,delta_Gt=0.06):
A = DG_complete(y,p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],Gt,p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"])
b = -D_GtFFc_complete(Gt,y,p)*delta_Gt
return y+np.linalg.solve(A,b).ravel()
def corrector_complete(y_pred,Gt,p,eps=1.e-8):
return rootFc_complete(y_pred,Gt,p)
def fixed_points(dp,GG,eps=1.e-16):
dp["Gt"] = GG[0]
y0 = initial_condition(dp)
Y = [y0]
g = GG[0]
for gg in GG[1:]:
y0 = predictor(y0,g,dp,gg-g)
y0 = corrector(y0,gg,dp,eps)
g = gg
Y.append(y0)
return Y
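# Illustrative helper only (the name is ours, not part of the original analysis):
# trace the (G,M) fixed-point branch over a grid of Gt values such as the
# module-level GG defined above; dp is assumed to be a full parameter dict.
def _example_branch(dp, Gt_grid=GG):
    # copy dp so that fixed_points may overwrite dp["Gt"] without side effects
    return np.array(fixed_points(dict(dp), Gt_grid))  # shape (len(Gt_grid), 2)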
def fixed_points_complete(dp,GG,eps=1.e-16):
dp["Gt"] = GG[0]
y0 = initial_condition_complete(dp)
Y = [y0]
g = GG[0]
for gg in GG[1:]:
y0 = predictor_complete(y0,g,dp,gg-g)
y0 = corrector_complete(y0,gg,dp,eps)
g = gg
Y.append(y0)
return Y
def to_percent(r):
"""
Changes sol. to percentages wrt to initial value
Only for components 0,1, and 3. Comp. 2 is 0. at time 0.
"""
for i in [0,1,4]:
r[:,i] = 100.*r[:,i]/(r[0,i]+1.e-9) - 100.
r[:,3] = r[:,3] * 310.84426380000002
return r
def solver_tmax(p,dt=1.,Tmax=500.):
"""
Solve the pendulum ODE up to time Tmax
Returns array of values for ang. displacement at time intervals dt.
p is a corrected param dict (with k_Gp instead of k_Gp_rho, etc
"""
GG = np.arange(0.0142,p["Gt"]+0.1,0.1)
YY = fixed_points(p,GG)
p["Gt"] = p["Gt_osc"]
y0 = np.concatenate( [ YY[-1],[0.,0.] ] )
s = ode(F)
s.set_integrator("lsoda",nsteps=1.e6,max_step=0) #See Scipy doc for other options
s.set_initial_value(y0,T[index_of_0]) #Initial condition
s.set_f_params(p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]) #Parameters for the right hand side
r = [y0] #Return list. Initial value.
#While solve is OK and we are not at Tmax
while s.t < Tmax:
if not s.successful():
#raise BaseException("Solver not successful")
return np.zeros((401,5))
r.append(s.integrate(s.t+dt)) #Append the full state vector at this time step
r = np.array(r) #Return numpy array for convenience.
r = np.hstack( [ r, compute_rho(r,p) ])
return r
def solver_dict(p,dt=.1,Tmax=500.):
"""
Solve the pendulum ODE up to time Tmax
Returns array of values for ang. displacement at time intervals dt.
p is a corrected param dict (with k_Gp instead of k_Gp_rho, etc
"""
y0 = np.concatenate( [ initial_condition(p),[0.,0.] ] )
s = ode(F) #Instance of ODE integrator
s.set_integrator("lsoda",nsteps=1.e6,max_step=0) #See Scipy doc for other options
s.set_initial_value(y0,T[index_of_0]) #Initial condition
s.set_f_params(p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]) #Parameters for the right hand side
r = [y0] #Return list. Initial value.
#While solve is OK and we are not at Tmax
for t in T[index_of_0+1:]:
if not s.successful():
#raise BaseException("Solver not successful")
return np.zeros((401,5))
r.append(s.integrate(t)) #Append the full state vector at this time point
r = np.array(r) #Return numpy array for convenience.
r = np.hstack( [ r, compute_rho(r,p) ])
r = to_percent(r)
return r
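# Illustrative helper only (the name is ours): run the model for one parameter
# dict laid out as assumed in param.py (with k_Gp/Gpt already substituted) and
# unpack the five returned columns.
def _example_run(dp):
    r = solver_dict(dp)                              # shape (len(T[index_of_0:]), 5)
    G, M, Gp, Gpvis, RhoA = (r[:, i] for i in range(5))
    return G, M, Gp, Gpvis, RhoA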
def solver_dict_nonorm(p,dt=.1,Tmax=500.):
"""
Solve the pendulum ODE up to time Tmax
Returns array of values for ang. displacement at time intervals dt.
p is a corrected param dict (with k_Gp instead of k_Gp_rho, etc
"""
y0 = np.concatenate( [ initial_condition(p),[0.,0.] ] )
s = ode(F) #Instance of ODE integrator
s.set_integrator("lsoda",nsteps=1.e6,max_step=0) #See Scipy doc for other options
s.set_initial_value(y0,T[index_of_0]) #Initial condition
s.set_f_params(p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]) #Parameters for the right hand side
r = [y0] #Return list. Initial value.
#While solve is OK and we are not at Tmax
for t in T[index_of_0+1:]:
if not s.successful():
#raise BaseException("Solver not successful")
return np.zeros((401,5))
r.append(s.integrate(t)) #Append the full state vector at this time point
r = np.array(r) #Return numpy array for convenience.
#return r,compute_rho(r,p)
r = np.hstack( [ r, compute_rho(r,p) ])
#print "Initial value (GK):",r[0][ [0,-1,1] ],"(G,R,M)"
#G=r[:,1]
#r = to_percent(r)
return r
def compute_rho_osc(r,p):
# rho_oscillation
k2=p["k2m"]*p["Kr2"]
k5=p["k5m"]*p["Km5"]
k6=p["k6m"]*p["Km6"]
Qr=k2-(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"])+k2*p["Kr0"]/p["Rt"]+(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"])*p["Kr2"]/p["Rt"]
Zr=Qr**2-4*(k2-(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"]))*(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"])*p["Kr2"]/p["Rt"]
Ar=Qr+np.sqrt(Zr)
RhoA=2*(p["k0m"]*p["Kr0"]*r[:,0]+p["k1"])*p["Kr2"]/Ar
RhoA = RhoA.reshape( (len(RhoA),) )
return RhoA
def to_percent_gen(v,base):
return 100.*(v/(base+1.e-9) -1.)
def solver_osc2(p,dt=1.,Tmax=20000,step=1000):
GG = np.arange(0.01,p["Gt_osc"]+0.1,0.01)
#print "Computing initial condition"
YY = fixed_points(p,GG)
#print "Fixed points completed."
p["Gt"] = p["Gt_osc"]
y0 = YY[-1]
s = ode(Fosc) #Instance of ODE integrator
s.set_integrator("lsoda",nsteps=1.e6,max_step=0) #See Scipy doc for other options
s.set_initial_value(y0,T[index_of_0]) #Initial condition
s.set_f_params(p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]) #Parameters for the right hand side
stable = False
#print "Initial (fixed point): ",y0
for k in range(300):
s.integrate(s.t+dt)
if not s.successful():
return False,np.zeros(800),np.zeros(800),np.zeros(800)
while not stable and s.t < Tmax:
r = []
#Solve 500 steps.
for k in range(step):
r.append(s.integrate(s.t+dt))
if not s.successful():
return False,np.zeros(800),np.zeros(800),np.zeros(800)
r = np.array(r)
#Constant solution
if np.linalg.norm(r[:,1] - np.max(r[:,1])) < 1.e-8:
return False,np.zeros(800),np.zeros(800),np.zeros(800)
#Local max
lmax = argrelextrema(r[:,1],np.greater)[0]
lmin = argrelextrema(r[:,1],np.less)[0]
if len(lmax) == 0:
continue
dlmax = np.diff(lmax)
if max([ np.abs( r[i,1] - r[i-1,1] ) for i in lmax]) < 1.e-3 or np.max(dlmax)-np.min(dlmax) < 5:
#print max([ np.abs( r[i,1] - r[i-1,1] ) for i in lmax])
stable = True
if not stable:
return False,0,0,0
#We are stable.
dt = 1.
r = []
#L = int(800/dt)
#L = 2500
L = 800
while len(r) < L:
r.append(s.integrate(s.t+dt))
if not s.successful():
return False,np.zeros(800),np.zeros(800),np.zeros(800)
r = np.array(r[::int(1./dt)])
R = compute_rho_osc(r,p)
return stable,r[:,0],r[:,1],R
def solver_osc2_complete(p,dt=1.,Tmax=20000,step=1000):
GG = np.arange(0.01,p["Gt_osc"]+0.1,0.01)
#print "Computing initial condition"
YY = fixed_points_complete(p,GG)
#print "Fixed points completed."
p["Gt"] = p["Gt_osc"]
y0 = YY[-1]
s = ode(Fosc_complete) #Instance of ODE integrator
s.set_integrator("lsoda",nsteps=1.e6,max_step=0) #See Scipy doc for other options
s.set_initial_value(y0,T[index_of_0]) #Initial condition
s.set_f_params(p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]) #Parameters for the right hand side
stable = False
for k in range(300):
s.integrate(s.t+dt)
if not s.successful():
return False,np.zeros(800),np.zeros(800),np.zeros(800)
while not stable and s.t < Tmax:
r = []
#Solve 500 steps.
for k in range(step):
r.append(s.integrate(s.t+dt))
if not s.successful():
return False,np.zeros(800),np.zeros(800),np.zeros(800)
r = np.array(r)
#Constant solution
if np.linalg.norm(r[:,1] - np.max(r[:,1])) < 1.e-8:
return False,np.zeros(800),np.zeros(800),np.zeros(800)
#Local max
lmax = argrelextrema(r[:,1],np.greater)[0]
lmin = argrelextrema(r[:,1],np.less)[0]
if len(lmax) == 0:
continue
dlmax = np.diff(lmax)
if max([ np.abs( r[i,1] - r[i-1,1] ) for i in lmax]) < 1.e-3 or np.max(dlmax)-np.min(dlmax) < 5:
#print max([ np.abs( r[i,1] - r[i-1,1] ) for i in lmax])
stable = True
if not stable:
return False,0,0,0
#We are stable.
dt = 1.
r = []
#L = int(800/dt)
#L = 2500
L = 800
while len(r) < L:
r.append(s.integrate(s.t+dt))
if not s.successful():
return False,np.zeros(800),np.zeros(800),np.zeros(800)
r = np.array(r[::int(1./dt)])
#R = compute_rho_osc(r,p)
return stable,r[:,0],r[:,1],r[:,2]
def solver_osc(p,dt=1.,Tmax=1000.):
"""
Solve the pendulum ODE up to time Tmax
Returns array of values for ang. displacement at time intervals dt.
p is a corrected param dict (with k_Gp instead of k_Gp_rho, etc
"""
GG = np.arange(0.1,p["Gt"]+0.1,0.1) #Note: the grid stops just short of p["Gt"], so we end up close to the steady state
YY = fixed_points(p,GG)
p["Gt"] = p["Gt_osc"]
y0 = YY[-1]
s = ode(Fosc) #Instance of ODE integrator
s.set_integrator("lsoda",nsteps=1.e6,max_step=0) #See Scipy doc for other options
s.set_initial_value(y0,T[index_of_0]) #Initial condition
s.set_f_params(p["k0m"],p["k1"],p["k2m"],p["k2p"],p["k3"],p["k4"],p["k5m"],p["k6m"],p["k7"],p["Kr0"],p["Kr1"],p["Kr2"],p["Kr2p"],p["Km5"],p["Km6"],p["Km7"],p["Gt"],p["Rt"],p["Mt"],p["k_Gp"],p["Gpt"],p["n"]) #Parameters for the right hand side
r = []
y = [0,0,0]
prev_max = 0.
#Solve 3 steps
for k in range(3):
y[k] = 100.* (s.integrate(s.t+dt)[1]/(y0[1] + 1.e-9) - 1.)
if not s.successful():
return np.zeros(401),np.zeros(401),np.zeros(401)
rr = [y[0],y[1],y[2]]
#Solve until we find a max.
while y[1] < y[0] or y[1] < y[2]:
if s.t > 6000:
return np.zeros(401),np.zeros(401),np.zeros(401)
y[0] = y[1]
y[1] = y[2]
y[2] = 100. * (s.integrate(s.t+dt)[1]/(y0[1]+1.e-9) - 1.)
rr.append(y[2])
if not s.successful():
return np.zeros(401),np.zeros(401),np.zeros(401)
prev_max = y[1]
#One more step
y[0] = y[1]
y[1] = y[2]
y[2] = 100.*(s.integrate(s.t+dt)[1]/(y0[1] + 1.e-9) - 1.)
#Find next max
while y[1] < y[0] or y[1] < y[2]:
if s.t > 6000:
return np.zeros(401),np.zeros(401),np.zeros(401)
y[0] = y[1]
y[1] = y[2]
y[2] = 100.*(s.integrate(s.t+dt)[1]/(y0[1] + 1.e-9) - 1.)
rr.append(y[2])
if not s.successful():
return np.zeros(401),np.zeros(401),np.zeros(401)
new_max = y[1]
#Keep solving until the amplitude is stable
while np.abs(new_max-prev_max)>1.e-5:
if s.t > 6000:
return np.zeros(401),np.zeros(401),np.zeros(401)
y[0] = y[1]
y[1] = y[2]
y[2] = 100.*(s.integrate(s.t+dt)[1]/(y0[1] + 1.e-9) - 1.)
rr.append(y[2])
if not s.successful():
return np.zeros(401),np.zeros(401),np.zeros(401)
import os
import sys
import time
import pickle
import numpy as np
import torch
import logging
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from . import gpr
from .serie import Serie
from .dataset import DataSet
from .errors import mean_absolute_error, mean_absolute_percentage_error, symmetric_mean_absolute_percentage_error, mean_squared_error, root_mean_squared_error
logger = logging.getLogger('mogptk')
def LoadModel(filename):
"""
Load model from a given file that was previously saved with `model.save()`.
Args:
filename (str): File name to load from.
Examples:
>>> LoadModel('filename')
"""
filename += ".npy"
with open(filename, 'rb') as r:
return pickle.load(r)
class Exact:
"""
Exact inference for Gaussian process regression.
"""
def __init__(self, variance=1.0):
self.variance = variance
def build(self, kernel, x, y, mean=None, name=None):
return gpr.Exact(kernel, x, y, variance=self.variance, mean=mean, name=name)
class Snelson:
"""
Inference using Snelson and Ghahramani 2005 for Gaussian process regression.
"""
def __init__(self, inducing_points=10, variance=1.0, jitter=1e-6):
self.inducing_points = inducing_points
self.variance = variance
self.jitter = jitter
def build(self, kernel, x, y, mean=None, name=None):
return gpr.Snelson(kernel, x, y, self.inducing_points, variance=self.variance, jitter=self.jitter, mean=mean, name=name)
class OpperArchambeau:
"""
Inference using Opper and Archambeau 2009 for Gaussian process regression.
"""
def __init__(self, likelihood=gpr.GaussianLikelihood(variance=1.0), jitter=1e-6):
self.likelihood = likelihood
self.jitter = jitter
def build(self, kernel, x, y, mean=None, name=None):
return gpr.OpperArchambeau(kernel, x, y, likelihood=self.likelihood, jitter=self.jitter, mean=mean, name=name)
class Titsias:
"""
Inference using Titsias 2009 for Gaussian process regression.
"""
def __init__(self, inducing_points=10, variance=1.0, jitter=1e-6):
self.inducing_points = inducing_points
self.variance = variance
self.jitter = jitter
def build(self, kernel, x, y, mean=None, name=None):
return gpr.Titsias(kernel, x, y, self.inducing_points, variance=self.variance, jitter=self.jitter, mean=mean, name=name)
class Hensman:
"""
Inference using Hensman 2015 for Gaussian process regression.
"""
def __init__(self, inducing_points=None, likelihood=gpr.GaussianLikelihood(variance=1.0), jitter=1e-6):
self.inducing_points = inducing_points
self.likelihood = likelihood
self.jitter = jitter
def build(self, kernel, x, y, mean=None, name=None):
if self.inducing_points is None:
return gpr.Hensman(kernel, x, y, likelihood=self.likelihood, jitter=self.jitter, mean=mean, name=name)
return gpr.SparseHensman(kernel, x, y, self.inducing_points, likelihood=self.likelihood, jitter=self.jitter, mean=mean, name=name)
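# Sketch only (not part of the library API): the inference classes above are
# passed to the Model class below via its `inference` argument. The inducing-point
# count and likelihood used here are illustrative values taken from the
# constructors defined above.
def _example_inference_choices(dataset, kernel):
    exact = Model(dataset, kernel, inference=Exact())                                  # default
    sparse = Model(dataset, kernel, inference=Titsias(inducing_points=20, variance=1.0))
    svgp = Model(dataset, kernel, inference=Hensman(inducing_points=20,
                                                    likelihood=gpr.GaussianLikelihood(variance=1.0)))
    return exact, sparse, svgp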
class Model:
def __init__(self, dataset, kernel, inference=Exact(), mean=None, name=None, rescale_x=False):
"""
Model is the base class for multi-output Gaussian process models.
Args:
dataset (mogptk.dataset.DataSet, mogptk.data.Data): `DataSet` with `Data` objects for all the channels. When a (list or dict of) `Data` object is passed, it will automatically be converted to a `DataSet`.
kernel (mogptk.gpr.kernel.Kernel): The kernel class.
inference: Inference method to use, such as `mogptk.model.Exact`.
mean (mogptk.gpr.mean.Mean): The mean class.
name (str): Name of the model.
rescale_x (bool): Rescale the X axis to [0,1000] to help training.
"""
if not isinstance(dataset, DataSet):
dataset = DataSet(dataset)
if dataset.get_output_dims() == 0:
raise ValueError("dataset must have at least one channel")
names = [name for name in dataset.get_names() if name is not None]
if len(set(names)) != len(names):
raise ValueError("all data channels must have unique names")
if rescale_x:
dataset.rescale_x()
else:
for channel in dataset:
for dim in range(channel.get_input_dims()):
xran = np.max(channel.X[dim].transformed) - np.min(channel.X[dim].transformed)
if xran < 1e-3:
logger.warning("Very small X range may give problems, it is suggested to scale up your X axis")
elif 1e4 < xran:
logger.warning("Very large X range may give problems, it is suggested to scale down your X axis")
self.name = name
self.dataset = dataset
self.kernel = kernel
X = [[x[channel.mask] for x in channel.X] for channel in self.dataset]
Y = [np.array(channel.Y[channel.mask]) for channel in self.dataset]
x, y = self._to_kernel_format(X, Y)
self.model = inference.build(kernel, x, y, mean, name)
if issubclass(type(kernel), gpr.MultiOutputKernel) and isinstance(inference, Exact):
self.model.variance.assign(0.0, lower=0.0, trainable=False) # handled by MultiOutputKernel
################################################################
def print_parameters(self):
"""
Print the parameters of the model in a table.
Examples:
>>> model.print_parameters()
"""
self.model.print_parameters()
def get_parameters(self):
"""
Returns all parameters of the kernel.
Returns:
list: mogptk.gpr.parameter.Parameter
Examples:
>>> params = model.get_parameters()
"""
return self.model.parameters()
def save(self, filename):
"""
Save the model to a given file that can then be loaded using `LoadModel()`.
Args:
filename (str): File name to save to, automatically appends '.npy'.
Examples:
>>> model.save('filename')
"""
filename += ".npy"
try:
os.remove(filename)
except OSError:
pass
with open(filename, 'wb') as w:
pickle.dump(self, w)
def log_marginal_likelihood(self):
"""
Returns the log marginal likelihood of the kernel and its data and parameters.
Returns:
float: The current log marginal likelihood.
Examples:
>>> model.log_marginal_likelihood()
"""
return self.model.log_marginal_likelihood().detach().cpu().item()
def loss(self):
"""
Returns the loss of the kernel and its data and parameters.
Returns:
float: The current loss.
Examples:
>>> model.loss()
"""
return self.model.loss().detach().cpu().item()
def error(self, method='MAE', use_all_data=False):
"""
Returns the error of the kernel prediction with the removed data points in the data set.
Args:
method (str): Error calculation method, such as MAE, MAPE, sMAPE, MSE, or RMSE.
Returns:
float: The current error.
Examples:
>>> model.error()
"""
if use_all_data:
X, Y_true = self.dataset.get_data()
else:
X, Y_true = self.dataset.get_test_data()
x, y_true = self._to_kernel_format(X, Y_true)
y_pred, _ = self.model.predict(x)
if method.lower() == 'mae':
return mean_absolute_error(y_true, y_pred)
elif method.lower() == 'mape':
return mean_absolute_percentage_error(y_true, y_pred)
elif method.lower() == 'smape':
return symmetric_mean_absolute_percentage_error(y_true, y_pred)
elif method.lower() == 'mse':
return mean_squared_error(y_true, y_pred)
elif method.lower() == 'rmse':
return root_mean_squared_error(y_true, y_pred)
else:
raise ValueError("valid error calculation methods are MAE, MAPE, and RMSE")
def train(
self,
method='Adam',
iters=500,
verbose=False,
error=None,
plot=False,
**kwargs):
"""
Trains the model by optimizing the (hyper)parameters of the kernel to approach the training data.
Args:
method (str): Optimizer to use such as LBFGS, Adam, Adagrad, or SGD.
iters (int): Number of iterations, or maximum in case of LBFGS optimizer.
verbose (bool): Print verbose output about the state of the optimizer.
error (str): Calculate prediction error for each iteration by the given method, such as MAE, MAPE, or RMSE.
plot (bool): Plot the negative log likelihood.
**kwargs (dict): Additional dictionary of parameters passed to the PyTorch optimizer.
Returns:
numpy.ndarray: Losses for all iterations.
numpy.ndarray: Errors for all iterations. Only if `error` is set, otherwise zero.
Examples:
>>> model.train()
>>> model.train(method='lbfgs', tolerance_grad=1e-10, tolerance_change=1e-12)
>>> model.train(method='adam', lr=0.5)
"""
error_use_all_data = False
if error is not None and all(not channel.has_test_data() for channel in self.dataset):
error_use_all_data = True
if method.lower() in ('l-bfgs', 'lbfgs', 'l-bfgs-b', 'lbfgsb'):
method = 'LBFGS'
elif method.lower() == 'adam':
method = 'Adam'
elif method.lower() == 'sgd':
method = 'SGD'
elif method.lower() == 'adagrad':
method = 'AdaGrad'
if verbose:
training_points = sum([len(channel.get_train_data()[1]) for channel in self.dataset])
parameters = sum([int(np.prod(param.shape)) for param in self.model.parameters()])
print('\nStarting optimization using', method)
print('‣ Model: {}'.format(self.name))
print('‣ Channels: {}'.format(len(self.dataset)))
if hasattr(self, 'Q'):
print('‣ Mixtures: {}'.format(self.Q))
print('‣ Training points: {}'.format(training_points))
print('‣ Parameters: {}'.format(parameters))
print('‣ Initial loss: {:.3g}'.format(self.loss()))
if error is not None:
print('‣ Initial error: {:.3g}'.format(self.error(error, error_use_all_data)))
losses = np.empty((iters+1,))
errors = np.zeros((iters+1,))
#!/usr/bin/env python
u"""
read_cryosat_L2.py
Written by <NAME> (05/2021)
Reads CryoSat Level-2 data products from baselines A, B and C
Reads CryoSat Level-2 netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
OUTPUTS:
Data_1Hz: Time and Orbit Parameters
Corrections: Elevation Corrections and Flags
Data_20Hz: Geolocation and Elevation Measurements with Quality Parameters
METADATA: MPH, SPH and DSD Header data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 05/2021: use raw binary string prefixes (rb) for regular expressions
Updated 02/2021: replaced numpy bool to prevent deprecation warning
Updated 06/2020: patch error in CryoSat-2 GDR pointer variables
using the 1Hz mapping variable ind_meas_1hz_20_ku to remap the index
Updated 02/2020: tilde-expansion of cryosat-2 files before opening
add scale factors function for converting packed units in binary files
convert from hard to soft tabulation
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
will output with same variable names as the binary read functions
output 20Hz data as masked arrays for all baselines
Updated 08/2019: generalize regular expression patterns in read_DSD function
Updated 10/2018: updated header read functions for python3
Updated 11/2016: added Abs_Orbit and Ascending_flag to Data_1Hz outputs
Abs_Orbit should be same as in read_cryosat_ground_tracks.py
Ascending_flag can use in surface regression fits (McMillan, 2014)
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import os
import re
import netCDF4
import numpy as np
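#-- NOTE: illustrative sketch only (not part of the original module). The packed
#-- units documented in the comments below convert to physical units by a simple
#-- scale factor; the field selection here mirrors those comments (e.g. 1e-7
#-- degrees for latitude/longitude, 1e-3 m for elevations, 1e-2 dB for Sig0).
def _example_scale_factors():
    return {'Lat_1Hz':1e-7, 'Lon_1Hz':1e-7, 'Alt_1Hz':1e-3,
        'Lat':1e-7, 'Lon':1e-7, 'Elev':1e-3, 'Sig0':1e-2, 'Freeboard':1e-3}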
#-- PURPOSE: Initiate L2 MDS variables for CryoSat Baselines A and B
def cryosat_baseline_AB(fid,record_size,n_records):
#-- CryoSat-2 1 Hz data fields (Location Group)
#-- Time and Orbit Parameters plus Measurement Mode
Data_1Hz = {}
#-- Time: day part
Data_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
#-- Time: second part
Data_1Hz['Second'] = np.zeros((n_records),dtype=np.int32)
#-- Time: microsecond part
Data_1Hz['Micsec'] = np.zeros((n_records),dtype=np.int32)
#-- SIRAL mode
Data_1Hz['Siral_mode'] = np.zeros((n_records),dtype=np.uint64)
#-- Lat_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Lat_1Hz'] = np.zeros((n_records),dtype=np.int32)
#-- Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Lon_1Hz'] = np.zeros((n_records),dtype=np.int32)
#-- Alt_1Hz: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Data_1Hz['Alt_1Hz'] = np.zeros((n_records),dtype=np.int32)
#-- Mispointing: packed units (millidegrees, 1e-3 degrees)
Data_1Hz['Mispointing'] = np.zeros((n_records),dtype=np.int16)
#-- Number of valid records in the block of twenty that contain data
#-- Last few records of the last block of a dataset may be blank blocks
#-- inserted to bring the file up to a multiple of twenty.
Data_1Hz['N_valid'] = np.zeros((n_records),dtype=np.int16)
#-- CryoSat-2 geophysical corrections (External Corrections Group)
Corrections = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Corrections['dryTrop'] = np.zeros((n_records),dtype=np.int16)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Corrections['wetTrop'] = np.zeros((n_records),dtype=np.int16)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Corrections['InvBar'] = np.zeros((n_records),dtype=np.int16)
#-- Dynamic Atmosphere Correction packed units (mm, 1e-3 m)
Corrections['DAC'] = np.zeros((n_records),dtype=np.int16)
#-- Ionospheric Correction packed units (mm, 1e-3 m)
Corrections['Iono'] = np.zeros((n_records),dtype=np.int16)
#-- Sea State Bias Correction packed units (mm, 1e-3 m)
Corrections['SSB'] = np.zeros((n_records),dtype=np.int16)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Corrections['ocTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Corrections['lpeTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Corrections['olTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Corrections['seTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Corrections['gpTideElv'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare1'] = np.zeros((n_records),dtype=np.int16)
#-- Surface Type: Packed in groups of three bits for each of the 20 records
Corrections['Surf_type'] = np.zeros((n_records),dtype=np.uint64)
#-- Mean Sea Surface or Geoid packed units (mm, 1e-3 m)
Corrections['MSS_Geoid'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean Depth/Land Elevation Model (ODLE) packed units (mm, 1e-3 m)
Corrections['ODLE'] = np.zeros((n_records),dtype=np.int32)
#-- Ice Concentration packed units (%/100)
Corrections['Ice_conc'] = np.zeros((n_records),dtype=np.int16)
#-- Snow Depth packed units (mm, 1e-3 m)
Corrections['Snow_depth'] = np.zeros((n_records),dtype=np.int16)
#-- Snow Density packed units (kg/m^3)
Corrections['Snow_density'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare2'] = np.zeros((n_records),dtype=np.int16)
#-- Corrections Status Flag
Corrections['C_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Significant Wave Height (SWH) packed units (mm, 1e-3)
Corrections['SWH'] = np.zeros((n_records),dtype=np.int16)
#-- Wind Speed packed units (mm/s, 1e-3 m/s)
Corrections['Wind_speed'] = np.zeros((n_records),dtype=np.uint16)
Corrections['Spare3'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare4'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare5'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare6'] = np.zeros((n_records),dtype=np.int16)
#-- CryoSat-2 20 Hz data fields (Measurement Group)
#-- Derived from instrument measurement parameters
n_blocks = 20
Data_20Hz = {}
#-- Delta between the timestamps for 20Hz record and the 1Hz record
#-- D_time_mics packed units (microseconds)
Data_20Hz['D_time_mics'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['D_time_mics'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Data_20Hz['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Lat'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Data_20Hz['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Lon'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Measured elevation above ellipsoid from retracker: packed units (mm, 1e-3 m)
Data_20Hz['Elev'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Elev'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Interpolated Sea Surface Height Anomaly: packed units (mm, 1e-3 m)
Data_20Hz['SSHA_interp'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['SSHA_interp'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Interpolated Sea Surface Height measurement count
Data_20Hz['SSHA_interp_count'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['SSHA_interp_count'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Interpolation quality estimate RSS: packed units (mm, 1e-3 m)
Data_20Hz['SSHA_interp_RMS'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['SSHA_interp_RMS'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Sigma Zero Backscatter for retracker: packed units (1e-2 dB)
Data_20Hz['Sig0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Sig0'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Peakiness: packed units (1e-2)
Data_20Hz['Peakiness'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
Data_20Hz['Peakiness'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Freeboard: packed units (mm, 1e-3 m)
#-- -9999 default value indicates computation has not been performed
Data_20Hz['Freeboard'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Freeboard'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Number of averaged echoes or beams
Data_20Hz['N_avg'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['N_avg'].mask = np.ones((n_records,n_blocks),dtype=bool)
Data_20Hz['Spare1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Spare1'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Quality flags
Data_20Hz['Quality_flag'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
Data_20Hz['Quality_flag'].mask = np.ones((n_records,n_blocks),dtype=bool)
Data_20Hz['Spare2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Spare2'].mask = np.ones((n_records,n_blocks),dtype=bool)
Data_20Hz['Spare3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Spare3'].mask = np.ones((n_records,n_blocks),dtype=bool)
Data_20Hz['Spare4'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Spare4'].mask = np.ones((n_records,n_blocks),dtype=bool)
Data_20Hz['Spare5'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Spare5'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Location Group for record r
Data_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Second'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Siral_mode'][r] = np.fromfile(fid,dtype='>u8',count=1)
Data_1Hz['Lat_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Lon_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Alt_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Mispointing'][r] = np.fromfile(fid,dtype='>i2',count=1)
Data_1Hz['N_valid'][r] = np.fromfile(fid,dtype='>i2',count=1)
#-- CryoSat-2 External Corrections Group for record r
Corrections['dryTrop'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['wetTrop'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['InvBar'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['DAC'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Iono'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['SSB'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['ocTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['lpeTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['olTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['seTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['gpTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Spare1'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Surf_type'][r] = np.fromfile(fid,dtype='>u8',count=1)
Corrections['MSS_Geoid'][r] = np.fromfile(fid,dtype='>i4',count=1)
Corrections['ODLE'][r] = np.fromfile(fid,dtype='>i4',count=1)
Corrections['Ice_conc'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Snow_depth'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Snow_density'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Spare2'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['C_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Corrections['SWH'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Wind_speed'][r] = np.fromfile(fid,dtype='>u2',count=1)
Corrections['Spare3'][r] = np.fromfile(fid,dtype='>i2',count=1)
# this program is going to estimate parameters from simulated datasets that originate from an OU process
# with noise added. The parameters of the simulation and the length of the simulation are set through
# arguments
import langevin
import pandas as pd
import numpy as np
import argparse
import pymc3 as pm
import theano.tensor as tt
from scipy.stats import pearsonr
from scipy.optimize import root
class Ornstein_Uhlenbeck(pm.Continuous):
"""
Ornstein-Uhlenbeck Process
Parameters
----------
B : tensor
B > 0, B = exp(-(D/A)*delta_t)
A : tensor
A > 0, amplitude of fluctuation <x**2>=A
delta_t: scalar
delta_t > 0, time step
"""
def __init__(self, A=None, B=None,
*args, **kwargs):
super(Ornstein_Uhlenbeck, self).__init__(*args, **kwargs)
self.A = A
self.B = B
self.mean = 0.
def logp(self, x):
A = self.A
B = self.B
x_im1 = x[:-1]
x_i = x[1:]
ou_like = pm.Normal.dist(mu=x_im1*B, tau=1.0/A/(1-B**2)).logp(x_i)
return pm.Normal.dist(mu=0.0,tau=1.0/A).logp(x[0]) + tt.sum(ou_like)
# function to calculate A and B from the dataset
def OUanalytic2(data):
N = data.size
data1sq = data[0]**2
dataNsq = data[-1]**2
datasq = np.sum(data[1:-1]**2)
datacorr = np.sum(data[0:-1]*data[1:])
coef = [(N-1)*datasq,
(2.0-N)*datacorr,
-data1sq-(N+1)*datasq-dataNsq,
N*datacorr]
B=np.roots(coef)[-1]
Q=(data1sq+dataNsq)/(1-B**2)
Q=Q+datasq*(1+B**2)/(1-B**2)
Q=Q-datacorr*2*B/(1-B**2)
A = Q/N
P2A = -N/A**2/2
Btmp = B**2*(1+2*N)
tmp = (1+Btmp)*(data1sq+dataNsq) + (2*Btmp + N + 1 -B**4*(N-1))*datasq - 2*B*(1+B**2+2*N)*datacorr
P2B = -tmp/((1-B**2)**2*(data1sq+dataNsq + (1+B**2)*datasq - 2*B*datacorr))
PAB = (N-1)*B/A/(1-B**2)
dA = np.sqrt(-P2B/(P2A*P2B-PAB**2))
dB = np.sqrt(-P2A/(P2A*P2B-PAB**2))
return A,dA,B,dB
def OUresult2(data,deltat):
A, dA, B ,dB = OUanalytic2(data)
tau = -deltat/np.log(B)
dtau = deltat*dB/B/np.log(B)**2
return A,dA,tau,dtau
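# Illustrative only: recover (A, tau) and their uncertainties from a single
# simulated OU trace; the langevin.time_series call mirrors its use in
# correlated_ts below.
def _example_single_trace(A=1.0, D=1.0, delta_t=0.1, N=1000):
    data = langevin.time_series(A=A, D=D, delta_t=delta_t, N=N)
    A_hat, dA, tau_hat, dtau = OUresult2(data, delta_t)
    return A_hat, dA, tau_hat, dtau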
def OUcross(data1,data2,deltat):
x1 = data1 + data2
x2 = data1 - data2
x1_A,x1_dA, x1_tau ,x1_dtau= OUresult2(x1,deltat)
x2_A, x2_dA, x2_tau ,x2_dtau= OUresult2(x2,deltat)
return (x1_A - x2_A)/x2_A, np.sqrt(x1_dA**2 + x1_A**2*x2_dA**2/x2_A**4)
def calc_fundstats(x):
return x[0]**2+x[-1]**2,np.sum(x[1:-1]**2),np.sum(x[0:-1]*x[1:])
def b(D,A,delta_t):
return np.exp(-D/A*delta_t)
def q(aep,ass,ac,b):
return (aep + (1+b**2)*ass - 2*b*ac)/(1-b**2)
def dqdB(aep,ass,ac,b):
return 2*(b*aep+2*b*ass-(1+b**2)*ac)/(1-b**2)**2
def d2qdB2(aep,ass,ac,b):
return (6*b+2)/(1-b**2)**3*(aep+2*ass)-(4*b**3+12*b)/(1-b**2)**3*ac
def dBdA(b,D,A,delta_t):
return b*D*delta_t/A**2
def dBdD(b,A,delta_t):
return -b*delta_t/A
def d2BdA2(b,D,A,delta_t):
return b*D*delta_t/A**3*(D*delta_t/A-2)
def d2BdD2(b,A,delta_t):
return b*delta_t**2/A**2
def d2BdAdD(b,D,A,delta_t):
return b*delta_t/A**2*(1-D*delta_t/A)
def d2qdD2(aep,ass,ac,b,A,delta_t):
return d2qdB2(aep,ass,ac,b)*dBdD(b,A,delta_t)**2+dqdB(aep,ass,ac,b)*d2BdD2(b,A,delta_t)
def d2qdA2(aep,ass,ac,b,D,A,delta_t):
return d2qdB2(aep,ass,ac,b)*dBdA(b,D,A,delta_t)**2+dqdB(aep,ass,ac,b)*d2BdA2(b,D,A,delta_t)
def d2qdAdD(aep,ass,ac,b,D,A,delta_t):
return d2qdB2(aep,ass,ac,b)*dBdA(b,D,A,delta_t)*dBdD(b,A,delta_t)+dqdB(aep,ass,ac,b)*d2BdAdD(b,D,A,delta_t)
def d2PdA2(N,aep,ass,ac,b,D,A,delta_t):
return (N/2/A**2 -
q(aep,ass,ac,b)/A**3 +
(N-1)/(1-b**2)*(b*d2BdA2(b,D,A,delta_t) + dBdA(b,D,A,delta_t)**2*(1+b**2)/(1-b**2)) -
d2qdA2(aep,ass,ac,b,D,A,delta_t)/2/A +
1/A*dqdB(aep,ass,ac,b)*dBdA(b,D,A,delta_t))
def d2PdAdD(N,aep,ass,ac,b,D,A,delta_t):
return (dqdB(aep,ass,ac,b)*dBdD(b,A,delta_t)/2/A**2 -
d2qdAdD(aep,ass,ac,b,D,A,delta_t)/2/A +
(N-1)/(1-b**2)*(b*d2BdAdD(b,D,A,delta_t) + dBdA(b,D,A,delta_t)*dBdD(b,A,delta_t)*(1+b**2)/(1-b**2)))
def d2PdD2(N,a1ep,a1ss,a1c,a2ep,a2ss,a2c,b1,b2,D,A1,A2,delta_t):
return ((N-1)/(1-b1**2)*(b1*d2BdD2(b1,A1,delta_t) + dBdD(b1,A1,delta_t)**2*(1+b1**2)/(1-b1**2))+
(N-1)/(1-b2**2)*(b2*d2BdD2(b2,A2,delta_t) + dBdD(b2,A2,delta_t)**2*(1+b2**2)/(1-b2**2))-
d2qdD2(a1ep,a1ss,a1c,b1,A1,delta_t)/2/A1 -
d2qdD2(a2ep,a2ss,a2c,b2,A2,delta_t)/2/A2)
def phi_deriv(x,a1ep,a1ss,a1c,a2ep,a2ss,a2c,delta_t,N):
# x[0] = A1, x[1] = A2, x[2]=D
A1 = x[0]
A2 = x[1]
D = x[2]
b1 = b(D,A1,delta_t)
b2 = b(D,A2,delta_t)
Q1 = q(a1ep,a1ss,a1c,b1)
Q2 = q(a2ep,a2ss,a2c,b2)
dQ1 = dqdB(a1ep,a1ss,a1c,b1)
dQ2 = dqdB(a2ep,a2ss,a2c,b2)
y1 = -N*A1**2/2 + A1*Q1/2 + b1*D*delta_t*(A1*b1*(N-1)/(1-b1**2)-dQ1/2)
y2 = -N*A2**2/2 + A2*Q2/2 + b2*D*delta_t*(A2*b2*(N-1)/(1-b2**2)-dQ2/2)
y3 = (b1*(N-1)/(1-b1**2)-dQ1/A1/2)*b1/A1 + (b2*(N-1)/(1-b2**2)-dQ2/A2/2)*b2/A2
return np.array([y1,y2,y3])
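# Illustrative only (the helper name is ours): the stationarity conditions encoded
# by phi_deriv can be solved with scipy.optimize.root (imported above), using the
# sufficient statistics returned by calc_fundstats for the two time series.
def _example_map_estimate(x1, x2, delta_t, x0=np.array([1.0, 1.0, 1.0])):
    a1ep, a1ss, a1c = calc_fundstats(x1)
    a2ep, a2ss, a2c = calc_fundstats(x2)
    sol = root(phi_deriv, x0, args=(a1ep, a1ss, a1c, a2ep, a2ss, a2c, delta_t, len(x1)))
    return sol.x  # [A1, A2, D]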
def correlated_ts(c,delta_t = 0.1,N=1000):
# parameters for coupled oscillator
K,D = 1.0,1.0
data1 = langevin.time_series(A=1/K, D=D, delta_t=delta_t, N=N)
data2 = langevin.time_series(A=1/(K+np.abs(c)), D=D, delta_t=delta_t, N=N)
x1 = (data1 + data2)/2
if c>0:
x2 = (data1 - data2)/2
else:
x2 = (data2-data1)/2
return x1,x2
#parameters
a_bound=5
M=500
N=1000
rho_list = [0.5]
results = None
for rho in rho_list:
for i in range(M):
print("rho: ",rho,"iteration: ",i)
delta_t = 0.3
coupling = 2*np.abs(rho)/(1-np.abs(rho))
# -*- coding: utf-8 -*-
"""
@author: VHOEYS
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.transforms import offset_copy
from .sensitivity_base import *
from .extrafunctions import *
from .plot_functions_rev import plotbar, scatterwithtext
# from .latextablegenerator import *
class MorrisScreening(SensitivityAnalysis):
'''
Morris screening method, with the improved sampling strategy,
selecting a subset of the trajectories to improve the sampled space.
Working with groups is possible.
Parameters
-----------
parsin : list
either a list of (min,max,'name') values,
[(min,max,'name'),(min,max,'name'),...(min,max,'name')]
or a list of ModPar instances
ModelType : pyFUSE | PCRaster | external
Give the type of model to work with
Attributes
------------
_ndim : int
number of factors examined. When groups are chosen, the number of factors is stored in NumFact and sizea becomes the number of created groups, (k)
NumFact : int
number of factors examined in the case when groups are chosen
intervals(p) : int
number of intervals considered in (0, 1)
UB : ndarray
Upper Bound for each factor in list or array, (sizea,1)
LB : ndarray
Lower Bound for each factor in list or array, (sizea,1)
GroupNumber : int
Number of groups (eventually 0)
GroupMat : ndarray
Array which describes the chosen groups. Each column represents
a group and its elements are set to 1 in correspondence of the
factors that belong to the fixed group. All the other elements
are zero, (NumFact,GroupNumber)
Delta : float
jump value to calculate screening
intervals : int
number of intervals used in the sampling
noptimized : int
the r-value, i.e. the number of base runs retained in the optimized sampling
OutMatrix : ndarray
not-optimized sample matrix
OutFact : ndarray
not-optimized matrix of changing factors
Groupnumber : int
number of groups used
sizeb : int
when using groups, sizeb is determined by the number of groups,
otherwise the number of factors
OptMatrix_b : ndarray
the not-adapted version of the OptMatrix, with all sampled values
between 0 and 1
parset2run : ndarray
every row is a parameter set to run the model for. All sensitivity
methods have this attribute to interact with base-class running
Notes
---------
Original Matlab code from:
http://sensitivity-analysis.jrc.it/software/index.htm
Original method described in [M1]_, improved by the optimization of [M2]_.
The option to work with groups is added, as described in [M2]_.
Examples
------------
>>> Xi = [(0.0,5.0,r'$X_1$'),(4.0,7.0,r'$X_2$'),(0.0,1.0,r'$X_3$'),
(0.0,1.0,r'$X_4$'), (0.0,1.0,r'$X_5$'),(0.5,0.9,r'$X_6$')]
>>> # Set up the morris class instance with uncertain factors Xi
>>> sm = MorrisScreening(Xi,ModelType = 'external')
>>> # calculate an optimized set of parameter sets to run model
>>> OptMatrix, OptOutVec = sm.Optimized_Groups(nbaseruns=100,
intervals = 4, noptimized=4,
Delta = 0.4)
>>> # Check the quality of the selected trajects
>>> sm.Optimized_diagnostic(width=0.15)
>>> #RUN A MODEL AND GET OUTPUT (EXTERNAL) -> get output
>>> #Calculate the Morris screening diagnostics
>>> sm.Morris_Measure_Groups(output)
>>> #plot a barplot of mu, mustar and sigma (edgecolor and facecolor grey)
>>> sm.plotmu(ec='grey',fc='grey')
>>> sm.plotmustar(outputid = 1,ec='grey',fc='grey')
>>> sm.plotsigma(ec='grey',fc='grey')
>>> #plot the mu* sigma plain
>>> sm.plotmustarsigma(zoomperc = 0.05, outputid = 1, loc = 2)
>>> #export the results in txt file
>>> sm.txtresults(name='MorrisTestOut.txt')
>>> #export the results in tex-table
>>> sm.latexresults(name='MorrisTestOut.tex')
References
------------
.. [M1] Morris, <NAME>. Factorial Sampling Plans for Preliminary Computational
Experiments. Technometrics 33, no. 2 (1991): 161–174.
.. [M2] Campolongo, Francesca, <NAME>, and <NAME>.
An Effective Screening Design for Sensitivity Analysis of Large Models.
Environmental Modelling & Software 22, no. 10 (October 2007): 1509–1518.
http://linkinghub.elsevier.com/retrieve/pii/S1364815206002805.
.. [M3] Saltelli, Andrea, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, and <NAME>.
Global Sensitivity Analysis, The Primer. John Wiley & Sons Ltd, 2008.
'''
def __init__(self, parsin, ModelType = 'external'):
SensitivityAnalysis.__init__(self, parsin)
self._methodname = 'MorrisScreening'
if ModelType == 'pyFUSE':
self.modeltype = 'pyFUSE'
print('The analysed model is built up by the pyFUSE environment')
elif ModelType == 'external':
self.modeltype = 'external'
print('The analysed model is externally run')
elif ModelType == 'PCRaster':
self.modeltype = 'PCRasterPython'
print('The analysed model is a PCRasterPython Framework instance')
elif ModelType == 'testmodel':
self.modeltype = 'testmodel'
print('The analysed model is a testmodel')
else:
raise Exception('Not supported model type')
self.LB = np.array([el[0] for el in self._parsin])
self.UB = np.array([el[1] for el in self._parsin])
def Sampling_Function_2(self, nbaseruns, LB, UB):
'''
Python version of the Morris sampling function
Parameters
-----------
nbaseruns : int
sample size
Returns
---------
OutMatrix(sizeb*r, sizea) :
for the entire sample size computed In(i,j) matrices, values to
run model for
OutFact(sizea*r,1) :
for the entire sample size computed Fact(i,1) vectors, indicates
the factor changing at specific line
Notes
-------
B0 is constructed as in Morris design when groups are not considered.
When groups are considered the routine follows the following steps
1. Creation of P0 and DD0 matrices defined in Morris for the groups.
This means that the dimensions of these 2 matrices are
(GroupNumber,GroupNumber).
2. Creation of AuxMat matrix with (GroupNumber+1,GroupNumber)
elements.
3. Definition of GroupB0 starting from AuxMat, GroupMat
and P0.
4. The final B0 for groups is obtained as [ones(sizeb,1)*x0' + GroupB0].
The P0 permutation is present in GroupB0 and it's not necessary to
permute the matrix (ones(sizeb,1)*x0') because it's already randomly
created.
Adapted from the matlab version of 15 November 2005 by J.Cariboni
References
-------------
.. [M4] <NAME>, <NAME>, <NAME>, Sensitivity Analysis
on page 68 ss
.. [M5] <NAME>, <NAME>, JRC - IPSC Ispra, Varese, IT
'''
#The integration in class version not optimal, therefor this mapping
k = self._ndim
self.nbaseruns = nbaseruns
r = nbaseruns
p = self.intervals
GroupMat = self.GroupMat
# Parameters and initialisation of the output matrix
sizea = k
Delta = self.Delta
NumFact = sizea
if GroupMat.shape[0]==GroupMat.size:
Groupnumber=0
else:
Groupnumber = GroupMat.shape[1] #size(GroupMat,2)
sizea = GroupMat.shape[1]
sizeb = sizea + 1
# sizec = 1
Outmatrix = np.zeros(((sizea+1)*r,NumFact))
OutFact = np.zeros(((sizea+1)*r,1))
# For each i generate a trajectory
for i in range(r):
Fact=np.zeros(sizea+1)
# Construct DD0
DD0 = np.matrix(np.diagflat(np.sign(np.random.random(k)*2-1)))
# Construct B (lower triangular)
B = np.matrix(np.tri((sizeb), sizea,k=-1, dtype=int))
# Construct A0, A
A0 = np.ones((sizeb,1))
A = np.ones((sizeb,NumFact))
# Construct the permutation matrix P0. In each column of P0 one randomly chosen element equals 1
# while all the others equal zero.
# P0 tells the order in which order factors are changed in each
# Note that P0 is then used reading it by rows.
I = np.matrix(np.eye(sizea))
P0 = I[:,np.random.permutation(sizea)]
# When groups are present the random permutation is done only on B. The effect is the same since
# the added part (A0*x0') is completely random.
if Groupnumber != 0:
B = B * (np.matrix(GroupMat)*P0.transpose()).transpose()
# Compute AuxMat both for single factors and groups analysis. For Single factors analysis
# AuxMat is added to (A0*X0) and then permutated through P0. When groups are active AuxMat is
# used to build GroupB0. AuxMat is created considering DD0. If the element on DD0 diagonal
# is 1 then AuxMat will start with zero and add Delta. If the element on DD0 diagonal is -1
# then DD0 will start Delta and goes to zero.
AuxMat = Delta* 0.5 *((2*B - A) * DD0 + A)
#----------------------------------------------------------------------
# a --> Define the random vector x0 for the factors. Note that x0 takes value in the hypercube
# [0,...,1-Delta]*[0,...,1-Delta]*[0,...,1-Delta]*[0,...,1-Delta]
xset=np.arange(0.0,1.0-Delta,1.0/(p-1))
try:
x0 = np.matrix(xset.take((np.ceil(np.random.random(k)*np.floor(p/2))-1).astype(int))) #.transpose()
except:
raise Exception('invalid p (intervals) and Delta combination, please adapt')
#----------------------------------------------------------------------
# b --> Compute the matrix B*, here indicated as B0. Each row in B0 is a
# trajectory for Morris Calculations. The dimension of B0 is (Numfactors+1,Numfactors)
if Groupnumber != 0:
B0 = (A0*x0 + AuxMat)
else:
B0 = (A0*x0 + AuxMat)*P0
#----------------------------------------------------------------------
# c --> Compute values in the original intervals
# B0 has values x(i,j) in [0, 1/(p -1), 2/(p -1), ... , 1].
# To obtain values in the original intervals [LB, UB] we compute
# LB(j) + x(i,j)*(UB(j)-LB(j))
In=np.tile(LB, (sizeb,1)) + np.array(B0)*np.tile((UB-LB), (sizeb,1)) #array!! ????
# Create the Factor vector. Each component of this vector indicate which factor or group of factor
# has been changed in each step of the trajectory.
for j in range(sizea):
Fact[j] = np.where(P0[j,:])[1]
Fact[sizea] = int(-1) #Only kept so the shape stays consistent; alternatively Fact could be made smaller
#append the create traject to the others
Outmatrix[i*(sizea+1):i*(sizea+1)+(sizea+1),:]=np.array(In)
OutFact[i*(sizea+1):i*(sizea+1)+(sizea+1)]=np.array(Fact).reshape((sizea+1,1))
return Outmatrix, OutFact
def Optimized_Groups(self, nbaseruns=500, intervals = 4, noptimized=10,
GroupMat=np.array([]), Delta = 'default'):
'''
Optimization in the choice of trajectories for the Morris experiment.
Starting from an initial set of nbaseruns, a set of noptimized runs
is selected to use for the screening techique
Groups can be used to evaluate parameters together
Parameters
------------
nbaseruns : int (default 500)
Total number of trajectories
intervals : int (default 4)
Number of levels
noptimized : int (default 10)
Final number of optimal trajectories
GroupMat : [NumFact,NumGroups]
Matrix describing the groups. Each column represents a group and
its elements are set to 1 in correspondence of the factors that
belong to the fixed group. All the other elements are zero.
Delta : 'default'|float (0-1)
When default, the value is calculated from the p value (intervals),
otherwise the given number is taken
Returns
--------
OptMatrix/ self.OptOutMatrix : ndarray
Optimized sampled values giving the matrix too run the model for
OptOutVec/ self.OptOutFact : ndarray
Optimized sampled values giving the matrix indicating the factor
changed at a specific line
Notes
-----
The combination of Delta and intervals is important to get an
good overview. The user is directed to [M3]_
'''
#number of trajectorie (r)
N = nbaseruns
#check the p and Delta value workaround
if not intervals%2==0:
print('It is advised to use an even number for the p-value, number \
of intervals, since currently not all levels are explored')
if Delta == 'default':
self.Delta = intervals/(2.*(intervals-1.))
else:
if Delta > 0.0 and Delta < 1.0:
self.Delta = Delta
else:
raise Exception('Invalid Delta value, please use default or float number')
self.intervals = intervals
# p = intervals
self.noptimized = noptimized
r = noptimized
self.GroupMat = GroupMat
NumFact = self._ndim
LBt = np.zeros(NumFact)
UBt = np.ones(NumFact)
OutMatrix, OutFact = self.Sampling_Function_2(nbaseruns, LBt, UBt) #Version with Groups
#again mapping (not optimal)
self.OutMatrix = OutMatrix
self.OutFact = OutFact
try:
Groupnumber = GroupMat.shape[1]
except:
Groupnumber = 0
self.Groupnumber = Groupnumber
if Groupnumber != 0:
sizeb = Groupnumber +1
else:
sizeb = NumFact +1
self.sizeb = sizeb
Dist = np.zeros((N,N))
        Diff_Traj = np.arange(0.0, N, 1.0)
import argparse
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import cv2
import numpy as np
import os.path as osp
import time
from tqdm import tqdm
def parseInput():
choice_helper = {
        1: "[1] (Default) for fast mode where the image is converted into its FFT form and displayed",
        2: "[2] for denoising where the image is denoised by applying an FFT, truncating high frequencies and then displayed",
3: "[3] for compressing and saving the image",
4: "[4] for plotting the runtime graphs for the report"
}
denoise_helper = {
1: "[1] (Default) remove low frequency",
2: "[2] remove high frequency",
3: "[3] threshold everything",
4: "[4] threshold low frequency",
5: "[5] threshold high frequency"
}
parser = argparse.ArgumentParser("fft.py")
parser.add_argument("-m", type=int, default=1, choices=[1, 2, 3, 4], help='; '.join(choice_helper.values()))
parser.add_argument("image", type=str, default="moonlanding.png", metavar="image.png", nargs="?",
help="(optional) filename of the image we wish to take the DFT of.")
parser.add_argument("--denoise_ratio", type=float, default=0.1, help="(optional) denoising ratio")
parser.add_argument("--compress_ratio", type=float, default=0.9, help="(optional) compressing ratio")
parser.add_argument("--denoise_type", type=int, default=2, choices=[1, 2, 3, 4, 5],
help='; '.join(denoise_helper.values()))
    parser.add_argument("--denoise_cap", type=float, default=0.5, help="(optional) threshold for denoising")
parser.add_argument("--compress_type", type=int, default=1, choices=[1, 2])
    parser.add_argument("--compress_cap", type=float, default=0.1, help="(optional) threshold for compressing")
parser.add_argument("--debug", action="store_false", help="run under debug mode")
return parser.parse_args()
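# Example invocations (assumptions: run from the repository root with the image
# file present; the flags mirror the argparse definitions above):
#   python fft.py -m 1 moonlanding.png
#   python fft.py -m 2 moonlanding.png --denoise_ratio 0.1 --denoise_type 2
#   python fft.py -m 3 moonlanding.png --compress_ratio 0.9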
class FFTransformer:
def __init__(self, config):
self.mode = config.m
self.image_name = config.image
self.denosing_percentile = config.denoise_ratio
self.compressing_percentile = config.compress_ratio
self.debug = config.debug
self.denosing_type = config.denoise_type
self.denoise_cap = config.denoise_cap
self.compress_type = config.compress_type
self.compress_cap = config.compress_cap
if not osp.exists(self.image_name):
raise RuntimeError(
"INVALID image input: {}. Please type python fft.py -h for help.".format(self.image_name))
def to_string(self):
return "Mode: {}, Image: {}".format(self.mode, self.image_name)
def start(self):
original_image = self.validify_img(cv2.imread(self.image_name, cv2.IMREAD_GRAYSCALE))
# display original image
plt.title("original")
plt.imshow(original_image)
plt.savefig("assignment2/pics/original_" + self.image_name, bbox_inches='tight')
plt.close()
if self.mode == 1:
# mode 1
fft_image = self.dft_fast2d(original_image)
plt.title('self.fft')
plt.imshow(np.abs(fft_image), norm=LogNorm())
plt.savefig("assignment2/pics/self_fft_" + self.image_name, bbox_inches='tight')
plt.close()
fft_image = np.fft.fft2(original_image)
plt.title('np.fft')
plt.imshow(np.abs(fft_image), norm=LogNorm())
plt.savefig("assignment2/pics/np_fft_" + self.image_name, bbox_inches='tight')
elif self.mode == 2:
# mode 2
self.denoise(original_image, percentile=self.denosing_percentile, type=self.denosing_type,
cap=self.denoise_cap)
elif self.mode == 3:
# mode 3
self.compress(original_image, percentile=self.compressing_percentile)
else:
# mode 4
e = 10 if self.debug else 14
trial = 10 if self.debug else 10
naive_time = list()
fast_time = list()
for p in range(5, e):
size = 2 ** p
print("##########################")
print("Test data size: ({}, {})".format(size, size))
print("##########################")
temp_n_time = list()
temp_f_time = list()
for t in range(trial):
print("Trial: {}".format(t))
test_data = np.random.rand(size, size).astype(np.float32)
naive_start = time.time()
self.dft_naive2d(test_data)
n_time = time.time() - naive_start
print("Naive time: {}".format(n_time))
temp_n_time.append(n_time)
fast_start = time.time()
self.dft_fast2d(test_data)
f_time = time.time() - fast_start
print("Fast time: {}".format(f_time))
temp_f_time.append(f_time)
naive_time.append(temp_n_time)
fast_time.append(temp_f_time)
naive_time = np.array(naive_time)
fast_time = np.array(fast_time)
naive_mean = naive_time.mean(axis=1)
naive_std = naive_time.std(axis=1)*2
fast_mean = fast_time.mean(axis=1)
fast_std = fast_time.std(axis=1)*2
power = np.arange(5, e)
plt.errorbar(power, naive_mean, yerr=naive_std, label="naive")
plt.errorbar(power, fast_mean, yerr=fast_std, label="fast")
plt.xlabel("size of test data (power of 2)")
plt.ylabel("runtime (second)")
plt.xticks(power)
            plt.title("Runtime for naive FT against fast FT")
plt.legend(loc='best')
plt.savefig("assignment2/pics/runtime.png", bbox_inches='tight')
plt.close()
def compress(self, image, percentile=0.25, threshold=16):
"""
:param image: 2D numpy array
:param percentile: compressing ratio
:param threshold: threshold for FFT
:return: image after compressing
"""
fft_img = self.dft_fast2d(image, threshold=threshold)
# filtering
row, col = fft_img.shape
counter = 0
for r in tqdm(range(row)):
for c in range(col):
if (r+c) > percentile*(row+col):
fft_img[r, c] = 0
counter += 1
print("Compression Ratio: {} of original image is compressed".format(counter/(row*col)))
name = "assignment2/pics/compressing_{}_".format(percentile) + self.image_name.split('.')[0] + ".csv"
        np.savetxt(name, fft_img, delimiter=",")
import numpy as np
from astropy.io import fits
import scipy.interpolate as spi
import scipy.ndimage.interpolation as spni
import gaussian as g
import matplotlib.pyplot as plt
from . import optspex
from . import julday
import sys, smooth, centroid
#import hst_scan as hst
from ..lib import sort_nicely as sn
import astropy.io.fits as pf
import smoothing
try:
basestring
except NameError:
basestring = str
# Read FITS files from HST's WFC3 instrument
def read(filenames, returnHdr=True):
'''
Reads FITS files from HST's WFC3 instrument.
Parameters
----------
filenames : Single or list of filenames to read
returnHdr : Set True to return header files
Returns
-------
data : Array of data frames
err : Array of uncertainty frames
hdr : List of header files
master_hdr : List of master header files
History
-------
Written by <NAME> November 2012
'''
if isinstance(filenames, basestring):
filenames = [filenames]
hdulist = fits.open(filenames[0])
nx = hdulist['SCI',1].header['NAXIS1']
ny = hdulist['SCI',1].header['NAXIS2']
# Determine if we are using IMA or FLT files
# FLT files already subtract first from last, only 1 read
if filenames[0].endswith('flt.fits'):
nreads = 1
else:
nreads = hdulist['SCI',1].header['SAMPNUM']
nfiles = len(filenames)
data = np.zeros((nfiles,nreads,ny,nx)) #Flux
err = np.zeros((nfiles,nreads,ny,nx)) #Uncertainty
hdr = []
mhdr = []
i = 0
for name in filenames:
hdulist = fits.open(name)
hdr.append([])
j = 0
for rd in range(nreads,0,-1):
if hdulist['SCI',rd].header['BUNIT'] == 'ELECTRONS/S':
#Science data and uncertainties were previously in units of e-/sec,
#therefore multiply by sample time to get electrons.
samptime = hdulist['SCI',rd].header['SAMPTIME']
data[i,j] = hdulist['SCI',rd].data*samptime
err[i,j] = hdulist['ERR',rd].data*samptime
else:
data[i,j] = hdulist['SCI',rd].data
err[i,j] = hdulist['ERR',rd].data
hdr[i].append(hdulist['SCI',rd].header)
j += 1
mhdr.append(hdulist[0].header)
i += 1
if returnHdr:
return data, err, hdr, mhdr
else:
return data, err
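# Minimal usage sketch for read() (the filenames below are hypothetical; real
# WFC3 ima/flt products would be passed in practice):
#   data, err, hdr, mhdr = read(['ibxy01aaq_ima.fits', 'ibxy01abq_ima.fits'])
#   data, err = read('ibxy01aaq_flt.fits', returnHdr=False)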
def imageCentroid(filenames, guess, trim, ny, scifile):
'''
Calculate centroid for a list of direct images.
Parameters
----------
filenames : List of direct image filenames
guess : Paired list, centroid guess
trim : Trim image when calculating centroid
Returns
-------
center : Centroids
History
-------
Written by <NAME> November 2013
Added IRSUB256 March 2016
'''
nfiles = len(filenames)
centers = []
image = []
scihdr0 = fits.getheader(scifile,0)
scihdr1 = fits.getheader(scifile,1)
for i in range(nfiles):
image.append(fits.getdata(filenames[i].rstrip()))
#hdr0 = fits.getheader(filenames[i],0)
#hdr1 = fits.getheader(filenames[i],1)
calhdr0 = fits.getheader(filenames[i].rstrip(),0)
calhdr1 = fits.getheader(filenames[i].rstrip(),1)
#Calculate centroid, correct for difference in image size, if any
centers.append(centroid.ctrgauss(image[i], guess=guess, trim=trim) - (image[i].shape[0]-ny)/2.)
xoffset = scihdr1['CRPIX1'] - calhdr1['CRPIX1'] + (scihdr0['POSTARG1'] - calhdr0['POSTARG1'])/0.135
yoffset = scihdr1['CRPIX2'] - calhdr1['CRPIX2'] + (scihdr0['POSTARG2'] - calhdr0['POSTARG2'])/0.121
centers[i][0] += yoffset
centers[i][1] += xoffset
print("Adding "+str(xoffset)+','+str(yoffset)+" pixels to x,y centroid position.")
"""
if calhdr0['APERTURE'] == 'IRSUB256':
#centers[i][1] -= 111
#xref_correct = xref + CRPIX1_spec - CRPIX1_im + (POSTARG1_spec - POSTARG1_im)/0.135
#offset = scihdr1['CRPIX1'] - calhdr1['CRPIX1'] + (scihdr0['POSTARG1'] - calhdr0['POSTARG1'])/0.135
#centers[i][1] += offset
xoffset = scihdr1['CRPIX1'] - calhdr1['CRPIX1'] + (scihdr0['POSTARG1'] - calhdr0['POSTARG1'])/0.135
yoffset = scihdr1['CRPIX2'] - calhdr1['CRPIX2'] + (scihdr0['POSTARG2'] - calhdr0['POSTARG2'])/0.121
centers[i][0] += yoffset
centers[i][1] += xoffset
print("****WARNING: Direct image uses IRSUB256, adding "+str(xoffset)+','+str(yoffset)+" pixels to x,y position.")
if calhdr0['APERTURE'] == 'IRSUB512':
#centers[i][1] -= 111
#xref_correct = xref + CRPIX1_spec - CRPIX1_im + (POSTARG1_spec - POSTARG1_im)/0.135
xoffset = scihdr1['CRPIX1'] - calhdr1['CRPIX1'] + (scihdr0['POSTARG1'] - calhdr0['POSTARG1'])/0.135
yoffset = scihdr1['CRPIX2'] - calhdr1['CRPIX2'] + (scihdr0['POSTARG2'] - calhdr0['POSTARG2'])/0.121
centers[i][0] += yoffset
centers[i][1] += xoffset
print("****WARNING: Direct image uses IRSUB512, adding "+str(xoffset)+','+str(yoffset)+" pixels to x,y position.")
"""
return centers, image
def groupFrames(dates):
'''
Group frames by orbit and batch number
Parameters
----------
dates : Time in days
exptime : exposure time in seconds
'''
n_frames = len(dates)
framenum = np.zeros(n_frames)
batchnum = np.zeros(n_frames)
orbitnum = np.zeros(n_frames)
frame = 0
batch = 0
orbit = 0
framegap = np.median(np.ediff1d(dates))
orbitgap = np.max(np.ediff1d(dates))
for i in range(1,n_frames):
if dates[i]-dates[i-1] < 2*framegap:
#New frames, same batch, same orbit
frame += 1
elif dates[i]-dates[i-1] > 0.5*orbitgap:
            #Reset frame, reset batch, new orbit
frame = 0
batch = 0
orbit += 1
else: #dates[i]-dates[i-1] > 3*exptime[i]/86400.:
#Reset frame, new batch, same orbit
frame = 0
batch += 1
framenum[i] = frame
batchnum[i] = batch
orbitnum[i] = orbit
return framenum, batchnum, orbitnum
def calcTrace(x, centroid, grism):
'''
Calculates the WFC3 trace given the position of the direct image in physical pixels.
Parameters
----------
x : physical pixel values along dispersion direction over which the trace is calculated
centroid : [y,x] pair describing the centroid of the direct image
Returns
-------
y : computed trace
History
-------
Initial version by LK
Modified by <NAME> November 2012
'''
yref, xref = centroid
if isinstance(yref, float) == False:
yref = yref[:,np.newaxis]
x = x[np.newaxis]
if grism == 'G141':
#WFC3-2009-17.pdf
#Table 1: Field dependent trace descriptions for G141.
#Term a0 a1(X) a2(Y) a3(X^2) a4(X*Y) a5(Y^2)
DYDX_A_0 = [1.96882E+00, 9.09159E-05, -1.93260E-03]
DYDX_A_1 = [1.04275E-02, -7.96978E-06, -2.49607E-06, 1.45963E-09, 1.39757E-08, 4.84940E-10]
elif grism == 'G102':
#WFC3-2009-18.pdf
#Table 1: Field dependent trace descriptions for G102.
#Term a0 a1(X) a2(Y) a3(X^2) a4(X*Y) a5(Y^2)
DYDX_A_0 = [-3.55018E-01, 3.28722E-05, -1.44571E-03]
DYDX_A_1 = [ 1.42852E-02, -7.20713E-06, -2.42542E-06, 1.18294E-09, 1.19634E-08, 6.17274E-10
]
else:
print("Unknown filter/grism: " + grism)
return 0
DYDX_0 = DYDX_A_0[0] + DYDX_A_0[1]*xref + DYDX_A_0[2]*yref
DYDX_1 = DYDX_A_1[0] + DYDX_A_1[1]*xref + DYDX_A_1[2]*yref + \
DYDX_A_1[3]*xref**2 + DYDX_A_1[4]*xref*yref + DYDX_A_1[5]*yref**2
y = DYDX_0 + DYDX_1*(x-xref) + yref
    return y
def calibrateLambda(x, centroid, grism):
'''
Calculates coefficients for the dispersion solution
Parameters
----------
x : physical pixel values along dispersion direction over which the wavelength is calculated
centroid : [y,x] pair describing the centroid of the direct image
Returns
-------
y : computed wavelength values
History
-------
Initial version by LK
Modified by <NAME> November 2012
'''
yref, xref = centroid
if isinstance(yref, float) == False:
yref = yref[:,np.newaxis]
x = x[np.newaxis]
if grism == 'G141':
#WFC3-2009-17.pdf
#Table 5: Field dependent wavelength solution for G141.
#Term a0 a1(X) a2(Y) a3(X^2) a4(X*Y) a5(Y^2)
DLDP_A_0 = [8.95431E+03, 9.35925E-02, 0.0, 0.0, 0.0, 0.0]
DLDP_A_1 = [4.51423E+01, 3.17239E-04, 2.17055E-03, -7.42504E-07, 3.48639E-07, 3.09213E-07]
elif grism == 'G102':
#WFC3-2009-18.pdf
#Table 5: Field dependent wavelength solution for G102.
#FINDME: y^2 term not given in Table 5, assuming 0.
#Term a0 a1(X) a2(Y) a3(X^2) a4(X*Y) a5(Y^2)
DLDP_A_0 = [6.38738E+03, 4.55507E-02, 0.0]
DLDP_A_1 = [2.35716E+01, 3.60396E-04, 1.58739E-03, -4.25234E-07, -6.53726E-08, 0.0]
else:
print("Unknown filter/grism: " + grism)
return 0
DLDP_0 = DLDP_A_0[0] + DLDP_A_0[1]*xref + DLDP_A_0[2]*yref
DLDP_1 = DLDP_A_1[0] + DLDP_A_1[1]*xref + DLDP_A_1[2]*yref + \
DLDP_A_1[3]*xref**2 + DLDP_A_1[4]*xref*yref + DLDP_A_1[5]*yref**2
y = DLDP_0 + DLDP_1*(x-xref) + yref
return y
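# Usage sketch tying calcTrace and calibrateLambda together (the centroid below
# is hypothetical; it would normally come from imageCentroid above):
#   xpix = np.arange(0, 256, dtype=float)
#   cen = [148.2, 73.5]                       # [y, x] direct-image centroid
#   trace = calcTrace(xpix, cen, 'G141')      # spectral trace y positions
#   wave = calibrateLambda(xpix, cen, 'G141') # dispersion solution (Angstroms)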
# Make master flat fields
def makeflats(flatfile, wave, xwindow, ywindow, flatoffset, n_spec, ny, nx, sigma=5, isplots=0):
'''
Makes master flatfield image and new mask for WFC3 data.
Parameters
----------
flatfile : List of files containing flatfiles images
wave : wavelengths
xwindow : Array containing image limits in wavelength direction
ywindow : Array containing image limits in spatial direction
n_spec : Number of spectra
sigma : Sigma rejection level
Returns
-------
flat_master : Single master flatfield image
mask_master : Single bad-pixel mask image
History
-------
Written by <NAME> November 2012
'''
# Read in flat frames
hdulist = fits.open(flatfile)
flat_mhdr = hdulist[0].header
#print(hdulist[0].data)
wmin = float(flat_mhdr['wmin'])/1e4
wmax = float(flat_mhdr['wmax'])/1e4
#nx = flat_mhdr['naxis1']
#ny = flat_mhdr['naxis2']
# Build flat field, only compute for subframe
flat_master = []
mask_master = []
for i in range(n_spec):
# Read and assemble flat field
# Select windowed region containing the data
x = (wave[i] - wmin)/(wmax - wmin)
#print("Extracting flat field region:")
#print(ywindow[i][0]+flatoffset[i][0],ywindow[i][1]+flatoffset[i][0],xwindow[i][0]+flatoffset[i][1],xwindow[i][1]+flatoffset[i][1])
ylower = int(ywindow[i][0]+flatoffset[i][0])
yupper = int(ywindow[i][1]+flatoffset[i][0])
xlower = int(xwindow[i][0]+flatoffset[i][1])
xupper = int(xwindow[i][1]+flatoffset[i][1])
#flat_window += hdulist[j].data[ylower:yupper,xlower:xupper]*x**j
if flatfile[-19:] == 'sedFFcube-both.fits':
#sedFFcube-both
flat_window = hdulist[1].data[ylower:yupper,xlower:xupper]
for j in range(2,len(hdulist)):
flat_window += hdulist[j].data[ylower:yupper,xlower:xupper]*x**(j-1)
else:
#WFC3.IR.G141.flat.2
flat_window = hdulist[0].data[ylower:yupper,xlower:xupper]
for j in range(1,len(hdulist)):
#print(j)
flat_window += hdulist[j].data[ylower:yupper,xlower:xupper]*x**j
# Initialize bad-pixel mask
mask_window = np.ones(flat_window.shape,dtype=np.float32)
#mask_window[ywindow[i][0]:ywindow[i][1],xwindow[i][0]:xwindow[i][1]] = 1
'''
# Populate bad pixel submask where flat > sigma*std
flat_mean = np.mean(subflat)
flat_std = np.std(subflat)
#mask[np.where(np.abs(subflat-flat_mean) > sigma*flat_std)] = 0
# Mask bad pixels in subflat by setting to zero
subflat *= mask
'''
"""
# Normalize flat by taking out the spectroscopic effect
# Not fitting median spectrum trace, using straight median instead
# flat_window /= np.median(flat_window, axis=0)
medflat = np.median(flat_window, axis=0)
fitmedflat = smooth.smooth(medflat, 15)
if isplots >= 3:
plt.figure(1009)
plt.clf()
plt.suptitle("Median Flat Frame With Best Fit")
plt.title(str(i))
plt.plot(medflat, 'bo')
plt.plot(fitmedflat, 'r-')
#plt.savefig()
plt.pause(0.5)
flat_window /= fitmedflat
flat_norm = flat_window / np.median(flat_window[np.where(flat_window <> 0)])
"""
flat_norm = flat_window
if sigma != None and sigma > 0:
# Reject points from flat and flag them in the mask
#Points that are outliers, do this for the high and low sides separately
# 1. Reject points < 0
index = np.where(flat_norm < 0)
flat_norm[index] = 1.
mask_window[index] = 0
# 2. Reject outliers from low side
ilow = np.where(flat_norm < 1)
            dbl = np.concatenate((flat_norm[ilow],1+(1-flat_norm[ilow]))) #Make distribution symmetric about 1
std = 1.4826*np.median(np.abs(dbl - np.median(dbl))) #MAD
ibadpix = np.where((1-flat_norm[ilow]) > sigma*std)
flat_norm[ilow[0][ibadpix],ilow[1][ibadpix]] = 1.
mask_window[ilow[0][ibadpix],ilow[1][ibadpix]] = 0
# 3. Reject outliers from high side
ihi = np.where(flat_norm > 1)
            dbl = np.concatenate((flat_norm[ihi],2-flat_norm[ihi])) #Make distribution symmetric about 1
std = 1.4826*np.median(np.abs(dbl - np.median(dbl))) #MAD
ibadpix = np.where((flat_norm[ihi]-1) > sigma*std)
flat_norm[ihi[0][ibadpix],ihi[1][ibadpix]] = 1.
mask_window[ihi[0][ibadpix],ihi[1][ibadpix]] = 0
#Put the subframes back in the full frames
flat_new = np.ones((ny,nx),dtype=np.float32)
mask = np.zeros((ny,nx),dtype=np.float32)
flat_new[ywindow[i][0]:ywindow[i][1],xwindow[i][0]:xwindow[i][1]] = flat_norm
mask [ywindow[i][0]:ywindow[i][1],xwindow[i][0]:xwindow[i][1]] = mask_window
flat_master.append(flat_new)
mask_master.append(mask)
return flat_master, mask_master
# Make master flat fields
def makeBasicFlats(flatfile, xwindow, ywindow, flatoffset, ny, nx, sigma=5, isplots=0):
'''
Makes master flatfield image (with no wavelength correction) and new mask for WFC3 data.
Parameters
----------
flatfile : List of files containing flatfiles images
xwindow : Array containing image limits in wavelength direction
ywindow : Array containing image limits in spatial direction
n_spec : Number of spectra
sigma : Sigma rejection level
Returns
-------
flat_master : Single master flatfield image
mask_master : Single bad-pixel mask image
History
-------
Written by <NAME> November 2012
Removed wavelength dependence February 2018
'''
# Read in flat frames
hdulist = pf.open(flatfile)
#flat_mhdr = hdulist[0].header
#wmin = float(flat_mhdr['wmin'])/1e4
#wmax = float(flat_mhdr['wmax'])/1e4
#nx = flat_mhdr['naxis1']
#ny = flat_mhdr['naxis2']
# Build flat field, only compute for subframe
flat_master = []
mask_master = []
# Read and assemble flat field
# Select windowed region containing the data
#x = (wave[i] - wmin)/(wmax - wmin)
ylower = int(ywindow[0]+flatoffset[0])
yupper = int(ywindow[1]+flatoffset[0])
xlower = int(xwindow[0]+flatoffset[1])
xupper = int(xwindow[1]+flatoffset[1])
if flatfile[-19:] == 'sedFFcube-both.fits':
#sedFFcube-both
flat_window = hdulist[1].data[ylower:yupper,xlower:xupper]
else:
#WFC3.IR.G141.flat.2
flat_window = hdulist[0].data[ylower:yupper,xlower:xupper]
# Initialize bad-pixel mask
mask_window = np.ones(flat_window.shape,dtype=np.float32)
#mask_window[ywindow[i][0]:ywindow[i][1],xwindow[i][0]:xwindow[i][1]] = 1
flat_norm = flat_window
if sigma != None and sigma > 0:
# Reject points from flat and flag them in the mask
# Points that are outliers, do this for the high and low sides separately
# 1. Reject points < 0
index = np.where(flat_norm < 0)
flat_norm[index] = 1.
mask_window[index] = 0
# 2. Reject outliers from low side
ilow = np.where(flat_norm < 1)
        dbl = np.concatenate((flat_norm[ilow],1+(1-flat_norm[ilow]))) #Make distribution symmetric about 1
std = 1.4826*np.median(np.abs(dbl - np.median(dbl))) #MAD
ibadpix = np.where((1-flat_norm[ilow]) > sigma*std)
flat_norm[ilow[0][ibadpix],ilow[1][ibadpix]] = 1.
mask_window[ilow[0][ibadpix],ilow[1][ibadpix]] = 0
# 3. Reject outliers from high side
ihi = np.where(flat_norm > 1)
        dbl = np.concatenate((flat_norm[ihi],2-flat_norm[ihi])) #Make distribution symmetric about 1
std = 1.4826*np.median(np.abs(dbl - np.median(dbl))) #MAD
ibadpix = np.where((flat_norm[ihi]-1) > sigma*std)
flat_norm[ihi[0][ibadpix],ihi[1][ibadpix]] = 1.
mask_window[ihi[0][ibadpix],ihi[1][ibadpix]] = 0
#Put the subframes back in the full frames
flat_new = np.ones((ny,nx),dtype=np.float32)
mask = np.zeros((ny,nx),dtype=np.float32)
flat_new[ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]] = flat_norm
mask [ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]] = mask_window
flat_master.append(flat_new)
mask_master.append(mask)
return flat_master, mask_master
# Calculate slitshifts
def calc_slitshift2(spectrum, xrng, ywindow, xwindow, width=5, deg=1):
'''
    Calculate horizontal shift to correct tilt in data using the spectrum.
History
-------
Written by <NAME> July 2014
'''
ny, nx = spectrum.shape
# Determine spectrum boundaries on detector along y
ind = np.where(spectrum[:,nx//2] > np.mean(spectrum[:,nx//2]))
# Select smaller subset for cross correlation to ensure good signal
ystart = np.min(ind)+5
yend = np.max(ind)-5
subspec = spectrum[ystart:yend,xwindow[0]:xwindow[1]]
subny,subnx = subspec.shape
drift = np.zeros(subny)
# Create reference spectrum that is slightly smaller for 'valid' cross correlation
ref_spec = subspec[subny//2-1,5:-5]
ref_spec -= np.mean(ref_spec[np.where(np.isnan(ref_spec) == False)])
# Perform cross correlation for each row
for h in range(subny):
fit_spec = subspec[h]
fit_spec -= np.mean(fit_spec[np.where(np.isnan(fit_spec) == False)])
vals = np.correlate(ref_spec, fit_spec, mode='valid')
params, err = g.fitgaussian(vals, guess=[width/5., width*1., vals.max()-np.median(vals)])
drift[h] = len(vals)/2 - params[1]
# Fit a polynomial to shifts, evaluate
shift_values = drift
yfit = range(ystart,yend)
shift_coeffs = np.polyfit(yfit, shift_values, deg=deg)
shift_models = np.polyval(shift_coeffs, range(ywindow[0],ywindow[1]))
return shift_models, shift_values, yfit
#return ev
# Estimate slit shift
def calc_slitshift(wavegrid, xrng, refwave=None, width=3, deg=2):
'''
Calculates horizontal shift to correct tilt in data using wavelength.
Parameters
----------
Returns
-------
History
-------
Written by <NAME> Nov 2013
'''
n_spec = len(wavegrid)
shift_models = []
shift_values = []
for i in range(n_spec):
ny, nx = wavegrid[i].shape
loc = np.zeros(ny)
if refwave == None:
refwave = np.mean(wavegrid[i])
# Interpolate to find location of reference wavelength
for h in range(ny):
tck = spi.splrep(wavegrid[i][h],xrng[i],s=0,k=3)
loc[h] = spi.splev(refwave,tck)
# Fit a polynomial to shifts, evaluate
shift = loc - loc.mean()
shift_coeffs = np.polyfit(range(ny), shift, deg=deg)
shift_models.append(np.polyval(shift_coeffs, range(ny)))
shift_values.append(shift)
return shift_models, shift_values
"""
def correct_slitshift(reddata, mask, data_hdr, slitshift, window, isreverse=False):
'''
Old routine no longer used.
'''
# Create slit-shift-corrected indices of region containing data
ny, nx = np.shape(reddata)
location = find_data(data_hdr)
subny = window[1] - window[0]
subnx = location[1] - location[0]
xgrid, ygrid = np.meshgrid(range(location[0],location[1]), range(window[0],window[1]))
if isreverse:
xgrid = (xgrid.T - slitshift).T
else:
xgrid = (xgrid.T + slitshift).T
# Interpolate reduced data to account for slit shift
spline = spi.RectBivariateSpline(range(ny), range(nx), reddata, kx=1, ky=1, s=0)
# Evaluate interpolated array within region containing data
subdata = spline.ev(ygrid.flatten(), xgrid.flatten()).reshape(subny,subnx)
# Do the same for the bad pixel mask
foomask = np.zeros((ny,nx))
foomask[window[0]:window[1],location[0]:location[1]] = mask[window[0]:window[1],location[0]:location[1]]
spline = spi.RectBivariateSpline(range(ny), range(nx), foomask, kx=1, ky=1, s=0)
submask = spline.ev(ygrid.flatten(), xgrid.flatten()).reshape(subny,subnx).astype(int)
return subdata, submask
"""
def correct_slitshift2(data, slitshift, mask=None, isreverse=False):
'''
Applies horizontal shift to correct tilt in data.
Parameters
----------
Returns
-------
History
-------
Written by <NAME> June 2012
'''
# Create slit-shift-corrected indices
ny, nx = np.shape(data)
xgrid, ygrid = np.meshgrid(range(nx), range(ny))
if isreverse:
xgrid = (xgrid.T - slitshift).T
else:
xgrid = (xgrid.T + slitshift).T
# Interpolate reduced data to account for slit shift
spline = spi.RectBivariateSpline(range(ny), range(nx), data, kx=3, ky=3)
# Evaluate interpolated array within region containing data
cordata = spline.ev(ygrid.flatten(), xgrid.flatten()).reshape(ny,nx)
# Do the same for the bad pixel mask
if mask != None:
spline = spi.RectBivariateSpline(range(ny), range(nx), mask, kx=3, ky=3)
#cormask = np.round(spline.ev(ygrid.flatten(), xgrid.flatten()).reshape(ny,nx),2).astype(int)
cormask = spline.ev(ygrid.flatten(), xgrid.flatten()).reshape(ny,nx)
cormask[np.where(cormask >= 0.9)] = 1
return cordata, cormask.astype(int)
else:
return cordata
# Calulate drift2D
#import image_registration as imr
def calcDrift2D(im1, im2, m, n, n_files):
try:
sys.stdout.write('\r'+str(m+1)+'/'+str(n_files))
sys.stdout.flush()
except:
pass
drift2D = imr.chi2_shift(im1, im2, boundary='constant', nthreads=1,
zeromean=False, return_error=False)
return (drift2D, m, n)
# Fit background
def fitbg(diffdata, diffmask, x1, x2, bgdeg, p3thresh, isplots, m, n, n_files):
try:
sys.stdout.write('\r'+str(m+1)+'/'+str(n_files))
sys.stdout.flush()
except:
pass
bg, mask = optspex.fitbg(diffdata, diffmask, x1, x2, deg=bgdeg,
threshold=p3thresh, isrotate=2, isplots=isplots)
return (bg, mask, m, n)
# Replace bad pixels
def replacePixels(shiftdata, shiftmask, m, n, i, j, k, ktot, ny, nx, sy, sx):
try:
sys.stdout.write('\r'+str(k+1)+'/'+str(ktot))
sys.stdout.flush()
except:
pass
#Pad image initially with zeros
newim = np.zeros(np.array(shiftdata.shape) + 2*np.array((ny, nx)))
newim[ny:-ny, nx:-nx] = shiftdata
#Calculate kernel
gk = smoothing.gauss_kernel_mask2((ny,nx), (sy,sx), (m,i), shiftmask)
shift = np.sum(gk * newim[m:m+2*ny+1, i:i+2*nx+1])
return (shift, m, n, i, j)
# Calculate spectrum
def calcSpectrum(filename, mask, bias_master, flat_master, slitshift, xwindow, ywindow, gain, v0, spec_width, fitbghw, m, n, diffthresh=5, p3thresh=5, p5thresh=10, p7thresh=10, fittype='smooth', window_len=150, deg=3, expand=1, isplots=False, eventdir='.', bgdeg=1):
'''
Driver routine for optimal spectral extraction
Parameters
----------
deg : Degree of polynomial fit of profile
isplots : Set True to produce plots
Returns
-------
History
-------
Written by <NAME> November 2012
'''
"""
filename = ev.obj_list[0]
bias_master = ev.bias_master
flat_master = flat_master[0]
xwindow = ev.xwindow[0]
ywindow = ev.ywindow[0]
mask = mask[0][0]
expand = ev.expand
spec_width = ev.spec_width
slitshift = ev.slitshift
gain = ev.gain
v0 = ev.v0
"""
sys.stdout.write('\r'+str(m+1))
sys.stdout.flush()
#Read file
frame, frameerr = hst.read(filename, returnHdr=False)
# Calculate reduced image
reddata = ((frame - bias_master)/flat_master)[0] #.squeeze()
nreads = reddata.shape[0]
subny = ywindow[1] - ywindow[0]
subnx = xwindow[1] - xwindow[0]
subdata = reddata[:,ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
#suberr = frameerr.squeeze()[:,ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
suberr = frameerr[0,:,ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
submask = mask[ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
if nreads > 1:
# Subtract pairs of subframes
diffdata = np.zeros((nreads-1,subny,subnx))
diffmask = np.zeros((diffdata.shape))
for i in range(nreads-1):
diffmask[i] = np.copy(submask)
diffmask[i][np.where(suberr[i ] > diffthresh*np.std(suberr[i ]))] = 0
diffmask[i][np.where(suberr[i+1] > diffthresh*np.std(suberr[i+1]))] = 0
diffdata[i] = (subdata[i+1]-subdata[i])*diffmask[i]
else:
# FLT data has already been differenced
nreads = 2
diffdata = subdata
diffmask = np.zeros((diffdata.shape))
        diffmask[0] = np.copy(submask)
"""Test EKF implementation with CartPole"""
import numpy as np
import matplotlib.pyplot as plt
import random
from systems.cart_pole import (
CartPole, PendulumTipPosition, PendulumTipVelocity)
from estimation.extended_kf import ExtendedKF
from utils import add_gaussian_noise, simulate_system, wrap_angles
# Simulate system to extract simulated measurements and ground truth
cp = CartPole()
t_final = 15
s0 = np.zeros(cp.n_state)
class BangBang:
def __init__(self, u_max: float, t_final: float):
self._u_max = u_max
self._switch_pt = t_final/2
def __call__(self, t, state):
if t < self._switch_pt:
return self._u_max
else:
return -self._u_max
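# With u_max=1 and t_final=15 the policy switches sign halfway through:
# BangBang(1, 15)(3.0, None) returns 1, while BangBang(1, 15)(10.0, None) returns -1.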
policy = BangBang(1, t_final)
simulated_sys = simulate_system(cp, s0, policy, t_final, n_steps = 150)
# define x0 and P0
x0 = np.array([.2, 0, 0, 0])
from __future__ import print_function
import inspect
import re
import numpy as np
import cv2
import os
import collections
try:
import pickle as pickle
except ImportError:
import pickle
def debug_trace():
from PyQt4.QtCore import pyqtRemoveInputHook
from pdb import set_trace
pyqtRemoveInputHook()
set_trace()
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
def PickleLoad(file_name):
try:
with open(file_name, 'rb') as f:
data = pickle.load(f)
except UnicodeDecodeError:
with open(file_name, 'rb') as f:
data = pickle.load(f, encoding='latin1')
return data
def PickleSave(file_name, data):
with open(file_name, "wb") as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
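# Round-trip usage sketch (the cache file name is just an example):
#   PickleSave('cache.pkl', {'z': [0.1, 0.2]})
#   data = PickleLoad('cache.pkl')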
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def interp_z(z0, z1, ratio, interp='linear'):
if interp == 'linear':
z_t = (1 - ratio) * z0 + ratio * z1
if interp == 'slerp':
N = len(z0)
z_t = []
for i in range(N):
z0_i = z0[i]
z1_i = z1[i]
z0_n = z0_i / np.linalg.norm(z0_i)
            z1_n = z1_i / np.linalg.norm(z1_i)
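            # The remainder of the slerp branch is not shown here; a typical
            # spherical interpolation step (assumption, not the original code):
            #   omega = np.arccos(np.clip(np.dot(z0_n, z1_n), -1.0, 1.0))
            #   z_t.append((np.sin((1.0 - ratio)*omega)*z0_i
            #               + np.sin(ratio*omega)*z1_i) / np.sin(omega))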
"""
Library Features:
Name: lib_dryes_downloader_geo
Author(s): <NAME> (<EMAIL>), <NAME> (<EMAIL>)
Date: '20210929'
Version: '1.0.0'
"""
#################################################################################
# Library
import os
import logging
from osgeo import gdal, gdalconst
import numpy as np
import rasterio
import matplotlib.pylab as plt
from lib_dryes_downloader_hsaf_generic import create_darray_2d
#################################################################################
logging.getLogger("rasterio").setLevel(logging.WARNING)
# -------------------------------------------------------------------------------------
# Method to read tiff file
def reproject_file_tiff(file_name_in, file_name_out,
file_wide_out, file_high_out, file_geotrans_out, file_proj_out):
dset_tiff_out = gdal.GetDriverByName('GTiff').Create(
file_name_out, file_wide_out, file_high_out, 1, gdalconst.GDT_Float32)
dset_tiff_out.SetGeoTransform(file_geotrans_out)
dset_tiff_out.SetProjection(file_proj_out)
dset_tiff_in = gdal.Open(file_name_in, gdalconst.GA_ReadOnly)
dset_proj_in = dset_tiff_in.GetProjection()
dset_geotrans_in = dset_tiff_in.GetGeoTransform()
dset_data_in = dset_tiff_in.ReadAsArray()
dset_band_in = dset_tiff_in.GetRasterBand(1)
# Reproject from input file to output file set with out information
gdal.ReprojectImage(dset_tiff_in, dset_tiff_out, dset_proj_in, file_proj_out,
gdalconst.GRA_NearestNeighbour)
return dset_tiff_out
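# Usage sketch (hypothetical paths; the target grid parameters would normally be
# taken from a reference raster, e.g. one opened with read_file_raster below):
#   dset = reproject_file_tiff('in.tif', 'out.tif', wide, high, geotrans, proj)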
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get a raster file
def read_file_raster(file_name, file_proj='epsg:4326', var_name='land',
coord_name_x='Longitude', coord_name_y='Latitude',
dim_name_x='Longitude', dim_name_y='Latitude', no_data_default=-9999.0):
if os.path.exists(file_name):
if (file_name.endswith('.txt') or file_name.endswith('.asc')) or file_name.endswith('.tif'):
crs = rasterio.crs.CRS({"init": file_proj})
with rasterio.open(file_name, mode='r+') as dset:
dset.crs = crs
bounds = dset.bounds
no_data = dset.nodata
res = dset.res
transform = dset.transform
data = dset.read()
proj = dset.crs.wkt
values = data[0, :, :]
if (no_data is None) or (np.isnan(no_data)):
no_data = no_data_default
decimal_round = 7
center_right = bounds.right - (res[0] / 2)
center_left = bounds.left + (res[0] / 2)
center_top = bounds.top - (res[1] / 2)
center_bottom = bounds.bottom + (res[1] / 2)
            lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]))
import numpy as np
from matplotlib import pyplot as plt
import glob
from scipy.interpolate import PchipInterpolator
plt.style.use("../template.mplstyle")
# purple - green - darkgoldenrod - blue - red
colors = ['purple', '#306B37', 'darkgoldenrod', '#3F7BB6', '#BF4145']
#################################################################
def ctr_level(hist, lvl, infinite=False):
hist.sort()
cum_hist = np.cumsum(hist[::-1])
cum_hist = cum_hist / cum_hist[-1]
alvl = np.searchsorted(cum_hist, lvl)
clist = [0]+[hist[-i] for i in alvl]
if not infinite:
return clist[1:]
return clist
def get_hist(data, num_bins=40, weights=[None]):
if not any(weights):
weights = np.ones(len(data))
hist, bin_edges = np.histogram(data, bins=num_bins, weights=weights)
bin_centres = 0.5*(bin_edges[1:]+bin_edges[:-1])
return hist, bin_edges, bin_centres
def plot_hist(data, ax, num_bins=30, weights=[None], color=None):
if not any(weights):
weights = np.ones(len(data))
if color == None:
color="darkblue"
hist, bin_edges, bin_centres = get_hist(data, num_bins=num_bins, weights=weights)
ax.plot(bin_centres, hist/max(hist), color=color, lw=2)
ax.step(bin_centres, hist/max(hist), where='mid', color=color)
pkarray = np.linspace(min(bin_centres), max(bin_centres), 1000)
interpolator = PchipInterpolator(bin_centres, hist)(pkarray)
ax.plot(pkarray, interpolator/max(interpolator), color="red", lw=2)
#################################################################
UVLF_MPS = []
for filepath in glob.iglob('../../Data/UVLF_HST_ST_model1_powerspectrum/*__*.txt'):
data = np.loadtxt(filepath)
UVLF_MPS.append(data)
UVLF_MPS = np.vstack(np.array(UVLF_MPS))
names = [r'$P(k=1.06\,\mathrm{Mpc}^{-1})$', r'$P(k=1.5\,\mathrm{Mpc}^{-1})$', r'$P(k=2\,\mathrm{Mpc}^{-1})$', r'$P(k=4.7\,\mathrm{Mpc}^{-1})$', r'$P(k=6\,\mathrm{Mpc}^{-1})$', r'$P(k=8\,\mathrm{Mpc}^{-1})$']
fig = plt.figure(figsize=(20,12))
ax1 = plt.subplot(231)
ax2 = plt.subplot(232)
ax3 = plt.subplot(233)
ax4 = plt.subplot(234)
ax5 = plt.subplot(235)
ax6 = plt.subplot(236)
plot_hist(UVLF_MPS[:,-1]*10**UVLF_MPS[:,17], ax1, 30, UVLF_MPS[:,0], "black")
plot_hist(UVLF_MPS[:,-2]*10**UVLF_MPS[:,17], ax2, 30, UVLF_MPS[:,0], "black")
plot_hist(UVLF_MPS[:,-3]*10**UVLF_MPS[:,17], ax3, 30, UVLF_MPS[:,0], "black")
plot_hist(UVLF_MPS[:,-4]*10**UVLF_MPS[:,16], ax4, 30, UVLF_MPS[:,0], "black")
plot_hist(UVLF_MPS[:,-5]*10**UVLF_MPS[:,16], ax5, 30, UVLF_MPS[:,0], "black")
plot_hist(UVLF_MPS[:,-6]*10**UVLF_MPS[:,16], ax6, 30, UVLF_MPS[:,0], "black")
ax1.set_xlabel(names[-1])
ax2.set_xlabel(names[-2])
ax3.set_xlabel(names[-3])
ax4.set_xlabel(names[-4])
ax5.set_xlabel(names[-5])
ax6.set_xlabel(names[-6])
data_for_lims = UVLF_MPS[:,-6] * 10**UVLF_MPS[:,16]
hist, bin_edges, bin_centres = get_hist(data_for_lims, num_bins=30, weights=UVLF_MPS[:,0])
pkarray = np.linspace(min(bin_centres), max(bin_centres), 1000)
interpolator = PchipInterpolator(bin_centres, hist)(pkarray)
levels = ctr_level(interpolator.copy(), [0.68])
pos = [np.searchsorted(interpolator[:np.argmax(interpolator)], levels)[0]-1, -np.searchsorted((interpolator[::-1])[:np.argmax(interpolator[::-1])], levels)[0]]
ax6.axvline(pkarray[pos[0]], linestyle="dashed", color="black", alpha=0.6)
ax6.axvline(pkarray[pos[1]], linestyle="dashed", color="black", alpha=0.6)
print("bin1 frequent: ", pkarray[np.argmax(interpolator)])
print("bin1 mean - lower (68%): ", pkarray[np.argmax(interpolator)] - pkarray[pos[0]])
print("bin1 upper - mean (68%): ", pkarray[pos[1]] - pkarray[np.argmax(interpolator)])
# pylint: disable=missing-function-docstring, missing-module-docstring/
import pytest
import numpy as np
from numpy.random import randint
from pyccel.epyccel import epyccel
from modules import arrays
#==============================================================================
# TEST: 1D ARRAYS OF INT-32
#==============================================================================
def test_array_int32_1d_scalar_add(language):
f1 = arrays.array_int32_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_sub(language):
f1 = arrays.array_int32_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_mul(language):
f1 = arrays.array_int32_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_div(language):
f1 = arrays.array_int32_1d_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_idiv(language):
f1 = arrays.array_int32_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add(language):
f1 = arrays.array_int32_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub(language):
f1 = arrays.array_int32_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_mul(language):
f1 = arrays.array_int32_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_idiv(language):
f1 = arrays.array_int32_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add_augassign(language):
f1 = arrays.array_int32_1d_add_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub_augassign(language):
f1 = arrays.array_int32_1d_sub_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_1(language):
f1 = arrays.array_int_1d_initialization_1
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_2(language):
f1 = arrays.array_int_1d_initialization_2
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_3(language):
f1 = arrays.array_int_1d_initialization_3
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH C ORDERING
#==============================================================================
def test_array_int32_2d_C_scalar_add(language):
f1 = arrays.array_int32_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_sub(language):
f1 = arrays.array_int32_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_mul(language):
f1 = arrays.array_int32_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_idiv(language):
f1 = arrays.array_int32_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_add(language):
f1 = arrays.array_int32_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_sub(language):
f1 = arrays.array_int32_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_mul(language):
f1 = arrays.array_int32_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_idiv(language):
f1 = arrays.array_int32_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH F ORDERING
#==============================================================================
def test_array_int32_2d_F_scalar_add(language):
f1 = arrays.array_int32_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_sub(language):
f1 = arrays.array_int32_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_mul(language):
f1 = arrays.array_int32_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_idiv(language):
f1 = arrays.array_int32_2d_F_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_add(language):
f1 = arrays.array_int32_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_sub(language):
f1 = arrays.array_int32_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_mul(language):
f1 = arrays.array_int32_2d_F_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_idiv(language):
f1 = arrays.array_int32_2d_F_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 1D ARRAYS OF INT-64
#==============================================================================
def test_array_int_1d_scalar_add(language):
f1 = arrays.array_int_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_sub(language):
f1 = arrays.array_int_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_mul(language):
f1 = arrays.array_int_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_idiv(language):
f1 = arrays.array_int_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_add(language):
f1 = arrays.array_int_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_sub(language):
f1 = arrays.array_int_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_mul(language):
f1 = arrays.array_int_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_idiv(language):
f1 = arrays.array_int_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH C ORDERING
#==============================================================================
def test_array_int_2d_C_scalar_add(language):
f1 = arrays.array_int_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_sub(language):
f1 = arrays.array_int_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_mul(language):
f1 = arrays.array_int_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_idiv(language):
f1 = arrays.array_int_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_add(language):
f1 = arrays.array_int_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_sub(language):
f1 = arrays.array_int_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_mul(language):
f1 = arrays.array_int_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_idiv(language):
f1 = arrays.array_int_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_initialization(language):
f1 = arrays.array_int_2d_C_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=int)
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH F ORDERING
#==============================================================================
def test_array_int_2d_F_scalar_add(language):
f1 = arrays.array_int_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_sub(language):
f1 = arrays.array_int_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_mul(language):
f1 = arrays.array_int_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
    x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
    x2 = np.copy(x1)
    a = 5
    f1(x1, a)
    f2(x2, a)
    assert np.array_equal( x1, x2 )
import numpy as np
import paddlex as pdx
from model import Embedding
from deep_sort import NearestNeighborDistanceMetric, Detection, Tracker
__all__ = ['DeepSort']
class DeepSort(object):
def __init__(
self,
det_model_dir,
emb_model_dir,
use_gpu=False,
threshold=0.5,
max_cosine_distance=0.2,
nn_budget=100,
max_iou_distance=0.9,
max_age=70,
n_init=3
):
self.detector = pdx.load_model(det_model_dir)
self.emb = Embedding(emb_model_dir, use_gpu)
self.threshold = threshold
metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
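    # Example construction (model directories are placeholders for exported
    # PaddleX detection and embedding models):
    #   ds = DeepSort('det_model_dir/', 'emb_model_dir/', use_gpu=False)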
def update(self, ori_img, threshold):
self.height, self.width = ori_img.shape[:2]
results = self.detector.predict(ori_img)
tlwh = []
xyxy = []
confidences = []
confid = 0.
cnt = 0
for result in results:
if(result['score']) < threshold:
continue
x1, x2, w, h = result['bbox']
tlwh.append([x1, x2, w, h])
xyxy.append([x1, x2, x1 + w, x2 + h])
confidences.append(result['score'])
confid += result['score']
cnt += 1
        tlwh = np.array(tlwh).astype(int)
        xyxy = np.array(xyxy)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 2020
Class to read and manipulate CryoSat-2 waveform data
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
http://www.numpy.org
http://www.scipy.org/NumPy_for_Matlab_Users
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 08/2020: flake8 compatible binary regular expression strings
Forked 02/2020 from read_cryosat_L1b.py
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import numpy as np
import pointCollection as pc
import netCDF4
import re
import os
class data(pc.data):
np.seterr(invalid='ignore')
def __default_field_dict__(self):
"""
Define the default fields that get read from the CryoSat-2 file
"""
field_dict = {}
field_dict['Location'] = ['days_J2k','Day','Second','Micsec','USO_Corr',
'Mode_ID','SSC','Inst_config','Rec_Count','Lat','Lon','Alt','Alt_rate',
'Sat_velocity','Real_beam','Baseline','ST_ID','Roll','Pitch','Yaw','MCD']
field_dict['Data'] = ['TD', 'H_0','COR2','LAI','FAI','AGC_CH1','AGC_CH2',
'TR_gain_CH1','TR_gain_CH2','TX_Power','Doppler_range','TR_inst_range',
'R_inst_range','TR_inst_gain','R_inst_gain','Internal_phase',
'External_phase','Noise_power','Phase_slope']
field_dict['Geometry'] = ['dryTrop','wetTrop','InvBar','DAC','Iono_GIM',
'Iono_model','ocTideElv','lpeTideElv','olTideElv','seTideElv','gpTideElv',
'Surf_type','Corr_status','Corr_error']
field_dict['Waveform_20Hz'] = ['Waveform','Linear_Wfm_Multiplier',
'Power2_Wfm_Multiplier','N_avg_echoes']
field_dict['METADATA'] = ['MPH','SPH']
return field_dict
def from_dbl(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from binary formats
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
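# For a (hypothetical) product basename such as
#   CS_OFFL_SIR_SAR_1B_20140301T000000_20140301T003000_C001
# the groups above correspond to the mission identifier, file class,
# product type, validity start/stop times and the baseline/version trailer;
# only PRODUCT and BASELINE are used below to select the record layout.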
# CryoSat-2 Mode record sizes
i_size_timestamp = 12
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 125
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# check baseline from file to set i_record_size and allocation function
if (BASELINE == 'C'):
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2 + 6*4 + 3*3*4 + 3*2 + 4*4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_BC_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_BC_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_BC_RW*2 + \
n_SARIN_BC_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baseline C
read_cryosat_variables = self.cryosat_baseline_C
else:
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2+ 6*4 + 3*3*4 + 4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_RW*2 + \
n_SARIN_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baselines A and B
read_cryosat_variables = self.cryosat_baseline_AB
# get dataset MODE from PRODUCT portion of file name
# set record sizes and DS_TYPE for read_DSD function
self.MODE = re.findall(r'(LRM|SAR|SIN)', PRODUCT).pop()
if (self.MODE == 'LRM'):
i_record_size = i_record_size_LRM_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SAR'):
i_record_size = i_record_size_SAR_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SIN'):
i_record_size = i_record_size_SARIN_L1b
DS_TYPE = 'CS_L1B'
# read the input file to get file information
fid = os.open(os.path.expanduser(full_filename),os.O_RDONLY)
file_info = os.fstat(fid)
os.close(fid)
# estimate the number of DSRs from the file size and record size
j_num_DSR = np.int32(file_info.st_size//i_record_size)
# print file information
if verbose:
print(full_filename)
print('{0:d} {1:d} {2:d}'.format(j_num_DSR,file_info.st_size,i_record_size))
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size == file_info.st_size):
print('No Header on file')
print('The number of DSRs is: {0:d}'.format(j_num_DSR))
else:
print('Header on file')
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size != file_info.st_size):
# If there are MPH/SPH/DSD headers
s_MPH_fields = self.read_MPH(full_filename)
j_sph_size = np.int32(re.findall(r'[-+]?\d+',s_MPH_fields['SPH_SIZE']).pop())
s_SPH_fields = self.read_SPH(full_filename, j_sph_size)
# extract information from DSD fields
s_DSD_fields = self.read_DSD(full_filename, DS_TYPE=DS_TYPE)
# extract DS_OFFSET
j_DS_start = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DS_OFFSET']).pop())
# extract number of DSR in the file
j_num_DSR = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['NUM_DSR']).pop())
# check the record size
j_DSR_size = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DSR_SIZE']).pop())
# minimum size is start of the read plus number of records to read
j_check_size = j_DS_start + (j_DSR_size*j_num_DSR)
if verbose:
print('The offset of the DSD is: {0:d} bytes'.format(j_DS_start))
print('The number of DSRs is {0:d}'.format(j_num_DSR))
print('The size of the DSR is {0:d}'.format(j_DSR_size))
# check if invalid file size
if (j_check_size > file_info.st_size):
raise IOError('File size error')
# extract binary data from input CryoSat data file (skip headers)
fid = open(os.path.expanduser(full_filename), 'rb')
cryosat_header = fid.read(j_DS_start)
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# add headers to output dictionary as METADATA
CS_L1b_mds['METADATA'] = {}
CS_L1b_mds['METADATA']['MPH'] = s_MPH_fields
CS_L1b_mds['METADATA']['SPH'] = s_SPH_fields
CS_L1b_mds['METADATA']['DSD'] = s_DSD_fields
# close the input CryoSat binary file
fid.close()
else:
# If there are not MPH/SPH/DSD headers
# extract binary data from input CryoSat data file
fid = open(os.path.expanduser(full_filename), 'rb')
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# close the input CryoSat binary file
fid.close()
# if unpacking the units
if unpack:
CS_l1b_scale = self.cryosat_scaling_factors()
# for each dictionary key
for group in CS_l1b_scale.keys():
# for each variable
for key,val in CS_L1b_mds[group].items():
# check if val is the 20Hz waveform beam variables
if isinstance(val, dict):
# for each waveform beam variable
for k,v in val.items():
# scale variable
CS_L1b_mds[group][key][k] = CS_l1b_scale[group][key][k]*v.copy()
else:
# scale variable
CS_L1b_mds[group][key] = CS_l1b_scale[group][key]*val.copy()
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
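# Minimal usage sketch (hypothetical file name; assumes the class can be
# instantiated without arguments, like other pointCollection readers):
#   d = data().from_dbl('CS_OFFL_SIR_SAR_1B_<start>_<stop>_C001.DBL',
#       unpack=True, verbose=True)
#   print(d.Lat, d.Lon, d.days_J2k)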
def from_nc(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from netCDF4 format data
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx2 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 2 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
print(full_filename) if verbose else None
# get dataset MODE from PRODUCT portion of file name
self.MODE = re.findall(r'(LRM|FDM|SAR|SIN)', PRODUCT).pop()
# read Level-1b CryoSat-2 data from baseline D netCDF4 file
CS_L1b_mds = self.cryosat_baseline_D(full_filename, unpack=unpack)
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
def calc_GPS_time(self, day, second, micsec):
"""
Calculate the GPS time (seconds since Jan 6, 1980 00:00:00)
"""
# TAI time is ahead of GPS by 19 seconds
return (day + 7300.0)*86400.0 + second.astype('f') + micsec/1e6 - 19
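# Sanity check (day may be a scalar; second/micsec are numpy arrays as read above):
# calc_GPS_time(0, np.zeros(1), np.zeros(1)) -> 7300*86400 - 19
# = 630719981.0, i.e. 2000-01-01T00:00:00 TAI expressed as GPS seconds.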
def count_leap_seconds(self, GPS_Time):
"""
Count number of leap seconds that have passed for given GPS times
"""
# GPS times for leap seconds
leaps = [46828800, 78364801, 109900802, 173059203, 252028804, 315187205,
346723206, 393984007, 425520008, 457056009, 504489610, 551750411,
599184012, 820108813, 914803214, 1025136015, 1119744016, 1167264017]
# number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= leap)
if (count > 0):
i_records,i_blocks = np.nonzero(GPS_Time >= leap)
n_leaps[i_records,i_blocks] += 1.0
return n_leaps
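# The table ends with the 2017-01-01 leap second, so any GPS time beyond
# 1167264017 yields n_leaps = 18 (the current GPS-UTC offset in seconds).
# Note GPS_Time is expected to be 2-D (records x blocks) to match the
# 20 Hz time stamps, since np.nonzero is unpacked into two index arrays.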
def read_MPH(self, full_filename):
"""
Read ASCII Main Product Header (MPH) block from an ESA PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# check that first line of header matches PRODUCT
if not bool(re.match(br'PRODUCT\=\"(.*)(?=\")',file_contents[0])):
raise IOError('File does not start with a valid PDS MPH')
# read MPH header text
s_MPH_fields = {}
for i in range(n_MPH_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_MPH_fields
def read_SPH(self, full_filename, j_sph_size):
"""
Read ASCII Specific Product Header (SPH) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# compile regular expression operator for reading headers
rx = re.compile(br'(.*?)\=\"?(.*)',re.VERBOSE)
# check first line of header matches SPH_DESCRIPTOR
if not bool(re.match(br'SPH\_DESCRIPTOR\=',file_contents[n_MPH_lines+1])):
raise IOError('File does not have a valid PDS SPH')
# read SPH header text (no binary control characters)
s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)
and not re.search(br'[^\x20-\x7e]+',li)]
# extract SPH header text
s_SPH_fields = {}
c = 0
while (c < len(s_SPH_lines)):
# check if line is within DS_NAME portion of SPH header
if bool(re.match(br'DS_NAME',s_SPH_lines[c])):
# add dictionary for DS_NAME
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
key = value.decode('utf-8').rstrip()
s_SPH_fields[key] = {}
for line in s_SPH_lines[c+1:c+7]:
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',line)):
# data fields within quotes
dsfield,dsvalue=re.findall(br'(.*?)\=\"(.*)(?=\")',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',line)):
# data fields without quotes
dsfield,dsvalue=re.findall(br'(.*?)\=(.*)',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
# add 6 to counter to go to next entry
c += 6
# use regular expression operators to read headers
elif bool(re.match(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',s_SPH_lines[c])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# add 1 to counter to go to next line
c += 1
# Return block name array to calling function
return s_SPH_fields
def read_DSD(self, full_filename, DS_TYPE=None):
"""
Read ASCII Data Set Descriptors (DSD) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# number of text lines in a DSD header
n_DSD_lines = 8
# Level-1b CryoSat DS_NAMES within files
regex_patterns = []
if (DS_TYPE == 'CS_L1B'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_LRM[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SAR[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SARIN[\s+]*"')
elif (DS_TYPE == 'SIR_L1B_FDM'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_FDM[\s+]*"')
# find the DSD starting line within the SPH header
c = 0
Flag = False
while ((Flag is False) and (c < len(regex_patterns))):
# find the index of the matching DS_NAME pattern within the header
indice = [i for i,line in enumerate(file_contents[n_MPH_lines+1:]) if
re.search(regex_patterns[c],line)]
if indice:
Flag = True
else:
c+=1
# check that a valid index was found within the header
if not indice:
raise IOError('Can not find correct DSD field')
# extract s_DSD_fields info
DSD_START = n_MPH_lines + indice[0] + 1
s_DSD_fields = {}
for i in range(DSD_START,DSD_START+n_DSD_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_DSD_fields
def cryosat_baseline_AB(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baselines A and B
"""
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32,fill_value=0)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
CS_l1b_mds['Location']['Baseline'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Measurement Confidence Data Flags
# Generally the MCD flags indicate problems when set
# If MCD is 0 then no problems or non-nominal conditions were detected
# Serious errors are indicated by setting bit 31
CS_l1b_mds['Location']['MCD'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
CS_l1b_mds['Data'] = {}
# Window Delay reference (two-way) corrected for instrument delays
CS_l1b_mds['Data']['TD'] = np.ma.zeros((n_records,n_blocks),dtype=np.int64)
# H0 Initial Height Word from telemetry
CS_l1b_mds['Data']['H_0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# COR2 Height Rate: on-board tracker height rate over the radar cycle
CS_l1b_mds['Data']['COR2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Coarse Range Word (LAI) derived from telemetry
CS_l1b_mds['Data']['LAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Fine Range Word (FAI) derived from telemetry
CS_l1b_mds['Data']['FAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
# Gain calibration corrections are applied (Sum of AGC stages 1 and 2
# plus the corresponding corrections) (dB/100)
CS_l1b_mds['Data']['AGC_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
# Gain calibration corrections are applied (dB/100)
CS_l1b_mds['Data']['AGC_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Transmit Power in microWatts
CS_l1b_mds['Data']['TX_Power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Doppler range correction: Radial component (mm)
# computed for the component of satellite velocity in the nadir direction
CS_l1b_mds['Data']['Doppler_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: transmit-receive antenna (mm)
# Calibration correction to range on channel 1 computed from CAL1.
CS_l1b_mds['Data']['TR_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: receive-only antenna (mm)
# Calibration correction to range on channel 2 computed from CAL1.
CS_l1b_mds['Data']['R_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: transmit-receive antenna (dB/100)
# Calibration correction to gain on channel 1 computed from CAL1
CS_l1b_mds['Data']['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: receive-only (dB/100)
# Calibration correction to gain on channel 2 computed from CAL1
CS_l1b_mds['Data']['R_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Internal Phase Correction (microradians)
CS_l1b_mds['Data']['Internal_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# External Phase Correction (microradians)
CS_l1b_mds['Data']['External_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Noise Power measurement (dB/100): converted from telemetry units to be
# the noise floor of FBR measurement echoes.
# Set to -9999.99 when the telemetry contains zero.
CS_l1b_mds['Data']['Noise_power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Phase slope correction (microradians)
# Computed from the CAL-4 packets during the azimuth impulse response
# amplitude (SARIN only). Set from the latest available CAL-4 packet.
CS_l1b_mds['Data']['Phase_slope'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
CS_l1b_mds['Data']['Spares1'] = np.ma.zeros((n_records,n_blocks,4),dtype=np.int8)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry'] = {}
# Dry Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['dryTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Wet Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['wetTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['InvBar'] = np.ma.zeros((n_records),dtype=np.int32)
# Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['DAC'] = np.ma.zeros((n_records),dtype=np.int32)
# GIM Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_GIM'] = np.ma.zeros((n_records),dtype=np.int32)
# Model Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_model'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['ocTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['lpeTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean loading tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['olTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Solid Earth tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['seTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Geocentric Polar tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['gpTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Surface Type: enumerated key to classify surface at nadir
# 0 = Open Ocean
# 1 = Closed Sea
# 2 = Continental Ice
# 3 = Land
CS_l1b_mds['Geometry']['Surf_type'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare1'] = np.ma.zeros((n_records,4),dtype=np.int8)
# Corrections Status Flag
CS_l1b_mds['Geometry']['Corr_status'] = np.ma.zeros((n_records),dtype=np.uint32)
# Correction Error Flag
CS_l1b_mds['Geometry']['Corr_error'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare2'] = np.ma.zeros((n_records,4),dtype=np.int8)
# CryoSat-2 Average Waveforms Groups
CS_l1b_mds['Waveform_1Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SIN'):
# SARIN Mode
# Same as the LRM/SAR groups but the waveform array is 512 bins instead of
# 128 and the number of echoes averaged is different.
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
# CryoSat-2 Waveforms Groups
# Beam Behavior Parameters
Beam_Behavior = {}
# Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# 3rd moment: providing the degree of asymmetry of the range integrated
# stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
# CryoSat-2 mode specific waveforms
CS_l1b_mds['Waveform_20Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
elif (self.MODE == 'SIN'):
# SARIN Mode
# Averaged Power Echo Waveform [512]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
# Coherence [512]: packed units (1/1000)
CS_l1b_mds['Waveform_20Hz']['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
# Phase Difference [512]: packed units (microradians)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
# for each record in the CryoSat file
for r in range(n_records):
# CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
CS_l1b_mds['Location']['Day'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
for b in range(n_blocks):
CS_l1b_mds['Data']['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Data']['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from scipy import stats, misc, special
from tests.distributions import utils
from zhusuan.distributions.multivariate import *
class TestMultinomial(tf.test.TestCase):
def test_init_check_shape(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "should have rank"):
Multinomial(tf.zeros([]), 10)
def test_init_n(self):
dist = Multinomial(tf.ones([2]), 10)
self.assertTrue(isinstance(dist.n_categories, int))
self.assertEqual(dist.n_categories, 2)
self.assertTrue(isinstance(dist.n_experiments, int))
self.assertEqual(dist.n_experiments, 10)
with self.assertRaisesRegexp(ValueError, "must be positive"):
_ = Multinomial(tf.ones([2]), 0)
with self.test_session(use_gpu=True) as sess:
logits = tf.placeholder(tf.float32, None)
n_experiments = tf.placeholder(tf.int32, None)
dist2 = Multinomial(logits, n_experiments)
self.assertEqual(
sess.run([dist2.n_categories, dist2.n_experiments],
feed_dict={logits: np.ones([2]), n_experiments: 10}),
[2, 10])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should have rank"):
dist2.n_categories.eval(feed_dict={logits: 1.,
n_experiments: 10})
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should be a scalar"):
dist2.n_experiments.eval(feed_dict={logits: [1.],
n_experiments: [10]})
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"must be positive"):
dist2.n_experiments.eval(feed_dict={logits: [1.],
n_experiments: 0})
def test_value_shape(self):
# static
dist = Multinomial(tf.placeholder(tf.float32, [None, 2]), 10)
self.assertEqual(dist.get_value_shape().as_list(), [2])
# dynamic
logits = tf.placeholder(tf.float32, None)
dist2 = Multinomial(logits, 10)
self.assertTrue(dist2._value_shape().dtype is tf.int32)
with self.test_session(use_gpu=True):
self.assertEqual(dist2._value_shape().eval(
feed_dict={logits: np.ones([2])}).tolist(), [2])
self.assertEqual(dist._value_shape().dtype, tf.int32)
def test_batch_shape(self):
def _distribution(param):
return Multinomial(param, 10)
utils.test_batch_shape_1parameter(
self, _distribution, np.zeros, is_univariate=False)
def test_sample_shape(self):
def _distribution(param):
return Multinomial(param, 10)
utils.test_1parameter_sample_shape_one_rank_less(
self, _distribution, np.zeros)
def test_log_prob_shape(self):
def _distribution(param):
return Multinomial(param, 10)
def _make_samples(shape):
samples = np.zeros(shape)
samples = samples.reshape((-1, shape[-1]))
samples[:, 0] = 1
return samples.reshape(shape)
utils.test_1parameter_log_prob_shape_one_rank_less(
self, _distribution, _make_samples, _make_samples)
def test_value(self):
with self.test_session(use_gpu=True):
def _test_value(logits, n_experiments, given):
logits = np.array(logits, np.float32)
normalized_logits = logits - misc.logsumexp(
logits, axis=-1, keepdims=True)
given = np.array(given)
dist = Multinomial(logits, n_experiments)
log_p = dist.log_prob(given)
target_log_p = np.log(misc.factorial(n_experiments)) - \
np.sum(np.log(misc.factorial(given)), -1) + \
np.sum(given * normalized_logits, -1)
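# target is the multinomial log-pmf:
#   log(n!) - sum_i log(x_i!) + sum_i x_i*log(p_i)
# where log(p_i) are the normalized (log-softmax) logits.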
self.assertAllClose(log_p.eval(), target_log_p)
p = dist.prob(given)
target_p = np.exp(target_log_p)
self.assertAllClose(p.eval(), target_p)
_test_value([-50., -20., 0.], 4, [1, 0, 3])
_test_value([1., 10., 1000.], 1, [1, 0, 0])
_test_value([[2., 3., 1.], [5., 7., 4.]], 3,
np.ones([3, 1, 3], dtype=np.int32))
_test_value([-10., 10., 20., 50.], 100, [[0, 1, 99, 100],
[100, 99, 1, 0]])
def test_dtype(self):
def _distribution(param, dtype=None):
return Multinomial(param, 10, dtype)
utils.test_dtype_1parameter_discrete(self, _distribution)
with self.assertRaisesRegexp(TypeError, "n_experiments must be"):
Multinomial([1., 1.], tf.placeholder(tf.float32, []))
class TestOnehotCategorical(tf.test.TestCase):
def test_init_check_shape(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "should have rank"):
OnehotCategorical(logits=tf.zeros([]))
def test_init_n_categories(self):
cat = OnehotCategorical(tf.ones([10]))
self.assertTrue(isinstance(cat.n_categories, int))
self.assertEqual(cat.n_categories, 10)
with self.test_session(use_gpu=True):
logits = tf.placeholder(tf.float32, None)
cat2 = OnehotCategorical(logits)
self.assertEqual(
cat2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should have rank"):
cat2.n_categories.eval(feed_dict={logits: 1.})
def test_value_shape(self):
# static
cat = OnehotCategorical(tf.placeholder(tf.float32, [None, 10]))
self.assertEqual(cat.get_value_shape().as_list(), [10])
# dynamic
logits = tf.placeholder(tf.float32, None)
cat2 = OnehotCategorical(logits)
self.assertTrue(cat2._value_shape().dtype is tf.int32)
with self.test_session(use_gpu=True):
self.assertEqual(cat2._value_shape().eval(
feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
self.assertEqual(cat._value_shape().dtype, tf.int32)
def test_batch_shape(self):
utils.test_batch_shape_1parameter(
self, OnehotCategorical, np.zeros, is_univariate=False)
def test_sample_shape(self):
utils.test_1parameter_sample_shape_one_rank_less(
self, OnehotCategorical, np.zeros)
def test_log_prob_shape(self):
def _make_samples(shape):
samples = np.zeros(shape)
samples = samples.reshape((-1, shape[-1]))
samples[:, 0] = 1
return samples.reshape(shape)
utils.test_1parameter_log_prob_shape_one_rank_less(
self, OnehotCategorical, _make_samples, _make_samples)
def test_value(self):
with self.test_session(use_gpu=True):
def _test_value(logits, given):
logits = np.array(logits, np.float32)
normalized_logits = logits - misc.logsumexp(
logits, axis=-1, keepdims=True)
given = np.array(given, np.int32)
cat = OnehotCategorical(logits)
log_p = cat.log_prob(tf.one_hot(given, logits.shape[-1],
dtype=tf.int32))
def _one_hot(x, depth):
n_elements = x.size
ret = np.zeros((n_elements, depth))
ret[np.arange(n_elements), x.flat] = 1
return ret.reshape(list(x.shape) + [depth])
target_log_p = np.sum(_one_hot(
given, logits.shape[-1]) * normalized_logits, -1)
self.assertAllClose(log_p.eval(), target_log_p)
p = cat.prob(tf.one_hot(given, logits.shape[-1],
dtype=tf.int32))
target_p = np.sum(_one_hot(
given, logits.shape[-1]) * np.exp(normalized_logits), -1)
self.assertAllClose(p.eval(), target_p)
_test_value([0.], [0, 0, 0])
_test_value([-50., -10., -50.], [0, 1, 2, 1])
_test_value([0., 4.], [[0, 1], [0, 1]])
_test_value([[2., 3., 1.], [5., 7., 4.]],
np.ones([3, 1, 1], dtype=np.int32))
def test_dtype(self):
utils.test_dtype_1parameter_discrete(self, OnehotCategorical)
class TestDirichlet(tf.test.TestCase):
def test_init_check_shape(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "should have rank"):
Dirichlet(alpha=tf.zeros([]))
def test_init_n_categories(self):
dist = Dirichlet(tf.ones([10]))
self.assertTrue(isinstance(dist.n_categories, int))
self.assertEqual(dist.n_categories, 10)
with self.assertRaisesRegexp(ValueError,
"n_categories.*should be at least 2"):
Dirichlet(tf.ones([3, 1]))
dist2 = Dirichlet(tf.placeholder(tf.float32, [3, None]))
self.assertTrue(dist2.n_categories is not None)
with self.test_session(use_gpu=True):
alpha = tf.placeholder(tf.float32, None)
dist3 = Dirichlet(alpha)
self.assertEqual(
dist3.n_categories.eval(feed_dict={alpha: np.ones([10])}), 10)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should have rank"):
dist3.n_categories.eval(feed_dict={alpha: 1.})
def test_value_shape(self):
# static
dist = Dirichlet(tf.placeholder(tf.float32, [None, 10]))
self.assertEqual(dist.get_value_shape().as_list(), [10])
# dynamic
alpha = tf.placeholder(tf.float32, None)
dist2 = Dirichlet(alpha)
self.assertEqual(dist2.get_value_shape().as_list(), [None])
self.assertTrue(dist2._value_shape().dtype is tf.int32)
with self.test_session(use_gpu=True):
self.assertEqual(dist2._value_shape().eval(
feed_dict={alpha: np.ones([2, 1, 3])}).tolist(), [3])
self.assertEqual(dist._value_shape().dtype, tf.int32)
def test_batch_shape(self):
utils.test_batch_shape_1parameter(
self, Dirichlet, np.zeros, is_univariate=False)
def test_sample_shape(self):
utils.test_1parameter_sample_shape_one_rank_less(
self, Dirichlet, np.zeros)
def test_log_prob_shape(self):
def _make_samples(shape):
samples = np.ones(shape, dtype=np.float32)
return samples / samples.sum(axis=-1, keepdims=True)
# TODO: This failed with a bug in Tensorflow, waiting fix.
# https://github.com/tensorflow/tensorflow/issues/8391
# _test_static([3, None], [3, 2, 1, None], [3, 2, 3])
utils.test_1parameter_log_prob_shape_one_rank_less(
self, Dirichlet, np.ones, _make_samples)
def test_value(self):
def dirichlet_logpdf(x, alpha):
# scipy's implementation of the dirichlet logpdf doesn't support
# a batch of x, so we use this modified version.
def _lnB(alpha):
return np.sum(special.gammaln(alpha)) - \
special.gammaln(np.sum(alpha))
lnB = _lnB(alpha)
return - lnB + np.sum(np.log(x) * (alpha - 1), -1)
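# i.e. the Dirichlet log-density -log B(alpha) + sum_i (alpha_i - 1)*log(x_i),
# with B the multivariate beta function.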
def dirichlet_pdf(x, alpha):
return np.exp(dirichlet_logpdf(x, alpha))
with self.test_session(use_gpu=True):
def _test_value_alpha_rank1(alpha, given):
alpha = np.array(alpha, np.float32)
given = np.array(given, np.float32)
dist = Dirichlet(alpha)
log_p = dist.log_prob(given)
target_log_p = dirichlet_logpdf(given, alpha)
self.assertAllClose(log_p.eval(), target_log_p)
p = dist.prob(given)
target_p = dirichlet_pdf(given, alpha)
self.assertAllClose(p.eval(), target_p)
_test_value_alpha_rank1([1., 1., 1.],
[[0.2, 0.5, 0.3], [0.3, 0.4, 0.3]])
_test_value_alpha_rank1([2., 3., 4.], [0.3, 0.7, 0.])
# TODO: fix for case when alpha=1, given=0
def _test_value_alpha_rank2_given_rank2(alpha, given):
alpha = np.array(alpha, np.float32)
given = np.array(given, np.float32)
alpha_b = alpha * np.ones_like(given)
given_b = given * np.ones_like(alpha)
dist = Dirichlet(alpha)
log_p = dist.log_prob(given)
target_log_p = np.array(
[dirichlet_logpdf(given_b[i], alpha_b[i])
for i in range(alpha_b.shape[0])])
self.assertAllClose(log_p.eval(), target_log_p)
p = dist.prob(given)
target_p = np.array(
[dirichlet_pdf(given_b[i], alpha_b[i])
for i in range(alpha_b.shape[0])])
self.assertAllClose(p.eval(), target_p)
_test_value_alpha_rank2_given_rank2([[1., 2.], [3., 4.]],
[0.5, 0.5])
_test_value_alpha_rank2_given_rank2([[5., 6.], [7., 8.]],
[[0.1, 0.9]])
_test_value_alpha_rank2_given_rank2([[100., 1.], [0.01, 10.]],
[[0., 1.], [1., 0.]])
def test_check_numerics(self):
alpha = tf.placeholder(tf.float32, None)
given = tf.placeholder(tf.float32, None)
dist = Dirichlet(alpha, check_numerics=True)
log_p = dist.log_prob(given)
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"log\(given\).*Tensor had Inf"):
log_p.eval(feed_dict={alpha: np.ones([2]), given: [0., 1.]})
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"lbeta\(alpha\).*Tensor had NaN"):
log_p.eval(feed_dict={alpha: [-1., 1.], given: [0.5, 0.5]})
def test_dtype(self):
utils.test_dtype_1parameter_continuous(self, Dirichlet)
class TestExpConcrete(tf.test.TestCase):
def test_init_check_shape(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "should have rank"):
ExpConcrete(1., logits=tf.zeros([]))
def test_init_n_categories(self):
con = ExpConcrete(1., tf.ones([10]))
self.assertTrue(isinstance(con.n_categories, int))
self.assertEqual(con.n_categories, 10)
with self.test_session(use_gpu=True):
logits = tf.placeholder(tf.float32, None)
con2 = ExpConcrete(1., logits)
self.assertEqual(
con2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should have rank"):
con2.n_categories.eval(feed_dict={logits: 1.})
def test_init_temperature(self):
with self.assertRaisesRegexp(ValueError,
"should be a scalar"):
ExpConcrete([1.], [1., 2.])
with self.test_session(use_gpu=True):
temperature = tf.placeholder(tf.float32, None)
con = ExpConcrete(temperature, [1., 2.])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should be a scalar"):
con.temperature.eval(feed_dict={temperature: [1.]})
def test_value_shape(self):
# static
con = ExpConcrete(1., tf.placeholder(tf.float32, [None, 10]))
self.assertEqual(con.get_value_shape().as_list(), [10])
# dynamic
logits = tf.placeholder(tf.float32, None)
con2 = ExpConcrete(1., logits)
self.assertTrue(con2._value_shape().dtype is tf.int32)
with self.test_session(use_gpu=True):
self.assertEqual(con2._value_shape().eval(
feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
self.assertEqual(con._value_shape().dtype, tf.int32)
def test_batch_shape(self):
def _proxy_distribution(logits):
return ExpConcrete(1., logits)
utils.test_batch_shape_1parameter(
self, _proxy_distribution, np.zeros, is_univariate=False)
def test_sample_shape(self):
def _proxy_distribution(logits):
return ExpConcrete(1., logits)
utils.test_1parameter_sample_shape_one_rank_less(
self, _proxy_distribution, np.zeros)
def test_log_prob_shape(self):
def _proxy_distribution(logits):
return ExpConcrete(1., logits)
def _make_samples(shape):
samples = np.ones(shape, dtype=np.float32)
return np.log(samples / samples.sum(axis=-1, keepdims=True))
utils.test_1parameter_log_prob_shape_one_rank_less(
self, _proxy_distribution, np.ones, _make_samples)
def test_value(self):
with self.test_session(use_gpu=True):
def _test_value(given, temperature, logits):
given = np.array(given, np.float32)
logits = np.array(logits, np.float32)
n = logits.shape[-1]
t = temperature
target_log_p = special.gammaln(n) + (n - 1) * np.log(t) + \
(logits - t * given).sum(axis=-1) - \
n * np.log(np.exp(logits - t * given).sum(axis=-1))
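# target is the log-density of the ExpConcrete (log-transformed
# Concrete/Gumbel-Softmax) distribution of Maddison et al. (2016):
#   log((n-1)!) + (n-1)*log(t) + sum_i(logit_i - t*y_i)
#   - n*log(sum_i exp(logit_i - t*y_i))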
con = ExpConcrete(temperature, logits=logits)
log_p = con.log_prob(given)
self.assertAllClose(log_p.eval(), target_log_p)
p = con.prob(given)
self.assertAllClose(p.eval(), np.exp(target_log_p))
_test_value([np.log(0.25), np.log(0.25), np.log(0.5)],
0.1,
[1., 1., 1.2])
_test_value([[np.log(0.25), np.log(0.25)
import pandas as pd
import numpy as np
import sys
import copy
import subprocess
import os
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
def main():
# Import CPS data file
data = pd.read_csv(os.path.join(CUR_PATH, 'cps_raw.csv.gz'),
compression='gzip')
adj_targets = pd.read_csv(os.path.join(CUR_PATH, 'adjustment_targets.csv'))
other_ben = pd.read_csv(os.path.join(CUR_PATH, 'benefitprograms.csv'),
index_col='Program')
# Rename specified variables
renames = {
'IFDEPT': 'DSI',
'TAXYEAR': 'FLPDYR',
'XXTOT': 'XTOT',
'JCPS21': 'e00200p',
'JCPS31': 'e00200s',
'ALIMONY': 'e00800',
'JCPS25': 'e00900p',
'JCPS35': 'e00900s',
'JCPS28': 'e02100p',
'JCPS38': 'e02100s',
'UCOMP': 'e02300',
'SEHEALTH': 'e03270',
'DPAD': 'e03240',
'MEDICALEXP': 'e17500',
'REALEST': 'e18500',
'MISCITEM': 'e20400',
'CCE': 'e32800',
'ICPS01': 'age_head',
'ICPS02': 'age_spouse',
'WT': 's006',
'FILST': 'filer',
'SEQUENCE': 'RECID',
'PENSIONS': 'e01500',
'DBE': 'e00600',
'KEOGH': 'e03300',
'TIRAD': 'e01400',
'NU18': 'nu18',
'N1821': 'n1820',
'N21': 'n21',
'CGAGIX': 'e01100',
'BLIND_HEAD': 'blind_head',
'BLIND_SPOUSE': 'blind_spouse',
'HMIE': 'e19200',
'SS': 'e02400',
'VB': 'vet_ben',
'MEDICARE': 'mcare_ben',
'MEDICAID': 'mcaid_ben',
'SSI': 'ssi_ben',
'SNAP': 'snap_ben',
'WIC': 'wic_ben',
'TANF': 'tanf_ben',
'UI': 'ui_ben',
'HOUSING': 'housing_ben',
'SLTX': 'e18400',
'XHID': 'h_seq',
'XFID': 'ffpos',
'XSTATE': 'fips',
'NU13': 'nu13',
'NU05': 'nu05',
'N24': 'n24',
'ELDERLY_DEPENDENT': 'elderly_dependents',
'F2441': 'f2441'
}
data = data.rename(columns=renames)
data['MARS'] = np.where(data.JS == 3, 4, data.JS)
data['EIC'] = np.minimum(3, data.EIC)
# Use taxpayer and spouse records to get total tax unit earnings and AGI
data['e00100'] = data['JCPS9'] + data['JCPS19']
data['e00900'] = data['e00900p'] + data['e00900s']
np.random.seed(79)
# Determine amount of qualified dividends
# percent of units where all dividends are qualified
all_qualified_prob = 0.429
# percent of units where no dividends are qualified
no_qualified_prob = 0.093
# percent of units where either all or no dividends are qualified
non_avg_prob = all_qualified_prob + no_qualified_prob
# percent of dividends that are qualified among remaining units
qualified_frac = 0.678
# Determine qualified dividend percentage
probs = np.random.random(len(data['e00600']))
qualified = np.ones(len(data['e00600']))
qualified = np.where((probs > all_qualified_prob) &
(probs <= non_avg_prob), 0.0, qualified)
qualified = np.where(probs > non_avg_prob, qualified_frac, qualified)
data['e00650'] = data.e00600 * qualified
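# In expectation a unit's qualified share of dividends is therefore
# 0.429*1 + 0.093*0 + (1 - 0.429 - 0.093)*0.678 ~= 0.753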
# Split interest income into taxable and tax exempt
slope = 0.068
ratio = 0.46
prob = 1. - slope * (data.INTST * 1e-3)
uniform_rn = np.random.random(len(prob))
data['e00300'] = np.where(uniform_rn < prob,
data.INTST,
data.INTST * ratio)
data['e00400'] = data['INTST'] - data['e00300']
# Split pensions and annuities using random assignment
# probabilities used for random assignment
probs = np.random.random(len(data['e01500']))
fully_taxable_prob = 0.612
zero_tax_prob = 0.073
non_avg_prob = fully_taxable_prob + zero_tax_prob
avg_taxable_amount = 0.577
# determine the taxable fraction
taxability = np.ones(len(data['e01500']))
taxability = np.where((probs > fully_taxable_prob) &
(probs <= non_avg_prob), 0.0, taxability)
taxability = np.where(probs > non_avg_prob, avg_taxable_amount, taxability)
from impedance.models.circuits import BaseCircuit, CustomCircuit, Randles
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pytest
# get example data
data = np.genfromtxt(os.path.join("./data/", "exampleData.csv"), delimiter=",")
f = data[:, 0]
Z = data[:, 1] + 1j * data[:, 2]
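# first CSV column holds the frequencies; the remaining two hold the real
# and imaginary parts of the measured impedance (units as provided by the
# example data, presumably Hz and ohms)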
def test_BaseCircuit():
initial_guess = [0.01, 0.02, 50]
base_circuit = BaseCircuit(initial_guess)
# __init__()
# check initial_guess is loaded in correctly
assert base_circuit.initial_guess == initial_guess
# improper initial_guess types raise a TypeError
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=["hi", 0.1])
# __eq__()
# incorrect comparisons raise a TypeError
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r == 8
# fit()
# improper data types in fitting raise a TypeError
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit([42, 4.2], []) # frequencies not ndarray
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit(np.array([42 + 42j]), []) # frequencies not numeric type
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit(np.array([42]), [42 + 42j]) # Z not ndarray
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit(np.array([42]), np.array([0.5, 0.2]))  # Z not complex
import ROOT as root
import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import nominal_values as noms
from uncertainties.unumpy import std_devs as stds
from array import array
import sys
############### Read command line arguments
try:
name_of_folder = sys.argv[1]
try:
plot_style = sys.argv[2]
except IndexError:
plot_style = None
except IndexError:
print('No argument given or other index out of range error')
sys.path.insert(0, './' + name_of_folder + '/')
########################## import pyData.py ######################################
from pyData import *
##################################################### Set Canvas Style #############################
root.gStyle.SetOptTitle(0)
root.gStyle.SetOptFit(1)
root.gStyle.SetLabelSize(.05, "XY");
root.gStyle.SetTitleSize(.05, "XY");
root.gStyle.SetTitleOffset(1, "XY");
root.gStyle.SetStatFontSize(.08)
########################### Def Gaus function ######################
personal_gaus = root.TF1("personal_gaus", " [0] * exp( -0.5 * ( (x - [1]) / [2] ) * ( (x - [1]) / [2] ) ) ")
name_params = [ "amplitude/[MeanVcal]", "mean/[Col]", "sigma/[Col]"]
personal_gaus.SetParName(0,'Amplitude')
personal_gaus.SetParName(2,'Sigma')
if plot_style == 'thesis':
personal_gaus.SetParName(1,'Mittelwert')
else :
personal_gaus.SetParName(1,'Mean')
############################### Save Data in list #######################################
mean_value_col_list = []
mean_error_col_list = []
x_value = []
x_error = []
##############################################################################################################################
################################### Getting the summed hit value of each column near the laser spot #############################
###############################################################################################################################
################################## Set sum area, size of sensetive area ###############################
xmin = 20
xmax = 26
ymin = 62
ymax = 72
#################################### calculating the hit sum of each column ################################
for i in range(xmin,xmax): # loop over all columns
content = []
error = []
x_value.append(i)
x_error.append(0.5)
test_error = []
for j in range(ymin,ymax): # loop over all rows
if qMap_Ag_C0_V0.GetBinContent(i,j) != 0:
content.append( qMap_Ag_C0_V0.GetBinContent(i,j)) # Is this the real error
N = qMap_Ag_C0_V0.GetBinEntries( qMap_Ag_C0_V0.GetBin(i,j))
if N == 1:
new_error = np.sqrt( ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N) **2)
else:
new_error = np.sqrt( 1/(N-1) * ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N) **2)
#error.append( 1/N * np.sqrt(qMap_Ag_C0_V0.GetBinContent(i,j) *N ) ) # Is this the real error
error.append( new_error ) # Is this the real error
else:
pass
content_bin = unp.uarray( content, error)
    mean_content_col = content_bin.sum() # sum of all bins in the column, with propagated uncertainty
# Saving values in lists
mean_value_col_list.append( noms(mean_content_col))
mean_error_col_list.append( stds(mean_content_col) )
########################### Create errorbar plot #####################################
errorbar_plot_col = root.TGraphErrors( len(x_value), array( 'f', x_value- np.ones(len(x_value))), array( 'f', mean_value_col_list), array( 'f', x_error), array( 'f', mean_error_col_list) )
x_value -= np.ones(len(x_value))
############################## Set axis label and range of errobar plot ##################################
if plot_style == 'thesis':
errorbar_plot_col.GetXaxis().SetTitle("Spalte")
errorbar_plot_col.GetYaxis().SetTitle("Summe Hits / Vcal")
else:
errorbar_plot_col.GetXaxis().SetTitle("Col")
errorbar_plot_col.GetYaxis().SetTitle("Mean Hit / Vcal")
errorbar_plot_col.SetMinimum(0)
errorbar_plot_col.SetMaximum( max( mean_value_col_list) + 0.3 * max(mean_value_col_list) )
####################### create Canvas and FIT ##########################################
c1 = root.TCanvas("c1", "c1", 1980, 1080)
c1.SetGrid()
if name_of_folder == '7_mm':
personal_gaus.SetParLimits(0, max(mean_value_col_list) * .2, max(mean_value_col_list) * 1.5 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.2 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value)) * 1.4 )
elif name_of_folder == '14_mm':
personal_gaus.SetParLimits(0, max(mean_value_col_list) * .4, max(mean_value_col_list) * 1.5 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .8, np.mean(x_value) * 1.1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value))*1.1 )
else:
personal_gaus.SetParLimits(0, max(mean_value_col_list) * .5, max(mean_value_col_list) * 1.8 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.2 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value)) * 1.2 )
errorbar_plot_col.Fit(personal_gaus, "", "", min(x_value) -0.5 , max( x_value) +0.5 )
#errorbar_plot_col.Fit("gaus", "", "", min(x_value) -0.5 , max( x_value) +0.5 )
errorbar_plot_col.Draw("ap*")
############################### Create legend ####################################
if plot_style == 'thesis':
legend = root.TLegend(0.15,0.71,0.37,0.93)
legend.SetTextSize(0.055)
legend.AddEntry(errorbar_plot_col,"Summe Hits","lep")
legend.AddEntry( personal_gaus,"Fit","l")
legend.Draw()
else:
legend = root.TLegend(0.65,0.47,0.98,0.7)
legend.SetTextSize(0.04)
legend.AddEntry(errorbar_plot_col,"Row sum hit value","lep")
legend.AddEntry( personal_gaus,"Gaussian Fit","l")
legend.Draw()
######## Transfer Sigma from Bin to mumeter ############################
sigma_mu_meter_col = ufloat(personal_gaus.GetParameter(2), personal_gaus.GetParError(2)) * 150 # 150 is pixel size in y direction
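# Illustrative example (made-up numbers): a fitted sigma of 1.2 +/- 0.1 columns corresponds
# to (1.2 +/- 0.1) * 150 um = 180 +/- 15 um on the sensor.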
#############################################################################
############################### Save parameter and plot ###########################################
with open( f'./fit_params/{name_of_folder}_fit_parameters_col_xaxis.txt', 'w') as file:
for i in range(0,3):
file.write( name_params[i] + ' ' + str( personal_gaus.GetParameter(i) ) + ' ' + str(personal_gaus.GetParError(i)) + '\n')
with open( f'./fit_parameters_col_xaxis.txt', 'a') as file:
file.write( name_of_folder + 'Amplitude/Sigma/Mean:' + ' ' + str( personal_gaus.GetParameter(0) ) + ' ' + str(personal_gaus.GetParError(0)) + ' ' + str( personal_gaus.GetParameter(1) ) + ' ' + str(personal_gaus.GetParError(1)) + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
with open( f'./sigma_col_xaxis.txt', 'a') as file:
file.write( name_params[i] + '_' + name_of_folder + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
with open( f'./sigma_col_in_mumeter_xaxis.txt', 'a') as file:
file.write( name_params[i] +'_' + name_of_folder + ' ' + str( noms(sigma_mu_meter_col) ) + ' ' + str( stds(sigma_mu_meter_col) ) + '\n')
c1.SaveAs(f'./plots/{name_of_folder}_erorbar_plot_col.pdf')
##############################################################################################################################
################################### Getting the summed hit value of all rows near the laserspot #############################
###############################################################################################################################
############################Reset lists###########################################
mean_value_row_list = []
mean_error_row_list = []
x_value = []
x_error = []
row_with_hits = []
#################################### calculating mean of each row #####################################
for i in range(ymin,ymax): # going through all rows
content = []
error = []
x_value.append(i)
x_error.append(0.5)
    for j in range(xmin,xmax): # going through all columns
if qMap_Ag_C0_V0.GetBinContent(j,i) != 0:
content.append( qMap_Ag_C0_V0.GetBinContent(j,i))
N = qMap_Ag_C0_V0.GetBinEntries( qMap_Ag_C0_V0.GetBin(j,i))
            # note: use (col=j, row=i) indexing, matching GetBinContent(j,i) above
            if N == 1:
                new_error = np.sqrt( ( qMap_Ag_C0_V0.GetBinContent(j,i) - qMap_Ag_C0_V0.GetBinContent(j,i)/N )**2)
            else:
                new_error = np.sqrt( 1/(N-1) * ( qMap_Ag_C0_V0.GetBinContent(j,i) - qMap_Ag_C0_V0.GetBinContent(j,i)/N) **2)
#error.append( 1/N * np.sqrt(qMap_Ag_C0_V0.GetBinContent(j,i) * N ) )
error.append( new_error)
else:
pass
content_bin = unp.uarray( content, error)
    mean_content_row = content_bin.sum() # sum of all bins in the row, with propagated uncertainty
# Saving values in lists
mean_value_row_list.append( noms(mean_content_row))
mean_error_row_list.append( stds(mean_content_row))
############################# Create new errorbar plot ####################################
errorbar_plot_rows = root.TGraphErrors( len(x_value), array( 'f', x_value - np.ones(len(x_value))), array( 'f', mean_value_row_list), array( 'f', x_error), array( 'f', mean_error_row_list) )
x_value -= np.ones(len(x_value))
errorbar_plot_rows.GetXaxis().SetNdivisions(20)
############################### create Canvas ########################################
c2 = root.TCanvas("c2", "c2", 1980, 1080);
c2.SetGrid()
############################## Set axis label of errobar plot ##################################
if plot_style == 'thesis':
errorbar_plot_rows.GetXaxis().SetTitle("Zeile")
errorbar_plot_rows.GetYaxis().SetTitle("Summe Hits / Vcal")
else:
errorbar_plot_rows.GetXaxis().SetTitle("Row")
errorbar_plot_rows.GetYaxis().SetTitle("Mean Hit / Vcal")
errorbar_plot_rows.SetMinimum(0)
if name_of_folder == '10-5_mm':
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.15 * max(mean_value_row_list) )
elif name_of_folder == '11_mm':
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.9 * max(mean_value_row_list) )
elif name_of_folder == '9_mm':
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.4 * max(mean_value_row_list) )
else:
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.3 * max(mean_value_row_list) )
############################### Plot function and fit #############################################
if name_of_folder == '10-5_mm':
print(np.std(np.array(x_value)))
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .5, max(mean_value_row_list) * 1.5 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .9, np.mean(x_value) * 1.12)
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value))*0.6 )
elif name_of_folder == '11_mm':
#personal_gaus.SetParameter(1, 66 )
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .5, max(mean_value_row_list) * 1.8)
personal_gaus.SetParLimits(1, np.mean(x_value) * .9, np.mean(x_value) * 1.12 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .05, np.std(np.array(x_value))*0.8 )
elif name_of_folder == '7_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .2, max(mean_value_row_list)*1.2 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.3)
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) * 1.05 )
elif name_of_folder == '6_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .2, max(mean_value_row_list) * 1.31 )
personal_gaus.SetParLimits(1, np.mean(x_value) -3, np.mean(x_value)+3 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) *1.05 )
elif name_of_folder == '9-5_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .2, np.std(np.array(x_value)) )
elif name_of_folder == '9_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
elif name_of_folder == '12_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
elif name_of_folder == '13_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
elif name_of_folder == '14_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
    personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
#
# Copyright (C) 2016-2019 by <NAME>, <NAME>, <NAME>, and contributors
#
# This file is part of Power Sequencer.
#
# Power Sequencer is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Power Sequencer is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with Power Sequencer. If
# not, see <https://www.gnu.org/licenses/>.
#
import numpy as np
def trfbank(fs, nfft, lowfreq, linsc, logsc, nlinfilt, nlogfilt):
"""Compute triangular filterbank for MFCC computation."""
# Total number of filters
nfilt = nlinfilt + nlogfilt
# ------------------------
# Compute the filter bank
# ------------------------
# Compute start/middle/end points of the triangular filters in spectral
# domain
freqs = np.zeros(nfilt + 2)
freqs[:nlinfilt] = lowfreq + np.arange(nlinfilt) * linsc
freqs[nlinfilt:] = freqs[nlinfilt - 1] * logsc ** np.arange(1, nlogfilt + 3)
heights = 2.0 / (freqs[2:] - freqs[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = np.zeros((nfilt, nfft))
# FFT bins (in Hz)
nfreqs = np.arange(nfft) / (1.0 * nfft) * fs
for i in range(nfilt):
low = freqs[i]
cen = freqs[i + 1]
hi = freqs[i + 2]
lid = np.arange(np.floor(low * nfft / fs) + 1, np.floor(cen * nfft / fs) + 1, dtype=np.int)
lslope = heights[i] / (cen - low)
        rid = np.arange(np.floor(cen * nfft / fs) + 1, np.floor(hi * nfft / fs) + 1, dtype=np.int)
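        # The original file is truncated here; the lines below are a hedged sketch of the
        # standard triangular-filterbank fill (rising slope on [low, cen], falling slope on
        # [cen, hi]); some implementations additionally return the center frequencies.
        rslope = heights[i] / (hi - cen)
        fbank[i][lid] = lslope * (nfreqs[lid] - low)
        fbank[i][rid] = rslope * (hi - nfreqs[rid])
    return fbank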
from __future__ import division
from nose.tools import *
import numpy as np
import causalinference.causal as c
from utils import random_data
def test_est_propensity():
D = np.array([0, 0, 0, 1, 1, 1])
X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
Y = random_data(D_cur=D, X_cur=X)
causal = c.CausalModel(Y, D, X)
causal.est_propensity()
lin = [0, 1]
qua = []
coef = np.array([6.8066090, -0.0244874, -0.7524939])
loglike = -3.626517
fitted = np.array([0.6491366, 0.3117840, 0.2911631,
0.8086407, 0.3013733, 0.6379023])
se = np.array([8.5373779, 0.4595191, 0.8106499])
keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}
assert_equal(causal.propensity['lin'], lin)
assert_equal(causal.propensity['qua'], qua)
assert np.allclose(causal.propensity['coef'], coef)
assert np.allclose(causal.propensity['loglike'], loglike)
assert np.allclose(causal.propensity['fitted'], fitted)
assert np.allclose(causal.propensity['se'], se)
assert_equal(set(causal.propensity.keys()), keys)
assert np.allclose(causal.raw_data['pscore'], fitted)
def test_est_propensity_s():
D = np.array([0, 0, 0, 1, 1, 1])
X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
Y = random_data(D_cur=D, X_cur=X)
causal = c.CausalModel(Y, D, X)
causal.est_propensity_s()
lin1 = [1]
qua1 = []
coef1 = np.array([6.5424027, -0.7392041])
loglike1 = -3.627939
fitted1 = np.array([0.6522105, 0.2995088, 0.2995088,
0.7970526, 0.2995088, 0.6522105])
se1 = np.array([6.8455179, 0.7641445])
keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}
assert_equal(causal.propensity['lin'], lin1)
assert_equal(causal.propensity['qua'], qua1)
assert np.allclose(causal.propensity['coef'], coef1)
assert np.allclose(causal.propensity['loglike'], loglike1)
assert np.allclose(causal.propensity['fitted'], fitted1)
assert np.allclose(causal.propensity['se'], se1)
assert_equal(set(causal.propensity.keys()), keys)
assert np.allclose(causal.raw_data['pscore'], fitted1)
causal.est_propensity_s([0,1])
lin2 = [0, 1]
qua2 = []
coef2 = np.array([6.8066090, -0.0244874, -0.7524939])
loglike2 = -3.626517
fitted2 = np.array([0.6491366, 0.3117840, 0.2911631,
0.8086407, 0.3013733, 0.6379023])
se2 = np.array([8.5373779, 0.4595191, 0.8106499])
assert_equal(causal.propensity['lin'], lin2)
assert_equal(causal.propensity['qua'], qua2)
assert np.allclose(causal.propensity['coef'], coef2)
assert np.allclose(causal.propensity['loglike'], loglike2)
assert np.allclose(causal.propensity['fitted'], fitted2)
assert np.allclose(causal.propensity['se'], se2)
assert np.allclose(causal.raw_data['pscore'], fitted2)
def test_est_via_ols():
Y = np.array([52, 30, 5, 29, 12, 10, 44, 87])
D = np.array([0, 0, 0, 0, 1, 1, 1, 1])
X = np.array([[1, 42], [3, 32], [9, 7], [12, 86],
[5, 94], [4, 36], [2, 13], [6, 61]])
causal = c.CausalModel(Y, D, X)
adj1 = 0
causal.est_via_ols(adj1)
ate1 = 9.25
ate_se1 = 17.68253
keys1 = {'ate', 'ate_se'}
assert np.allclose(causal.estimates['ols']['ate'], ate1)
assert np.allclose(causal.estimates['ols']['ate_se'], ate_se1)
assert_equal(set(causal.estimates['ols'].keys()), keys1)
adj2 = 1
causal.est_via_ols(adj2)
ate2 = 3.654552
ate_se2 = 17.749993
keys2 = {'ate', 'ate_se'}
assert np.allclose(causal.estimates['ols']['ate'], ate2)
assert np.allclose(causal.estimates['ols']['ate_se'], ate_se2)
assert_equal(set(causal.estimates['ols'].keys()), keys2)
adj3 = 2
causal.est_via_ols(adj3)
ate3 = 30.59444
atc3 = 63.2095
att3 = -2.020611
ate_se3 = 19.91887865
atc_se3 = 29.92152
att_se3 = 11.8586
keys3 = {'ate', 'atc', 'att', 'ate_se', 'atc_se', 'att_se'}
assert np.allclose(causal.estimates['ols']['ate'], ate3)
assert np.allclose(causal.estimates['ols']['atc'], atc3)
assert np.allclose(causal.estimates['ols']['att'], att3)
assert np.allclose(causal.estimates['ols']['ate_se'], ate_se3)
assert np.allclose(causal.estimates['ols']['atc_se'], atc_se3)
assert np.allclose(causal.estimates['ols']['att_se'], att_se3)
assert_equal(set(causal.estimates['ols'].keys()), keys3)
def test_parse_lin_terms():
K1 = 4
lin1 = None
ans1 = []
assert_equal(c.parse_lin_terms(K1, lin1), ans1)
K2 = 2
lin2 = 'all'
ans2 = [0, 1]
assert_equal(c.parse_lin_terms(K2, lin2), ans2)
K3 = 2
lin3 = [1]
ans3 = [1]
assert_equal(c.parse_lin_terms(K3, lin3), ans3)
K4 = 2
lin4 = []
ans4 = []
assert_equal(c.parse_lin_terms(K4, lin4), ans4)
def test_parse_qua_terms():
K1 = 3
qua1 = None
ans1 = []
assert_equal(c.parse_qua_terms(K1, qua1), ans1)
K2 = 2
qua2 = 'all'
ans2 = [(0, 0), (0, 1), (1, 1)]
assert_equal(c.parse_qua_terms(K2, qua2), ans2)
K3 = 2
qua3 = [(0, 1)]
ans3 = [(0, 1)]
assert_equal(c.parse_qua_terms(K3, qua3), ans3)
K4 = 2
qua4 = []
ans4 = []
assert_equal(c.parse_qua_terms(K4, qua4), ans4)
def test_split_equal_bins():
pscore = np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 0.95])
blocks = 5
ans = [0, 0.2, 0.4, 0.6, 0.8, 1]
assert_equal(c.split_equal_bins(pscore, blocks), ans)
def test_sumlessthan():
g1 = np.array([3, 1, 2, 4, 3, 3])
sg1 = np.array([1, 2, 3, 3, 3, 4])
cs11 = np.array([1, 2, 3, 4, 5, 6])
csg1 = np.array([1, 3, 6, 9, 12, 16])
ans1 = np.array([5, 1, 2, 6, 5, 5])
ans2 = np.array([12, 1, 3, 16, 12, 12])
assert np.array_equal(c.sumlessthan(g1, sg1, cs11), ans1)
assert np.array_equal(c.sumlessthan(g1, sg1, csg1), ans2)
g2 = np.array([22, 4, 6, 4, 25, 5])
sg2 = np.array([4, 4, 5, 6, 22, 25])
cs12 = np.array([1, 2, 3, 4, 5, 6])
    csg2 = np.array([4, 8, 13, 19, 41, 66])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.units import Quantity, Unit
from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap
from gammapy.utils.testing import mpl_plot_check, requires_dependency
pytest.importorskip("healpy")
map_axes = [
MapAxis.from_bounds(1.0, 10.0, 3, interp="log", name="energy"),
MapAxis.from_bounds(0.1, 1.0, 4, interp="log", name="time"),
]
mapbase_args = [
(0.1, 10.0, "wcs", SkyCoord(0.0, 30.0, unit="deg"), None, ""),
(0.1, 10.0, "wcs", SkyCoord(0.0, 30.0, unit="deg"), map_axes[:1], ""),
(0.1, 10.0, "wcs", SkyCoord(0.0, 30.0, unit="deg"), map_axes, "m^2"),
(0.1, 10.0, "hpx", SkyCoord(0.0, 30.0, unit="deg"), None, ""),
(0.1, 10.0, "hpx", SkyCoord(0.0, 30.0, unit="deg"), map_axes[:1], ""),
(0.1, 10.0, "hpx", SkyCoord(0.0, 30.0, unit="deg"), map_axes, "s^2"),
]
mapbase_args_with_axes = [_ for _ in mapbase_args if _[4] is not None]
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args
)
def test_map_create(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
assert m.unit == unit
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_copy(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
m_copy = m.copy()
assert repr(m) == repr(m_copy)
m_copy = m.copy(unit="cm-2 s-1")
assert m_copy.unit == "cm-2 s-1"
assert m_copy.unit is not m.unit
m_copy = m.copy(meta={"is_copy": True})
assert m_copy.meta["is_copy"]
assert m_copy.meta is not m.meta
m_copy = m.copy(data=42 * np.ones(m.data.shape))
assert m_copy.data[(0,) * m_copy.data.ndim] == 42
assert m_copy.data is not m.data
def test_map_from_geom():
geom = WcsGeom.create(binsz=1.0, width=10.0)
m = Map.from_geom(geom)
assert isinstance(m, WcsNDMap)
assert m.geom.is_image
geom = HpxGeom.create(binsz=1.0, width=10.0)
m = Map.from_geom(geom)
assert isinstance(m, HpxNDMap)
assert m.geom.is_image
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_get_image_by_coord(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
m.data = np.arange(m.data.size, dtype=float).reshape(m.data.shape)
coords = (3.456, 0.1234)[: len(m.geom.axes)]
m_image = m.get_image_by_coord(coords)
im_geom = m.geom.to_image()
skycoord = im_geom.get_coord().skycoord
m_vals = m.get_by_coord((skycoord,) + coords)
assert_equal(m_image.data, m_vals)
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_get_image_by_pix(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
pix = (1.2345, 0.1234)[: len(m.geom.axes)]
m_image = m.get_image_by_pix(pix)
im_geom = m.geom.to_image()
idx = im_geom.get_idx()
m_vals = m.get_by_pix(idx + pix)
assert_equal(m_image.data, m_vals)
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_slice_by_idx(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
data = np.arange(m.data.size, dtype=float)
m.data = data.reshape(m.data.shape)
# Test none slicing
sliced = m.slice_by_idx({})
assert_equal(m.geom.shape_axes, sliced.geom.shape_axes)
slices = {"energy": slice(0, 1), "time": slice(0, 2)}
sliced = m.slice_by_idx(slices)
assert not sliced.geom.is_image
slices = tuple([slices[ax.name] for ax in m.geom.axes])
assert_equal(m.data[slices[::-1]], sliced.data)
assert sliced.data.base is data
slices = {"energy": 0, "time": 1}
sliced = m.slice_by_idx(slices)
assert sliced.geom.is_image
slices = tuple([slices[ax.name] for ax in m.geom.axes])
assert_equal(m.data[slices[::-1]], sliced.data)
assert sliced.data.base is data
@pytest.mark.parametrize("map_type", ["wcs", "hpx"])
def test_map_meta_read_write(map_type):
meta = {"user": "test"}
m = Map.create(
binsz=0.1,
width=10.0,
map_type=map_type,
skydir=SkyCoord(0.0, 30.0, unit="deg"),
meta=meta,
)
hdulist = m.to_hdulist(hdu="COUNTS")
header = hdulist["COUNTS"].header
assert header["META"] == '{"user": "test"}'
m2 = Map.from_hdulist(hdulist)
assert m2.meta == meta
@pytest.mark.parametrize("map_type", ["wcs", "hpx"])
def test_map_time_axis_read_write(map_type):
time_axis = TimeMapAxis(
edges_min=[0, 2, 4] * u.d,
edges_max=[1, 3, 5] * u.d,
reference_time="2000-01-01",
)
energy_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=5)
m = Map.create(
binsz=0.1,
width=10.0,
map_type=map_type,
skydir=SkyCoord(0.0, 30.0, unit="deg"),
axes=[energy_axis, time_axis],
)
hdulist = m.to_hdulist(hdu="COUNTS")
m2 = Map.from_hdulist(hdulist)
time_axis_new = m2.geom.axes["time"]
assert time_axis_new == time_axis
assert time_axis.reference_time.scale == "utc"
assert time_axis_new.reference_time.scale == "tt"
unit_args = [("wcs", "s"), ("wcs", ""), ("wcs", Unit("sr")), ("hpx", "m^2")]
@pytest.mark.parametrize(("map_type", "unit"), unit_args)
def test_map_quantity(map_type, unit):
m = Map.create(binsz=0.1, width=10.0, map_type=map_type, unit=unit)
# This is to test if default constructor with no unit performs as expected
if unit is None:
unit = ""
assert m.quantity.unit == Unit(unit)
m.quantity = Quantity(np.ones_like(m.data), "m2")
assert m.unit == "m2"
m1 = m.__class__(geom=m.geom, data=m.quantity)
assert m1.unit == "m2"
assert_allclose(m1.data, m.data)
@pytest.mark.parametrize(("map_type", "unit"), unit_args)
def test_map_unit_read_write(map_type, unit):
m = Map.create(binsz=0.1, width=10.0, map_type=map_type, unit=unit)
hdu_list = m.to_hdulist(hdu="COUNTS")
header = hdu_list["COUNTS"].header
assert Unit(header["BUNIT"]) == Unit(unit)
m2 = Map.from_hdulist(hdu_list)
assert m2.unit == unit
@pytest.mark.parametrize(("map_type", "unit"), unit_args)
def test_map_repr(map_type, unit):
m = Map.create(binsz=0.1, width=10.0, map_type=map_type, unit=unit)
assert m.__class__.__name__ in repr(m)
def test_map_properties():
# Test default values and types of all map properties,
# as well as the behaviour for the property get and set.
m = Map.create(npix=(2, 1))
assert isinstance(m.unit, u.CompositeUnit)
assert m._unit == u.one
m._unit = u.Unit("cm-2 s-1")
assert m.unit.to_string() == "1 / (cm2 s)"
assert isinstance(m.meta, dict)
m.meta = {"spam": 42}
assert isinstance(m.meta, dict)
# The rest of the tests are for the `data` property
assert isinstance(m.data, np.ndarray)
assert m.data.dtype == np.float32
assert m.data.shape == (1, 2)
assert_equal(m.data, 0)
# Assigning an array of matching shape stores it away
data = np.ones((1, 2))
m.data = data
assert m.data is data
# In-place modification += should work as expected
m.data = np.array([[42, 43]])
data = m.data
m.data += 1
assert m.data is data
assert_equal(m.data, [[43, 44]])
# Assigning to a slice of the map data should work as expected
data = m.data
m.data[:, :1] = 99
assert m.data is data
    assert_equal(m.data, [[99, 44]])
from sklearn.kernel_ridge import KernelRidge
import numpy as np
import matplotlib.pyplot as plt
import pickle
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 160
mpl.rc('text', usetex=True)
plt.rcParams.update({'font.size': 7})
A = []
f = []
c_d = []
c_l = []
c_d_max = []
c_d_min = []
c_l_max = []
c_l_min = []
with open(r"data_py.config", "rb") as file:
data = pickle.load(file)
tml = (np.abs(list(data.values())[0][0] - 4.0)).argmin()
for key in data.keys():
first, sec = key.split("-")
A.append(float(first[1:]))
f.append(float(sec[1:]))
c_d.append(np.mean(data[key][1][tml:]))
c_l.append(np.mean(data[key][2][tml:]))
c_d_max.append(np.max(data[key][1][tml:]))
c_d_min.append(np.min(data[key][1][tml:]))
c_l_max.append(np.max(data[key][2][tml:]))
c_l_min.append(np.min(data[key][2][tml:]))
A = np.asarray(A)
f = np.asarray(f)
c_d = np.asarray(c_d)
c_l = np.asarray(c_l)
c_d_max = np.asarray(c_d_max)
c_d_min = np.asarray(c_d_min)
c_l_max = np.asarray(c_l_max)
c_l_min = np.asarray(c_l_min)
# uncontrolled state
cd_uc = [3.14741, 3.17212, 3.19653] # [cd_min, cd_mean, cd_max]
cl_uc = [-0.904919, -0.0126599, 0.878955] # [cl_min, cl_mean, cl_max]
uc_val = ((cd_uc[1]**2+cl_uc[1]**2)**.5) + (((cd_uc[2]-cd_uc[0])**2+(cl_uc[2]-cl_uc[0])**2)**.5)
def plot_data(arg1, arg2, arg3, lim, nointep):
fig, (ax1) = plt.subplots(1, 1, figsize=(7, 7))
levels = np.linspace(lim[0], lim[1], 200)
levels_line = np.linspace(lim[0], lim[1], 30)
if nointep:
cntr2 = ax1.contourf(arg1, arg2, arg3, levels=levels, cmap="jet")
ax1.contour(arg1, arg2, arg3, levels=levels_line, linewidths=1)
else:
cntr2 = ax1.tricontourf(arg1, arg2, arg3, levels=levels, cmap="jet")
ax1.tricontour(arg1, arg2, arg3, levels=levels_line, linewidths=1)
ax1.scatter(arg1, arg2, s=1, color='k')
ax1.set_ylabel(r"$S_f \times 10$", fontsize=12)
ax1.set_xlabel(r"$\Omega$", fontsize=12)
ax1.tick_params(labelsize=12)
ax1.set_ylim(4.8,14)
cbar=fig.colorbar(cntr2, ax=ax1)
cbar.ax.tick_params(labelsize=12)
cbar.ax.set_ylabel(r"$\Phi$", fontsize=14)
plt.subplots_adjust(hspace=0.5)
w = [1, 1]
fn_1 = (w[0]*((c_d**2+c_l**2)**.5)+w[1]*(((c_d_max-c_d_min)**2+(c_l_max-c_l_min)**2)**.5)) / uc_val
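# Hedged reading of the objective above:
#   Phi = [ w0 * sqrt(<c_d>^2 + <c_l>^2) + w1 * sqrt((c_d_max - c_d_min)^2 + (c_l_max - c_l_min)^2) ] / Phi_uc
# i.e. the mean force magnitude plus its peak-to-peak fluctuation, normalised to the uncontrolled baseline uc_val.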
X = np.asarray((A, f)).transpose()
# krr
krr = KernelRidge(alpha=1e-7, kernel='rbf', gamma=1.5).fit(X, fn_1)
loss = np.sum((krr.predict(X) - fn_1) ** 2)
# uniform data setting for prediction
x = np.arange(0.1, 3.5, 0.1)
y = np.arange(3, 15.1, 0.3)
xx, yy = np.meshgrid(x, y)
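# The original script is truncated here; a hedged sketch of the likely continuation:
# evaluate the fitted kernel-ridge model on the uniform grid and draw it with plot_data.
# The colour limits passed below are illustrative assumptions, not values from the source.
phi_pred = krr.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plot_data(xx, yy, phi_pred, lim=[phi_pred.min(), phi_pred.max()], nointep=True)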
#####################################################################################################################
# more_nodes: This module implements several new nodes and helper functions. It is part of the Cuicuilco framework. #
# #
# These nodes include: BasicAdaptiveCutoffNode, SFA_GaussianClassifier, RandomizedMaskNode, GeneralExpansionNode, #
# PointwiseFunctionNode, RandomPermutationNode #
# #
# By <NAME>. <EMAIL> #
# Ruhr-University-Bochum, Institute for Neural Computation, Group of Prof. Dr. Wiskott #
#####################################################################################################################
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy
import scipy
import scipy.optimize
import scipy.stats
from scipy.stats import ortho_group
import copy
import sys
import inspect
import mdp
from mdp.utils import (mult, pinv, symeig, CovarianceMatrix, SymeigException)
from . import sfa_libs
from .sfa_libs import select_rows_from_matrix, distance_squared_Euclidean
# from . import inversion
from .histogram_equalization import *
def add_corrections(initial_corrections, added_corrections):
if initial_corrections is None:
return added_corrections
elif added_corrections is None:
return initial_corrections
else:
return initial_corrections * added_corrections
def combine_correction_factors(flow_or_node, average_over_layers = True, average_inside_layers=False):
"""This function takes into account all corrections performed by the BasicAdaptiveCutoffNodes of
a flow (possibly a hierarchical network) and combines them into a single vector. The function also
works on standard nodes.
average_over_layers: if True, the combined corrections are the average of the corrections of each
node in the flow, otherwise they are multiplied (omitting nodes without corrections)
average_inside_layers: if True, the combined corrections of Layers are computed as the average of
the corrections of each node in the layer, otherwise they are multiplied
The combined correction factor of each sample estimates the probability that it is not an anomaly. That is,
correction=1.0 implies "not anomaly", and smaller values increase the rareness of the sample.
"""
final_corrections = None
final_gauss_corrections = None
if isinstance(flow_or_node, mdp.Flow):
flow = flow_or_node
if average_over_layers:
corrections = []
gauss_corrections = []
for node in flow:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(node, average_over_layers)
if another_node_corrections is not None:
corrections.append(another_node_corrections)
if another_node_gauss_corrections is not None:
gauss_corrections.append(another_node_gauss_corrections)
if len(corrections) > 0:
corrections = numpy.stack(corrections, axis=1)
final_corrections = corrections.mean(axis=1)
gauss_corrections = numpy.stack(gauss_corrections, axis=1)
final_gauss_corrections = gauss_corrections.mean(axis=1)
else:
final_corrections = None
final_gauss_corrections = None
else:
for node in flow:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(node)
final_corrections = add_corrections(final_corrections, another_node_corrections)
final_gauss_corrections = add_corrections(final_gauss_corrections, another_node_gauss_corrections)
elif isinstance(flow_or_node, mdp.Node):
node = flow_or_node
if isinstance(node, mdp.hinet.CloneLayer):
err = "CloneLayers not yet supported when computing/storing correction factors"
print(err)
final_corrections = None
final_gauss_corrections = None
# raise Exception(err)
elif isinstance(node, mdp.hinet.Layer):
if average_inside_layers:
corrections = []
gauss_corrections = []
for another_node in node.nodes:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(another_node)
corrections.append(another_node_corrections)
gauss_corrections.append(another_node_gauss_corrections)
if len(corrections) > 0:
corrections = numpy.stack(corrections, axis=1)
final_corrections = corrections.mean(axis=1)
gauss_corrections = numpy.stack(gauss_corrections, axis=1)
final_gauss_corrections = gauss_corrections.mean(axis=1)
else:
final_corrections = None
final_gauss_corrections = None
else:
for another_node in node.nodes:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(another_node)
final_corrections = add_corrections(final_corrections, another_node_corrections)
final_gauss_corrections = add_corrections(final_gauss_corrections, another_node_gauss_corrections)
elif isinstance(node, BasicAdaptiveCutoffNode):
final_corrections = add_corrections(final_corrections, node.corrections)
final_gauss_corrections = add_corrections(final_gauss_corrections, node.gauss_corrections)
return final_corrections, final_gauss_corrections
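# Hedged usage sketch: the helper below is illustrative only and not part of the original
# module API; it shows how the combined correction factors of a trained flow can be turned
# into a simple anomaly ranking.
def _example_rank_samples_by_rareness(flow):
    """Return sample indices ordered from most atypical to most typical (illustrative sketch)."""
    corrections, gauss_corrections = combine_correction_factors(flow, average_over_layers=True)
    if corrections is None:  # the flow contains no BasicAdaptiveCutoffNode
        return None
    return numpy.argsort(corrections)  # smallest correction factor = rarest sample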
class BasicAdaptiveCutoffNode(mdp.PreserveDimNode):
"""Node that allows to "cut off" values at bounds derived from the training data.
This node is similar to CutoffNode, but the bounds are computed based on the training data. And it is
also similar to AdaptiveCutoffNode, but no histograms are stored and the limits are hard.
    This node has no effect on training data, but it corrects atypical variances in test data
and may improve generalization.
"""
def __init__(self, input_dim=None, output_dim=None, num_rotations=1, measure_corrections=False,
only_measure=False, verbose=True, dtype=None):
"""Initialize node. """
super(BasicAdaptiveCutoffNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype)
self.lower_bounds = None
self.upper_bounds = None
self.rotation_matrices = None
self.num_rotations = num_rotations
self.measure_corrections = measure_corrections
self.corrections = None
self.gauss_corrections = None
self.only_measure = only_measure
self.verbose = verbose
self._avg_x = None
self._avg_x_squared = None
self._num_samples = 0
self._std_x = None
if self.verbose:
print("num_rotations:", num_rotations, "measure_corrections:", measure_corrections,
"only_measure:", only_measure, "verbose:", verbose)
@staticmethod
def is_trainable():
return True
@staticmethod
def is_invertible():
return True
@staticmethod
def _get_supported_dtypes():
return (mdp.utils.get_dtypes('Float'))
def _train(self, x):
# initialize rotations and arrays that store the bounds
dim = x.shape[1]
if self.rotation_matrices is None:
self.rotation_matrices = [None] * self.num_rotations
self.lower_bounds = [None] * self.num_rotations
self.upper_bounds = [None] * self.num_rotations
if self.num_rotations >= 1:
self.rotation_matrices[0] = numpy.eye(dim)
for i in range(1, self.num_rotations):
self.rotation_matrices[i] = ortho_group.rvs(dim=dim)
# The training method updates the lower and upper bounds
for i in range(self.num_rotations):
rotated_data = numpy.dot(x, self.rotation_matrices[i])
if self.lower_bounds[i] is None:
self.lower_bounds[i] = rotated_data.min(axis=0)
else:
self.lower_bounds[i] = numpy.minimum(self.lower_bounds[i], rotated_data.min(axis=0))
if self.upper_bounds[i] is None:
self.upper_bounds[i] = rotated_data.max(axis=0)
else:
self.upper_bounds[i] = numpy.maximum(self.upper_bounds[i], rotated_data.max(axis=0))
if self._avg_x is None:
self._avg_x = x.sum(axis=0)
self._avg_x_squared = (x**2).sum(axis=0)
else:
self._avg_x += x.sum(axis=0)
self._avg_x_squared += (x ** 2).sum(axis=0)
self._num_samples += x.shape[0]
def _stop_training(self):
self._avg_x /= self._num_samples
self._avg_x_squared /= self._num_samples
self._std_x = (self._avg_x_squared - self._avg_x **2) ** 0.5
if self.verbose:
print("self._avg_x", self._avg_x)
print("self._avg_x_squared", self._avg_x_squared)
print("self._std_x", self._std_x)
def _execute(self, x):
"""Return the clipped data."""
num_samples = x.shape[0]
self.corrections = numpy.ones(num_samples)
self.gauss_corrections = numpy.ones(num_samples)
if self.only_measure:
x_copy = x.copy()
for i in range(self.num_rotations):
data_rotated = numpy.dot(x, self.rotation_matrices[i])
data_rotated_clipped = numpy.clip(data_rotated, self.lower_bounds[i], self.upper_bounds[i])
if self.measure_corrections:
interval = numpy.abs(self.upper_bounds[i] - self.lower_bounds[i])
delta = numpy.abs(data_rotated_clipped - data_rotated)
# factors = interval ** 2 / (delta + interval) ** 2
norm_delta = delta / interval
factors = 1.0 - (norm_delta / (norm_delta + 0.15)) ** 2
self.corrections *= factors.prod(axis=1) # consider using here and below the mean instead of the product
if self.verbose:
print("Factors of BasicAdaptiveCutoffNode:", factors)
# Computation of Gaussian probabilities
factors = scipy.stats.norm.pdf(x, loc=self._avg_x, scale=4*self._std_x)
if self.verbose:
print("Factors of BasicAdaptiveCutoffNode (gauss):", factors)
print("x.mean(axis=0):", x.mean(axis=0))
print("x.std(axis=0):", x.std(axis=0))
self.gauss_corrections *= factors.prod(axis=1)
x = numpy.dot(data_rotated_clipped, self.rotation_matrices[i].T) # Project back to original coordinates
if self.verbose:
print("Corrections of BasicAdaptiveCutoffNode:", self.corrections)
print("20 worst final corrections at indices:", numpy.argsort(self.corrections)[0:20])
print("20 worst final corrections:", self.corrections[numpy.argsort(self.corrections)[0:20]])
print("Gaussian corrections of BasicAdaptiveCutoffNode:", self.gauss_corrections)
print("20 worst final Gaussian corrections at indices:", numpy.argsort(self.gauss_corrections)[0:20])
print("20 worst final Gaussian corrections:",
self.corrections[numpy.argsort(self.gauss_corrections)[0:20]])
if self.only_measure:
return x_copy
else:
return x
def _inverse(self, x):
"""An approximate inverse applies the same clipping. """
return self.execute(x)
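# Hedged usage sketch (illustrative only, not part of the original API): the node is trained
# like any mdp node; after execute(), the per-sample correction factors are available in
# node.corrections and node.gauss_corrections.
def _example_adaptive_cutoff(x_train, x_test, num_rotations=3):
    """Clip test data to the learned training bounds and return the correction factors."""
    node = BasicAdaptiveCutoffNode(num_rotations=num_rotations, measure_corrections=True,
                                   verbose=False)
    node.train(x_train)
    node.stop_training()
    x_test_clipped = node.execute(x_test)
    return x_test_clipped, node.corrections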
class SFA_GaussianClassifier(mdp.ClassifierNode):
""" This node is a simple extension of the GaussianClassifier node, where SFA is applied before the classifier.
The labels are important, since they are used to order the data samples before SFA.
"""
def __init__(self, reduced_dim=None, verbose=False, **argv):
super(SFA_GaussianClassifier, self).__init__(**argv)
self.gc_node = mdp.nodes.GaussianClassifier()
self.reduced_dim = reduced_dim
if self.reduced_dim > 0:
self.sfa_node = mdp.nodes.SFANode(output_dim=self.reduced_dim)
else:
self.sfa_node = mdp.nodes.IdentityNode()
self.verbose = verbose
def _train(self, x, labels=None):
if self.reduced_dim > 0:
ordering = numpy.argsort(labels)
x_ordered = x[ordering, :]
self.sfa_node.train(x_ordered)
self.sfa_node.stop_training()
if self.verbose:
print("SFA_GaussianClassifier: sfa_node.d = ", self.sfa_node.d)
else: # sfa_node is the identity node
pass
y = self.sfa_node.execute(x)
self.gc_node.train(y, labels=labels)
self.gc_node.stop_training()
def _label(self, x):
y = self.sfa_node.execute(x)
return self.gc_node.label(y)
def regression(self, x, avg_labels, estimate_std=False):
y = self.sfa_node.execute(x)
return self.gc_node.regression(y, avg_labels, estimate_std)
def regressionMAE(self, x, avg_labels):
y = self.sfa_node.execute(x)
return self.gc_node.regressionMAE(y, avg_labels)
def softCR(self, x, true_classes):
y = self.sfa_node.execute(x)
return self.gc_node.softCR(y, true_classes)
def class_probabilities(self, x):
y = self.sfa_node.execute(x)
return self.gc_node.class_probabilities(y)
@staticmethod
def is_trainable():
return True
# using the provided average and standard deviation
def gauss_noise(x, avg, std):
return numpy.random.normal(avg, std, x.shape)
# Zero centered
def additive_gauss_noise(x, std):
return x + numpy.random.normal(0, std, x.shape)
class RandomizedMaskNode(mdp.Node):
"""Selectively mask some components of a random variable by
hiding them with arbitrary noise or by removing them from the feature vector.
This code has been inspired by NoiseNode
"""
def __init__(self, remove_mask=None, noise_amount_mask=None, noise_func=gauss_noise, noise_args=(0, 1),
noise_mix_func=None, input_dim=None, dtype=None):
self.remove_mask = remove_mask
self.noise_amount_mask = noise_amount_mask
self.noise_func = noise_func
self.noise_args = noise_args
self.noise_mix_func = noise_mix_func
self.seen_samples = 0
self.x_avg = None
self.x_std = None
self.type = dtype
if remove_mask is not None and input_dim is None:
input_dim = remove_mask.size
elif remove_mask is None and input_dim is not None:
remove_mask = numpy.zeros(input_dim) > 0.5
        elif remove_mask is not None and input_dim is not None:
if remove_mask.size != input_dim:
err = "size of remove_mask and input_dim not compatible"
raise Exception(err)
else:
err = "At least one of input_dim or remove_mask should be specified"
raise Exception(err)
if noise_amount_mask is None:
print ("Signal will be only the computed noise")
self.noise_amount_mask = numpy.ones(input_dim)
else:
self.noise_amount_mask = noise_amount_mask
output_dim = remove_mask.size - remove_mask.sum()
print ("Output_dim should be:", output_dim)
super(RandomizedMaskNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype)
@staticmethod
def is_trainable():
return True
def _train(self, x):
if self.x_avg is None:
self.x_avg = numpy.zeros(self.input_dim, dtype=self.type)
self.x_std = numpy.zeros(self.input_dim, dtype=self.type)
new_samples = x.shape[0]
self.x_avg = (self.x_avg * self.seen_samples + x.sum(axis=0)) / (self.seen_samples + new_samples)
self.x_std = (self.x_std * self.seen_samples + x.std(axis=0) * new_samples) / (self.seen_samples + new_samples)
self.seen_samples = self.seen_samples + new_samples
@staticmethod
def is_invertible():
return False
def _execute(self, x):
print ("computed X_avg=", self.x_avg)
print ("computed X_std=", self.x_std)
noise_mat = self.noise_func(x, self.x_avg, self.x_std)
# noise_mat = self._refcast(self.noise_func(*self.noise_args,
# **{'size': x.shape}))
print ("Noise_amount_mask:", self.noise_amount_mask)
print ("Noise_mat:", noise_mat)
noisy_signal = (1.0 - self.noise_amount_mask) * x + self.noise_amount_mask * noise_mat
preserve_mask = (self.remove_mask == False)
return noisy_signal[:, preserve_mask]
class GeneralExpansionNode(mdp.Node):
def __init__(self, funcs, input_dim=None, dtype=None, \
use_pseudoinverse=True, use_hint=False, output_dim=None, starting_point=None, use_special_features=False, max_steady_factor=1.5,
delta_factor=0.6, min_delta=0.00001, verbose=False):
self.funcs = funcs
self.exp_output_dim = output_dim
self.expanded_dims = None
self.starting_point = starting_point
self.use_special_features = use_special_features
if self.funcs == "RandomSigmoids" and self.exp_output_dim <= 0:
er = "output_dim in GeneralExpansion node with RandomSigmoids should be at least 1, but is" + \
str(self.exp_output_dim)
raise Exception(er)
self.use_pseudoinverse = use_pseudoinverse
self.use_hint = use_hint
self.max_steady_factor = max_steady_factor
self.delta_factor = delta_factor
self.min_delta = min_delta
self.verbose = verbose
if self.verbose:
print("GeneralExpansionNode with expansion functions:", funcs)
self.rs_coefficients = None
self.rs_offsets = None
self.rs_data_training_std = None
self.rs_data_training_mean = None
self.normalization_constant = None
super(GeneralExpansionNode, self).__init__(input_dim, dtype)
def expanded_dim(self, n):
exp_dim = 0
x = numpy.zeros((1, n))
for func in self.funcs:
outx = func(x)
# print "outx= ", outx
exp_dim += outx.shape[1]
return exp_dim
def output_sizes(self, n):
if self.funcs == "RandomSigmoids":
sizes = [self.exp_output_dim]
else:
sizes = numpy.zeros(len(self.funcs), dtype=int)
x = numpy.zeros((1, n))
for i, func in enumerate(self.funcs):
outx = func(x)
sizes[i] = outx.shape[1]
print ("S", end="")
return sizes
def is_trainable(self):
if self.funcs == "RandomSigmoids":
return True
else:
return False
def _train(self, x, verbose=None):
if verbose is None:
verbose = self.verbose
if self.input_dim is None:
self.set_input_dim(x.shape[1])
input_dim = self.input_dim
# Generate functions used for regression
self.rs_data_training_mean = x.mean(axis=0)
self.rs_data_training_std = x.std(axis=0)
if verbose:
print ("GeneralExpansionNode: output_dim=", self.output_dim, end="")
starting_point = self.starting_point
c1, l1 = generate_random_sigmoid_weights(self.input_dim, self.output_dim)
if starting_point == "Identity":
if verbose:
print ("starting_point: adding (encoded) identity coefficients to expansion")
c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim)
l1[0:input_dim] = numpy.ones(input_dim) * 1.0 # Code identity
elif starting_point == "Sigmoids":
if verbose:
print ("starting_point: adding sigmoid of coefficients to expansion")
c1[0:input_dim, 0:input_dim] = 4.0 * numpy.identity(input_dim)
l1[0:input_dim] = numpy.ones(input_dim) * 0.0
elif starting_point == "08Exp":
if verbose:
print ("starting_point: adding (encoded) 08Exp coefficients to expansion")
c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim)
c1[0:input_dim, input_dim:2 * input_dim] = numpy.identity(input_dim)
l1[0:input_dim] = numpy.ones(input_dim) * 1.0 # Code identity
l1[input_dim:2 * input_dim] = numpy.ones(input_dim) * 0.8 # Code abs(x)**0.8
elif starting_point == "Pseudo-Identity":
if verbose:
print ("starting_point: adding pseudo-identity coefficients to expansion")
c1[0:input_dim, 0:input_dim] = 0.1 * numpy.identity(input_dim)
l1[0:input_dim] = numpy.zeros(input_dim) # nothig is encoded
elif starting_point is None:
if verbose:
print ("starting_point: no starting point")
else:
er = "Unknown starting_point", starting_point
raise Exception(er)
self.rs_coefficients = c1
self.rs_offsets = l1
# 4.0 was working fine, 2.0 was apparently better. This also depends on how many features are computed!!!
self.normalization_constant = (2.0 / self.input_dim) ** 0.5
def is_invertible(self):
return self.use_pseudoinverse
def inverse(self, x, use_hint=None, max_steady_factor=None, delta_factor=None, min_delta=None):
if self.use_pseudoinverse is False:
ex = "Inversion not activated"
raise Exception(ex)
if use_hint is None:
use_hint = self.use_hint
if max_steady_factor is None:
max_steady_factor = self.max_steady_factor
if delta_factor is None:
delta_factor = self.delta_factor
if min_delta is None:
min_delta = self.min_delta
# print "Noisy pre = ", x, "****************************************************"
app_x_2, app_ex_x_2 = invert_exp_funcs2(x, self.input_dim, self.funcs, use_hint=use_hint,
max_steady_factor=max_steady_factor, delta_factor=delta_factor,
min_delta=min_delta)
# print "Noisy post = ", x, "****************************************************"
return app_x_2
def _set_input_dim(self, n):
self._input_dim = n
if self.funcs == "RandomSigmoids":
self._output_dim = self.exp_output_dim
else:
self._output_dim = self.expanded_dim(n)
self.expanded_dims = self.output_sizes(n)
def _execute(self, x):
if self.input_dim is None:
self.set_input_dim(x.shape[1])
if "expanded_dims" not in self.__dict__:
self.expanded_dims = self.output_sizes(self.input_dim)
if self.funcs != "RandomSigmoids":
num_samples = x.shape[0]
# output_dim = expanded_dim(self.input_dim)
# self.expanded_dims = self.output_sizes(self.input_dim)
out = numpy.zeros((num_samples, self.output_dim))
current_pos = 0
for i, func in enumerate(self.funcs):
out[:, current_pos:current_pos + self.expanded_dims[i]] = func(x)
current_pos += self.expanded_dims[i]
else:
data_norm = self.normalization_constant * (x - self.rs_data_training_mean) / self.rs_data_training_std
# A variation of He random weight initialization
out = extract_sigmoid_features(data_norm, self.rs_coefficients, self.rs_offsets, scale=1.0, offset=0.0,
use_special_features=self.use_special_features)
return out
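# Hedged usage sketch (illustrative parameters, not part of the original API): expanding data
# with the "RandomSigmoids" mode; expanded_dim should be at least the input dimensionality when
# starting_point="Identity" is used, since the identity features are encoded first.
def _example_random_sigmoid_expansion(x, expanded_dim):
    """Train a RandomSigmoids expansion on x and return the expanded features (illustrative sketch)."""
    node = GeneralExpansionNode(funcs="RandomSigmoids", output_dim=expanded_dim,
                                starting_point="Identity", verbose=False)
    node.train(x)
    node.stop_training()
    return node.execute(x)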
class PointwiseFunctionNode(mdp.Node):
""""This node applies a function to the whole input.
It also supports a given 'inverse' function.
"""
def __init__(self, func, inv_func, input_dim=None, dtype=None):
self.func = func
self.inv_func = inv_func
super(PointwiseFunctionNode, self).__init__(input_dim, dtype)
@staticmethod
def is_trainable():
return False
def is_invertible(self):
if self.inv_func is None:
return True
else:
return False
def inverse(self, x):
if self.inv_func:
return self.inv_func(x)
else:
return x
def _set_input_dim(self, n):
self._input_dim = n
self._output_dim = n
def _execute(self, x):
if self.input_dim is None:
self.set_input_dim(x.shape[1])
if self.func:
return self.func(x)
else:
return x
class PairwiseAbsoluteExpansionNode(mdp.Node):
def expanded_dim(self, n):
return n + n * (n + 1) // 2
def is_trainable(self):
return False
def is_invertible(self):
return False
def _set_input_dim(self, n):
self._input_dim = n
self._output_dim = self.expanded_dim(n)
def _execute(self, x):
out = numpy.concatenate((x, pairwise_expansion(x, abs_sum)), axis=1)
return out
# TODO:ADD inverse type sum, suitable for when output_scaling is True
class PInvSwitchboard(mdp.hinet.Switchboard):
"""This node is a variation of the RectangularSwitchboard that facilitates (approximate) inverse operations. """
def __init__(self, input_dim, connections, slow_inv=False, type_inverse="average", output_scaling=True,
additive_noise_std=0.00004, verbose=False):
super(PInvSwitchboard, self).__init__(input_dim=input_dim, connections=connections)
self.pinv = None
self.mat2 = None
self.slow_inv = slow_inv
self.type_inverse = type_inverse
self.output_dim = len(connections)
self.output_scales = None
self.additive_noise_std = additive_noise_std
self.verbose = verbose
if verbose:
print ("self.inverse_connections=", self.inverse_connections, "self.slow_inv=", self.slow_inv)
# WARNING! IF/ELIF doesn't make any sense! what are the semantics of inverse_connections
if self.inverse_connections is None:
if verbose:
print ("type(connections)", type(connections))
all_outputs = numpy.arange(self.output_dim)
self.inverse_indices = [[]] * self.input_dim
for i in range(self.input_dim):
self.inverse_indices[i] = all_outputs[connections == i]
# print "inverse_indices[%d]="%i, self.inverse_indices[i]
# print "inverse_indices =", self.inverse_indices
elif self.inverse_connections is None and not self.slow_inv:
index_array = numpy.argsort(connections)
value_array = connections[index_array]
value_range = numpy.zeros((input_dim, 2))
            self.inverse_indices = list(range(input_dim))
for i in range(input_dim):
value_range[i] = numpy.searchsorted(value_array, [i - 0.5, i + 0.5])
if value_range[i][1] == value_range[i][0]:
self.inverse_indices[i] = []
else:
self.inverse_indices[i] = index_array[value_range[i][0]: value_range[i][1]]
if verbose:
print ("inverse_indices computed in PINVSB")
elif self.inverse_connections is None and self.slow_inv:
if verbose:
print ("warning using slow inversion in PInvSwitchboard!!!")
# find input variables not used by connections:
used_inputs = numpy.unique(connections)
used_inputs_set = set(used_inputs)
all_inputs_set = set(range(input_dim))
unused_inputs_set = all_inputs_set - all_inputs_set.intersection(used_inputs_set)
unused_inputs = list(unused_inputs_set)
self.num_unused_inputs = len(unused_inputs)
# extend connections array
# ext_connections = numpy.concatenate((connections, unused_inputs))
# create connections matrix
mat_height = len(connections) + len(unused_inputs)
mat_width = input_dim
mat = numpy.zeros((mat_height, mat_width))
# fill connections matrix
for i in range(len(connections)):
mat[i, connections[i]] = 1
#
for i in range(len(unused_inputs)):
mat[i + len(connections), unused_inputs[i]] = 1
#
if verbose:
print ("extended matrix is:", mat)
# compute pseudoinverse
mat2 = numpy.matrix(mat)
self.mat2 = mat2
self.pinv = (mat2.T * mat2).I * mat2.T
else:
if verbose:
print ("Inverse connections already given, in PInvSwitchboard")
if output_scaling:
if self.inverse_connections is None and not self.slow_inv:
if verbose:
print ("**A", end="")
if self.type_inverse != "average":
err = "self.type_inverse not supported " + self.type_inverse
raise Exception(err)
self.output_scales = numpy.zeros(self.output_dim)
tt = 0
for i in range(self.input_dim):
output_indices = self.inverse_indices[i]
multiplicity = len(output_indices)
for j in output_indices:
self.output_scales[j] = (1.0 / multiplicity) ** 0.5
tt += 1
if verbose:
print ("connections in switchboard considered: ", tt, "output dimension=", self.output_dim)
elif self.inverse_connections is None and self.slow_inv:
if verbose:
print ("**B", end="")
err = "use of self.slow_inv = True is obsolete"
raise Exception(err)
else: # inverse connections are unique, mapping bijective
if verbose:
print ("**C", end="")
self.output_scales = numpy.ones(self.output_dim)
else:
if verbose:
print ("**D", end="")
self.output_scales = numpy.ones(self.output_dim)
if verbose:
print ("PINVSB output_scales =", self.output_scales)
print ("SUM output_scales/len(output_scales)=", self.output_scales.sum() / len(self.output_scales))
print ("output_scales.min()", self.output_scales.min())
# PInvSwitchboard is always invertible
def is_invertible(self):
return True
def _execute(self, x):
force_float32_type = False # Experimental variation, ignore
if force_float32_type:
x = x.astype("float32")
use_fortran_ordering = False # Experimental variation, ignore
if use_fortran_ordering:
x = numpy.array(x, order="FORTRAN")
y = super(PInvSwitchboard, self)._execute(x)
# print "y computed"
# print "y.shape", y.shape
# print "output_scales ", self.output_scales
y *= self.output_scales
if self.additive_noise_std > 0.0:
n, dim = y.shape
steps = int(n / 9000 + 1)
if self.verbose:
print ("PInvSwitchboard is adding noise to the output features with std", self.additive_noise_std,
end="")
print (" computation in %d steps" % steps)
step_size = int(n / steps)
for s in range(steps):
y[step_size * s:step_size * (s + 1)] += numpy.random.uniform(low=-(3 ** 0.5) * self.additive_noise_std,
high=(3 ** 0.5) * self.additive_noise_std,
size=(step_size, dim))
if self.verbose:
print ("noise block %d added" % s)
if step_size * steps < n:
rest = n - step_size * steps
y[step_size * steps:step_size * steps + rest] += numpy.random.uniform(
low=-(3 ** 0.5) * self.additive_noise_std, high=(3 ** 0.5) * self.additive_noise_std,
size=(rest, dim))
if self.verbose:
print ("remaining noise block added")
return y
# If true inverse is present, just use it, otherwise compute it by means of the pseudoinverse
def _inverse(self, x):
x = x * (1.0 / self.output_scales)
if self.inverse_connections is None and not self.slow_inv:
height_x = x.shape[0]
mat2 = numpy.zeros((height_x, self.input_dim))
for row in range(height_x):
x_row = x[row]
for i in range(self.input_dim):
elements = x_row[self.inverse_indices[i]]
if self.type_inverse == "average":
if elements.size > 0:
mat2[row][i] = elements.mean()
else:
err = "self.type_inverse not supported: " + self.type_inverse
raise Exception(err)
output = mat2
elif self.inverse_connections is None and self.slow_inv:
height_x = x.shape[0]
full_x = numpy.concatenate((x, 255 * numpy.ones((height_x, self.num_unused_inputs))), axis=1)
data2 = numpy.matrix(full_x)
if self.verbose:
print ("x=", x)
print ("data2=", data2)
print ("PINV=", self.pinv)
output = (self.pinv * data2.T).T
else:
if self.verbose:
print ("using inverse_connections in PInvSwitchboard")
# return apply_permutation_to_signal(x, self.inverse_connections, self.input_dim)
output = select_rows_from_matrix(x, self.inverse_connections)
return output
class RandomPermutationNode(mdp.Node):
"""This node randomly permutes the components of the input signal in a consistent way.
    The concrete permutation is fixed during the training procedure.
"""
def __init__(self, input_dim=None, output_dim=None, dtype=None, verbose=False):
super(RandomPermutationNode, self).__init__(input_dim, output_dim, dtype)
self.permutation = None
self.inv_permutation = None
self.dummy = 5 # without it the hash fails!!!!!
def is_trainable(self):
return True
def is_invertible(self):
return True
def inverse(self, x):
return select_rows_from_matrix(x, self.inv_permutation)
# def localized_inverse(self, xf, yf, y):
# return y[:, self.inv_permutation]
def _set_input_dim(self, n, verbose=False):
if verbose:
print ("RandomPermutationNode: Setting input_dim to ", n)
self._input_dim = n
self._output_dim = n
def _train(self, x, verbose=True):
n = x.shape[1]
if self.input_dim is None:
self.set_input_dim(n)
if self.input_dim is None:
print ("*******Really Setting input_dim to ", n)
self.input_dim = n
if self.output_dim is None:
print ("*******Really Setting output_dim to ", n)
self.output_dim = n
if self.permutation is None:
if verbose:
print ("Creating new random permutation")
print ("Permutation=", self.permutation)
print ("x=", x, "with shape", x.shape)
print ("Input dim is: ", self.input_dim())
self.permutation = numpy.random.permutation(range(self.input_dim))
self.inv_permutation = numpy.zeros(self.input_dim, dtype="int")
self.inv_permutation[self.permutation] = numpy.arange(self.input_dim)
if verbose:
print ("Permutation=", self.permutation)
print ("Output dim is: ", self.output_dim)
def _execute(self, x, verbose=False):
# print "RandomPermutationNode: About to excecute, with input x= ", x
y = select_rows_from_matrix(x, self.permutation)
if verbose:
print ("Output shape is = ", y.shape, end="")
return y
def sfa_pretty_coefficients(sfa_node, transf_training, start_negative=True):
count = 0
for i in range(sfa_node.output_dim):
sum_firsts = transf_training[0, i] + transf_training[1, i] + transf_training[2, i] + transf_training[3, i] + \
transf_training[4, i] + transf_training[5, i] + transf_training[6, i] + transf_training[7, i] + \
transf_training[8, i] + transf_training[9, i] + transf_training[10, i] + transf_training[11, i]
if (sum_firsts > 0 and start_negative) or (sum_firsts < 0 and not start_negative):
sfa_node.sf[:, i] = (sfa_node.sf[:, i] * -1)
transf_training[:, i] = (transf_training[:, i] * -1)
count += 1
print ("Polarization of %d SFA Signals Corrected!!!\n" % count, end="")
sfa_node._bias = mdp.utils.mult(sfa_node.avg, sfa_node.sf)
print ("Bias updated")
return transf_training
def describe_flow(flow):
length = len(flow)
total_size = 0
print ("Flow has %d nodes:" % length)
for i in range(length):
node = flow[i]
node_size = compute_node_size(node)
total_size += node_size
print ("Node[%d] is %s, has input_dim=%d, output_dim=%d and size=%d" % (i, str(node), node.input_dim,
node.output_dim, node_size))
if isinstance(node, mdp.hinet.CloneLayer):
print (" contains %d cloned nodes of type %s, each with input_dim=%d, output_dim=%d" %
(len(node.nodes), str(node.nodes[0]), node.nodes[0].input_dim, node.nodes[0].output_dim))
elif isinstance(node, mdp.hinet.Layer):
print (" contains %d nodes of type %s, each with input_dim=%d, output_dim=%d" %
(len(node.nodes), str(node.nodes[0]), node.nodes[0].input_dim, node.nodes[0].output_dim))
print ("Total flow size: %d" % total_size)
print ("Largest node size: %d" % compute_largest_node_size(flow))
def display_node_eigenvalues(node, i, mode="All"):
if isinstance(node, mdp.hinet.CloneLayer):
if isinstance(node.nodes[0], mdp.nodes.SFANode):
print ("Node %d is a CloneLayer that contains an SFANode with d=" % i, node.nodes[0].d)
# elif isinstance(node.nodes[0], mdp.nodes.IEVMNode):
# if node.nodes[0].use_sfa:
# print ("Node %d is a CloneLayer that contains an IEVMNode containing an SFA node with" % i, end="")
# print ("num_sfa_features_preserved=%d" % node.nodes[0].num_sfa_features_preserved, end="")
# print ("and d=", node.nodes[0].sfa_node.d)
elif isinstance(node.nodes[0], mdp.nodes.iGSFANode):
print ("Node %d is a CloneLayer that contains an iGSFANode containing an SFA node with " % i, end="")
print ("num_sfa_features_preserved=%d " % node.nodes[0].num_sfa_features_preserved, end="")
print ("and d=", node.nodes[0].sfa_node.d, end=" ")
print ("and evar=", node.nodes[0].evar)
elif isinstance(node.nodes[0], mdp.nodes.PCANode):
print ("Node %d is a CloneLayer that contains a PCANode with d=" % i, node.nodes[0].d, end=" ")
print ("and evar=", node.nodes[0].explained_variance)
elif isinstance(node, mdp.hinet.Layer):
if isinstance(node.nodes[0], mdp.nodes.SFANode):
if mode == "Average":
out = 0.0
for n in node.nodes:
out += n.d
print ("Node %d is a Layer that contains %d SFANodes with avg(d)= " % (i, len(node.nodes)), out / len(node.nodes))
elif mode == "All":
for n in node.nodes:
print ("Node %d is a Layer that contains an SFANode with d= " % i, n.d)
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first SFANode has d= " % i, node.nodes[0].d)
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node.nodes[0], mdp.nodes.iGSFANode):
if mode == "Average":
evar_avg = 0.0
d_avg = 0.0
avg_num_sfa_features = 0.0
min_num_sfa_features_preserved = min([n.num_sfa_features_preserved for n in node.nodes])
for n in node.nodes:
d_avg += n.sfa_node.d[:min_num_sfa_features_preserved]
evar_avg += n.evar
avg_num_sfa_features += n.num_sfa_features_preserved
d_avg /= len(node.nodes)
evar_avg /= len(node.nodes)
avg_num_sfa_features /= len(node.nodes)
print ("Node %d" % i, "is a Layer that contains", len(node.nodes), "iGSFANodes containing SFANodes with " +
"avg(num_sfa_features_preserved)=%f " % avg_num_sfa_features, "and avg(d)=%s" % str(d_avg) +
"and avg(evar)=%f" % evar_avg)
elif mode == "All":
print ("Node %d is a Layer that contains iGSFANodeRecNodes:" % i)
for n in node.nodes:
print (" iGSFANode containing an SFANode with num_sfa_features_preserved=%f, d=%s and evar=%f" %
(n.num_sfa_features_preserved, str(n.sfa_node.d), n.evar))
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first iGSFANode " % i, end="")
print ("contains an SFANode with num_sfa_features_preserved)=%f, d=%s and evar=%f" %
(node.nodes[0].num_sfa_features_preserved, str(node.nodes[0].sfa_node.d), node.nodes[0].evar))
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node.nodes[0], mdp.nodes.SFAAdaptiveNLNode):
if mode == "Average":
out = 0.0
for n in node.nodes:
out += n.sfa_node.d
print ("Node %d is a Layer that contains SFAAdaptiveNLNodes containing SFANodes with", end="")
print ("avg(d)=" % i, out / len(node.nodes))
elif mode == "All":
for n in node.nodes:
print ("Node %d is a Layer that contains an SFAAdaptiveNLNode" % i, end="")
print ("containing an SFANode with d=", n.sfa_node.d)
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first SFAAdaptiveNLNode" % i)
print ("contains an SFANode with d=", node.nodes[0].sfa_node.d)
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node.nodes[0], mdp.nodes.PCANode):
if mode == "Average":
d_avg = 0.0
evar_avg = 0.0
min_num_pca_features_preserved = min([n.output_dim for n in node.nodes])
for n in node.nodes:
d_avg += n.d[:min_num_pca_features_preserved]
evar_avg += n.explained_variance
d_avg /= len(node.nodes)
evar_avg /= len(node.nodes)
print ("Node %d is a Layer that contains PCA nodes with avg(d)=%s and avg(evar)=%f" % (
i, str(d_avg), evar_avg))
elif mode == "All":
print ("Node %d is a Layer that contains PCA nodes:" % i)
for n in node.nodes:
print (" PCANode with d=%s and evar=%f" % (str(n.d), n.explained_variance))
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first PCANode" % i, "has d=%s and evar=%f" % (
str(node.nodes[0].sfa_node.d), node.nodes[0].explained_variance))
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node, mdp.nodes.iGSFANode):
print ("Node %d is an iGSFANode containing an SFA node with num_sfa_features_preserved=%d" %
(i, node.num_sfa_features_preserved), end="")
print ("and d=", node.sfa_node.d)
elif isinstance(node, mdp.nodes.SFANode):
print ("Node %d is an SFANode with d=" % i, node.d)
elif isinstance(node, mdp.nodes.PCANode):
print ("Node %d is a PCANode with d=%s and evar=%f" % (i, str(node.d), node.explained_variance))
else:
print ("Cannot display eigenvalues of Node %d" % i, node)
def display_eigenvalues(flow, mode="All"):
"""This function displays the learned eigenvalues of different nodes in a trained Flow object.
    The mode parameter can take three values; it specifies what to do when a layer is found:
"FirstNodeInLayer": the eigenvalues of the first node in the layer are displayed
"Average": the average eigenvalues of all nodes in a layer are displayed (bounded to the smallest length).
"All": the eigenvalues of all nodes in the layer are displayed.
"""
length = len(flow)
print ("Displaying eigenvalues of SFA Nodes in flow of length", length)
for i in range(length):
node = flow[i]
display_node_eigenvalues(node, i, mode)
def compute_node_size(node, verbose=False):
""" Computes the number of parameters (weights) that have been learned by node.
Note: Means and offsets are not counted, only (multiplicative) weights. The node must have been already trained.
The following nodes are supported currently:
    SFANode, PCANode, WhiteningNode, CloneLayer, Layer, GSFANode, iGSFANode, LinearRegressionNode
"""
if isinstance(node, mdp.nodes.iGSFANode):
return compute_node_size(node.sfa_node) + compute_node_size(node.pca_node) + compute_node_size(node.lr_node)
elif isinstance(node, (mdp.nodes.SFANode, mdp.nodes.PCANode, mdp.nodes.GSFANode, mdp.nodes.LinearRegressionNode,
mdp.nodes.WhiteningNode)) and node.input_dim is not None and node.output_dim is not None:
return node.input_dim * node.output_dim
elif isinstance(node, mdp.hinet.CloneLayer):
return compute_node_size(node.nodes[0])
elif isinstance(node, mdp.hinet.Layer):
size = 0
for node_child in node.nodes:
size += compute_node_size(node_child)
return size
else:
if verbose:
print ("compute_node_size not implemented for nodes of type:", type(node), "or training has not finished")
return 0
def compute_flow_size(flow):
""" Computes the number of weights learned by the whole flow after training.
See compute_node_size for more details on the counting procedure
"""
flow_size = 0
for node in flow:
flow_size += compute_node_size(node)
return flow_size
def compute_largest_node_size(flow):
""" Computes the larger number of weights learned by a node after training.
See compute_node_size for more details on the counting procedure
"""
largest_size = 0
for node in flow:
if (isinstance(node, mdp.nodes.SFANode) or isinstance(node, mdp.nodes.PCANode) or
isinstance(node, mdp.nodes.WhiteningNode)):
current_size = compute_node_size(node)
elif isinstance(node, mdp.hinet.CloneLayer):
current_size = compute_node_size(node.nodes[0])
elif isinstance(node, mdp.hinet.Layer):
current_size = 0
for nodechild in node.nodes:
tmp_size = compute_node_size(nodechild)
if tmp_size > current_size:
current_size = tmp_size
else:
current_size = 0
if current_size > largest_size:
largest_size = current_size
return largest_size
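# Hedged usage sketch (illustrative only): weight counting for a single trained node and for a
# small flow, using the helper functions above. A PCANode mapping 8 -> 4 stores 8 * 4 = 32 weights.
def _example_weight_counting():
    x = numpy.random.normal(size=(300, 8))
    pca = mdp.nodes.PCANode(output_dim=4)
    pca.train(x)
    pca.stop_training()
    print("single node size:", compute_node_size(pca))                       # 32
    print("flow size:", compute_flow_size(mdp.Flow([pca])))                  # 32
    print("largest node size:", compute_largest_node_size(mdp.Flow([pca])))  # 32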
# Used to compare the effectiveness of several PCA Networks
def estimate_explained_variance(images, flow, sl_images, num_considered_images=100, verbose=True):
# Here explained variance is defined as 1 - normalized reconstruction error
num_images = images.shape[0]
im_numbers = numpy.random.randint(num_images, size=num_considered_images)
avg_image = images[im_numbers].mean(axis=0)
selected_images = images[im_numbers]
ori_differences = selected_images - avg_image
ori_energies = ori_differences ** 2
ori_energy = ori_energies.sum()
sl_selected_images = sl_images[im_numbers]
print ("sl_selected_images.shape=", sl_selected_images.shape)
inverses = flow.inverse(sl_selected_images)
rec_differences = inverses - avg_image
rec_energies = rec_differences ** 2
rec_energy = rec_energies.sum()
rec_errors = selected_images - inverses
rec_error_energies = rec_errors ** 2
rec_error_energy = rec_error_energies.sum()
if verbose:
explained_individual = rec_energies.sum(axis=1) / ori_energies.sum(axis=1)
print ("Individual explained variances: ", explained_individual)
print ("Which, itself has standar deviation: ", explained_individual.std())
print ("Therefore, estimated explained variance has std of about: ", explained_individual.std() / numpy.sqrt(
num_considered_images))
print ("Dumb reconstruction_energy/original_energy=", rec_energy / ori_energy)
print ("rec_error_energy/ori_energy=", rec_error_energy / ori_energy)
print ("Thus explained variance about:", 1 - rec_error_energy / ori_energy)
return 1 - rec_error_energy / ori_energy # rec_energy/ori_energy
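# Hedged usage sketch (illustrative only): estimating the explained variance of a pure PCA flow by
# reconstructing a random subset of samples. For isotropic Gaussian data with 8 of 16 components
# kept, the returned fraction should be roughly 0.5.
def _example_estimate_explained_variance():
    images = numpy.random.normal(size=(400, 16))
    pca = mdp.nodes.PCANode(output_dim=8)
    pca.train(images)
    pca.stop_training()
    flow = mdp.Flow([pca])
    sl_images = flow.execute(images)
    print("EV:", estimate_explained_variance(images, flow, sl_images, num_considered_images=100))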
class HeadNode(mdp.Node):
"""Preserve only the first k dimensions from the data
"""
def __init__(self, input_dim=None, output_dim=None, dtype=None):
self.type = dtype
super(HeadNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype)
def is_trainable(self):
return True
def _train(self, x):
pass
def _is_invertible(self):
return True
def _execute(self, x):
if self.output_dim is None:
er = "Warning 12345..."
raise Exception(er)
return x[:, 0:self.output_dim]
def _stop_training(self):
pass
def _inverse(self, y):
num_samples, out_dim = y.shape[0], y.shape[1]
zz = numpy.zeros((num_samples, self.input_dim - out_dim))
return numpy.concatenate((y, zz), axis=1)
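# Hedged usage sketch (illustrative only): HeadNode keeps the first k components, and its inverse
# zero-pads them back to the original dimensionality.
def _example_head_node():
    x = numpy.random.normal(size=(10, 5))
    node = HeadNode(output_dim=3)
    node.train(x)
    node.stop_training()
    y = node.execute(x)      # shape (10, 3): the first 3 columns of x
    x_rec = node.inverse(y)  # shape (10, 5): the last 2 columns are zero
    print(y.shape, x_rec.shape)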
# # This code is obsolete.
# class SFAPCANode(mdp.Node):
# """Node that extracts slow features unless their delta value is too high. In such a case PCA features are extracted.
# """
#
# def __init__(self, input_dim=None, output_dim=None, max_delta=1.95, sfa_args={}, pca_args={}, **argv):
# super(SFAPCANode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv)
# self.sfa_node = mdp.nodes.SFANode(**sfa_args)
# # max delta value allowed for a slow feature, otherwise a principal component is extracted
# self.max_delta = max_delta
# self.avg = None # input average
# self.W = None # weights for complete transformation
# self.pinv = None # weights for pseudoinverse of complete transformation
#
# def is_trainable(self):
# return True
#
# def _train(self, x, **argv):
# self.sfa_node.train(x, **argv)
#
# @staticmethod
# def _is_invertible():
# return True
#
# def _execute(self, x):
# W = self.W
# avg = self.avg
# return numpy.dot(x - avg, W)
#
# def _stop_training(self, **argv):
# # New GraphSFA node
# if "_covdcovmtx" in dir(self.sfa_node):
# # Warning, fix is computed twice. TODO: avoid double computation
# C, self.avg, CD = self.sfa_node._covdcovmtx.fix()
# else:
# # Old fix destroys data... so we copy the matrices first.
# cov_mtx = copy.deepcopy(self.sfa_node._cov_mtx)
# dcov_mtx = copy.deepcopy(self.sfa_node._dcov_mtx)
#
# C, self.avg, tlen = cov_mtx.fix()
# DC, davg, dtlen = dcov_mtx.fix()
#
# dim = C.shape[0]
# type_ = C.dtype
# self.sfa_node.stop_training()
# d = self.sfa_node.d
# sfa_output_dim = len(d[d <= self.max_delta])
# sfa_output_dim = min(sfa_output_dim, self.output_dim)
# print ("sfa_output_dim=", sfa_output_dim)
#
# Wsfa = self.sfa_node.sf[:, 0:sfa_output_dim]
# print ("Wsfa.shape=", Wsfa.shape)
# if Wsfa.shape[1] == 0: # No slow components will be used
# print ("No Psfa created")
# PS = numpy.zeros((dim, dim), dtype=type_)
# else:
# Psfa = pinv(Wsfa)
# print ("Psfa.shape=", Psfa.shape)
# PS = numpy.dot(Wsfa, Psfa)
#
# print ("PS.shape=", PS.shape)
# Cproy = numpy.dot(PS, numpy.dot(C, PS.T))
# Cpca = C - Cproy
#
# if self.output_dim is None:
# self.output_dim = dim
#
# pca_output_dim = self.output_dim - sfa_output_dim
# print ("PCA output_dim=", pca_output_dim)
# if pca_output_dim > 0:
# pca_node = mdp.nodes.PCANode(output_dim=pca_output_dim) # WARNING: WhiteningNode should be used here
# pca_node._cov_mtx._dtype = type_
# pca_node._cov_mtx._input_dim = dim
# pca_node._cov_mtx._avg = numpy.zeros(dim, type_)
# pca_node._cov_mtx.bias = True
# pca_node._cov_mtx._tlen = 1 # WARNING!!! 1
# pca_node._cov_mtx._cov_mtx = Cpca
# pca_node._input_dim = dim
# pca_node._train_phase_started = True
# pca_node.stop_training()
# print ("pca_node.d=", pca_node.d)
# print ("1000000 * pca_node.d[0]=", 1000000 * pca_node.d[0])
#
# Wpca = pca_node.v
# Ppca = pca_node.v.T
# else:
# Wpca = numpy.array([]).reshape((dim, 0))
# Ppca = numpy.array([]).reshape((0, dim))
#
# print ("Wpca.shape=", Wpca.shape)
# print ("Ppca.shape=", Ppca.shape)
#
# self.W = numpy.concatenate((Wsfa, Wpca), axis=1)
# self.pinv = None # WARNING, why this does not work correctly: numpy.concatenate((Psfa, Ppca),axis=0) ?????
# # print "Pinv 1=", self.pinv
# # print "Pinv 2-Pinv1=", pinv(self.W)-self.pinv
# print ("W.shape=", self.W.shape)
# # print "pinv.shape=", self.pinv.shape
# print ("avg.shape=", self.avg.shape)
#
# def _inverse(self, y):
# if self.pinv is None:
# print ("Computing PINV", end="")
# self.pinv = pinv(self.W)
# return numpy.dot(y, self.pinv) + self.avg
# Computes the variance of some MDP data array
def data_variance(x):
return ((x - x.mean(axis=0)) ** 2).sum(axis=1).mean()
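# Hedged illustration: data_variance() sums the per-component variances, so a value near 4.0 is
# expected for four independent unit-variance components.
def _example_data_variance():
    x = numpy.random.normal(size=(2000, 4))
    print("total variance:", data_variance(x))  # approximately 4.0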
def estimate_explained_var_linearly(x, y, x_test, y_test):
x_test_app = approximate_linearly(x, y, y_test)
explained_variance = compute_explained_var(x_test, x_test_app)
x_variance = data_variance(x_test)
print ("x_variance=", x_variance, ", explained_variance=", explained_variance)
return explained_variance / x_variance
def approximate_linearly(x, y, y_test):
lr_node = mdp.nodes.LinearRegressionNode(use_pseudoinverse=True)
lr_node.train(y, x)
lr_node.stop_training()
x_test_app = lr_node.execute(y_test)
return x_test_app
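# Hedged usage sketch (illustrative only): when x is (nearly) a linear function of y, the globally
# estimated fraction of linearly explained variance should be close to 1.0. The mixing matrix is
# invented purely for this example.
def _example_estimate_explained_var_linearly():
    n = 500
    y = numpy.random.normal(size=(n, 4))
    mixing = numpy.random.normal(size=(4, 3))
    x = numpy.dot(y, mixing) + 0.01 * numpy.random.normal(size=(n, 3))
    y_test = numpy.random.normal(size=(n, 4))
    x_test = numpy.dot(y_test, mixing)
    print("EV fraction:", estimate_explained_var_linearly(x, y, x_test, y_test))  # close to 1.0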
# Approximates x from y, and computes how sensitive the estimation is to changes in y
def sensivity_of_linearly_approximation(x, y):
lr_node = mdp.nodes.LinearRegressionNode(use_pseudoinverse=True)
lr_node.train(y, x)
lr_node.stop_training()
beta = lr_node.beta[1:, :] # bias is used by default, we do not need to consider it
print ("beta.shape=", beta.shape)
sens = (beta ** 2).sum(axis=1)
return sens
def estimate_explained_var_with_kNN(x, y, max_num_samples_for_ev=None, max_test_samples_for_ev=None, k=1,
ignore_closest_match=False, operation="average"):
num_samples = x.shape[0]
indices_all_x = numpy.arange(x.shape[0])
    if max_num_samples_for_ev is not None:  # use a random subset of samples for reconstruction
max_num_samples_for_ev = min(max_num_samples_for_ev, num_samples)
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
y_sel = y[indices_all_x_selection]
else:
x_sel = x
y_sel = y
    if max_test_samples_for_ev is not None:  # use a random subset of samples for testing
max_test_samples_for_ev = min(max_test_samples_for_ev, num_samples)
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_test_samples_for_ev]
x_test = x[indices_all_x_selection]
y_test = y[indices_all_x_selection]
else:
x_test = x
y_test = y
x_app_test = approximate_kNN_op(x_sel, y_sel, y_test, k, ignore_closest_match, operation=operation)
print ("x_test=", x_test)
print ("x_app_test=", x_app_test)
explained_variance = compute_explained_var(x_test, x_app_test)
test_variance = data_variance(x_test)
print ("explained_variance=", explained_variance)
print ("test_variance=", test_variance)
return explained_variance / test_variance
def random_subindices(num_indices, size_selection):
if size_selection > num_indices:
ex = "Error, size_selection is larger than num_indices! (", size_selection, ">", num_indices, ")"
raise Exception(ex)
all_indices = numpy.arange(num_indices)
numpy.random.shuffle(all_indices)
return all_indices[0:size_selection] + 0
def estimate_explained_var_linear_global(subimages_train, sl_seq_training, subimages_newid, sl_seq_newid,
reg_num_signals, number_samples_EV_linear_global):
"""Function that computes how much variance is explained linearly from a global mapping.
It works as follows: 1) Linear regression is trained with sl_seq_training and subimages_train.
2) Estimation is done on subset of size number_samples_EV_linear_global from training and test data
3) For training data evaluation is done on the same data used to train LR, and on new random subset of data.
4) For test data all samples are used.
"""
indices_all_train1 = random_subindices(subimages_train.shape[0], number_samples_EV_linear_global)
indices_all_train2 = random_subindices(subimages_train.shape[0], number_samples_EV_linear_global)
indices_all_newid = numpy.arange(subimages_newid.shape[0])
lr_node = mdp.nodes.LinearRegressionNode()
sl_seq_training_sel1 = sl_seq_training[indices_all_train1, 0:reg_num_signals]
subimages_train_sel1 = subimages_train[indices_all_train1]
lr_node.train(sl_seq_training_sel1,
subimages_train_sel1) # Notice that the input "x"=n_sfa_x and the output to learn is "y" = x_pca
lr_node.stop_training()
subimages_train_app1 = lr_node.execute(sl_seq_training_sel1)
EVLinGlobal_train1 = compute_explained_var(subimages_train_sel1, subimages_train_app1)
data_variance_train1 = data_variance(subimages_train_sel1)
sl_seq_training_sel2 = sl_seq_training[indices_all_train2, 0:reg_num_signals]
subimages_train_sel2 = subimages_train[indices_all_train2]
subimages_train_app2 = lr_node.execute(sl_seq_training_sel2)
EVLinGlobal_train2 = compute_explained_var(subimages_train_sel2, subimages_train_app2)
data_variance_train2 = data_variance(subimages_train_sel2)
sl_seq_newid_sel = sl_seq_newid[indices_all_newid, 0:reg_num_signals]
subimages_newid_sel = subimages_newid[indices_all_newid]
subimages_newid_app = lr_node.execute(sl_seq_newid_sel)
EVLinGlobal_newid = compute_explained_var(subimages_newid_sel, subimages_newid_app)
data_variance_newid = data_variance(subimages_newid_sel)
print ("Data variances=", data_variance_train1, data_variance_train2, data_variance_newid)
print ("EVLinGlobal=", EVLinGlobal_train1, EVLinGlobal_train2, EVLinGlobal_newid)
return EVLinGlobal_train1 / data_variance_train1, EVLinGlobal_train2 / data_variance_train2, \
EVLinGlobal_newid / data_variance_newid
def compute_explained_var(true_samples, approximated_samples):
"""Computes the explained variance provided by the approximation to some data, with respect to the true data.
Additionally, the original data variance is provided:
app = true_samples + error
exp_var ~ energy(true_samples) - energy(error)
"""
error = (approximated_samples - true_samples)
error_energy = (error ** 2.0).sum(axis=1).mean() # average squared error per sample
true_energy = data_variance(true_samples) # (true_samples-true_samples.mean(axis=0)).var()
explained_var = true_energy - error_energy
# print "Debug information:", error_energy, true_energy
return explained_var
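# Hedged illustration of compute_explained_var(): for an approximation that only adds small noise,
# the explained variance should be close to (but slightly below) the total data variance.
def _example_compute_explained_var():
    true_samples = numpy.random.normal(size=(1000, 3))
    approximated = true_samples + 0.1 * numpy.random.normal(size=(1000, 3))
    print("explained:", compute_explained_var(true_samples, approximated),
          "of total:", data_variance(true_samples))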
def approximate_kNN_op(x, x_exp, y_exp, k=1, ignore_closest_match=False, operation=None):
""" Approximates a signal y given its expansion y_exp. The method is kNN with training data given by x, x_exp
If label_avg=True, the inputs of the k closest expansions are averaged, otherwise the most frequent
among k-closest is returned.
When label_avg=True, one can also specify to ignore the best match (useful if y_exp = x_exp)
"""
n = mdp.nodes.KNNClassifier(k=k, execute_method="label")
n.train(x_exp, range(len(x_exp)))
if operation == "average":
n.stop_training()
ii = n.klabels(y_exp)
if ignore_closest_match and k == 1:
ex = "Error, k==1 but ignoring closest match!"
raise Exception(ex)
elif ignore_closest_match:
ii = ii[:, 1:]
y = x[ii].mean(axis=1)
# y_exp_app = x_exp[ii].mean(axis=1)
# print "Error for y_exp is:", ((y_exp_app - y_exp)**2).sum(axis=1).mean()
# print "y=",y
return y # x[ii].mean(axis=1)
elif operation == "lin_app":
n.stop_training()
ii = n.klabels(y_exp)
if ignore_closest_match and k == 1:
ex = "Error, k==1 but ignoring closest match!"
raise Exception(ex)
elif ignore_closest_match:
ii = ii[:, 1:]
x_dim = x.shape[1]
x_exp_dim = x_exp.shape[1]
x_mean = x.mean(axis=0)
x = x - x_mean
nk = ii.shape[1]
y = numpy.zeros((len(y_exp), x_dim))
y_exp_app = numpy.zeros((len(y_exp), x_exp_dim))
x_ind = x[ii]
x_exp_ind = x_exp[ii]
y_expit = numpy.zeros((x_exp_dim + 1, 1))
k = 1.0e10 # make larger to force sum closer to one?!
y_expit[x_exp_dim, 0] = 0.0 * 1.0 * k
x_expit = numpy.zeros((x_exp_dim + 1, nk))
x_expit[x_exp_dim, :] = 1.0 * k #
zero_threshold = -40.0500 # -0.004
max_zero_weights = nk // 5
w_0 = numpy.ones((nk, 1)) * 1.0 / nk
# print "w_0", w_0
for i in range(len(y_exp)):
negative_weights = 0
iterate = True
# print "Iteration: ", i,
x_expit[0:x_exp_dim, :] = x_exp_ind[i].T
y_0 = numpy.dot(x_exp_ind[i].T, w_0)
fixing_zero_threshold = zero_threshold * 500
while iterate:
# print x_exp_ind[i].T.shape
# print x_expit[0:x_exp_dim,:].shape
x_pinv = numpy.linalg.pinv(x_expit)
# print y_0.shape, y_exp[i].shape
y_expit[0:x_exp_dim, 0] = y_exp[i] - y_0.flatten()
w_i = numpy.dot(x_pinv, y_expit) + w_0
iterate = False
if (w_i < zero_threshold).any():
# print "w_i[:,0] =", w_i[:,0]
# print "x_expit = ", x_expit
negative_weights += (w_i < fixing_zero_threshold).sum()
negative_elements = numpy.arange(nk)[w_i[:, 0] < fixing_zero_threshold]
numpy.random.shuffle(negative_elements)
for nn in negative_elements:
# print "nn=", nn
x_expit[0:x_exp_dim + 1, nn] = 0.0
# print "negative_elements", negative_elements
iterate = True
fixing_zero_threshold /= 2
if negative_weights >= max_zero_weights:
iterate = False
# FORCE SUM WEIGHTS=1:
# print "w_i[:,0] =", w_i[:,0]
# print "weight sum=",w_i.sum(),"min_weight=",w_i.min(),"max_weight=",w_i.max(),
# "negative weights=", negative_weights
w_i /= w_i.sum()
# print "y[i].shape", y[i].shape
# print "as.shape", numpy.dot(x_ind[i].T, w_i).T.shape
y[i] = numpy.dot(x_ind[i].T, w_i).T + x_mean # numpy.dot(w_i, x_ind[i]).T
y_exp_app[i] = numpy.dot(x_exp_ind[i].T, w_i).T
if w_i.min() < zero_threshold: # 0.1: #negative_weights >= max_zero_weights:
# quit()max_zero_weights
print ("Warning smallest weight is", w_i.min(), "thus replacing with simple average")
# print "Warning, at least %d all weights turned out to be negative! (%d)"%(max_zero_weights,
# negative_weights)
# print x_ind[i]
# print x_ind[i].shape
y[i] = x_ind[i].mean(axis=0)
print (".", end="")
# print "Error for y_exp is:", ((y_exp_app - y_exp)**2).sum(axis=1).mean()
# print "y=",y
return y # x[ii].mean(axis=1)
elif operation == "plainKNN":
ii = n.execute(y_exp)
ret = x[ii]
return ret
else:
er = "operation unknown:", operation
raise Exception(er)
def approximate_kNN(x, x_exp, y_exp, k=1, ignore_closest_match=False, label_avg=True):
n = mdp.nodes.KNNClassifier(k=k, execute_method="label")
n.train(x_exp, range(len(x_exp)))
if label_avg:
n.stop_training()
ii = n.klabels(y_exp)
if ignore_closest_match and k == 1:
ex = "Error, k==1 but ignoring closest match!"
raise Exception(ex)
elif ignore_closest_match:
ii = ii[:, 1:]
y = x[ii].mean(axis=1)
return y # x[ii].mean(axis=1)
else:
ii = n.execute(y_exp)
ret = x[ii]
return ret
def rank_expanded_signals_max_linearly(x, x_exp, y, y_exp, max_comp=10, max_num_samples_for_ev=None,
max_test_samples_for_ev=None, verbose=False):
""" Third ranking method. More robust and closer to max EV(x; y_i + Y)-EV(x;Y) for all Y, EV computed linearly.
Ordering and scoring of signals respects principle of best incremental feature selection
Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
x, x_exp are training data, y and y_exp are test data
At most max_comp are evaluated exhaustively, the rest is set equal to the remaining
"""
dim_out = x_exp.shape[1]
all_indices = numpy.arange(dim_out)
indices_all_x = numpy.arange(x.shape[0])
indices_all_y = numpy.arange(y.shape[0])
max_scores = numpy.zeros(dim_out)
available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored
    taken = []  # indices already selected, in order
# Compute maximum explainable variance (taking all components)
total_variance = data_variance(y)
last_explained_var = 0.0
last_score = 0.0
for iteration in range(min(max_comp, dim_out)):
# find individual contribution to expl var, from not taken
indices_available = all_indices[available_mask] # mapping from index_short to index_long
temp_explained_vars = numpy.zeros(
            dim_out - iteration)  # explained variances for each available index
# On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
else:
x_sel = x
x_exp_sel = x_exp
if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
indices_all_y_selection = indices_all_y + 0
numpy.random.shuffle(indices_all_y_selection)
indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
else:
y_sel = y
y_exp_sel = y_exp
if verbose:
print ("indices available=", indices_available)
for index_short, index_long in enumerate(indices_available):
taken_tmp = list(taken) # Copy the taken list
taken_tmp.append(index_long) # Add index_long to it
x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables
y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
y_app_sel = approximate_linearly(x_sel, x_exp_tmp_sel, y_exp_tmp_sel)
# print "QQQ=", compute_explained_var(y_sel, y_app_sel)
temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var
if verbose:
print ("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" %
(index_short, index_long, temp_explained_vars[index_short]))
# Update scores
max_scores[indices_available] = numpy.maximum(max_scores[indices_available],
temp_explained_vars - last_explained_var)
# select maximum
# print "temp_explained_vars=", temp_explained_vars
max_explained_var_index_short = temp_explained_vars.argmax()
# print "max_explained_var_index_short=", max_explained_var_index_short
# print "indices_available=",indices_available
max_explained_var_index_long = indices_available[max_explained_var_index_short]
if verbose:
print ("Selecting index short:", max_explained_var_index_short, end="")
print (" and index_ long:", max_explained_var_index_long)
# mark as taken and update temporal variables
taken.append(max_explained_var_index_long)
available_mask[max_explained_var_index_long] = False
# last_score = scores[max_explained_var_index_long]
last_explained_var = temp_explained_vars[max_explained_var_index_short]
print ("brute max_scores = ", max_scores)
print ("brute taken = ", taken)
# Find ordering of variables not yet taken
if max_comp < dim_out:
max_explained_var_indices_short = temp_explained_vars.argsort()[::-1][1:]
        # In decreasing order of explained variance; skip the first element, which was already added to taken
for max_explained_var_index_short in max_explained_var_indices_short:
taken.append(indices_available[max_explained_var_index_short])
print ("final taken = ", taken)
# Make scoring decreasing in ordering stored in taken
last_explained_var = max(last_explained_var, 0.01) # For numerical reasons
last_max_score = -numpy.inf
sum_max_scores = 0.0
for i, long_index in enumerate(taken):
current_max_score = max_scores[long_index]
sum_max_scores += current_max_score
if current_max_score > last_max_score and i > 0:
max_scores[long_index] = last_max_score
tmp_sum_max_scores = max_scores[taken[0:i + 1]].sum()
max_scores[taken[0:i + 1]] += (sum_max_scores - tmp_sum_max_scores) / (i + 1)
last_max_score = max_scores[long_index]
# print "iteration max_scores = ", max_scores
print ("preeliminar max_scores = ", max_scores)
# max_scores *= (last_explained_var / max_scores.sum())**0.5
# NOTE: last_explained_var is not the data variance.
# Here it is the variance up to max_comp components
# 3 options: all features, first max_comp features, output_dim features
max_scores *= (last_explained_var / max_scores.sum()) ** 0.5
print ("final max_scores = ", max_scores)
if (max_scores == 0.0).any():
print ("WARNING, removing 0.0 max_scores!")
max_score_min = (max_scores[max_scores > 0.0]).min()
# TODO:Find reasonable way to fix this, is this causing the distorted reconstructions???
max_scores += max_score_min * 0.001
# max_scores += (max_scores[max_scores>0.0])
return max_scores
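# Hedged usage sketch (illustrative only): ranking an expansion in which the first two components
# are the signal itself and the last two are pure noise; the first two scores should clearly
# dominate. Note that the function prints intermediate diagnostics while it runs.
def _example_rank_expanded_signals_max_linearly():
    n = 300
    x = numpy.random.normal(size=(n, 2))
    x_exp = numpy.concatenate((x, x ** 2, numpy.random.normal(size=(n, 2))), axis=1)
    scores = rank_expanded_signals_max_linearly(x, x_exp, x, x_exp, max_comp=6)
    print("scores:", scores)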
def rank_expanded_signals_max(x, x_exp, y, y_exp, max_comp=10, k=1, operation="average", max_num_samples_for_ev=None,
max_test_samples_for_ev=None, offsetting_mode="max_comp features", verbose=False):
""" This Second ranking method more robust and closer to max I(x; y_i + Y)-I(x;Y) for all Y.
Ordering and scoring of signals respects principle of best incremental feature selection
Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
x, x_exp are training data, y and y_exp are test data
At most max_comp are evaluated exhaustively, the rest is set equal to the remaining
"""
dim_out = x_exp.shape[1]
all_indices = numpy.arange(dim_out)
indices_all_x = numpy.arange(x.shape[0])
indices_all_y = numpy.arange(y.shape[0])
max_scores = numpy.zeros(dim_out)
available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored
    taken = []  # indices already selected, in order
# Compute maximum explainable variance (taking all components)
total_variance = data_variance(y)
last_explained_var = 0.0
last_score = 0.0
for iteration in range(min(max_comp, dim_out)):
# find individual contribution to expl var, from not taken
indices_available = all_indices[available_mask] # mapping from index_short to index_long
temp_explained_vars = numpy.zeros(
            dim_out - iteration)  # explained variances for each available index
# On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
else:
x_sel = x
x_exp_sel = x_exp
        if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
indices_all_y_selection = indices_all_y + 0
numpy.random.shuffle(indices_all_y_selection)
indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
else:
y_sel = y
y_exp_sel = y_exp
if verbose:
print ("indices available=", indices_available)
for index_short, index_long in enumerate(indices_available):
taken_tmp = list(taken) # Copy the taken list
taken_tmp.append(index_long) # Add index_long to it
x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables
y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
if operation == "linear_rec":
y_app_sel = approximate_linearly(x_sel, x_exp_tmp_sel, y_exp_tmp_sel)
else:
y_app_sel = approximate_kNN_op(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True,
operation=operation) # invert from taken variables
# print "QQQ=", compute_explained_var(y_sel, y_app_sel)
temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var
if verbose:
print ("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % (
index_short, index_long, temp_explained_vars[index_short]))
# Update scores
max_scores[indices_available] = numpy.maximum(max_scores[indices_available],
temp_explained_vars - last_explained_var)
# select maximum
# print "temp_explained_vars=", temp_explained_vars
max_explained_var_index_short = temp_explained_vars.argmax()
# print "max_explained_var_index_short=", max_explained_var_index_short
# print "indices_available=",indices_available
max_explained_var_index_long = indices_available[max_explained_var_index_short]
if verbose:
print("Selecting index short:", max_explained_var_index_short,
" and index_ long:", max_explained_var_index_long)
# mark as taken and update temporal variables
taken.append(max_explained_var_index_long)
available_mask[max_explained_var_index_long] = False
# last_score = scores[max_explained_var_index_long]
last_explained_var = temp_explained_vars[max_explained_var_index_short]
print("brute max_scores = ", max_scores)
print("brute taken = ", taken)
# Find ordering of variables not yet taken
if max_comp < dim_out:
max_explained_var_indices_short = \
temp_explained_vars.argsort()[::-1][1:]
        # In decreasing order of explained variance; skip the first element, which was already added to taken
for max_explained_var_index_short in max_explained_var_indices_short:
taken.append(indices_available[max_explained_var_index_short])
print("final taken = ", taken)
# Make scoring decreasing in ordering stored in taken
last_explained_var = max(last_explained_var, 0.01) # For numerical reasons
last_max_score = -numpy.inf
sum_max_scores = 0.0
for i, long_index in enumerate(taken):
current_max_score = max_scores[long_index]
sum_max_scores += current_max_score
if current_max_score > last_max_score and i > 0:
max_scores[long_index] = last_max_score
tmp_sum_max_scores = max_scores[taken[0:i + 1]].sum()
max_scores[taken[0:i + 1]] += (sum_max_scores - tmp_sum_max_scores) / (i + 1)
last_max_score = max_scores[long_index]
# print "iteration max_scores = ", max_scores
print("preeliminar max_scores = ", max_scores)
# Compute explained variance with all features
indices_all_x_selection = random_subindices(x.shape[0], max_num_samples_for_ev)
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
indices_all_y_selection = random_subindices(y.shape[0], max_test_samples_for_ev)
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
if operation == "linear_rec":
y_app_sel = approximate_linearly(x_sel, x_exp_sel, y_exp_sel)
else:
y_app_sel = approximate_kNN_op(x_sel, x_exp_sel, y_exp_sel, k=k, ignore_closest_match=True,
operation=operation) # invert from taken variables
explained_var_all_feats = compute_explained_var(y_sel, y_app_sel)
print("last_explained_var =", last_explained_var)
print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance)
# max_scores *= (last_explained_var / max_scores.sum())**0.5
# NOTE: last_explained_var is not the data variance. It is the variance up to max_comp components
# 3 options: all scores, max_comp scores, output_dim scores (usually all scores)
if offsetting_mode == "max_comp features":
max_scores *= (last_explained_var / max_scores.sum())
elif offsetting_mode == "all features":
print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance)
max_scores *= (explained_var_all_feats / max_scores.sum())
elif offsetting_mode == "all features smart":
max_scores *= (last_explained_var / max_scores.sum())
print("scaled max_scores=", max_scores)
max_scores += (explained_var_all_feats - last_explained_var) / max_scores.shape[0]
print("offsetted max_scores=", max_scores)
elif offsetting_mode == "democratic":
max_scores = numpy.ones_like(max_scores) * explained_var_all_feats / max_scores.shape[0]
print("democractic max_scores=", max_scores)
elif offsetting_mode == "linear":
# Code fixed!!!
max_scores = numpy.arange(dim_out, 0, -1) * explained_var_all_feats / (dim_out * (dim_out + 1) / 2)
print("linear max_scores=", max_scores)
elif offsetting_mode == "sensitivity_based":
sens = sensivity_of_linearly_approximation(x_sel, x_exp_sel)
max_scores = sens * explained_var_all_feats / sens.sum()
print("sensitivity_based max_scores=", max_scores)
else:
ex = "offsetting_mode unknown", offsetting_mode
raise Exception(ex)
print("final max_scores = ", max_scores)
if (max_scores == 0.0).any():
print("WARNING, removing 0.0 max_scores!")
max_score_min = (max_scores[max_scores > 0.0]).min()
max_scores += max_score_min * 0.001
# TODO:Find reasonable way to fix this, is this causing the distorted reconstructions???
# max_scores += (max_scores[max_scores>0.0])
return max_scores
# TODO: Improve: if max_comp < output_dim choose remaining features from the last evaluation of explained variances.
def rank_expanded_signals(x, x_exp, y, y_exp, max_comp=10, k=1, linear=False, max_num_samples_for_ev=None,
max_test_samples_for_ev=None, verbose=False):
""" Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
x, x_exp are training data, y and y_exp are test data
At most max_comp are evaluated exhaustively, the rest is set equal to the remaining
"""
dim_out = x_exp.shape[1]
all_indices = numpy.arange(dim_out)
indices_all_x = numpy.arange(x.shape[0])
indices_all_y = numpy.arange(y.shape[0])
scores = numpy.zeros(dim_out)
available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored
    taken = []  # indices already selected, in order
# Compute maximum explainable variance (taking all components)
total_variance = data_variance(y)
last_explained_var = 0.0
last_score = 0.0
for iteration in range(min(max_comp, dim_out)):
# find individual contribution to expl var, from not taken
indices_available = all_indices[available_mask] # mapping from index_short to index_long
temp_explained_vars = numpy.zeros(
            dim_out - iteration)  # explained variances for each available index
# On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
else:
x_sel = x
x_exp_sel = x_exp
if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
indices_all_y_selection = indices_all_y + 0
numpy.random.shuffle(indices_all_y_selection)
indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
else:
y_sel = y
y_exp_sel = y_exp
if verbose:
print("indices available=", indices_available)
for index_short, index_long in enumerate(indices_available):
taken_tmp = list(taken) # Copy the taken list
taken_tmp.append(index_long) # Add index_long to it
x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables
y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
y_app_sel = approximate_kNN(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True,
label_avg=True) # invert from taken variables
# print "QQQ=", compute_explained_var(y_sel, y_app_sel)
temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var
if verbose:
print("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % (
index_short, index_long, temp_explained_vars[index_short]))
# select maximum
# print "temp_explained_vars=", temp_explained_vars
max_explained_var_index_short = temp_explained_vars.argmax()
# print "max_explained_var_index_short=", max_explained_var_index_short
# print "indices_available=",indices_available
max_explained_var_index_long = indices_available[max_explained_var_index_short]
if verbose:
print("Selecting index short:", max_explained_var_index_short)
print(" and index_ long:", max_explained_var_index_long)
# update total explained var & scores
# Add logic to robustly handle strange contributions: 3, 2, 1, 4 => 5, 2.5, 1.25, 1.25 ?
# TODO:FIX NORMALIZATION WHEN FIRST SCORES ARE ZERO OR NEGATIVE!
# TODO:NORMALIZATION SHOULD BE OPTIONAL, SINCE IT WEAKENS THE INTERPRETATION OF THE SCORES
explained_var = max(temp_explained_vars[max_explained_var_index_short], 0.0)
new_score = explained_var - last_explained_var
if verbose:
print("new_score raw = ", new_score)
new_score = max(new_score, 0.0)
if new_score > last_score and iteration > 0:
new_score = last_score # Here some options are available to favour components taken first
scores[max_explained_var_index_long] = new_score
if verbose:
print("tmp scores = ", scores)
        # normalize scores, so that they sum up to explained_var
sum_scores = scores.sum()
residual = max(explained_var, 0.0) - sum_scores
if residual > 0.0:
correction = residual / (iteration + 1)
scores[taken] += correction
scores[max_explained_var_index_long] += correction
# scores = scores * explained_var / (sum_scores+1e-6) #TODO:CORRECT THIS; INSTEAD OF FACTOR USE ADDITIVE TERM
if verbose:
print("normalized scores = ", scores, "sum to:", scores.sum(), "explained_var =", explained_var)
# mark as taken and update temporal variables
taken.append(max_explained_var_index_long)
available_mask[max_explained_var_index_long] = False
last_score = scores[max_explained_var_index_long]
last_explained_var = explained_var
# handle variables not used, assign equal scores to all of them
preserve_last_evaluation = True
if preserve_last_evaluation and max_comp < dim_out:
# The score of the last feature found will be modified, as well as of not yet found features
# TODO: Take care of negative values
if last_score <= 0.0:
last_score = 0.01 # Just some value is needed here
remaining_output_features = len(temp_explained_vars) # including feature already processed
remaining_ordered_explained_variances_short_index = numpy.argsort(temp_explained_vars)[::-1]
remaining_ordered_explained_variances_long_index = indices_available[
remaining_ordered_explained_variances_short_index]
remaining_ordered_explained_variances = temp_explained_vars[
remaining_ordered_explained_variances_short_index] + 0.0
remaining_total_contribution = last_score
print("last_score=", last_score)
beta = 0.95
remaining_ordered_explained_variances[
remaining_ordered_explained_variances <= 0.0] = 0.0001 # To avoid division over zero, numerical hack
# numpy.clip(remaining_ordered_explained_variances, 0.0, None) fails here!!!!
print("remaining_ordered_explained_variances=", remaining_ordered_explained_variances)
minimum = remaining_ordered_explained_variances.min() # first element
ev_sum = remaining_ordered_explained_variances.sum()
normalized_scores = (remaining_total_contribution / (ev_sum - remaining_output_features * minimum) * beta) * \
(remaining_ordered_explained_variances - minimum) + \
((1.0 - beta) / remaining_output_features) * remaining_total_contribution
print("normalized_scores=", normalized_scores)
print("remaining_ordered_explained_variances_long_index=", remaining_ordered_explained_variances_long_index)
print(scores.dtype)
print(normalized_scores.dtype)
scores[remaining_ordered_explained_variances_long_index] = normalized_scores
else:
# rest_explained_variance = total_variance-last_explained_var
sum_scores = scores.sum()
rest_explained_variance = total_variance - sum_scores
if verbose:
print("rest_explained_variance=", rest_explained_variance)
correction = rest_explained_variance / dim_out
scores += correction
if (scores == 0.0).any():
print("WARNING, removing 0.0 scores!")
scores += 0.0001
# num_unused = dim_out - max_comp
# scores[available_mask] = min(rest_explained_variance / num_unused, last_score)
# sum_scores = scores.sum()
# scores = scores * explained_var / (sum_scores+1e-6)
if verbose:
print("final scores: ", scores)
if verbose and linear and False:
for i in indices_available:
taken.append(i)
scores[taken] = numpy.arange(dim_out - 1, -1, -1) # **2 #WARNING!!! QUADRATIC SCORES!!!
scores = scores * total_variance / scores.sum()
print("Overriding with linear scores:", scores)
return scores
# TODO: Remove this node, it is now obsolete
class IEVMNode(mdp.Node):
""" Node implementing simple Incremental Explained Variance Maximization.
    Extracted features are moderately useful for reconstruction, and the node itself provides
    reconstruction (an inverse method is implemented).
    The expansion function is optional, as is performing PCA on the scores.
    The added variance of the first k outputs is equal to the explained variance of those k outputs.
"""
def __init__(self, input_dim=None, output_dim=None, expansion_funcs=None, k=5, max_comp=None,
max_num_samples_for_ev=None, max_test_samples_for_ev=None, use_pca=False, use_sfa=False,
max_preserved_sfa=2.0, second_weighting=False, operation="average", out_sfa_filter=False, **argv):
super(IEVMNode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv)
if expansion_funcs is not None:
self.exp_node = GeneralExpansionNode(funcs=expansion_funcs)
else:
self.exp_node = None
self.sfa_node = None
self.second_weighting = second_weighting
self.use_pca = use_pca
self.use_sfa = use_sfa
if use_sfa and not use_pca:
er = "Combination of use_sfa and use_pca not considered. Please activate use_pca or deactivate use_sfa"
raise Exception(er)
self.k = k
self.max_comp = max_comp
self.max_num_samples_for_ev = max_num_samples_for_ev
self.max_test_samples_for_ev = max_test_samples_for_ev
self.feature_scaling_factor = 0.5 # Factor that prevents amplitudes of features from growing across the network
self.exponent_variance = 0.5
self.operation = operation
self.max_preserved_sfa = max_preserved_sfa
self.out_sfa_filter = out_sfa_filter
@staticmethod
def is_trainable():
return True
def _train(self, x, block_size=None, train_mode=None, node_weights=None, edge_weights=None, scheduler=None,
n_parallel=None, **argv):
num_samples, self.input_dim = x.shape
if self.output_dim is None:
self.output_dim = self.input_dim
if self.max_comp is None:
self.max_comp = min(self.input_dim, self.output_dim)
else:
self.max_comp = min(self.max_comp, self.input_dim, self.output_dim)
print("Training IEVMNode...")
self.x_mean = x.mean(axis=0) # Remove mean before expansion
x = x - self.x_mean
if self.exp_node is not None: # Expand data
print("expanding x...")
exp_x = self.exp_node.execute(x)
else:
exp_x = x
self.expanded_dim = exp_x.shape[1]
self.exp_x_mean = exp_x.mean(axis=0)
self.exp_x_std = exp_x.std(axis=0)
print("self.exp_x_mean=", self.exp_x_mean)
print("self.exp_x_std=", self.exp_x_std)
if (self.exp_x_std == 0).any():
er = "zero-component detected"
raise Exception(er)
        n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std  # Remove the mean and variance of the expansion
print("ranking n_exp_x ...")
rankings = rank_expanded_signals_max(x, n_exp_x, x, n_exp_x, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=True)
rankings *= self.feature_scaling_factor
print("rankings=", rankings)
if (rankings == 0).any():
er = "zero-component detected"
raise Exception(er)
self.perm1 = numpy.argsort(rankings)[::-1] # Sort in decreasing ranking
self.magn1 = rankings
print("self.perm1=", self.perm1)
s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking
s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first
if self.second_weighting:
print("ranking s_x_1 ...")
rankings_B = rank_expanded_signals_max(x, s_x_1, x, s_x_1, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=False)
print("rankings_B=", rankings_B)
if (rankings_B == 0).any():
er = "zero-component detected"
raise Exception(er)
self.perm1_B = numpy.argsort(rankings_B)[::-1] # Sort in decreasing ranking
self.magn1_B = rankings_B
print("self.perm1_B=", self.perm1_B)
# WARNING, this only works for normalized s_x_1
s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking
s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first
else:
s_x_1B = s_x_1
if self.use_sfa:
self.sfa_node = mdp.nodes.SFANode()
# TODO: Preserve amplitude
self.sfa_node.train(s_x_1B, block_size=block_size, train_mode=train_mode)
# , node_weights=None, edge_weights=None, scheduler = None, n_parallel=None)
self.sfa_node.stop_training()
print("self.sfa_node.d", self.sfa_node.d)
# Adaptive mechanism based on delta values
if isinstance(self.max_preserved_sfa, float):
self.num_sfa_features_preserved = (self.sfa_node.d <= self.max_preserved_sfa).sum()
elif isinstance(self.max_preserved_sfa, int):
self.num_sfa_features_preserved = self.max_preserved_sfa
else:
ex = "Cannot handle type of self.max_preserved_sfa"
print(ex)
raise Exception(ex)
# self.num_sfa_features_preserved = 10
sfa_x = self.sfa_node.execute(s_x_1B)
# TODO: Change internal variables of SFANode, so that we do not need to zero some components
# TODO: Is this equivalent to truncation of the matrices??? PERHAPS IT IS NOT !!!
sfa_x[:, self.num_sfa_features_preserved:] = 0.0
proj_sfa_x = self.sfa_node.inverse(sfa_x)
sfa_x = sfa_x[:, 0:self.num_sfa_features_preserved]
# Notice that sfa_x has WEIGHTED zero-mean, thus we correct this here?
self.sfa_x_mean = sfa_x.mean(axis=0)
self.sfa_x_std = sfa_x.std(axis=0)
print("self.sfa_x_mean=", self.sfa_x_mean)
print("self.sfa_x_std=", self.sfa_x_std)
sfa_x -= self.sfa_x_mean
sfa_removed_x = s_x_1B - proj_sfa_x # Remove sfa projection of data
else:
self.num_sfa_features_preserved = 0
sfa_x = numpy.ones((num_samples, 0))
sfa_removed_x = s_x_1B
pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved
if self.use_pca and pca_out_dim > 0:
self.pca_node = mdp.nodes.PCANode(output_dim=pca_out_dim)
self.pca_node.train(sfa_removed_x)
# TODO:check that pca_out_dim > 0
pca_x = self.pca_node.execute(sfa_removed_x)
self.pca_x_mean = pca_x.mean(axis=0)
self.pca_x_std = pca_x.std(axis=0)
print("self.pca_x_std=", self.pca_x_std)
if (self.pca_x_std == 0).any():
er = "zero-component detected"
raise Exception(er)
# TODO: Is this step needed? if heuristic works well this weakens algorithm
n_pca_x = (pca_x - self.pca_x_mean) / self.pca_x_std
else:
n_pca_x = sfa_removed_x[:, 0:pca_out_dim]
# Concatenate SFA and PCA signals and rank them preserving SFA components in ordering
if self.use_pca or self.use_sfa:
# TODO: Either both signals conserve magnitudes or they are both normalized
sfa_pca_x = numpy.concatenate((sfa_x, n_pca_x), axis=1)
sfa_pca_rankings = rank_expanded_signals_max(x, sfa_pca_x, x, sfa_pca_x, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev,
verbose=False)
sfa_pca_rankings *= self.feature_scaling_factor
# Only one magnitude normalization by node, but where should it be done? I guess after last transformation
print("sfa_pca_rankings=", sfa_pca_rankings)
if (sfa_pca_rankings == 0).any():
er = "zero-component detected"
raise Exception(er)
self.magn2 = sfa_pca_rankings
perm2a = numpy.arange(self.num_sfa_features_preserved, dtype="int")
perm2b = numpy.argsort(sfa_pca_rankings[self.num_sfa_features_preserved:])[::-1]
self.perm2 = numpy.concatenate((perm2a, perm2b + self.num_sfa_features_preserved))
print("second permutation=", self.perm2)
# WARNING, this only works for normalized sfa_pca_x
s_x_2 = sfa_pca_x * self.magn2 ** self.exponent_variance # Scale according to ranking
s_x_2 = s_x_2[:, self.perm2] # Permute with slow features first, and then most important signal first
else:
s_x_2 = n_pca_x
        # Truncating to output_dim components
s_x_2_truncated = s_x_2[:, 0:self.output_dim]
# Filtering output through SFA
if self.out_sfa_filter:
self.out_sfa_node = mdp.nodes.SFANode()
self.out_sfa_node.train(s_x_2_truncated, block_size=block_size, train_mode=train_mode)
self.out_sfa_node.stop_training()
sfa_filtered = self.out_sfa_node.execute(s_x_2_truncated)
else:
sfa_filtered = s_x_2_truncated
self.stop_training()
# def __init__(self, funcs, input_dim = None, dtype = None, \
# use_pseudoinverse=True, use_hint=False, max_steady_factor=1.5, \
# delta_factor=0.6, min_delta=0.00001):
#
#
#
# self.sfa_node.train(x, **argv)
def _is_invertible(self):
return True
def _execute(self, x):
x_orig = x + 0.0
num_samples = x.shape[0]
zm_x = x - self.x_mean
if self.exp_node:
exp_x = self.exp_node.execute(zm_x)
else:
exp_x = zm_x
n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std
if numpy.isnan(n_exp_x).any() or numpy.isinf(n_exp_x).any():
print("n_exp_x=", n_exp_x)
quit()
n_exp_x[numpy.isnan(n_exp_x)] = 0.0
if numpy.isnan(self.magn1).any():
print("self.magn1=", self.magn1)
quit()
s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking
s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first
if self.second_weighting:
s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking_B
s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first
else:
s_x_1B = s_x_1
if numpy.isnan(s_x_1B).any():
print("s_x_1B=", s_x_1B)
quit()
if self.use_sfa:
sfa_x = self.sfa_node.execute(s_x_1B)
# TODO: Change internal variables of SFANode, so that we do not need to zero some components
sfa_x[:, self.num_sfa_features_preserved:] = 0.0
proj_sfa_x = self.sfa_node.inverse(sfa_x)
sfa_x = sfa_x[:, 0:self.num_sfa_features_preserved]
sfa_x -= self.sfa_x_mean
sfa_removed_x = s_x_1B - proj_sfa_x
else:
sfa_x = numpy.ones((num_samples, 0))
sfa_removed_x = s_x_1B
pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved
if self.use_pca and pca_out_dim > 0:
pca_x = self.pca_node.execute(sfa_removed_x)
n_pca_x = (pca_x - self.pca_x_mean) / self.pca_x_std
else:
n_pca_x = sfa_removed_x[:, 0:pca_out_dim]
if self.use_pca or self.use_sfa:
sfa_pca_x = numpy.concatenate((sfa_x, n_pca_x), axis=1)
s_x_2 = sfa_pca_x * self.magn2 ** self.exponent_variance # Scale according to ranking
s_x_2 = s_x_2[:, self.perm2] # Permute with most important signal first
else:
s_x_2 = n_pca_x
if numpy.isnan(s_x_2).any():
print("s_x_2=", s_x_2)
quit()
        # Truncating to output_dim components
s_x_2_truncated = s_x_2[:, 0:self.output_dim]
# Filtering output through SFA
if self.out_sfa_filter:
sfa_filtered = self.out_sfa_node.execute(s_x_2_truncated)
else:
sfa_filtered = s_x_2_truncated
verbose = False
if verbose:
print("x[0]=", x_orig[0])
print("x_zm[0]=", x[0])
print("exp_x[0]=", exp_x[0])
print("s_x_1[0]=", s_x_1[0])
print("sfa_removed_x[0]=", sfa_removed_x[0])
print("proj_sfa_x[0]=", proj_sfa_x[0])
print("pca_x[0]=", pca_x[0])
print("n_pca_x[0]=", n_pca_x[0])
print("sfa_x[0]=", sfa_x[0] + self.sfa_x_mean)
print("s_x_2_truncated[0]=", s_x_2_truncated[0])
print("sfa_filtered[0]=", sfa_filtered[0])
return sfa_filtered
# TODO:Code inverse with SFA
def _inverse(self, y):
num_samples = y.shape[0]
if y.shape[1] != self.output_dim:
er = "Serious dimensionality inconsistency:", y.shape[0], self.output_dim
raise Exception(er)
# input_dim = self.input_dim
# De-Filtering output through SFA
sfa_filtered = y
if self.out_sfa_filter:
s_x_2_truncated = self.out_sfa_node.inverse(sfa_filtered)
else:
s_x_2_truncated = sfa_filtered
        # De-truncating output_dim components back to the expanded dimensionality
s_x_2_full = numpy.zeros((num_samples, self.expanded_dim))
s_x_2_full[:, 0:self.output_dim] = s_x_2_truncated
if self.use_pca or self.use_sfa:
perm_2_inv = numpy.zeros(self.expanded_dim, dtype="int")
# print "input_dim", input_dim
# print "self.perm2", self.perm2
# print "len(self.perm2)", len(self.perm2)
perm_2_inv[self.perm2] = numpy.arange(self.expanded_dim, dtype="int")
# print perm_2_inv
sfa_pca_x = s_x_2_full[:, perm_2_inv]
sfa_pca_x /= self.magn2 ** self.exponent_variance
sfa_x = sfa_pca_x[:, 0:self.num_sfa_features_preserved]
n_pca_x = sfa_pca_x[:, self.num_sfa_features_preserved:]
else:
# sfa_x = ...?
n_pca_x = s_x_2_full
pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved
if self.use_pca and pca_out_dim > 0:
pca_x = n_pca_x * self.pca_x_std + self.pca_x_mean
sfa_removed_x = self.pca_node.inverse(pca_x)
else:
sfa_removed_x = n_pca_x
if self.use_sfa:
sfa_x += self.sfa_x_mean
sfa_x_full = numpy.zeros((num_samples, self.expanded_dim))
sfa_x_full[:, 0:self.num_sfa_features_preserved] = sfa_x
proj_sfa_x = self.sfa_node.inverse(sfa_x_full)
s_x_1B = sfa_removed_x + proj_sfa_x
else:
s_x_1B = sfa_removed_x
if self.second_weighting:
perm_1B_inv = numpy.zeros(self.expanded_dim, dtype="int")
perm_1B_inv[self.perm1_B] = numpy.arange(self.expanded_dim, dtype="int")
s_x_1 = s_x_1B[:, perm_1B_inv]
s_x_1 /= self.magn1_B ** self.exponent_variance
else:
s_x_1 = s_x_1B
perm_1_inv = numpy.zeros(self.expanded_dim, dtype="int")
perm_1_inv[self.perm1] = numpy.arange(self.expanded_dim, dtype="int")
n_exp_x = s_x_1[:, perm_1_inv]
n_exp_x /= self.magn1 ** self.exponent_variance
exp_x = n_exp_x * self.exp_x_std + self.exp_x_mean
if self.exp_node:
zm_x = self.exp_node.inverse(exp_x)
else:
zm_x = exp_x
x = zm_x + self.x_mean
verbose = False
if verbose:
print("x[0]=", x[0])
print("zm_x[0]=", zm_x[0])
print("exp_x[0]=", exp_x[0])
print("s_x_1[0]=", s_x_1[0])
print("proj_sfa_x[0]=", proj_sfa_x[0])
print("sfa_removed_x[0]=", sfa_removed_x[0])
print("pca_x[0]=", pca_x[0])
print("n_pca_x[0]=", n_pca_x[0])
print("sfa_x[0]=", sfa_x[0])
return x
def export_to_libsvm(labels_classes, features, filename):
dim_features = features.shape[1]
filehandle = open(filename, "wb")
if len(features) != len(labels_classes):
er = "number of labels_classes %d does not match number of samples %d!" % (len(labels_classes), len(features))
raise Exception(er)
for i in range(len(features)):
filehandle.write("%d" % labels_classes[i])
for j in range(dim_features):
filehandle.write(" %d:%f" % (j + 1, features[i, j]))
filehandle.write("\n")
filehandle.close()
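def _demo_export_to_libsvm():
    # Illustrative sketch (not part of the original pipeline): write a tiny,
    # made-up feature matrix in libsvm format. The output file name is hypothetical.
    import numpy
    labels_classes = numpy.array([0, 1, 1])
    features = numpy.array([[0.5, 1.2],
                            [2.0, -0.3],
                            [1.1, 0.7]])
    export_to_libsvm(labels_classes, features, "example_training_data.libsvm")
    # Each line of the file reads "<class> 1:<f1> 2:<f2>", e.g. "0 1:0.500000 2:1.200000"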
def is_monotonic_increasing(x):
prev = x[0]
for curr in x[1:]:
if curr <= prev:
return False
prev = curr
return True
def compute_average_labels_for_each_class(classes, labels):
all_classes = numpy.unique(classes)
avg_labels = numpy.zeros(len(all_classes))
for i, cl in enumerate(all_classes):
avg_label = labels[classes == cl].mean()
avg_labels[i] = avg_label
return avg_labels
def map_class_numbers_to_avg_label(all_classes, avg_labels, class_numbers):
if not (is_monotonic_increasing(all_classes)):
er = "Array of class numbers should be monotonically increasing:" + str(all_classes)
raise Exception(er)
if not (is_monotonic_increasing(avg_labels)):
er = "SEVERE WARNING! Array of labels should be monotonically increasing:" + str(avg_labels)
raise Exception(er)
if len(all_classes) != len(avg_labels):
er = "SEVERE WARNING! Array of classes should have the same length as the array of labels: %d vs. %d" % \
(len(all_classes), len(avg_labels))
raise Exception(er)
indices = numpy.searchsorted(all_classes, class_numbers)
return avg_labels[indices]
def map_labels_to_class_number(all_classes, avg_labels, labels):
    if not (is_monotonic_increasing(all_classes)):
        er = "Array of class numbers should be monotonically increasing:" + str(all_classes)
        raise Exception(er)
    if not (is_monotonic_increasing(avg_labels)):
        er = "Array of labels should be monotonically increasing:" + str(avg_labels)
        raise Exception(er)
if len(all_classes) != len(avg_labels):
er = "Array of classes should have the same length as the array of labels:" + str(len(all_classes)) + \
" vs. " + str(len(avg_labels))
raise Exception(er)
interval_midpoints = (avg_labels[1:] + avg_labels[:-1]) / 2.0
indices = numpy.searchsorted(interval_midpoints, labels)
return all_classes[indices]
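def _demo_class_label_mapping():
    # Illustrative sketch (all values are made up): round-trip between class
    # numbers and their average labels using the helpers above.
    import numpy
    classes = numpy.array([0, 0, 1, 1, 2, 2])
    labels = numpy.array([1.0, 1.2, 2.0, 2.2, 3.1, 2.9])
    all_classes = numpy.unique(classes)                                   # [0, 1, 2]
    avg_labels = compute_average_labels_for_each_class(classes, labels)   # [1.1, 2.1, 3.0]
    # class numbers -> representative (average) labels
    print(map_class_numbers_to_avg_label(all_classes, avg_labels, numpy.array([2, 0])))  # [3.0, 1.1]
    # arbitrary labels -> nearest class number (via interval midpoints)
    print(map_labels_to_class_number(all_classes, avg_labels, numpy.array([1.4, 2.8])))  # [0, 2]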
def random_boolean_array(size):
return numpy.random.randint(2, size=size) == 1
def generate_random_sigmoid_weights(input_dim, num_features):
# scale_factor = 8.0 / numpy.sqrt(input_dim)
scale_factor = 1.0
c = numpy.random.normal(loc=0.0, scale=scale_factor, size=(input_dim, num_features))
c2 = (numpy.abs(c) ** 1.5)
# print "c2=", c2
# print "c2[0]=", c2[0]
c = 4.0 * | numpy.sign(c) | numpy.sign |
"""
Ovation Prime model modified from Ovation Pyme by lkilcommons: https://github.com/lkilcommons/OvationPyme
Note: this is a test version. Acknowledgement to the authors will be added after the test.
"""
import os
import datetime
from collections import OrderedDict
import numpy as np
from scipy import interpolate
# from ovationprime import ovation_utilities
# from ovationprime.ovation_utilities import robinson_auroral_conductance
# from ovationprime.ovation_utilities import brekke_moen_solar_conductance
# import geospacepy
# from geospacepy import special_datetime, sun, satplottools
import aacgmv2 # available on pip
# import apexpy
from logbook import Logger
log = Logger('OvationPyme.ovation_prime')
# Determine where this module's source file is located
# to determine where to look for the tables
src_file_dir = os.path.dirname(os.path.realpath(__file__))
ovation_datadir = os.path.join(src_file_dir, 'data')
def _check_for_old_jtype(estimator, type_of_flux):
"""Check the type of flux (2nd constructor argument) of
a FluxEstimator or SeasonalFluxEstimator class and
raise an extensive error to inform user that they need
to modify their calling function, and why"""
name = estimator.__class__.__name__
    explanation = ('Constructor interface to {} has changed'.format(name)
+ ' now the only valid second argument values'
+ ' (for type of flux) are "energy" or "number".\n'
+ ' Formerly, the second argument could take values'
+ ' which confused types of flux with types of aurora'
+ ' (e.g. you could specify ion or electron, which'
+ ' is a property of the auroral type (choose "ion"'
+ ' auroral type to get ion fluxes).\n'
+ ' If you wish to calculate average energy, you'
+ ' will need to switch from a FluxEstimator class'
+ ' to an AverageEnergyEstimator class')
if type_of_flux not in ['energy', 'number']:
        raise RuntimeError('{} is not a valid fluxtype.\n{}'.format(type_of_flux,
                                                                    explanation))
class LatLocaltimeInterpolator(object):
def __init__(self, mlat_grid, mlt_grid, var):
self.mlat_orig = mlat_grid
self.mlt_orig = mlt_grid
self.zvar = var
n_north, n_south = np.count_nonzero(self.mlat_orig > 0.), np.count_nonzero(self.mlat_orig < 0.)
if n_south == 0.:
self.hemisphere = 'N'
elif n_north == 0.:
self.hemisphere = 'S'
else:
raise ValueError(
'Latitude grid contains northern (N={0}) and southern (N={1}) values.'.format(n_north, n_south) + \
' Can only interpolate one hemisphere at a time.')
def interpolate(self, new_mlat_grid, new_mlt_grid, method='nearest'):
"""
        Rectangularize and interpolate onto a new MLAT/MLT grid
        (2D griddata interpolation; the default method is 'nearest')
"""
X0, Y0 = satplottools.latlt2cart(self.mlat_orig.flatten(), self.mlt_orig.flatten(), self.hemisphere)
X, Y = satplottools.latlt2cart(new_mlat_grid.flatten(), new_mlt_grid.flatten(), self.hemisphere)
interpd_zvar = interpolate.griddata((X0, Y0), self.zvar.flatten(), (X, Y), method=method, fill_value=0.)
return interpd_zvar.reshape(new_mlat_grid.shape)
class BinCorrector(object):
"""
We've found that often there are strange outlier bins that show up in
OvationPyme results. This attempts to identify them by computing a numerical
derivative around each ring of constant latitude.
"""
def __init__(self, mlat_grid, mlt_grid):
self.mlat_grid = mlat_grid
self.mlats = self.mlat_grid[:, 0].flatten()
self.mlt_grid = mlt_grid
self.mlts = self.mlt_grid[0, :].flatten()
self.dy_thresh = None
def fix(self, y_grid, min_mlat=49, max_mlat=75, label=''):
"""
Compute derivatives and attempt to identify bad bins
Assumes mlat varies along the first dimension of the gridded location
arrays
"""
debug = False
plot = False
bad_bins = np.zeros_like(y_grid, dtype=bool)
y_grid_corr = y_grid.copy()
if self.dy_thresh is None:
self.dy_thresh = 3. * np.nanstd(np.diff(y_grid.flatten()))
wraparound = lambda x, nwrap: np.concatenate([x[-1 * (nwrap + 1):-1], x, x[:nwrap]])
for i_mlat, mlat in enumerate(self.mlats):
if not (np.abs(mlat) >= min_mlat and np.abs(mlat) <= max_mlat):
if debug:
log.debug('MLAT ring at {0} mlat is not between'.format(mlat)
+ ' {0} and {1}'.format(min_mlat, max_mlat)
+ ' skipping')
continue
mlts_nowrap = self.mlt_grid[i_mlat, :].copy()
mlts_nowrap[mlts_nowrap < 0] += 24
mlts_nowrap[-1] = 23.9
y = y_grid[i_mlat, :]
            # Wrap around the first and last nwrap indices in MLT;
            # this prevents out-of-bounds errors in the spline/derivative
nwrap = 4 # Pchip is cubic so order+1
mlts = wraparound(mlts_nowrap, nwrap)
mlts[:nwrap] -= 24. # to keep mlt in increasing order
mlts[-1 * nwrap:] += 24.
y = wraparound(y, nwrap)
# y_i = interpolate.PchipInterpolator(mlts, y)
            dy = np.diff(np.concatenate([y[:1], y]))  # first-difference approximation to the derivative
i_dy = interpolate.interp1d(mlts, dy, kind='nearest')
mlt_mask = | np.ones_like(mlts, dtype=bool) | numpy.ones_like |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Characterization script
------------------
Built for characterizing VIRUS instrument as well as LRS2 on HET
Incomplete Documentation
"""
import matplotlib
matplotlib.use('agg')
import argparse as ap
import numpy as np
import glob
import os.path as op
import os
import sys
from amplifier import Amplifier
from utils import biweight_location, biweight_midvariance
from CreateTexWriteup import CreateTex
from distutils.dir_util import mkpath
from astropy.io import fits
from operator import itemgetter
import logging
from scipy.signal import medfilt2d
import matplotlib.pyplot as plt
from fiber_utils import fit_continuum_sky, find_maxima
from utils import biweight_bin
matplotlib.rcParams['font.sans-serif'] = "Meiryo"
matplotlib.rcParams['font.family'] = "sans-serif"
# plt.style.use('seaborn-colorblind')
cmap = plt.get_cmap('Greys_r')
AMPS = ["LL", "LU", "RU", "RL"]
def write_fits(hdu, name):
try:
hdu.writeto(name, overwrite=True)
except:
hdu.writeto(name, clobber=True)
def setup_logging():
    '''Set up logging for the characterization script, which allows us to track
    the status of calls and when errors/warnings occur.
Returns
-------
log : class
log.info() is for general print and log.error() is for raise cases
'''
log = logging.getLogger('characterize')
if not len(log.handlers):
# Set format for logger
fmt = '[%(levelname)s - %(asctime)s] %(message)s'
fmt = logging.Formatter(fmt)
# Set level of logging
level = logging.INFO
# Set handler for logging
handler = logging.StreamHandler()
handler.setFormatter(fmt)
handler.setLevel(level)
        # Build log with name, characterize
log = logging.getLogger('characterize')
log.setLevel(logging.DEBUG)
log.addHandler(handler)
return log
def parse_args(argv=None):
"""Parse the command line arguments
Parameters
----------
argv : list of string
arguments to parse; if ``None``, ``sys.argv`` is used
Returns
-------
Namespace
parsed arguments
"""
description = '''Characterize a VIRUS calibration data set
    This script is used to characterize a set of calibration data,
and can be run either on a dataset from the lab or a dataset from
the mountain.
(Note that the line breaks may require multiple copy and pastes)
Example calls are as follows:
python Panacea/characterize.py --rootdir '/data/characterization_lab'
--output 'Characterized' -bd 20170909 -bo 3090010 -dd 20170909
-do 3090012 -xd 20170909 -xo 3090005 -pd 20170909 -po 3090009
-fd 20170908 -fo 3090001 --specid 309 --ifuslot 999 -q
The description of each input parameter is listed below.'''
parser = ap.ArgumentParser(description=description,
formatter_class=ap.RawTextHelpFormatter)
parser.add_argument("--ifuslot", nargs='?', type=str,
help='''Single ifuslot value. [REQUIRED]
Ex: "075".''', default=None)
parser.add_argument("-us", "--use_structure",
help='''Use well defined structure.''',
action="count", default=0)
parser.add_argument("--date", nargs='?', type=str,
help='''If using "use_structure then [REQUIRED].
Ex: "20170912".''', default=None)
parser.add_argument("--specid", nargs='?', type=str,
help='''Single specid value. [REQUIRED]
Ex: "304".''', default=None)
parser.add_argument("--instr", nargs='?', type=str,
help='''Instrument to process.
Default: "camra"
Ex: "camra" for lab data,
"virus" for mountain.''', default="camra")
parser.add_argument("--output", nargs='?', type=str,
help='''Output Directory
Default: \"characterized"''',
default="characterized")
parser.add_argument("--rootdir", nargs='?', type=str,
help='''Root Directory
Default: \"/work/03946/hetdex/maverick\"''',
default="/work/03946/hetdex/maverick")
obstype = ['bia', 'drk', 'pxf', 'ptc', 'msk', 'ldl', 'arc']
obsletter = ['b', 'd', 'x', 'p', 'm', 'l', 'a']
obsname = ['Bias', 'Dark', 'Pixel Flat', 'Photon Transfer Curve',
'Masked Fiber Flat', 'LDLS Fiber Flat', 'Arc Lamp']
for t, l, n in zip(obstype, obsletter, obsname):
parser.add_argument("-%sd" % l, "--%sdir_date" % t, nargs='?',
type=str, help=''' %s Directory Date.''' % n,
default=None)
parser.add_argument("-%so" % l, "--%sdir_obsid" % t, nargs='?',
type=str, help=''' %s Directory Observation ID.'''
% n,
default=None)
parser.add_argument("-%se" % l, "--%sdir_expnum" % t, nargs='?',
type=str, help=''' %s Directory Exposure Number.'''
% n,
default=None)
parser.add_argument("-q", "--quick", help='''Quicker Version.''',
action="count", default=0)
parser.add_argument("-dcb", "--dont_check_bias",
help='''Don't make masterbias.''',
action="count", default=0)
parser.add_argument("-dcd", "--dont_check_dark",
help='''Don't make masterdark.''',
action="count", default=0)
parser.add_argument("-dcr", "--dont_check_readnoise",
help='''Don't check the readnoise.''',
action="count", default=0)
parser.add_argument("-dcg", "--dont_check_gain",
help='''Don't check the gain.''',
action="count", default=0)
parser.add_argument("-dcp", "--dont_check_pixelflat",
help='''Don't make pixelflat.''',
action="count", default=0)
parser.add_argument("-dcm", "--dont_check_mask",
help='''Don't check masked fiber flats''',
action="count", default=0)
parser.add_argument("-dcl", "--dont_check_ldls",
help='''Don't check ldls fiber flats''',
action="count", default=0)
args = parser.parse_args(args=argv)
return args
def read_in_raw(args):
log = setup_logging()
# Check that the arguments are filled
if args.ifuslot:
args.ifuslot = "%03d" % int(args.ifuslot)
else:
msg = 'No IFUSLOT was provided, exiting now.'
log.error(msg)
sys.exit(1)
labels = ['dir_date', 'dir_obsid', 'dir_expnum']
observations = []
if args.use_structure:
if args.date is None:
msg = '"use_structure" is True but "--date" was not set.'
msg += ' Exiting now.'
log.error(msg)
sys.exit(1)
args.biadir_date = args.date
args.biadir_obsid = '%03d%04d' % (int(args.specid), 10)
args.drkdir_date = args.date
args.drkdir_obsid = '%03d%04d' % (int(args.specid), 12)
args.ptcdir_date = args.date
args.ptcdir_obsid = '%03d%04d' % (int(args.specid), 9)
args.pxfdir_date = args.date
args.pxfdir_obsid = '%03d%04d' % (int(args.specid), 5)
args.mskdir_date = args.date
args.mskdir_obsid = '%03d%04d' % (int(args.specid), 3)
args.ldldir_date = args.date
args.ldldir_obsid = '%03d%04d' % (int(args.specid), 1)
args.arcdir_date = args.date
args.arcdir_obsid = '%03d%04d' % (int(args.specid), 2)
if not args.dont_check_bias:
observations.append('bia')
if not args.dont_check_dark:
observations.append('drk')
if not args.dont_check_gain:
observations.append('ptc')
if not args.dont_check_pixelflat:
observations.append('pxf')
if not args.dont_check_mask:
observations.append('msk')
if not args.dont_check_ldls:
observations.append('ldl')
observations.append('arc')
for obs in observations:
amp_list = []
for label in labels[:2]:
getattr(args, obs+label)
if getattr(args, obs+label) is None:
msg = '%s%s was not provided.' % (obs, label)
msg += ' Exiting now.'
log.error(msg)
sys.exit(1)
else:
setattr(args, obs+label,
getattr(args, obs+label).replace(" ", "").split(','))
if getattr(args, obs+labels[2]) is not None:
setattr(args, obs+labels[2],
getattr(args, obs+labels[2]).replace(" ", "").split(','))
for date in getattr(args, obs+labels[0]):
for obsid in getattr(args, obs+labels[1]):
if getattr(args, obs+labels[2]) is not None:
for expnum in getattr(args, obs+labels[2]):
folder = op.join(date,
args.instr,
"{:s}{:07d}".format(args.instr,
int(obsid)),
"exp{:02d}".format(int(expnum)),
args.instr)
filepath = op.join(args.rootdir, folder,
'*_%s*.fits' % args.ifuslot)
files = sorted(glob.glob(filepath))
if not len(files):
print('Found no files for path: %s' % filepath)
for fn in files:
amp = op.basename(fn).split('_')[1][-2:]
amp_list.append([fn, obs, amp])
else:
folder = op.join(date, args.instr,
"{:s}{:07d}".format(args.instr,
int(obsid)))
filepath = op.join(args.rootdir, folder, '*',
args.instr,
'*_%s*.fits'
% args.ifuslot)
files = sorted(glob.glob(filepath))
if not len(files):
print('Found no files for path: %s' % filepath)
for fn in files:
amp = op.basename(fn).split('_')[1][-2:]
amp_list.append([fn, obs, amp])
setattr(args, obs + '_list', amp_list)
return args
def make_plot(image_dict, outfile_name, vmin=-5, vmax=15):
a, b = image_dict[AMPS[0]].shape
fig = plt.figure(figsize=((1.*b/a)*4, 4))
for i, amp in enumerate(AMPS):
ax = plt.subplot(2, 2, i+1)
ax.imshow(image_dict[amp], vmin=vmin, vmax=vmax, cmap=cmap,
origin='lower', interpolation='none')
ax.text(b*.1, a*.7, amp, fontsize=24, color='r')
ax.set_xticks([])
ax.set_yticks([])
plt.subplots_adjust(wspace=0.025, hspace=0.025)
fig.savefig(outfile_name)
def make_ptc_plot(mn_dict, vr_dict, gain, rd, outfile_name, lowlim=100,
highlim=50000):
    fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True,
                           figsize=(6, 6))
xhl = np.log10(highlim)
xll = np.log10(lowlim)
yhl = np.log10(np.sqrt(highlim))
yll = np.log10(np.sqrt(lowlim))
cnt = 0
x = np.logspace(xll, xhl)
for i, row in enumerate(ax):
for j, cell in enumerate(row):
amp = AMPS[cnt]
cell.plot(x, np.sqrt(1./gain[amp]*x), 'r', label='Shot')
cell.plot(x, np.sqrt(rd[amp])*np.ones(x.shape), 'g',
label='Read Noise')
cell.plot(x, np.sqrt(1./gain[amp]*x+rd[amp]), 'k',
label='Shot+Read')
cell.plot(mn_dict[amp], np.sqrt(vr_dict[amp]), label='Measured')
cell.text(10**(0.8*(xhl-xll)+xll), 10**(0.3*(yhl-yll)+yll), amp,
fontsize=24, color='r')
cell.set_xlim([lowlim+0.5, highlim])
cell.set_ylim([np.sqrt(lowlim)+1.5, np.sqrt(highlim)])
cell.set_xscale('log')
cell.set_yscale('log')
if i == 0 and j == 0:
cell.legend(loc='best', fancybox=True, framealpha=0.5)
cnt += 1
fig.text(0.5, 0.025, 'Signal', ha='center', fontsize=18)
fig.text(0.025, 0.5, 'Noise', va='center', rotation='vertical',
fontsize=18)
plt.subplots_adjust(wspace=0.00, hspace=0.00)
fig.savefig(outfile_name)
def check_bias(args, amp, folder, edge=3, width=10):
# Create empty lists for the left edge jump, right edge jump, and structure
left_edge, right_edge, structure, overscan = [], [], [], []
bia_list = []
for itm in args.bia_list:
if itm[2] == amp:
bia_list.append(Amplifier(itm[0], '', name=itm[1]))
bia_list[-1].subtract_overscan()
bia_list[-1].trim_image()
# Select only the bias frames that match the input amp, e.g., "RU"
sel = [i for i, v in enumerate(bia_list) if v.amp == amp]
log = bia_list[sel[0]].log
overscan_list = [[v.overscan_value for i, v in enumerate(bia_list)
if v.amp == amp]]
overscan = biweight_location(overscan_list)
log.info('Overscan value for %s: %0.3f' % (amp, overscan))
# Loop through the bias list and measure the jump/structure
big_array = np.array([v.image for v in itemgetter(*sel)(bia_list)])
if args.quick:
func = np.median
else:
func = biweight_location
masterbias = func(big_array, axis=(0,))
a, b = masterbias.shape
hdu = fits.PrimaryHDU(np.array(masterbias, dtype='float32'))
log.info('Writing masterbias_%s.fits' % (amp))
write_fits(hdu,
op.join(folder, 'masterbias_%s_%s.fits' % (args.specid, amp)))
left_edge = func(masterbias[:, edge:edge+width])
right_edge = func(masterbias[:, (b-width-edge):(b-edge)])
structure = func(masterbias[:, edge:(b-edge)], axis=(0,))
log.info('Left edge - Overscan, Right edge - Overscan: %0.3f, %0.3f'
% (left_edge, right_edge))
return left_edge, right_edge, structure, overscan, masterbias
def check_darks(args, amp, folder, masterbias, edge=3, width=10):
    # Create an empty list for the measured dark counts
    dark_counts = []
drk_list = []
for itm in args.drk_list:
if itm[2] == amp:
drk_list.append(Amplifier(itm[0], '', name=itm[1]))
drk_list[-1].subtract_overscan()
drk_list[-1].trim_image()
# Select only the dark frames that match the input amp, e.g., "RU"
sel = [i for i, v in enumerate(drk_list) if v.amp == amp]
log = drk_list[sel[0]].log
if len(sel) <= 2 or args.quick:
func = np.median
else:
func = biweight_location
log.info('Writing masterdark_%s.fits' % (amp))
    if len(sel) == 1:
        big_array = (drk_list[sel[0]].image - masterbias)[np.newaxis, :, :]
else:
big_array = np.array([v.image - masterbias
for v in itemgetter(*sel)(drk_list)])
masterdark = func(big_array, axis=(0,))
a, b = masterdark.shape
hdu = fits.PrimaryHDU(np.array(masterdark, dtype='float32'))
write_fits(hdu,
op.join(folder, 'masterdark_%s_%s.fits' % (args.specid, amp)))
    # Loop through the dark list and measure the average dark counts per second
for s in sel:
am = drk_list[s]
a, b = am.image.shape
dark_counts.append(func(am.image - masterbias) / am.exptime)
s = biweight_location(dark_counts)
log.info('Average Dark counts/s: %0.5f' % s)
return s, masterdark
def measure_readnoise(args, amp):
# Select only the bias frames that match the input amp, e.g., "RU"
bia_list = []
for itm in args.bia_list:
if itm[2] == amp:
bia_list.append(Amplifier(itm[0], '', name=itm[1]))
bia_list[-1].subtract_overscan()
bia_list[-1].trim_image()
sel = [i for i, v in enumerate(bia_list) if v.amp == amp]
log = bia_list[sel[0]].log
# Make array of all bias images for given amp
array_images = np.array([bia.image for bia in
itemgetter(*sel)(bia_list)])
# Measure the biweight midvariance (sigma) for a given pixel and take
# the biweight average over all sigma to reduce the noise in the first
# measurement.
if args.quick:
func1 = np.median
func2 = np.std
else:
func1 = biweight_location
func2 = biweight_midvariance
S = func1(func2(array_images, axis=(0,)))
log.info("RDNOISE(ADU) for %s: %01.3f" % (amp, S))
return S
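def _demo_readnoise_recipe():
    # Synthetic sanity check of the read-noise recipe above, using plain numpy
    # stand-ins for the biweight estimators. The 3 ADU noise level and frame
    # sizes are made up for illustration.
    np.random.seed(0)
    true_rdnoise = 3.0  # ADU
    bias_stack = 1000.0 + np.random.normal(0.0, true_rdnoise, size=(20, 64, 64))
    per_pixel_sigma = np.std(bias_stack, axis=0)   # scatter of each pixel across frames
    recovered = np.median(per_pixel_sigma)         # robust average over pixels
    print("recovered read noise (ADU): %0.2f" % recovered)  # ~3.0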
def measure_gain(args, amp, rdnoise, flow=500, fhigh=35000, fnum=50):
ptc_list = []
for itm in args.ptc_list:
if itm[2] == amp:
ptc_list.append(Amplifier(itm[0], '', name=itm[1]))
ptc_list[-1].subtract_overscan()
ptc_list[-1].trim_image()
sel = [i for i, v in enumerate(ptc_list) if v.amp == amp]
log = ptc_list[sel[0]].log
s_sel = list(np.array(sel)[
np.array([ptc_list[i].basename for i in sel]).argsort()])
    npairs = len(sel) // 2
a, b = ptc_list[sel[0]].image.shape
array_avg = np.zeros((npairs, a, b))
array_diff = np.zeros((npairs, a, b))
if args.quick:
func1 = np.median
func2 = np.std
else:
func1 = biweight_location
func2 = biweight_midvariance
    for i in range(npairs):
F1 = ptc_list[s_sel[2*i]].image
F2 = ptc_list[s_sel[2*i+1]].image
m1 = func1(F1)
m2 = func1(F2)
array_avg[i, :, :] = (F1 + F2) / 2.
array_diff[i, :, :] = F1 * m2 / m1 - F2
bins = np.logspace(np.log10(flow), np.log10(fhigh), fnum)
gn = []
array_avg = array_avg.ravel()
array_diff = array_diff.ravel()
mn_list = []
vr_list = []
    for i in range(len(bins)-1):
loc = np.where((array_avg > bins[i]) * (array_avg < bins[i+1]))[0]
if len(loc) > 1e3:
std = func2(array_diff[loc])
vr = (std**2) / 2.
vr_c = (std**2 - 2.*rdnoise**2) / 2.
mn = func1(array_avg[loc])
log.info("%s | Gain: %01.3f | RDNOISE (e-): %01.3f | <ADU>: %0.1f"
" | VAR: %0.1f | Pixels: %i"
% (amp, mn / vr_c, mn / vr_c * rdnoise, mn, vr, len(loc)))
gn.append(mn / vr_c)
mn_list.append(mn)
vr_list.append(vr)
sel = np.where((np.array(mn_list) > 1000.)*(np.array(mn_list) < 15000.))[0]
if len(sel) > 2:
s = func1(np.array(gn)[sel])
log.info("Average Gain measurement for %s: %0.3f"
% (amp, s))
else:
log.warning("Not enough points for gain measurement, using -99.0")
s = -99.
return s, mn_list, vr_list, rdnoise**2
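def _demo_ptc_gain():
    # Toy photon-transfer check of the gain estimate above: simulate one flat-field
    # pair with a known gain (e-/ADU) and recover gain ~ mean / (var(diff) / 2).
    # All numbers are made up; read noise is ignored for simplicity.
    np.random.seed(1)
    true_gain = 2.0            # electrons per ADU
    mean_electrons = 20000.0   # electrons per pixel
    F1 = np.random.poisson(mean_electrons, size=(512, 512)) / true_gain  # ADU
    F2 = np.random.poisson(mean_electrons, size=(512, 512)) / true_gain  # ADU
    mn = 0.5 * (F1.mean() + F2.mean())
    vr = np.var(F1 - F2) / 2.0
    print("recovered gain (e-/ADU): %0.2f" % (mn / vr))  # ~2.0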
def make_pixelflats(args, amp, folder):
pxf_list = []
for itm in args.pxf_list:
if itm[2] == amp:
pxf_list.append(Amplifier(itm[0], '', name=itm[1]))
pxf_list[-1].subtract_overscan()
pxf_list[-1].trim_image()
sel = [i for i, v in enumerate(pxf_list) if v.amp == amp]
log = pxf_list[sel[0]].log
a, b = pxf_list[sel[0]].image.shape
masterflat = np.zeros((len(sel), a, b))
for i, am in enumerate(itemgetter(*sel)(pxf_list)):
masterflat[i, :, :] = am.image
masterflat = np.median(masterflat, axis=(0,))
smooth = medfilt2d(masterflat, (151, 1))
masterflat = np.where(masterflat < 1e-8, 0.0, smooth / masterflat)
smooth = medfilt2d(masterflat, (1, 151))
pixflat = np.where(masterflat < 1e-8, 0.0, smooth / masterflat)
hdu = fits.PrimaryHDU(np.array(pixflat, dtype='float32'))
log.info('Writing pixelflat_%s.fits' % amp)
write_fits(hdu, op.join(folder, 'pixelflat_%s.fits' % amp))
return masterflat, pixflat
def power_law(x, c1, c2=.5, c3=.15, c4=1., sig=2.5):
return c1 / (c2 + c3 * np.power(np.abs(x / sig), c4))
def make_master_image(args, amp_list, masterbias, masterdark, use_mean=False):
''' Make a master image from a selection in a list '''
if len(amp_list) <= 2 or args.quick:
if use_mean:
func = np.mean
else:
func = np.median
else:
func = biweight_location
big_array = np.array([v.image - masterbias - masterdark
for v in amp_list])
master = func(big_array, axis=(0,))
return master
def get_average_spec(fibers, nbins=1000):
masterwave = []
masterspec = []
for fib, fiber in enumerate(fibers):
masterwave.append(fiber.wavelength)
masterspec.append(fiber.spectrum)
masterwave = np.hstack(masterwave)
masterspec = np.hstack(masterspec)
nwave = np.linspace(masterwave.min(), masterwave.max(), nbins)
return nwave, biweight_bin(nwave, masterwave, masterspec)
def check_ldls(args, amp, masterbias, masterdark, outname, folder, gain):
''' Works on contrast/fibermodel/wavelength/trace '''
    # Select only the LDLS flat frames that match the input amp, e.g., "RU"
ldl_list = []
for itm in args.ldl_list:
if itm[2] == amp:
ldl_list.append(Amplifier(itm[0], '', name=itm[1]))
ldl_list[-1].subtract_overscan()
ldl_list[-1].trim_image()
sel = [i for i, v in enumerate(ldl_list) if v.amp == amp]
log = ldl_list[sel[0]].log
log.info('Writing masterflat_%s.fits' % (amp))
masterflat = make_master_image(args, ldl_list, masterbias, masterdark)
A = ldl_list[sel[0]]
A.image = masterflat
A.orient_image()
hdu = fits.PrimaryHDU(np.array(A.image, dtype='float32'))
write_fits(hdu, op.join(folder, 'masterflat_%s_%s.fits'
% (args.specid, amp)))
A.image_prepped = True
A.use_trace_ref = False
A.refit = True
A.use_pixelflat = False
A.gain = gain
A.multiply_gain()
A.check_fibermodel = True
A.check_trace = False
A.path = folder
A.get_fibermodel()
os.rename(op.join(folder, 'fibmodel_%s.png' % A.basename),
op.join(folder, 'contrast_%s.png' % amp))
A.fibers = get_wavelength_from_arc(args, amp, masterbias, masterdark,
outname, folder, A.fibers)
A.fiberextract()
wave, avgspec = get_average_spec(A.fibers)
waven = np.vstack([fiber.wavelength for fiber in A.fibers])
specn = np.vstack([fiber.spectrum for fiber in A.fibers])
waver, specr = rectify(waven, specn)
hdu = fits.PrimaryHDU(np.array(specr, dtype='float32'))
hdu.header['CRVAL1'] = waver[0]
    hdu.header['CDELT1'] = waver[1] - waver[0]
write_fits(hdu, op.join(folder, 'Femasterflat_%s_%s.fits'
% (args.specid, amp)))
colors = plt.get_cmap('RdBu')(np.linspace(0., 1., len(A.fibers)))
fig = plt.figure(figsize=(12, 8))
for i, fiber in enumerate(A.fibers):
plt.plot(fiber.wavelength, fiber.spectrum, color=colors[i],
alpha=0.3)
plt.plot(wave, avgspec, color='magenta', lw=4, label='Average')
plt.xlim([3480, 5530])
plt.ylim([0., 300000.])
plt.xlabel('Wavelength')
plt.ylabel('e- per exposure')
plt.legend()
plt.savefig(op.join(folder, 'ldls_spectra_%s.png' % amp))
plt.close(fig)
return masterflat
def get_wavelength_from_arc(args, amp, masterbias, masterdark, outname, folder,
fibers):
    # Select only the arc frames that match the input amp, e.g., "RU"
arc_list = []
for itm in args.arc_list:
if itm[2] == amp:
arc_list.append(Amplifier(itm[0], '', name=itm[1]))
arc_list[-1].subtract_overscan()
arc_list[-1].trim_image()
sel = [i for i, v in enumerate(arc_list) if v.amp == amp]
log = arc_list[sel[0]].log
log.info('Writing masterarc_%s.fits' % (amp))
    masterarc = make_master_image(args, arc_list, masterbias, masterdark,
                                  use_mean=True)
    A = arc_list[sel[0]]
    A.image = masterarc
A.orient_image()
A.image_prepped = True
hdu = fits.PrimaryHDU(np.array(A.image, dtype='float32'))
write_fits(hdu, op.join(folder, 'masterarc_%s_%s.fits'
% (args.specid, amp)))
A.fibers = list(fibers)
A.fiberextract()
wave_list = [[3652.1026, 78], [4046.5539, 277], [4077.8298, 293],
[4358.3253, 435], [4678.149, 596], [4799.912, 658],
[5085.822, 808], [5460.7366, 1005]]
if len(A.fibers[0].spectrum) > 1032:
thresh = 1e4
else:
thresh = 1e2
for fiber in A.fibers:
y = fiber.spectrum
x = np.arange(len(y))
d1 = np.diff(y)
selu = np.where(d1 > thresh)[0]
sell = np.where(d1 < -thresh)[0]
ind = []
for i in selu:
cont = True
for j in ind:
if np.abs(j - i) < 5:
cont = False
if cont:
u = selu[np.where(np.abs(selu - i) < 10)[0]]
l = sell[np.where(np.abs(sell - i) < 10)[0]]
v = (u.sum() + l.sum()) / (len(u) + len(l))
ind.append(v)
fac = len(y) / 1032
pr = np.array(ind) / fac
d = []
off = 0.0
for wvi in wave_list:
loc = np.argmin(np.abs(pr - wvi[1]))
if np.abs(pr[loc] - wvi[1] - off) < 15*fac:
off = pr[loc] - wvi[1]
d.append([pr[loc]*fac, wvi[0]])
d = np.array(d)
p0 = np.polyfit(d[:, 0] / (len(y)*1.), d[:, 1], 3)
fiber.wavelength = np.polyval(p0, x / (len(y)*1.))
return A.fibers
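def _demo_wavelength_fit():
    # Stripped-down version of the wavelength solution above: a cubic polynomial of
    # wavelength vs. normalized pixel position for a few matched arc lines. The
    # pixel positions below are made up for illustration.
    n_pix = 1032
    matched = np.array([[78.3, 3652.1026],     # [pixel, wavelength (Angstrom)]
                        [435.9, 4358.3253],
                        [658.2, 4799.912],
                        [1005.4, 5460.7366]])
    p0 = np.polyfit(matched[:, 0] / float(n_pix), matched[:, 1], 3)
    wavelength = np.polyval(p0, np.arange(n_pix) / float(n_pix))
    print(wavelength[[0, n_pix // 2, n_pix - 1]])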
def check_masked_fibers(args, amp, masterbias, masterdark, outname, folder):
    # Select only the masked fiber flat frames that match the input amp, e.g., "RU"
msk_list = []
for itm in args.msk_list:
if itm[2] == amp:
msk_list.append(Amplifier(itm[0], '', name=itm[1]))
msk_list[-1].subtract_overscan()
msk_list[-1].trim_image()
sel = [i for i, v in enumerate(msk_list) if v.amp == amp]
log = msk_list[sel[0]].log
    log.info('Writing mastermaskedflat_%s.fits' % (amp))
mastermaskflat = make_master_image(args, msk_list, masterbias, masterdark)
A = msk_list[sel[0]]
A.image = mastermaskflat
A.orient_image()
hdu = fits.PrimaryHDU(np.array(A.image, dtype='float32'))
write_fits(hdu, op.join(folder, 'mastermaskedflat_%s_%s.fits'
% (args.specid, amp)))
A.image_prepped = True
A.use_trace_ref = False
A.refit = True
A.use_pixelflat = False
A.trace_y_window = 50.
A.trace_repeat_length = 40
A.gain = 1.
A.check_trace = False
A.get_trace()
n, d = A.image.shape
col = np.arange(d)
nwave = 3
fsize = 15
radius = 5.
fibs = [2, 5]
cols = np.arange(d)
f, ax = plt.subplots(len(fibs), nwave, sharey=True, sharex=True,
figsize=(nwave*4, len(fibs)*4))
stot = 0
for fiber in A.fibers:
llim = np.array(np.max([ | np.zeros((d,)) | numpy.zeros |
import numpy as np
import random
from collections import namedtuple, deque
from models import DQN, DuelingDQN
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-2 # for soft update of target parameters
LR = 4.85e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class DQNAgent():
"""Interacts with and learns from the environment."""
def __init__(self, name, state_size, action_size, use_double_dqn=False, use_dueling=False, seed=0, lr_decay=0.9999, use_prioritized_replay=False):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.name = name
self.state_size = state_size
self.action_size = action_size
self.use_double_dqn = use_double_dqn
self.use_dueling = use_dueling
self.seed = random.seed(seed)
self.use_prioritized_replay = use_prioritized_replay
# Q-Network
if use_dueling:
self.qnetwork_local = DuelingDQN(state_size, action_size, seed).to(device)
self.qnetwork_target = DuelingDQN(state_size, action_size, seed).to(device)
else:
self.qnetwork_local = DQN(state_size, action_size, seed).to(device)
self.qnetwork_target = DQN(state_size, action_size, seed).to(device)
self.qnetwork_target.eval()
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
self.lr_scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, lr_decay)
# Replay memory
if self.use_prioritized_replay:
self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, seed, alpha=0.2, beta=0.8, beta_scheduler=1.0)
else:
self.memory = ReplayBuffer(BUFFER_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample(BATCH_SIZE)
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
# Epsilon-greedy action selection
if random.random() > eps:
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
if self.use_prioritized_replay:
states, actions, rewards, next_states, dones, indices, weights = experiences
else:
states, actions, rewards, next_states, dones = experiences
with torch.no_grad():
# Get max predicted Q values (for next states) from target model
if self.use_double_dqn:
best_local_actions = self.qnetwork_local(states).max(1)[1].unsqueeze(1)
Q_targets_next = self.qnetwork_target(next_states).gather(1, best_local_actions).max(1)[0].unsqueeze(1)
else:
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions)
if self.use_prioritized_replay:
Q_targets.sub_(Q_expected)
Q_targets.squeeze_()
Q_targets.pow_(2)
with torch.no_grad():
td_error = Q_targets.detach()
#td_error.pow_(0.5)
td_error.mul_(weights)
self.memory.update_priorities(indices, td_error)
Q_targets.mul_(weights)
loss = Q_targets.mean()
else:
# Compute loss
loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.lr_scheduler.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
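def _example_training_loop(env, n_episodes=500, eps_start=1.0, eps_end=0.01,
                           eps_decay=0.995):
    """Hypothetical Gym-style training loop for DQNAgent (illustrative sketch).
    The `env` object, the state/action sizes and the epsilon schedule are
    made-up stand-ins, not part of this module."""
    agent = DQNAgent("dqn", state_size=8, action_size=4, use_double_dqn=True)
    eps = eps_start
    scores = []
    for _ in range(n_episodes):
        state = env.reset()
        score, done = 0.0, False
        while not done:
            action = agent.act(state, eps)
            next_state, reward, done, _ = env.step(action)
            agent.step(state, action, reward, next_state, done)  # store and maybe learn
            state = next_state
            score += reward
        eps = max(eps_end, eps_decay * eps)
        scores.append(score)
    return scores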
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, buffer_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
buffer_size (int): maximum size of buffer
seed (int): random seed
"""
self.memory = deque(maxlen=buffer_size)
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self, batch_size):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy( | np.vstack([e.done for e in experiences if e is not None]) | numpy.vstack |
from __future__ import print_function
import numpy as np
from scipy.ndimage import rotate
import scipy.io
import math
import tensorflow as tf
import matplotlib.pylab as plt
import random
import scipy
import cv2
from skimage.transform import rescale, resize, downscale_local_mean
from scipy import ndimage
import os
from numpy import *
import imageio
refPt = []
sequence_length=100
def gaussian(px, py, desv=30./2.5):
x=np.linspace(1.0, 256.0, num=256)
y = np.linspace(1.0, 212.0, num=212)
X, Y = np.meshgrid(x, y)
    px = float(px)
    py = float(py)
    z = np.exp(-(np.square((X - px) / desv) / 2) - (np.square((Y - py) / desv) / 2))
    z = z * 255
    z = np.expand_dims(z, axis=2)
z = | np.uint8(z) | numpy.uint8 |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.convert.to_list
def test_keep_None_in_place_test():
v2_array = ak._v2.highlevel.Array([[3, 2, 1], [], None, [4, 5]]).layout
assert to_list(v2_array.argsort(axis=1)) == [
[2, 1, 0],
[],
None,
[0, 1],
]
assert to_list(v2_array.sort(axis=1)) == [
[1, 2, 3],
[],
None,
[4, 5],
]
assert to_list(v2_array.sort(axis=1)) == [[1, 2, 3], [], None, [4, 5]]
assert v2_array.typetracer.sort(axis=1).form == v2_array.argsort(axis=1).form
assert to_list(v2_array.argsort(axis=1)) == [[2, 1, 0], [], None, [0, 1]]
def test_keep_None_in_place_test_2():
v2_array = ak._v2.highlevel.Array([[3, 2, 1], [], None, [4, 5]]).layout
assert v2_array.typetracer.argsort(axis=1).form == v2_array.argsort(axis=1).form
@pytest.mark.skip(reason="FIXME: v2 highlevel argsort has not been implemented yet")
def test_empty_slice():
electron = ak._v2.highlevel.Array(
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index64(np.array([0, 0, 1], np.int64)),
ak._v2.contents.RecordArray(
[ak._v2.contents.NumpyArray(np.array([1.0]))],
["pt"],
parameters={"__record__": "Electron"},
),
)
)
v2_electron = electron.layout[[[], []]]
assert to_list(v2_electron) == [[], []]
id = ak._v2.operations.structure.argsort(electron, axis=1)
assert to_list(v2_electron[id]) == [[], []]
assert v2_electron.typetracer[id].form == v2_electron[id].form
def test_masked():
v2_array = ak._v2.highlevel.Array([[0, 1, 2, 3], [3, 3, 3, 2, 1]])
is_valid = v2_array != 3
v2_array_mask = ak._v2.highlevel.Array(
ak._v2.contents.ListOffsetArray(
v2_array.layout.offsets,
ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(is_valid.layout.content.data),
v2_array.layout.content,
valid_when=True,
),
)
)
assert to_list(v2_array_mask) == [
[0, 1, 2, None],
[None, None, None, 2, 1],
]
assert to_list(v2_array_mask.layout.sort(axis=1)) == [
[0, 1, 2, None],
[1, 2, None, None, None],
]
assert (
v2_array_mask.layout.typetracer.sort(axis=1).form
== v2_array_mask.layout.sort(axis=1).form
)
def test_v1_argsort_and_v2_sort():
v2_array = ak._v2.highlevel.Array([1, 2, None, 3, 0, None]).layout
assert to_list(v2_array.sort()) == [
0,
1,
2,
3,
None,
None,
]
assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_v1_argsort_2d_and_v2_sort():
v2_array = ak._v2.highlevel.Array(
[[1, 2, None, 3, 0, None], [1, 2, None, 3, 0, None]]
).layout
assert to_list(v2_array.sort()) == [
[
0,
1,
2,
3,
None,
None,
],
[
0,
1,
2,
3,
None,
None,
],
]
assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_nan():
v2_array = ak._v2.highlevel.Array([1, 2, np.nan, 3, 0, np.nan]).layout
assert str(to_list(v2_array.sort())) == "[nan, nan, 0.0, 1.0, 2.0, 3.0]"
assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_sort_strings():
v2_array = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight"]
).layout
assert to_list(v2_array) == [
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
]
assert to_list(v2_array.sort()) == [
"eight",
"five",
"four",
"one",
"seven",
"six",
"three",
"two",
]
assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_sort_nested_strings():
v2_array = ak._v2.highlevel.Array(
[["one", "two"], ["three", "four", "five"], ["six"], ["seven", "eight"]]
).layout
assert to_list(v2_array) == [
["one", "two"],
["three", "four", "five"],
["six"],
["seven", "eight"],
]
assert to_list(v2_array.sort()) == [
["one", "two"],
["five", "four", "three"],
["six"],
["eight", "seven"],
]
assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_sort_invalid_axis():
v2_array = ak._v2.operations.convert.from_numpy(
np.array([[3.3, 2.2], [1.1, 5.5], [4.4, 6.6]]),
regulararray=True,
highlevel=False,
)
with pytest.raises(ValueError) as err:
v2_array.sort(axis=3)
assert str(err.value).startswith(
"axis=3 exceeds the depth of the nested list structure (which is 2)"
)
def test_numpy_array_iscontiguous():
matrix = np.arange(64).reshape(8, -1)
v2_layout = ak._v2.contents.NumpyArray(matrix[:, 0])
assert not v2_layout.is_contiguous
assert to_list(v2_layout) == [0, 8, 16, 24, 32, 40, 48, 56]
matrix2 = np.arange(64).reshape(8, -1)
v2_array = ak._v2.contents.NumpyArray(matrix2[:, 0])
assert not v2_array.is_contiguous
assert to_list(v2_array.sort()) == [0, 8, 16, 24, 32, 40, 48, 56]
assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_numpyarray_sort():
v2_array = ak._v2.operations.convert.from_numpy(
np.array([3.3, 2.2, 1.1, 5.5, 4.4]), regulararray=True, highlevel=False
)
assert to_list(np.sort( | np.asarray(v2_array) | numpy.asarray |