| prompt | completion | api |
|---|---|---|
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
from reinforcement_learning.market.random_agent import RandomAgent
import sys
sys.path.insert(0, '../../../etf_data')
from etf_data_loader import load_all_data_from_file2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import seaborn as sns
def gen_random_date(year_low, year_high):
y = np.random.randint(year_low, year_high)
m = | np.random.randint(1, 12) | numpy.random.randint |
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from numpy.testing import assert_allclose
import numpy as np
import pytest
from thermo import eos
from thermo.eos import *
from thermo.utils import allclose_variable
from scipy.misc import derivative
from scipy.constants import R
from math import log, exp, sqrt
@pytest.mark.slow
@pytest.mark.sympy
def test_PR_with_sympy():
# Test with hexane
from sympy import Rational, symbols, sqrt, solve, diff, integrate, N
P, T, V = symbols('P, T, V')
Tc = Rational('507.6')
Pc = 3025000
omega = Rational('0.2975')
X = (-1 + (6*sqrt(2)+8)**Rational(1,3) - (6*sqrt(2)-8)**Rational(1,3))/3
c1 = (8*(5*X+1)/(49-37*X)) # 0.45724
c2 = (X/(X+3)) # 0.07780
R = Rational('8.3144598')
a = c1*R**2*Tc**2/Pc
b = c2*R*Tc/Pc
kappa = Rational('0.37464')+ Rational('1.54226')*omega - Rational('0.26992')*omega**2
a_alpha = a*(1 + kappa*(1-sqrt(T/Tc)))**2
PR_formula = R*T/(V-b) - a_alpha/(V*(V+b)+b*(V-b)) - P
# First test - volume, liquid
T_l, P_l = 299, 1000000
PR_obj_l = PR(T=T_l, P=P_l, Tc=507.6, Pc=3025000, omega=0.2975)
solns = solve(PR_formula.subs({T: T_l, P:P_l}))
solns = [N(i) for i in solns]
V_l_sympy = float([i for i in solns if i.is_real][0])
V_l_sympy = 0.00013022208100139964
assert_allclose(PR_obj_l.V_l, V_l_sympy)
def numeric_sub_l(expr):
return float(expr.subs({T: T_l, P:P_l, V:PR_obj_l.V_l}))
# First derivatives
dP_dT = diff(PR_formula, T)
assert_allclose(numeric_sub_l(dP_dT), PR_obj_l.dP_dT_l)
dP_dV = diff(PR_formula, V)
assert_allclose(numeric_sub_l(dP_dV), PR_obj_l.dP_dV_l)
dV_dT = -diff(PR_formula, T)/diff(PR_formula, V)
assert_allclose(numeric_sub_l(dV_dT), PR_obj_l.dV_dT_l)
dV_dP = -dV_dT/diff(PR_formula, T)
assert_allclose(numeric_sub_l(dV_dP), PR_obj_l.dV_dP_l)
# Checks out with solve as well
dT_dV = 1/dV_dT
assert_allclose(numeric_sub_l(dT_dV), PR_obj_l.dT_dV_l)
dT_dP = 1/dP_dT
assert_allclose(numeric_sub_l(dT_dP), PR_obj_l.dT_dP_l)
# Second derivatives of two variables, easy ones
d2P_dTdV = diff(dP_dT, V)
assert_allclose(numeric_sub_l(d2P_dTdV), PR_obj_l.d2P_dTdV_l)
d2P_dTdV = diff(dP_dV, T)
assert_allclose(numeric_sub_l(d2P_dTdV), PR_obj_l.d2P_dTdV_l)
# Second derivatives of one variable, easy ones
d2P_dT2 = diff(dP_dT, T)
assert_allclose(numeric_sub_l(d2P_dT2), PR_obj_l.d2P_dT2_l)
d2P_dT2_maple = -506.20125231401374
assert_allclose(d2P_dT2_maple, PR_obj_l.d2P_dT2_l)
d2P_dV2 = diff(dP_dV, V)
assert_allclose(numeric_sub_l(d2P_dV2), PR_obj_l.d2P_dV2_l)
d2P_dV2_maple = 4.482165856520912834998e+17
assert_allclose(d2P_dV2_maple, PR_obj_l.d2P_dV2_l)
# Second derivatives of one variable, Hard ones - require a complicated identity
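# (Added note: the identity comes from differentiating dV/dT = -(dP/dT)/(dP/dV) once more
# along a constant-P path, using d/dT f(T, V(T)) = df/dT + df/dV * dV/dT; expanding the
# quotient gives the two-term expression below, and the d2T_dV2 case follows by symmetry.)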
d2V_dT2 = (-(d2P_dT2*dP_dV - dP_dT*d2P_dTdV)*dP_dV**-2
+(d2P_dTdV*dP_dV - dP_dT*d2P_dV2)*dP_dV**-3*dP_dT)
assert_allclose(numeric_sub_l(d2V_dT2), PR_obj_l.d2V_dT2_l)
d2V_dT2_maple = 1.16885136854333385E-9
assert_allclose(d2V_dT2_maple, PR_obj_l.d2V_dT2_l)
d2V_dP2 = -d2P_dV2/dP_dV**3
assert_allclose(numeric_sub_l(d2V_dP2), PR_obj_l.d2V_dP2_l)
d2V_dP2_maple = 9.10336131405833680E-21
assert_allclose(d2V_dP2_maple, PR_obj_l.d2V_dP2_l)
d2T_dP2 = -d2P_dT2*dP_dT**-3
assert_allclose(numeric_sub_l(d2T_dP2), PR_obj_l.d2T_dP2_l)
d2T_dP2_maple = 2.564684443971313e-15
assert_allclose(d2T_dP2_maple, PR_obj_l.d2T_dP2_l)
d2T_dV2 = (-(d2P_dV2*dP_dT - dP_dV*d2P_dTdV)*dP_dT**-2
+(d2P_dTdV*dP_dT - dP_dV*d2P_dT2)*dP_dT**-3*dP_dV)
assert_allclose(numeric_sub_l(d2T_dV2), PR_obj_l.d2T_dV2_l)
d2T_dV2_maple = -291578941281.8895
assert_allclose(d2T_dV2_maple, PR_obj_l.d2T_dV2_l)
# Second derivatives of two variable, Hard ones - require a complicated identity
d2T_dPdV = -(d2P_dTdV*dP_dT - dP_dV*d2P_dT2)*dP_dT**-3
assert_allclose(numeric_sub_l(d2T_dPdV), PR_obj_l.d2T_dPdV_l)
d2T_dPdV_maple = 0.0699417049626260466429
assert_allclose(d2T_dPdV_maple, PR_obj_l.d2T_dPdV_l)
d2V_dPdT = -(d2P_dTdV*dP_dV - dP_dT*d2P_dV2)*dP_dV**-3
assert_allclose(numeric_sub_l(d2V_dPdT), PR_obj_l.d2V_dPdT_l)
d2V_dPdT_maple = -3.772507759880541967e-15
assert_allclose(d2V_dPdT_maple, PR_obj_l.d2V_dPdT_l)
# Cv integral, real slow
# The Cv integral is possible with a more general form, but not here
# The S and H integrals don't work in Sympy at present
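# (For reference: the Cv integral mentioned above is the residual heat capacity obtained from
# the internal-energy departure U_dep = integral over V from infinity to V of [T*(dP/dT)_V - P] dV,
# so Cv_dep = (dU_dep/dT)_V = integral over V from infinity to V of T*(d2P/dT2)_V dV.)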
def test_PR_quick():
# Test solution for molar volumes
eos = PR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
Vs_fast = eos.volume_solutions(299, 1E6, eos.b, eos.delta, eos.epsilon, eos.a_alpha)
Vs_slow = eos.volume_solutions(299, 1E6, eos.b, eos.delta, eos.epsilon, eos.a_alpha, quick=False)
Vs_expected = [(0.00013022208100139953-0j), (0.001123630932618011+0.0012926962852843173j), (0.001123630932618011-0.0012926962852843173j)]
assert_allclose(Vs_fast, Vs_expected)
assert_allclose(Vs_slow, Vs_expected)
# Test of a_alphas
a_alphas = [3.801259426590328, -0.006647926028616357, 1.6930127618563258e-05]
a_alphas_fast = eos.a_alpha_and_derivatives(299, quick=True)
assert_allclose(a_alphas, a_alphas_fast)
a_alphas_slow = eos.a_alpha_and_derivatives(299, quick=False)
| assert_allclose(a_alphas, a_alphas_slow) | numpy.testing.assert_allclose |
# -*- coding: utf-8 -*-
"""
.. _tut-ieeg-localize:
========================================
Locating Intracranial Electrode Contacts
========================================
Intracranial electrophysiology recording contacts are generally localized
based on a post-implantation computed tomography (CT) image and a
pre-implantation magnetic resonance (MR) image. The CT image has greater
intensity than the background at each of the electrode contacts and
for the skull. Using the skull, the CT can be aligned to MR-space.
Contact locations in MR-space are the goal because this is the image from which
brain structures can be determined using the
:ref:`tut-freesurfer-reconstruction`. Contact locations in MR-space can also
be translated to a template space such as ``fsaverage`` for group comparisons.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nibabel as nib
from dipy.align import (affine_registration, center_of_mass, translation,
rigid, affine, resample)
from dipy.align.reslice import reslice
import mne
from mne.datasets import fetch_fsaverage
print(__doc__)
np.set_printoptions(suppress=True) # suppress scientific notation
# paths to mne datasets - sample sEEG and FreeSurfer's fsaverage subject
# which is in MNI space
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subjects_dir = op.join(sample_path, 'subjects')
# use mne-python's fsaverage data
fetch_fsaverage(subjects_dir=subjects_dir, verbose=True) # downloads if needed
###############################################################################
# Freesurfer recon-all
# ====================
#
# The first step is the most time consuming; the freesurfer reconstruction.
# This process segments out the brain from the rest of the MR image and
# determines which voxels correspond to each brain area based on a template
# deformation. This process takes approximately 8 hours so plan accordingly.
#
# .. code-block:: bash
#
# $ export SUBJECT=sample_seeg
# $ export SUBJECTS_DIR=$MY_DATA_DIRECTORY
# $ recon-all -subjid $SUBJECT -sd $SUBJECTS_DIR \
# -i $MISC_PATH/seeg/sample_seeg_T1.mgz -all -deface
#
# .. note::
# You may need to include an additional ``-cw256`` flag which can be added
# to the end of the recon-all command if your MR scan is not
# ``256 x 256 x 256`` voxels.
#
# .. note::
# Using the ``--deface`` flag will create a defaced, anonymized T1 image
# located at ``$MY_DATA_DIRECTORY/$SUBJECT/mri/orig_defaced.mgz``,
# which is helpful for when you publish your data. You can also use
# :func:`mne_bids.write_anat` and pass ``deface=True``.
###############################################################################
# Aligning the CT to the MR
# =========================
#
# Let's load our T1 and CT images and visualize them. You can hardly
# see the CT; it is so misaligned that it is mostly out of view, though a
# part of the skull is visible upside down and far off center in the middle plot.
# Clearly, we need to align the CT to the T1 image.
def plot_overlay(image, compare, title, thresh=None):
"""Define a helper function for comparing plots."""
image = nib.orientations.apply_orientation(
image.get_fdata().copy(), nib.orientations.axcodes2ornt(
nib.orientations.aff2axcodes(image.affine)))
compare = nib.orientations.apply_orientation(
compare.get_fdata().copy(), nib.orientations.axcodes2ornt(
nib.orientations.aff2axcodes(compare.affine)))
if thresh is not None:
compare[compare < np.quantile(compare, thresh)] = np.nan
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle(title)
for i, ax in enumerate(axes):
ax.imshow(np.take(image, [image.shape[i] // 2], axis=i).squeeze().T,
cmap='gray')
ax.imshow(np.take(compare, [compare.shape[i] // 2],
axis=i).squeeze().T, cmap='gist_heat', alpha=0.5)
ax.invert_yaxis()
ax.axis('off')
fig.tight_layout()
T1 = nib.load(op.join(misc_path, 'seeg', 'sample_seeg_T1.mgz'))
CT_orig = nib.load(op.join(misc_path, 'seeg', 'sample_seeg_CT.mgz'))
# resample to T1 shape
CT_resampled = resample(moving=CT_orig.get_fdata(),
static=T1.get_fdata(),
moving_affine=CT_orig.affine,
static_affine=T1.affine,
between_affine=None)
plot_overlay(T1, CT_resampled, 'Unaligned CT Overlaid on T1', thresh=0.95)
###############################################################################
# Now we need to align our CT image to the T1 image.
#
# .. code-block:: python
#
# # normalize intensities
# mri_to = T1.get_fdata().copy()
# mri_to /= mri_to.max()
# ct_from = CT_orig.get_fdata().copy()
# ct_from /= ct_from.max()
#
# # downsample for speed
# zooms = (5, 5, 5)
# mri_to, affine_to = reslice(
# mri_to, affine=T1.affine,
# zooms=T1.header.get_zooms()[:3], new_zooms=zooms)
# ct_from, affine_from = reslice(
# ct_from, affine=CT_orig.affine,
# zooms=CT_orig.header.get_zooms()[:3], new_zooms=zooms)
#
# # first optimize the translation on the zoomed images using
# # ``factors`` which looks at the image at different scales
# reg_affine = affine_registration(
# moving=ct_from,
# static=mri_to,
# moving_affine=affine_from,
# static_affine=affine_to,
# nbins=32,
# metric='MI',
# pipeline=[center_of_mass, translation],
# level_iters=[100, 100, 10],
# sigmas=[3.0, 1.0, 0.0],
# factors=[4, 2, 1])[1]
#
# CT_translated = resample(moving=CT_orig.get_fdata(),
# static=T1.get_fdata(),
# moving_affine=CT_orig.affine,
# static_affine=T1.affine,
# between_affine=reg_affine)
#
# # Now, fine-tune the registration
# reg_affine = affine_registration(
# moving=CT_translated.get_fdata(),
# static=T1.get_fdata(),
# moving_affine=CT_translated.affine,
# static_affine=T1.affine,
# nbins=32,
# metric='MI',
# pipeline=[rigid],
# level_iters=[100, 100, 10],
# sigmas=[3.0, 1.0, 0.0],
# factors=[4, 2, 1])[1]
#
# CT_aligned = resample(moving=CT_translated.get_fdata(),
# static=T1.get_fdata(),
# moving_affine=CT_translated.affine,
# static_affine=T1.affine,
# between_affine=reg_affine)
###############################################################################
# The previous section takes several minutes to execute so the results are
# presented here pre-computed for convenience.
alignment_affine = np.array([
[0.99235816, -0.03412124, 0.11857915, -133.22262329],
[0.04601133, 0.99402046, -0.09902669, -97.64542095],
[-0.11449119, 0.10372593, 0.98799428, -84.39915646],
[0., 0., 0., 1.]])
CT_aligned = resample(moving=CT_orig.get_fdata(),
static=T1.get_fdata(),
moving_affine=CT_orig.affine,
static_affine=T1.affine,
between_affine=alignment_affine)
plot_overlay(T1, CT_aligned, 'Aligned CT Overlaid on T1', thresh=0.95)
###############################################################################
# We can now see how the CT image looks properly aligned to the T1 image.
#
# .. note::
# The hyperintense skull is actually aligned to the hypointensity between
# the brain and the scalp. The brighter area surrounding the skull in the
# MR is actually subcutaneous fat.
# make low intensity parts of the CT transparent for easier visualization
CT_data = CT_aligned.get_fdata().copy()
CT_data[CT_data < | np.quantile(CT_data, 0.95) | numpy.quantile |
# -*- coding: utf-8 -*-
"""MCIR for PET with primal-dual algorithms.
Usage:
PET_MCIR_PD [--help | options]
Options:
-T <pattern>, --trans=<pattern> transformation pattern, * or % wildcard
(e.g., tm_ms*.txt). Enclose in quotations.
-t <str>, --trans_type=<str> transformation type (tm, disp, def)
[default: tm]
-S <pattern>, --sino=<pattern> sinogram pattern, * or % wildcard
(e.g., sino_ms*.hs). Enclose in quotations.
-a <pattern>, --attn=<pattern> attenuation pattern, * or % wildcard
(e.g., attn_ms*.hv). Enclose in quotations.
-R <pattern>, --rand=<pattern> randoms pattern, * or % wildcard
(e.g., rand_ms*.hs). Enclose in quotations.
-n <norm>, --norm=<norm> ECAT8 bin normalization file
-e <int>, --epoch=<int> num epochs [default: 10]
-r <string>, --reg=<string> regularisation ("None","FGP_TV","explicit_TV", ...)
[default: None]
-o <outp>, --outp=<outp> output file prefix [default: recon]
--outpath=<string> output folder path [default: './']
--param_path=<string> param folder path [default: './']
--nxny=<nxny> image x and y dimension [default: 127]
--dxdy=<dxdy> image x and y spacing
(default: determined by scanner)
-I <str>, --initial=<str> Initial estimate
--visualisations show visualisations
--nifti save output as nifti
--gpu use GPU projector
-v <int>, --verbosity=<int> STIR verbosity [default: 0]
-s <int>, --save_interval=<int> save every x iterations [default: 10]
--descriptive_fname option to have descriptive filenames
--update_obj_fn_interval=<int> frequency to update objective function
[default: 1]
--alpha=<val> regularisation strength (if used)
[default: 0.5]
--reg_iters=<val> Number of iterations for the regularisation
subproblem [default: 100]
--precond Use preconditioning
--numSegsToCombine=<val> Rebin all sinograms, with a given number of
segments to combine. Increases speed.
--numViewsToCombine=<val> Rebin all sinograms, with a given number of
views to combine. Increases speed.
--normaliseDataAndBlock Normalise raw data and block operator by
multiplying by 1./normK.
--algorithm=<string> Which algorithm to run [default: spdhg]
--numThreads=<int> Number of threads to use
--numSubsets=<int> Number of physical subsets to use [default: 1]
--gamma=<val> parameter controlling primal-dual trade-off (>1 promotes dual)
[default: 1.]
--PowerMethod_iters=<val> number of iterations for the computation of operator norms
with the power method [default: 10]
--templateAcqData Use template acd data
--StorageSchemeMemory Use memory storage scheme
"""
# SyneRBI Synergistic Image Reconstruction Framework (SIRF)
# Copyright 2020 University College London.
#
# This is software developed for the Collaborative Computational
# Project in Synergistic Reconstruction for Biomedical Imaging
# (formerly CCP PETMR)
# (http://www.ccpsynerbi.ac.uk/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from os import path
import os
from glob import glob
from docopt import docopt
import matplotlib.pyplot as plt
import numpy as np
from sirf.Utilities import error, show_2D_array, examples_data_path
import sirf.Reg as reg
import sirf.STIR as pet
from cil.framework import BlockDataContainer, ImageGeometry, BlockGeometry
from cil.optimisation.algorithms import PDHG, SPDHG
from cil.optimisation.functions import \
KullbackLeibler, BlockFunction, IndicatorBox, MixedL21Norm, ScaledFunction
from cil.optimisation.operators import \
CompositionOperator, BlockOperator, LinearOperator, GradientOperator, ScaledOperator
from cil.plugins.ccpi_regularisation.functions import FGP_TV
from ccpi.filters import regularisers
from cil.utilities.multiprocessing import NUM_THREADS
__version__ = '0.1.0'
args = docopt(__doc__, version=__version__)
###########################################################################
# Global set-up
###########################################################################
# storage scheme
if args['--StorageSchemeMemory']:
pet.AcquisitionData.set_storage_scheme('memory')
else:
pet.AcquisitionData.set_storage_scheme('default')
# Verbosity
pet.set_verbosity(int(args['--verbosity']))
if int(args['--verbosity']) == 0:
msg_red = pet.MessageRedirector(None, None, None)
# Number of threads
numThreads = int(args['--numThreads']) if args['--numThreads'] else NUM_THREADS
pet.set_max_omp_threads(numThreads)
if args['--templateAcqData']:
template_acq_data = pet.AcquisitionData('Siemens_mMR', span=11, max_ring_diff=15, view_mash_factor=1)
def main():
"""Run main function."""
use_gpu = True if args['--gpu'] else False
###########################################################################
# Parse input files
###########################################################################
[num_ms, trans_files, sino_files, attn_files, rand_files] = \
get_filenames(args['--trans'],args['--sino'],args['--attn'],args['--rand'])
###########################################################################
# Read input
###########################################################################
[trans, sinos_raw, attns, rands_raw] = \
read_files(trans_files, sino_files, attn_files, rand_files, args['--trans_type'])
sinos = pre_process_sinos(sinos_raw, num_ms)
rands = pre_process_sinos(rands_raw, num_ms)
###########################################################################
# Initialise recon image
###########################################################################
image = get_initial_estimate(sinos,use_gpu)
###########################################################################
# Set up resamplers
###########################################################################
if trans is None:
resamplers = None
else:
resamplers = [get_resampler(image, trans=tran) for tran in trans]
###########################################################################
# Resample attenuation images (if necessary)
###########################################################################
resampled_attns = resample_attn_images(num_ms, attns, trans, use_gpu, image)
print ("resampled_attns", len (resampled_attns))
###########################################################################
# Set up acquisition models (one per motion state)
###########################################################################
acq_models, masks = set_up_acq_models(
num_ms, sinos, rands, resampled_attns, image, use_gpu)
###########################################################################
# Set up reconstructor
###########################################################################
if args['--reg']=='explicit_TV':
[F, G, K, normK, tau, sigma, use_axpby, prob, gamma] = set_up_explicit_reconstructor(
use_gpu, num_ms, image, acq_models, resamplers, masks, sinos, rands)
else:
[F, G, K, normK, tau, sigma, use_axpby, prob, gamma] = set_up_reconstructor(
use_gpu, num_ms, acq_models, resamplers, masks, sinos, rands)
###########################################################################
# Get output filename
###########################################################################
outp_file = get_output_filename(
attn_files, normK, sigma, tau, sino_files, resamplers, use_gpu)
###########################################################################
# Get algorithm
###########################################################################
algo, num_iter = get_algo(F, G, K, normK, tau, sigma, gamma, use_axpby, prob, outp_file,image)
###########################################################################
# Create save call back function
###########################################################################
save_callback = get_save_callback_function(outp_file, num_iter)
###########################################################################
# Run the reconstruction
###########################################################################
# algo.run(num_iter, verbose=2, print_interval=1, callback=save_callback)
algo.run(num_iter, verbose=2, callback=save_callback)
def get_filenames(trans, sino, attn, rand):
"""Get filenames."""
trans_pattern = str(trans).replace('%', '*')
sino_pattern = str(sino).replace('%', '*')
attn_pattern = str(attn).replace('%', '*')
rand_pattern = str(rand).replace('%', '*')
if sino_pattern is None:
raise AssertionError("--sino missing")
trans_files = sorted(glob(trans_pattern))
sino_files = sorted(glob(sino_pattern))
attn_files = sorted(glob(attn_pattern))
rand_files = sorted(glob(rand_pattern))
num_ms = len(sino_files)
# Check some sinograms found
if num_ms == 0:
raise AssertionError("No sinograms found at {}!".format(sino_pattern))
# Should have as many trans as sinos
if len(trans_files) > 0 and num_ms != len(trans_files):
raise AssertionError("#trans should match #sinos. "
"#sinos = " + str(num_ms) +
", #trans = " + str(len(trans_files)))
# If any rand, check num == num_ms
if len(rand_files) > 0 and len(rand_files) != num_ms:
raise AssertionError("#rand should match #sinos. "
"#sinos = " + str(num_ms) +
", #rand = " + str(len(rand_files)))
# For attn, there should be 0, 1 or num_ms images
if len(attn_files) > 1 and len(attn_files) != num_ms:
raise AssertionError("#attn should be 0, 1 or #sinos")
return [num_ms, trans_files, sino_files, attn_files, rand_files]
def read_files(trans_files, sino_files, attn_files, rand_files, trans_type):
"""Read files."""
if trans_files == []:
trans = None
else:
if trans_type == "tm":
trans = [reg.AffineTransformation(file) for file in trans_files]
elif trans_type == "disp":
trans = [reg.NiftiImageData3DDisplacement(file)
for file in trans_files]
elif trans_type == "def":
trans = [reg.NiftiImageData3DDeformation(file)
for file in trans_files]
else:
raise error("Unknown transformation type")
sinos_raw = [pet.AcquisitionData(file) for file in sino_files]
attns = [pet.ImageData(file) for file in attn_files]
# fix a problem with the header which doesn't allow
# to do algebra with randoms and sinogram
rands_arr = [pet.AcquisitionData(file).as_array() for file in rand_files]
rands_raw = [ s * 0 for s in sinos_raw ]
for r,a in zip(rands_raw, rands_arr):
r.fill(a)
return [trans, sinos_raw, attns, rands_raw]
def pre_process_sinos(sinos_raw, num_ms):
"""Preprocess raw sinograms.
Make positive if necessary and do any required rebinning."""
# If empty (e.g., no randoms), return
if not sinos_raw:
return sinos_raw
# Loop over all sinograms
sinos = [0]*num_ms
for ind in range(num_ms):
# If any sinograms contain negative values
# (shouldn't be the case), set them to 0
sino_arr = sinos_raw[ind].as_array()
if (sino_arr < 0).any():
print("Input sinogram " + str(ind) +
" contains -ve elements. Setting to 0...")
sinos[ind] = sinos_raw[ind].clone()
sino_arr[sino_arr < 0] = 0
sinos[ind].fill(sino_arr)
else:
sinos[ind] = sinos_raw[ind]
# If rebinning is desired
segs_to_combine = 1
if args['--numSegsToCombine']:
segs_to_combine = int(args['--numSegsToCombine'])
views_to_combine = 1
if args['--numViewsToCombine']:
views_to_combine = int(args['--numViewsToCombine'])
if segs_to_combine * views_to_combine > 1:
sinos[ind] = sinos[ind].rebin(segs_to_combine, views_to_combine)
# only print first time
if ind == 0:
print("Rebinned sino dimensions: {sinos[ind].dimensions()}")
return sinos
def get_initial_estimate(sinos, use_gpu):
"""Get initial estimate."""
# from the arguments
initial_estimate = args['--initial']
nxny = int(args['--nxny'])
if initial_estimate:
image = pet.ImageData(initial_estimate)
elif args['--templateAcqData']:
image = sinos[0].create_uniform_image(0., (127, 220, 220))
image.initialise(dim=(127, 220, 220), vsize=(2.03125, 1.7080754, 1.7080754))
else:
# Create image based on ProjData
image = sinos[0].create_uniform_image(0.0, (nxny, nxny))
# If using GPU, need to make sure that image is right size.
if use_gpu:
dim = (127, 320, 320)
spacing = (2.03125, 2.08626, 2.08626)
# elif non-default spacing desired
elif args['--dxdy']:
dim = image.dimensions()
dxdy = float(args['--dxdy'])
spacing = (image.voxel_sizes()[0], dxdy, dxdy)
if use_gpu or args['--dxdy']:
image.initialise(dim=dim,
vsize=spacing)
image.fill(0.0)
return image
def get_resampler(image, ref=None, trans=None):
"""Return a NiftyResample object for the specified transform and image."""
if ref is None:
ref = image
resampler = reg.NiftyResample()
resampler.set_reference_image(ref)
resampler.set_floating_image(image)
resampler.set_padding_value(0)
resampler.set_interpolation_type_to_linear()
if trans is not None:
resampler.add_transformation(trans)
return resampler
def resample_attn_images(num_ms, attns, trans, use_gpu, image):
"""Resample attenuation images if necessary."""
resampled_attns = None
if trans is None:
resampled_attns = attns
else:
if len(attns) > 0:
resampled_attns = [0]*num_ms
# if using GPU, dimensions of attn and recon images have to match
ref = image if use_gpu else None
for i in range(num_ms):
# if we only have 1 attn image, then we need to resample into
# space of each gate. However, if we have num_ms attn images,
# then assume they are already in the correct position, so use
# None as transformation.
tran = trans[i] if len(attns) == 1 else None
# If only 1 attn image, then resample that. If we have num_ms
# attn images, then use each attn image of each frame.
attn = attns[0] if len(attns) == 1 else attns[i]
resam = get_resampler(attn, ref=ref, trans=tran)
resampled_attns[i] = resam.forward(attn)
return resampled_attns
def set_up_acq_models(num_ms, sinos, rands, resampled_attns, image, use_gpu):
"""Set up acquisition models."""
print("Setting up acquisition models...")
# From the arguments
algo = str(args['--algorithm'])
nsub = int(args['--numSubsets']) if args['--numSubsets'] and algo=='spdhg' else 1
norm_file = args['--norm']
verbosity = int(args['--verbosity'])
if not use_gpu:
acq_models = [pet.AcquisitionModelUsingRayTracingMatrix() for k in range(nsub * num_ms)]
else:
acq_models = [pet.AcquisitionModelUsingNiftyPET() for k in range(nsub * num_ms)]
for acq_model in acq_models:
acq_model.set_use_truncation(True)
acq_model.set_cuda_verbosity(verbosity)
acq_model.set_num_tangential_LORs(10)
# create masks
im_one = image.clone().allocate(1.)
masks = []
# If present, create ASM from ECAT8 normalisation data
asm_norm = None
if norm_file:
if not path.isfile(norm_file):
raise error("Norm file not found: " + norm_file)
asm_norm = pet.AcquisitionSensitivityModel(norm_file)
# Loop over each motion state
for ind in range(num_ms):
# Create attn ASM if necessary
asm_attn = None
if resampled_attns:
s = sinos[ind]
ra = resampled_attns[ind]
am = pet.AcquisitionModelUsingRayTracingMatrix()
asm_attn = get_asm_attn(s,ra,am)
# Get ASM dependent on attn and/or norm
asm = None
if asm_norm and asm_attn:
if ind == 0:
print("ASM contains norm and attenuation...")
asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
elif asm_norm:
if ind == 0:
print("ASM contains norm...")
asm = asm_norm
elif asm_attn:
if ind == 0:
print("ASM contains attenuation...")
asm = asm_attn
# Loop over physical subsets
for k in range(nsub):
current = k * num_ms + ind
if asm:
acq_models[current].set_acquisition_sensitivity(asm)
#KT we'll set the background in the KL function below
#KTif len(rands) > 0:
#KT acq_models[ind].set_background_term(rands[ind])
# Set up
acq_models[current].set_up(sinos[ind], image)
acq_models[current].num_subsets = nsub
acq_models[current].subset_num = k
# compute masks
if ind==0:
mask = acq_models[current].direct(im_one)
masks.append(mask)
# rescale by number of gates
if num_ms > 1:
acq_models[current] = ScaledOperator(acq_models[current], 1./num_ms)
return acq_models, masks
def get_asm_attn(sino, attn, acq_model):
"""Get attn ASM from sino, attn image and acq model."""
asm_attn = pet.AcquisitionSensitivityModel(attn, acq_model)
# temporary fix pending attenuation offset fix in STIR:
# converting attenuation into 'bin efficiency'
asm_attn.set_up(sino)
bin_eff = pet.AcquisitionData(sino)
bin_eff.fill(1.0)
asm_attn.unnormalise(bin_eff)
asm_attn = pet.AcquisitionSensitivityModel(bin_eff)
return asm_attn
def set_up_reconstructor(use_gpu, num_ms, acq_models, resamplers, masks, sinos, rands=None):
"""Set up reconstructor."""
# From the arguments
algo = str(args['--algorithm'])
regularizer = str(args['--reg'])
r_iters = int(args['--reg_iters'])
r_alpha = float(args['--alpha'])
nsub = int(args['--numSubsets']) if args['--numSubsets'] and algo=='spdhg' else 1
precond = True if args['--precond'] else False
param_path = str(args['--param_path'])
normalise = True if args['--normaliseDataAndBlock'] else False
gamma = float(args['--gamma'])
output_name = str(args['--outp'])
if not os.path.exists(param_path):
os.makedirs(param_path)
if normalise:
raise error('options {} and regularization={} are not yet implemented together'.format(normalise, regularizer))
# We'll need an additive term (eta). If randoms are present, use them;
# else, use a small constant background so the KL data fit stays well defined
etas = rands if rands else [sino * 0 + 1e-5 for sino in sinos]
# Create composition operators containing linear
# acquisition models and resamplers,
# and create data fit functions
if nsub == 1:
if resamplers is None:
#KT C = [am.get_linear_acquisition_model() for am in acq_models]
C = [am for am in acq_models]
else:
C = [CompositionOperator(
#KTam.get_linear_acquisition_model(),
am,
res, preallocate=True)
for am, res in zip(*(acq_models, resamplers))]
fi = [KullbackLeibler(b=sino, eta=eta, mask=masks[0].as_array(),use_numba=True)
for sino, eta in zip(sinos, etas)]
else:
C = [am for am in acq_models]
fi = [None] * (num_ms * nsub)
for (k,i) in np.ndindex((nsub,num_ms)):
# resample if needed
if resamplers is not None:
C[k * num_ms + i] = CompositionOperator(
#KTam.get_linear_acquisition_model(),
C[k * num_ms + i],
resamplers[i], preallocate=True)
fi[k * num_ms + i] = KullbackLeibler(b=sinos[i], eta=etas[i], mask=masks[k].as_array(),use_numba=True)
if regularizer == "FGP_TV":
r_tolerance = 1e-7
r_iso = 0
r_nonneg = 1
r_printing = 0
device = 'gpu' if use_gpu else 'cpu'
G = FGP_TV(r_alpha, r_iters, r_tolerance,
r_iso, r_nonneg, r_printing, device)
if precond:
FGP_TV.proximal = precond_proximal
elif regularizer == "None":
G = IndicatorBox(lower=0)
else:
raise error("Unknown regularisation")
F = BlockFunction(*fi)
K = BlockOperator(*C)
if algo == 'spdhg':
prob = [1./ len(K)] * len(K)
else:
prob = None
if not precond:
if algo == 'pdhg':
# we want the norm of the whole physical BlockOp
normK = get_proj_norm(BlockOperator(*C),param_path)
sigma = gamma/normK
tau = 1/(normK*gamma)
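# (note: this choice gives sigma*tau*normK**2 == 1, the usual PDHG step-size condition;
# gamma only shifts the balance between the primal and dual step sizes)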
elif algo == 'spdhg':
# we want the norm of each component
normK = get_proj_normi(BlockOperator(*C),nsub,param_path)
# we'll let spdhg do its default implementation
sigma = None
tau = None
use_axpby = False
else:
normK=None
if algo == 'pdhg':
tau = K.adjoint(K.range_geometry().allocate(1.))
# CD take care of edge of the FOV
filter = pet.TruncateToCylinderProcessor()
filter.apply(tau)
backproj_np = tau.as_array()
vmax = np.max(backproj_np[backproj_np>0])
backproj_np[backproj_np==0] = 10 * vmax
tau_np = 1/backproj_np
tau.fill(tau_np)
# apply filter second time just to be sure
filter.apply(tau)
tau_np = tau.as_array()
tau_np[tau_np==0] = 1 / (10 * vmax)
elif algo == 'spdhg':
taus_np = []
for (Ki,pi) in zip(K,prob):
tau = Ki.adjoint(Ki.range_geometry().allocate(1.))
# CD take care of edge of the FOV
filter = pet.TruncateToCylinderProcessor()
filter.apply(tau)
backproj_np = tau.as_array()
vmax = | np.max(backproj_np[backproj_np>0]) | numpy.max |
import numpy as np
from NumbaLSODA import lsoda_sig, lsoda
from scipy.integrate import solve_ivp
import timeit
import numba as nb
# NumbaLSODA
@nb.cfunc(lsoda_sig,boundscheck=False)
def f_nb(t, u_, du_, p_):
u = nb.carray(u_, (3,))
p = nb.carray(p_, (3,))
sigma, rho, beta = p
x, y, z = u
du_[0] = sigma * (y - x)
du_[1] = x * (rho - z) - y
du_[2] = x * y - beta * z
funcptr = f_nb.address
# scipy
@nb.njit
def f_sp(t, u, sigma, rho, beta):
x, y, z = u
return np.array([sigma * (y - x), x * (rho - z) - y, x * y - beta * z])
u0 = | np.array([1.0,0.0,0.0]) | numpy.array |
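# A hedged usage sketch (not part of the original benchmark): timing the SciPy side of the
# comparison with solve_ivp's LSODA method. The Lorenz parameters below (sigma=10, rho=28,
# beta=8/3) and the integration window are assumptions; the NumbaLSODA call would consume
# `funcptr` and an initial-condition array analogously.
u0_check = np.array([1.0, 0.0, 0.0])
t_eval = np.linspace(0.0, 30.0, 3000)
p = (10.0, 28.0, 8.0 / 3.0)
sol = solve_ivp(f_sp, (t_eval[0], t_eval[-1]), u0_check,
                t_eval=t_eval, args=p, method='LSODA', rtol=1e-8, atol=1e-8)
print(timeit.timeit(lambda: solve_ivp(f_sp, (t_eval[0], t_eval[-1]), u0_check,
                                      t_eval=t_eval, args=p, method='LSODA'),
                    number=10))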
#!/usr/bin/env python
# coding: utf-8
## Code for performing Curveball method for associations between mutations and molecular features
#Author: <NAME>
#from curveball import*
import pandas as pd
import numpy as np
import scipy.stats as stats
import statsmodels.stats.multitest
import copy
import random
import matplotlib.pyplot as plt
from random import sample, randint, shuffle
from multiprocessing import Pool
comut = pd.read_csv("/home/zlin/mcll/comut_for_curveball_wes_wgs_noContam_withMBL_withIGHV_20210106.tsv", sep="\t", index_col=False)
comut=comut[comut['MCLL'] ==1]
#comut_np = np.array(comut[comut.columns[1:204]])
#get contingency dict from the original matrix.
comut_np = np.array(comut[comut.columns[1:204]])
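# A minimal sketch of the Curveball "trade" step that the method named in the header relies
# on (the full implementation presumably lives behind the commented-out `from curveball
# import *` above, so treat this as an illustration, not the script's own code). One trade
# randomizes a binary matrix while preserving every row and column sum.
def curveball_trade(rows):
    """rows: list of sets, one per sample, each holding the column indices of its 1-entries."""
    i, j = sample(range(len(rows)), 2)          # pick two rows at random
    shared = rows[i] & rows[j]                  # 1-columns common to both rows stay put
    pool = list((rows[i] | rows[j]) - shared)   # columns held by exactly one of the two rows
    shuffle(pool)                               # reassign them at random ...
    k = len(rows[i]) - len(shared)              # ... keeping each row's total count fixed
    rows[i] = shared | set(pool[:k])
    rows[j] = shared | set(pool[k:])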
def whole_process(input_):
input_matrix = input_[0]
iter_ = int(input_[1])
#merge
contigency_dict_init = {}
for i in range(input_matrix.shape[1]):
print(i)
contigency_dict_init[i] = {}
for j in range(i+1, input_matrix.shape[1]):
contigency_dict_init[i][j] = [sum((input_matrix[:, i] == 1) & (input_matrix[:, j] == 1))]
contigency_dict_init[i][j].append(sum((input_matrix[:, i] == 0) & (input_matrix[:, j] == 1)))
contigency_dict_init[i][j].append(sum((input_matrix[:, i] == 1) & (input_matrix[:, j] == 0)))
contigency_dict_init[i][j].append(sum((input_matrix[:, i] == 0) & (input_matrix[:, j] == 0)))
p_value_diff_mat_positive = np.array([]).reshape(0,20503)
p_value_diff_mat_negative = | np.array([]) | numpy.array |
#!/usr/bin/env python
# coding: utf-8
# # CNN for Classification
# ---
# In this and the next notebook, we define **and train** a CNN to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist).
#
# We are providing two solutions to show you how different network structures and training strategies can affect the performance and accuracy of a CNN. This first solution will be a simple CNN with two convolutional layers.
#
# Please note that this is just one possible solution out of many!
# ### Load the [data](https://pytorch.org/docs/stable/torchvision/datasets.html)
#
# In this cell, we load in both **training and test** datasets from the FashionMNIST class.
# In[1]:
# our basic libraries
import torch
import torchvision
# data loading and transforming
from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
from torchvision import transforms
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors for input into a CNN
## Define a transform to read the data in as a tensor
data_transform = transforms.ToTensor()
# choose the training and test datasets
train_data = FashionMNIST(root='./data', train=True,
download=True, transform=data_transform)
test_data = FashionMNIST(root='./data', train=False,
download=True, transform=data_transform)
# Print out some stats about the training and test data
print('Train data, number of images: ', len(train_data))
print('Test data, number of images: ', len(test_data))
# In[2]:
# prepare data loaders, set the batch_size
## TODO: you can try changing the batch_size to be larger or smaller
## when you get to training your network, see how batch_size affects the loss
batch_size = 20
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
# specify the image classes
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# ### Visualize some training data
#
# This cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid.
# In[3]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
ax = fig.add_subplot(2, batch_size//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title(classes[labels[idx]])
# ### Define the network architecture
#
# The various layers that make up any neural network are documented, [here](https://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll use a simple series of layers:
# * Convolutional layers
# * Maxpooling layers
# * Fully-connected (linear) layers
#
# You are also encouraged to look at adding [dropout layers](https://pytorch.org/docs/stable/nn.html#dropout) to avoid overfitting this data.
#
# ---
#
# To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in.
#
# Note: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network.
#
# #### Define the Layers in ` __init__`
# As a reminder, a conv/pool layer may be defined like this (in `__init__`):
# ```
# # 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel
# self.conv1 = nn.Conv2d(1, 32, 3)
#
# # maxpool that uses a square window of kernel_size=2, stride=2
# self.pool = nn.MaxPool2d(2, 2)
# ```
#
# #### Refer to Layers in `forward`
# Then referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied:
# ```
# x = self.pool(F.relu(self.conv1(x)))
# ```
#
# You must place any layers with trainable weights, such as convolutional layers, in the `__init__` function and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, may appear *only* in the `forward` function. In practice, you'll often see conv/pool layers defined in `__init__` and activations defined in `forward`.
#
# #### Convolutional layer
# The first convolution layer has been defined for you, it takes in a 1 channel (grayscale) image and outputs 10 feature maps as output, after convolving the image with 3x3 filters.
#
# #### Flattening
#
# Recall that to move from the output of a convolutional/pooling layer to a linear layer, you must first flatten your extracted features into a vector. If you've used the deep learning library, Keras, you may have seen this done by `Flatten()`, and in PyTorch you can flatten an input `x` with `x = x.view(x.size(0), -1)`.
#
# ### TODO: Define the rest of the layers
#
# It will be up to you to define the other layers in this network; we have some recommendations, but you may change the architecture and parameters as you see fit.
#
# Recommendations/tips:
# * Use at least two convolutional layers
# * Your output must be a linear layer with 10 outputs (for the 10 classes of clothing)
# * Use a dropout layer to avoid overfitting
#
# ### A note on output size
#
# For any convolutional layer, the output feature maps will have the specified depth (a depth of 10 for 10 filters in a convolutional layer) and the dimensions of the produced feature maps (width/height) can be computed as the _input image_ width/height, W, minus the filter size, F, divided by the stride, S, all + 1. The equation looks like: `output_dim = (W-F)/S + 1`, for an assumed padding size of 0. You can find a derivation of this formula, [here](http://cs231n.github.io/convolutional-networks/#conv).
#
# For a pool layer with a size 2 and stride 2, the output dimension will be reduced by a factor of 2. Read the comments in the code below to see the output size for each layer.
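# As a quick sanity check (added here, not part of the original notebook), the output-size
# formula above can be applied to the two conv/pool stages used in this solution:
def conv_out_dim(w, f, s=1):
    return (w - f) // s + 1

dim = conv_out_dim(28, 3) // 2   # conv1: 28 -> 26, then 2x2 pool -> 13
dim = conv_out_dim(dim, 3) // 2  # conv2: 13 -> 11, then 2x2 pool -> 5 (5.5 rounded down)
assert dim == 5                  # matches the 20*5*5 input size of fc1 below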
# In[4]:
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel (grayscale), 10 output channels/feature maps
# 3x3 square convolution kernel
## output size = (W-F)/S +1 = (28-3)/1 +1 = 26
# the output Tensor for one image, will have the dimensions: (10, 26, 26)
# after one pool layer, this becomes (10, 13, 13)
self.conv1 = nn.Conv2d(1, 10, 3)
# maxpool layer
# pool with kernel_size=2, stride=2
self.pool = nn.MaxPool2d(2, 2)
# second conv layer: 10 inputs, 20 outputs, 3x3 conv
## output size = (W-F)/S +1 = (13-3)/1 +1 = 11
# the output tensor will have dimensions: (20, 11, 11)
# after another pool layer this becomes (20, 5, 5); 5.5 is rounded down
self.conv2 = nn.Conv2d(10, 20, 3)
# 20 outputs * the 5*5 filtered/pooled map size
# 10 output channels (for the 10 classes)
self.fc1 = nn.Linear(20*5*5, 10)
# define the feedforward behavior
def forward(self, x):
# two conv/relu + pool layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
# prep for linear layer
# flatten the inputs into a vector
x = x.view(x.size(0), -1)
# one linear layer
x = F.relu(self.fc1(x))
# a softmax layer to convert the 10 outputs into a distribution of class scores
x = F.log_softmax(x, dim=1)
# final output
return x
# instantiate and print your Net
net = Net()
print(net)
# ### TODO: Specify the loss function and optimizer
#
# Learn more about [loss functions](https://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizers](https://pytorch.org/docs/stable/optim.html) in the online documentation.
#
# Note that for a classification problem like this, one typically uses cross entropy loss, which can be defined in code like: `criterion = nn.CrossEntropyLoss()`; cross entropy loss combines `softmax` and `NLL loss` so, alternatively (as in this example), you may see NLL Loss being used when the output of our Net is a distribution of class scores.
#
# PyTorch also includes some standard stochastic optimizers like stochastic gradient descent and Adam. You're encouraged to try different optimizers and see how your model responds to these choices as it trains.
#
# In[5]:
import torch.optim as optim
## TODO: specify loss function
# cross entropy loss combines softmax and nn.NLLLoss() in one single class.
criterion = nn.NLLLoss()
## TODO: specify optimizer
# stochastic gradient descent with a small learning rate
optimizer = optim.SGD(net.parameters(), lr=0.001)
# ### A note on accuracy
#
# It's interesting to look at the accuracy of your network **before and after** training. This way you can really see that your network has learned something. In the next cell, let's see what the accuracy of an untrained network is (we expect it to be around 10% which is the same accuracy as just guessing for all 10 classes).
# In[6]:
# Calculate accuracy before training
correct = 0
total = 0
# Iterate through test dataset
for images, labels in test_loader:
# forward pass to get outputs
# the outputs are a series of class scores
outputs = net(images)
# get the predicted class from the maximum value in the output-list of class scores
_, predicted = torch.max(outputs.data, 1)
# count up total number of correct labels
# for which the predicted and true labels are equal
total += labels.size(0)
correct += (predicted == labels).sum()
# calculate the accuracy
# to convert `correct` from a Tensor into a scalar, use .item()
accuracy = 100.0 * correct.item() / total
# print it out!
print('Accuracy before training: ', accuracy)
# ### Train the Network
#
# Below, we've defined a `train` function that takes in a number of epochs to train for.
# * The number of epochs is how many times a network will cycle through the entire training dataset.
# * Inside the epoch loop, we loop over the training dataset in batches; recording the loss every 1000 batches.
#
# Here are the steps that this training function performs as it iterates over the training dataset:
#
# 1. Zero's the gradients to prepare for a forward pass
# 2. Passes the input through the network (forward pass)
# 3. Computes the loss (how far is the predicted classes are from the correct labels)
# 4. Propagates gradients back into the network’s parameters (backward pass)
# 5. Updates the weights (parameter update)
# 6. Prints out the calculated loss
#
#
# In[7]:
def train(n_epochs):
loss_over_time = [] # to track the loss as the network trains
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
for batch_i, data in enumerate(train_loader):
# get the input images and their corresponding labels
inputs, labels = data
# zero the parameter (weight) gradients
optimizer.zero_grad()
# forward pass to get outputs
outputs = net(inputs)
# calculate the loss
loss = criterion(outputs, labels)
# backward pass to calculate the parameter gradients
loss.backward()
# update the parameters
optimizer.step()
# print loss statistics
# to convert loss into a scalar and add it to running_loss, we use .item()
running_loss += loss.item()
if batch_i % 1000 == 999: # print every 1000 batches
avg_loss = running_loss/1000
# record and print the avg loss over the 1000 batches
loss_over_time.append(avg_loss)
print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, avg_loss))
running_loss = 0.0
print('Finished Training')
return loss_over_time
# In[ ]:
# define the number of epochs to train for
n_epochs = 30 # start small to see if your model works, initially
# call train and record the loss over time
training_loss = train(n_epochs)
# ## Visualizing the loss
#
# A good indication of how much your network is learning as it trains is the loss over time. In this example, we printed and recorded the average loss for each 1000 batches and for each epoch. Let's plot it and see how the loss decreases (or doesn't) over time.
#
# In this case, you can see that it takes a little while before the big initial decrease in loss, and the loss flattens out over time.
# In[ ]:
# visualize the loss as the network trained
plt.plot(training_loss)
plt.xlabel('1000\'s of batches')
plt.ylabel('loss')
plt.ylim(0, 2.5) # consistent scale
plt.show()
# ### Test the Trained Network
#
# Once you are satisfied with how the loss of your model has decreased, there is one last step: test!
#
# You must test your trained model on a previously unseen dataset to see if it generalizes well and can accurately classify this new dataset. For FashionMNIST, which contains many pre-processed training images, a good model should reach **greater than 85% accuracy** on this test dataset. If you are not reaching this value, try training for a larger number of epochs, tweaking your hyperparameters, or adding/subtracting layers from your CNN.
# In[ ]:
# initialize tensor and lists to monitor test loss and accuracy
test_loss = torch.zeros(1)
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
# set the module to evaluation mode
net.eval()
for batch_i, data in enumerate(test_loader):
# get the input images and their corresponding labels
inputs, labels = data
# forward pass to get outputs
outputs = net(inputs)
# calculate the loss
loss = criterion(outputs, labels)
# update average test loss
test_loss = test_loss + ((torch.ones(1) / (batch_i + 1)) * (loss.data - test_loss))
# get the predicted class from the maximum value in the output-list of class scores
_, predicted = torch.max(outputs.data, 1)
# compare predictions to true label
# this creates a `correct` Tensor that holds the number of correctly classified images in a batch
correct = np.squeeze(predicted.eq(labels.data.view_as(predicted)))
# calculate test accuracy for *each* object class
# we get the scalar value of correct items for a class, by calling `correct[i].item()`
for i in range(batch_size):
label = labels.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
print('Test Loss: {:.6f}\n'.format(test_loss.numpy()[0]))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
classes[i], 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
| np.sum(class_correct) | numpy.sum |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 16:31:20 2017
Generate fidelity weighting vector
@author: rouhinen
"""
import scipy
from scipy import signal
import os
import mne
import mne.minimum_norm as minnorm
import matplotlib.pyplot as plt
import numpy as np
###############################################################################################################################################
###############################################################################################################################################
########## Load source identities, forward and inverse operators from csv
os.chdir('M:\\inverse_weighting\\Santeri data') # where M: is E:\projects on PS5
fileSourceIdentities = 'sourceIdentities.csv'
fileForwardOperator = 'forwardSolution.csv'
fileInverseOperator = 'inverseSolution.csv'
sourceIdentities = scipy.genfromtxt(fileSourceIdentities, dtype='int32', delimiter=',')
# Expected ids for parcels are 0 to n-1, where n is number of parcels, and -1 for sources that do not belong to any parcel.
# sourceIdentities = [0, 1, 2, 1, -1]
# Zero as ID doesn't work if parcel not belonging to any parcel is given zero value. There could be sources not in any parcel.
# Sparse parcels that is. Should initialize those to -1 or Nan.
forwardOperator = scipy.matrix(scipy.genfromtxt(fileForwardOperator, dtype='float', delimiter=','))
# forwardOperator = scipy.matrix('1 2 3 2 1; 4 5 6 5 4') # sensors x sources
inverseOperator = scipy.matrix(scipy.genfromtxt(fileInverseOperator, dtype='float', delimiter=','))
# inverseOperator = scipy.matrix('1 -1; 2 2; -1 -3; 1 2; 2 1') # sources x sensors
###############################################################################################################################################
###############################################################################################################################################
########## Load fwd and inv solutions from RS example data
subject = 'S0116'
subjects_dir = 'M:\\inverse_weighting\\'
parc = 'parc2018yeo7_400'
fwdFile = 'M:\\inverse_weighting\\S0116\\S0116_set02__220416_tsss_09_trans_MEG_ICA-py-fwd.fif'
invFile = 'M:\\inverse_weighting\\S0116\\S0116_set02__220416_tsss_09_trans_MEG_ICA-py-inv.fif'
# read fwd op
fwd = mne.read_forward_solution(fwdFile)
fwd_sol = fwd['sol']['data'] # counterpart to forwardOperator, [sensors x sources]
# read and prepare inv op
inv = minnorm.read_inverse_operator(invFile)
invP = minnorm.prepare_inverse_operator(inv,1,1./9.)
inv_sol = minnorm.inverse._assemble_kernel(invP, None, 'MNE',None)[0] # counterpart to forwardOperator, [sources x sensors]
# get source space
src = inv.get('src')
vert_lh = src[0].get('vertno')
vert_rh = src[1].get('vertno')
# get labels, vertices and src-identities
labels_parc = mne.read_labels_from_annot(subject, parc=parc, subjects_dir=subjects_dir)
src_ident_lh = np.full(len(vert_lh), -1)
src_ident_rh = np.full(len(vert_rh), -1)
for l,label in enumerate(labels_parc[:201]): # find sources that belong to the left HS labels
for v in label.vertices:
src_ident_lh[np.where(vert_lh == v)]=l
for l,label in enumerate(labels_parc[201:402]): # find sources that belong to the right HS labels
for v in label.vertices:
src_ident_rh[np.where(vert_rh == v)]=l
src_ident_lh = src_ident_lh -1 # fix numbers, so that sources in med. wall and unassigned get value -1
src_ident_lh[src_ident_lh==-2] = -1
src_ident_rh = src_ident_rh + 200
src_ident_rh[src_ident_rh==400] = -1
src_ident_rh[src_ident_rh==199] = -1
src_ident = np.concatenate((src_ident_lh,src_ident_rh))
#### change variable names
sourceIdentities = src_ident
inverseOperator = inv_sol
forwardOperator = fwd_sol
###############################################################################################################################################
###############################################################################################################################################
############### Santeri's code for inverse weighting
########## Generate oscillatory parcel signals
numberParcels = int(max(sourceIdentities) +1) # Maybe one should test if unique non-negative values == max+1. This is expected in the code.
timeOutput = 60000 # Samples. Peaks at about 20 GB ram with 30 000 samples.
timeCut = 20 # Samples to remove from ends to get rid of border effects
timeGenerate = timeOutput + 2*timeCut
widths = scipy.arange(5, 6) # Original values 1, 31. Higher number wider span.
parcelTimeSeries = scipy.random.randn(numberParcels, timeGenerate) # Generate random signal
for i in range(numberParcels):
parcelTimeSeries[i] = signal.cwt(parcelTimeSeries[i], signal.ricker, widths) # Mexican hat continuous wavelet transform random series.
parcelTimeSeries = signal.hilbert(parcelTimeSeries) # Hilbert transform. Get analytic signal.
parcelTimeSeries = parcelTimeSeries[:, timeCut:-timeCut] # Cut off borders
########## Clone parcel time series to source time series
sourceTimeSeries = 1j* scipy.zeros((len(sourceIdentities), int(parcelTimeSeries.shape[1])), dtype=float) # Zeros (complex) sources x samples
for i,identity in enumerate(sourceIdentities): # i-teration and identity
if identity > -1: # -1 as identity means source does not belong to any parcel. Other negative values should not really be there.
sourceTimeSeries[i] = parcelTimeSeries[identity] # Clone parcel time series to source space.
checkSourceTimeSeries = scipy.real(sourceTimeSeries[:]) # For checking
########## Forward then inverse model source series
# sourceTimeSeries = inverseOperator*(forwardOperator * sourceTimeSeries) this didn't work
sourceTimeSeries = np.dot(inverseOperator,np.dot(forwardOperator, sourceTimeSeries)) # this works
########## Change to amplitude 1, keep angle using Euler's formula.
sourceTimeSeries = scipy.exp(1j*(scipy.asmatrix(scipy.angle(sourceTimeSeries))))
parcelTimeSeries = scipy.exp(1j*(scipy.asmatrix(scipy.angle(parcelTimeSeries))))
########## Get cPLV needed for flips and weighting
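# (Added note: because both series were set to unit amplitude above, the sum below of
# parcel * conj(source) divided by the number of samples is the complex phase-locking value
# cPLV = (1/N) * sum_t exp(1j*(theta_parcel(t) - theta_source(t))); its real part measures how
# faithfully a source's phase tracks its parcel's phase after the forward-inverse round trip.)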
cPLVArray = 1j* scipy.zeros(len(sourceIdentities), dtype=float) # Initialize as zeros (complex).
for i,identity in enumerate(sourceIdentities): # Compute cPLV only of parcel source pairs of sources that belong to that parcel. One source belong to only one parcel.
if sourceIdentities[i] >= 0: # Don't compute negative values. These should be sources not belonging to any parcel.
cPLVArray[i] = scipy.sum((scipy.asarray(parcelTimeSeries[identity])) * scipy.conjugate(scipy.asarray(sourceTimeSeries[i])))
cPLVArray /= timeOutput # Normalize by samples. For debugging. Output doesn't change even if you don't do this.
########## Get weights and flip. This could be the output.
weights = scipy.zeros(len(sourceIdentities)) # Initialize as zeros
for i,cPLV in enumerate(cPLVArray):
weights[i] = scipy.real(cPLV)**2 * scipy.sign(scipy.real(cPLV)) # Sign is the flip; weight (real part)^2
########## Create weighted inverse operator and normalize the norm of weighted inv op to match original inv op's norm.
weightedInvOp = np.dot(scipy.eye(weights.shape[0])*weights, inverseOperator) # Scale each source (row) of inverseOperator by its weight. This is the un-normalized operator.
weightsNormalized = scipy.zeros(len(weights)) # Initialize norm normalized weights. Maybe not necessary.
for parcel in range(numberParcels): # Normalize parcel level norms.
ii = [i for i,source in enumerate(sourceIdentities) if source == parcel] # Index sources belonging to parcel
weightsNormalized[ii] = weights[ii] * (scipy.linalg.norm(inverseOperator[ii]) / scipy.linalg.norm(weightedInvOp[ii])) # Normalize per parcel.
weightedInvOp = np.dot(scipy.eye(weightsNormalized.shape[0])*weightsNormalized,inverseOperator) # Parcel level normalized operator.
weightedInvOp *= scipy.linalg.norm(inverseOperator) / scipy.linalg.norm(scipy.nan_to_num(weightedInvOp)) # Operator level normalized operator. If there are sources not in any parcel weightedInvOp gets Nan values due to normalizations.
weightedInvOp = scipy.nan_to_num(weightedInvOp)
########## Check if weighting worked.
## Do correlations between the original time series and the weighted inverse and normal inverse models.
# Make parcel and sensor time series. Separate series to avoid overfitted estimation.
samplesSubset = 10000 + 2*timeCut
checkParcelTimeSeries = scipy.random.randn(numberParcels, samplesSubset) # Generate random signal
for i in range(numberParcels):
checkParcelTimeSeries[i] = signal.cwt(checkParcelTimeSeries[i], signal.ricker, widths) # Mexican hat continuous wavelet transform random series.
checkParcelTimeSeries = signal.hilbert(checkParcelTimeSeries) # Hilbert transform. Get analytic signal.
checkParcelTimeSeries = checkParcelTimeSeries[:, timeCut:-timeCut] # Cut off borders
# Change to amplitude 1, keep angle using Euler's formula.
checkParcelTimeSeries = scipy.exp(1j*(scipy.asmatrix(scipy.angle(checkParcelTimeSeries))))
## Clone parcel time series to source time series
checkSourceTimeSeries = 1j* scipy.zeros((len(sourceIdentities), int(checkParcelTimeSeries.shape[1])), dtype=float) # Zeros (complex) sources x samples
for i,identity in enumerate(sourceIdentities): # i-teration and identity
if identity > -1: # -1 as identity means source does not belong to any parcel. Other negative values should not really be there.
checkSourceTimeSeries[i] = checkParcelTimeSeries[identity] # Clone parcel time series to source space.
sensorTimeSeries = np.dot(forwardOperator, checkSourceTimeSeries)
# Correlations between inversed sensorTimeSeries and sourceTimeSeries. Use only a time subset as the memory use is quite large.
#from scipy.stats.stats import pearsonr
# Binary matrix of sources belonging to parcels
sourceParcelMatrix = scipy.zeros((numberParcels,len(sourceIdentities)), dtype=scipy.int8)
for i,identity in enumerate(sourceIdentities):
if identity >= 0: # Don't place negative values. These should be sources not belonging to any parcel.
sourceParcelMatrix[identity,i] = 1
# for each parcel:
# correlation(checkParcelTimeSeries, currentParcelsSourcesBinary x invOp x sensorTimeSeries)
# cPLV = scipy.mean((scipy.asarray(parcelTimeSeries[identity])) * scipy.conjugate(scipy.asarray(sourceTimeSeries[i])))
parcelPLVW = scipy.zeros(numberParcels, dtype=scipy.float32) # For the weighted inverse operator
parcelPLVO = scipy.zeros(numberParcels, dtype=scipy.float32) # For the original inverse operator
#for i in range(numberParcels):
# parcelPLVW[i] = pearsonr( scipy.ravel(checkParcelTimeSeries[i,0:samplesSubset]), scipy.ravel((sourceParcelMatrix[i,:, scipy.newaxis]).T * weightedInvOp * sensorTimeSeries[:,0:samplesSubset]) )[0]
# parcelPLVO[i] = pearsonr( scipy.ravel(checkParcelTimeSeries[i,0:samplesSubset]), scipy.ravel((sourceParcelMatrix[i,:, scipy.newaxis]).T * inverseOperator * sensorTimeSeries[:,0:samplesSubset]) )[0]
estimatedSourceSeriesW = np.dot(weightedInvOp , sensorTimeSeries) # Weighted and original estimated source time series
estimatedSourceSeriesO = np.dot(inverseOperator , sensorTimeSeries)
# Change to amplitude 1, keep angle using Euler's formula.
estimatedSourceSeriesW = scipy.exp(1j*(scipy.asmatrix(scipy.angle(estimatedSourceSeriesW))))
estimatedSourceSeriesO = scipy.exp(1j*(scipy.asmatrix(scipy.angle(estimatedSourceSeriesO))))
for i in range(numberParcels):
A = scipy.ravel(checkParcelTimeSeries[i,:]) # True simulated parcel time series
nSources = scipy.sum(sourceParcelMatrix[i,:])
B = scipy.ravel((sourceParcelMatrix[i,:]) * estimatedSourceSeriesW) /nSources # Estimated parcel time series
C = scipy.ravel((sourceParcelMatrix[i,:]) * estimatedSourceSeriesO) /nSources
parcelPLVW[i] = scipy.mean(A * scipy.conjugate(B))
parcelPLVO[i] = scipy.mean(A * scipy.conjugate(C))
plt.plot(np.sort(parcelPLVO))
plt.plot(np.sort(parcelPLVW)) # this should be equivalent to parcel fidelity ?!
print(np.mean(parcelPLVO))
print(np.mean(parcelPLVW))
import numpy as np
import scipy
from numpy.fft import rfft,irfft
import os
import time
import librosa
from Audio_proc_lib.audio_proc_functions import *
import multiprocessing
import scipy.signal as sg
class scale_frame:
#FOR THE IMPLEMENTATION OF THE IRREGULAR MATRIX i assumed that Ln (window len) = Mn (FFT len)
#Painless case Ln<=Mn
#CONSTRUCTOR PARAMETERS
#1)ksi_s : sampling rate
#2)min_scl : minimal scale given in samples
    #3)overlap_factor : the amount of overlap each newly constructed window has with its previous one (and the next one), given as a ratio
    # Notes-> i.e. an overlap_factor of 1/2 means that if the previous window is 512 samples long, the next one overlaps it by 256 samples (similar to the hop size in an STFT)
    # For the first and the last windows we use a Tukey window and an overlap of 1/2.
#4)onset_seq : The sequence of onsets produced by an onset detection algorithm
#5)middle_window : The middle window used in each get_window_interval procedure given as an object i.e. np.hanning or scipy.signal.tukey
#6)L : signal length in samples
    #7)matrix_form : flag to indicate whether a regular (zero-padded) matrix or an irregular matrix of coefficients will be computed
#8)multiproc : flag to indicate if it will use multiprocessing to compute the window for each onset interval indices in the get_window_interval procedure
# (recommended True)
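    #Illustrative usage (a sketch; the signal variable `sig` and the onset-detection call are
    #assumptions, not part of this module):
    #   onsets = librosa.onset.onset_detect(y=sig, sr=44100, units="samples")
    #   sf = scale_frame(ksi_s=44100, min_scl=512, overlap_factor=1/2, onset_seq=onsets,
    #                    middle_window=np.hanning, L=len(sig), matrix_form=False, multiproc=True)
    #   c = sf.forward(sig)       # list of per-window rFFT coefficient arrays
    #   sig_rec = sf.backward(c)  # reconstruction through the canonical dual frame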
def timeis(func):
'''Decorator that reports the execution time.'''
def wrap(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(func.__name__, end-start)
return result
return wrap
def cputime(self):
utime, stime, cutime, cstime, elapsed_time = os.times()
return utime
g = []
g_dual = []
def __init__(self,ksi_s,min_scl,overlap_factor,onset_seq,middle_window,L,matrix_form,multiproc):
self.ksi_s = ksi_s
self.onsets = onset_seq
self.min_scl=min_scl
self.overlap_factor = overlap_factor
self.multiprocessing = multiproc
self.middle_window = middle_window
self.L=L
self.matrix_form=matrix_form
#writing in the correct order the function calls in order for the FORWARD AND BACKWARD methods to work
#Creating the onset_tuples sequence
self.get_onset_tuples()
#Construction of the windows indices
if self.multiprocessing:
pool = multiprocessing.Pool(processes=4)
all_inds_list = list( pool.imap(self.get_windows_interval, self.onset_tuples) )
else:
all_inds_list = list( map( lambda x : self.get_windows_interval(x) , self.onset_tuples ) )
self.all_inds = []
for interval_inds in all_inds_list:
self.all_inds += interval_inds
self.get_first_last_window()
self.N = len(self.all_inds)
self.get_frame_operator()
def get_onset_tuples(self):
#onsets = librosa.onset.onset_detect(y=sig, sr=self.ksi_s, units="samples")
#putting manualy some onsets in the start and the end
#and then creating a sequence of onset tuples (each tuple contains two successive onsets)
self.onsets = np.insert( self.onsets , [0,len(self.onsets)] , [self.min_scl,(self.L-1)-self.min_scl] )
self.onset_tuples = []
for i in range(len(self.onsets)-1):
self.onset_tuples.append( (self.onsets[i],self.onsets[i+1]) )
def get_windows_interval(self,onset_tuple):
#Function to get the window start (a) , end (b) indices and window length
#for the windows between 2 onsets
#Params:
        #1)onsets_tuple: the first and last onset for the interval under consideration
        #2)self.min_scl: is the minimal scale that we apply to the two onsets (because they are the transient positions) (POWER OF 2)
        #3)overlap_fact: the fraction of the previous window that the next window overlaps (a fraction smaller than 1)
#Idea implemented:
        #At the first onset we use the minimal scale, and for the following windows we increase the scale by doubling it each time
        # until the midpoint (end + start)/2 of the interval. We then use the mirrored symmetric windows in order to gradually reach the minimal
        # scale again at the position of the second onset. At the midpoint position we use another window (middle_window).
#
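        #Worked illustration (assumed numbers): with min_scl=512 and overlap_factor=1/2, an interval
        #between onsets at samples 10000 and 30000 gets windows of length 512, 1024, 2048, ... growing
        #from the first onset towards the midpoint at 20000, one middle window centred there, and the
        #mirrored lengths (..., 2048, 1024) shrinking back towards the second onset.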
#Constructing the windows for all onset intervals-----------------------------------------------------------------------------------
start = onset_tuple[0]
end = onset_tuple[1]
middle = (start + end )//2
win_len = self.min_scl
#Constructing the first symmetric windows--------------------------------------------------------------------
inds_dict = [ { "window" : np.hanning , "win_len" : win_len , "a" : start - win_len//2 , "b" : start + win_len//2 } ]
k = 0
while True:
k+=1
ovrlp = int(inds_dict[k-1]["win_len"]*self.overlap_factor)
window = np.hanning
win_len = win_len*2
a = inds_dict[k-1]["b"] - ovrlp
b = a + win_len
if b>middle:
break
# if (a+b)/2>middle:
# break
else:
inds_dict.append( { "window" : window , "win_len" : win_len , "a" : a , "b" : b } )
#Constructing the middle window---------------------------------------------------------------------------------------
window = self.middle_window
ovrlp = int(inds_dict[-1]["win_len"]*self.overlap_factor)
a = inds_dict[-1]["b"] - ovrlp
b = int( 2*middle - inds_dict[-1]["b"] ) + ovrlp
win_len = b - a
inds_dict.append( { "window" : window , "win_len" : win_len , "a" : a , "b" : b } )
        #Constructing the mirrored (second-half) symmetric windows --------------------------------------------------------------------------------
        # (we don't need the last symmetric window, which is why the for loop stops before index 0)
for k in range(len(inds_dict)-2,0,-1):
tmp = inds_dict[k].copy()
tmp["a"] = int( 2*middle - inds_dict[k]["b"] )
tmp["b"] = int( 2*middle - inds_dict[k]["a"] )
inds_dict.append(tmp)
return inds_dict
def get_first_last_window(self):
#first_window
ovrlp = int(self.all_inds[0]["win_len"]*self.overlap_factor)
ovrlp = int(self.all_inds[0]["win_len"]*(1/2))
a = 0
b = self.all_inds[0]["a"] + ovrlp
win_len = b - a
first_window_inds = { "win_len" : win_len , "a" : a , "b" : b }
#last_window
#ovrlp = int(self.all_inds[len(self.all_inds)-1]["win_len"]*self.overlap_factor)
ovrlp = int(self.all_inds[len(self.all_inds)-1]["win_len"]*(1/2))
a = self.all_inds[len(self.all_inds)-1]["b"] - ovrlp
b = self.L
win_len = b - a
last_window_inds = { "win_len" : win_len , "a" : a , "b" : b }
self.all_inds = [first_window_inds] + self.all_inds + [last_window_inds]
def plot_windows(self):
        #Plot the windows for a short excerpt of the signal (only if it is at most 7 s long)
if self.L/44100<=7.0:
#first window using Tukey
z_tmp = np.zeros(self.L)
inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
Ln = self.all_inds[0]["win_len"]
gn = np.roll( sg.tukey( Ln*2 ) , Ln )[:Ln]
z_tmp[inds] = gn
plt.plot(z_tmp)
for k in range(1,self.N-1):
z_tmp = np.zeros(self.L)
inds = np.arange( self.all_inds[k]["a"],self.all_inds[k]["b"] )
z_tmp[inds] = self.all_inds[k]["window"]( self.all_inds[k]["win_len"] )
plt.plot(z_tmp)
#last window using Tukey
z_tmp = np.zeros(self.L)
inds = np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] )
Ln = self.all_inds[self.N-1]["win_len"]
gn = np.roll( sg.tukey( Ln*2 ) , Ln )[Ln:]
z_tmp[inds] = gn
plt.plot(z_tmp)
plt.show()
# plt.axvline(start)
# plt.axvline(end)
# plt.axvline(middle)
# plt.show()
def get_frame_operator(self):
#CONSTRUCTING THE FRAME OPERATOR-----------------------------------------------
self.frame_operator = np.zeros(self.L)
#MATRIX FORM CASE:
if self.matrix_form:
#calculate the max window length:
self.M = np.array( list( map( lambda x : x["win_len"] , self.all_inds ) ) ).max()
#first window using Tukey
nb_zeros_concat = self.M-self.all_inds[0]["win_len"]
bnew = self.all_inds[0]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[0]["a"],bnew )
Ln = self.all_inds[0]["win_len"]
gn = np.roll( sg.tukey( Ln*2 ) , Ln )[:Ln]
gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
self.frame_operator[ inds ] += (gn**2)
#The remaining windows--------------------------------------------------------------------
for n in range(1,self.N//2):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
bnew = self.all_inds[n]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[n]["a"],bnew )
Ln = self.all_inds[n]["win_len"]
gn = self.all_inds[n]["window"]( Ln )
gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
self.frame_operator[ inds ] += (gn**2)
#After the self.N//2 window we update the a inds in order to avoid indices problems out of range
for n in range(self.N//2,self.N-1):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
anew = self.all_inds[n]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[n]["b"] )
Ln = self.all_inds[n]["win_len"]
gn = self.all_inds[n]["window"]( Ln )
gn = np.concatenate(( np.zeros(nb_zeros_concat),gn ))
self.frame_operator[ inds ] += (gn**2)
#last window using Tukey
nb_zeros_concat = self.M-self.all_inds[self.N-1]["win_len"]
anew = self.all_inds[self.N-1]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[self.N-1]["b"] )
Ln = self.all_inds[self.N-1]["win_len"]
gn = np.roll( sg.tukey( Ln*2 ) , Ln )[Ln:]
gn = np.concatenate(( np.zeros(nb_zeros_concat) ,gn ))
self.frame_operator[ inds ] += (gn**2)
#IRREGULAR MATRIX CASE:
else:
#first window using Tukey
inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
Ln = self.all_inds[0]["win_len"]
gn = np.roll( sg.tukey( Ln*2 ) , Ln )[:Ln]
self.frame_operator[ inds ] += (gn**2)
#The remaining windows
for n in range(1,self.N-1):
inds = np.arange( self.all_inds[n]["a"],self.all_inds[n]["b"] )
Ln = self.all_inds[n]["win_len"]
gn = self.all_inds[n]["window"]( Ln )
self.frame_operator[ inds ] += (gn**2)
#last window using Tukey
inds = np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] )
Ln = self.all_inds[self.N-1]["win_len"]
gn = np.roll( sg.tukey( Ln*2 ) , Ln )[Ln:]
self.frame_operator[ inds ] += (gn**2)
@timeis
def forward(self,signal):
c = []
#MATRIX FORM CASE:
if self.matrix_form:
#first window using Tukey
nb_zeros_concat = self.M-self.all_inds[0]["win_len"]
bnew = self.all_inds[0]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[0]["a"],bnew )
fft_len = self.all_inds[0]["win_len"]
gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]
gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
c.append( rfft( signal[inds]*gn , norm="ortho" ) )
#The remaining windows----------------------------------------------------------------------------------------
for n in range(1,self.N//2):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
bnew = self.all_inds[n]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[n]["a"],bnew )
fft_len = self.all_inds[n]["win_len"]
gn = self.all_inds[n]["window"](fft_len)
gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
c.append( rfft( signal[inds]*gn , norm="ortho" ) )
#After the self.N//2 window we update the a inds in order to avoid indices problems out of range
for n in range(self.N//2,self.N-1):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
anew = self.all_inds[n]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[n]["b"] )
fft_len = self.all_inds[n]["win_len"]
gn = self.all_inds[n]["window"](fft_len)
gn = np.concatenate(( np.zeros(nb_zeros_concat),gn ))
c.append( rfft( signal[inds]*gn , norm="ortho" ) )
#last window using Tukey
nb_zeros_concat = self.M-self.all_inds[self.N-1]["win_len"]
anew = self.all_inds[self.N-1]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[self.N-1]["b"] )
fft_len = self.all_inds[self.N-1]["win_len"]
gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[fft_len:]
gn = np.concatenate(( np.zeros(nb_zeros_concat) ,gn ))
c.append( rfft( signal[inds]*gn , norm="ortho" ) )
#IRREGULAR MATRIX CASE:
else:
#first window using Tukey
inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
fft_len = self.all_inds[0]["win_len"]
gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]
c.append( rfft( signal[inds]*gn , norm="ortho" ) )
#The remaining windows
for n in range(1,self.N-1):
fft_len = self.all_inds[n]["win_len"]
inds = np.arange(self.all_inds[n]["a"],self.all_inds[n]["b"])
gn = self.all_inds[n]["window"](fft_len)
c.append( rfft( signal[inds]*gn , norm="ortho" ) )
#last window using Tukey
inds = np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] )
fft_len = self.all_inds[self.N-1]["win_len"]
gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[fft_len:]
c.append( rfft( signal[inds]*gn , norm="ortho" ) )
return c
@timeis
def backward(self,c):
f_rec = np.zeros(self.L)
if self.matrix_form:
#first window using Tukey
nb_zeros_concat = self.M-self.all_inds[0]["win_len"]
bnew = self.all_inds[0]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[0]["a"],bnew )
fft_len = self.all_inds[0]["win_len"]
fn = np.real( irfft( c[0] , norm="ortho" ) )
gn_dual = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]
gn_dual = np.concatenate(( gn_dual,np.zeros(nb_zeros_concat) ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
for n in range(1,self.N//2):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
bnew = self.all_inds[n]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[n]["a"],bnew )
fft_len = self.all_inds[n]["win_len"]
fn = np.real( irfft( c[n] , norm="ortho" ) )
gn_dual = self.all_inds[n]["window"](fft_len)
gn_dual = np.concatenate(( gn_dual,np.zeros(nb_zeros_concat) ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
#After the self.N//2 window we update the a inds in order to avoid indices problems out of range
for n in range(self.N//2,self.N-1):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
anew = self.all_inds[n]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[n]["b"] )
fft_len = self.all_inds[n]["win_len"]
fn = np.real( irfft( c[n] , norm="ortho" ) )
gn_dual = self.all_inds[n]["window"](fft_len)
gn_dual = np.concatenate(( np.zeros(nb_zeros_concat),gn_dual ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
#last window using Tukey
nb_zeros_concat = self.M-self.all_inds[self.N-1]["win_len"]
anew = self.all_inds[self.N-1]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[self.N-1]["b"] )
fft_len = self.all_inds[self.N-1]["win_len"]
fn = np.real( irfft( c[self.N-1] , norm="ortho" ) )
gn_dual = np.roll( sg.tukey( fft_len*2 ) , fft_len )[fft_len:]
gn_dual = np.concatenate(( np.zeros(nb_zeros_concat),gn_dual ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
else:
#self.get_frame_operator()
#first window using Tukey
inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
fft_len = self.all_inds[0]["win_len"]
fn = np.real( irfft( c[0] , norm="ortho" ) )
gn_dual = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
for n in range(1,self.N-1):
fft_len = self.all_inds[n]["win_len"]
inds = np.arange(self.all_inds[n]["a"],self.all_inds[n]["b"])
fn = np.real( irfft( c[n] , norm="ortho" ) )
gn_dual = self.all_inds[n]["window"](fft_len)/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
#last window using Tukey
inds = np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] )
fft_len = self.all_inds[self.N-1]["win_len"]
fn = np.real( irfft( c[self.N-1] , norm="ortho" ) )
gn_dual = np.roll( sg.tukey( fft_len*2 ) , fft_len )[fft_len:]/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
return f_rec
if __name__ =='__main__':
def plot_NSGT(c):
from scipy import interpolate
c_matrix = []
max_win_len = np.array( list( map( lambda x : len(x) , c ) ) ).max()
for n in range(len(c)):
N = len(c[n])
fk = np.arange(N)*(22050/N)
(x,y) = (fk,np.abs(c[n]))
f = interpolate.interp1d(x, y)
xnew = np.linspace(0, fk[N-1], max_win_len)
ynew = f(xnew)
c_matrix.append( ynew )
grid = np.array(c_matrix).T
np.log10(grid, out=grid)
grid *= 20
        pmax = np.percentile(grid, 99.99)
import os
from os.path import join
import gzip
import shutil
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from scipy.linalg import block_diag
def get_decoder(manifold, x_dim, z_dim, rng_data_gen):
if manifold == "nn":
# NOTE: injectivity requires z_dim <= h_dim <= x_dim
h_dim = x_dim
neg_slope = 0.2
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# sampling NN weight matrices
W1 = rng_data_gen.normal(size=(z_dim, h_dim))
W1 = np.linalg.qr(W1.T)[0].T
# print("distance to identity:", np.max(np.abs(np.matmul(W1, W1.T) - np.eye(self.z_dim))))
W1 *= np.sqrt(2 / (1 + neg_slope ** 2)) * np.sqrt(2. / (z_dim + h_dim))
W1 = torch.Tensor(W1).to(device)
W1.requires_grad = False
W2 = rng_data_gen.normal(size=(h_dim, h_dim))
W2 = np.linalg.qr(W2.T)[0].T
# print("distance to identity:", np.max(np.abs(np.matmul(W2, W2.T) - np.eye(h_dim))))
        W2 *= np.sqrt(2 / (1 + neg_slope ** 2)) * np.sqrt(2. / (2 * h_dim))
"""
Bivariate statistical tools:
* elbow detection in an (x,y) plot
* peaks: finding peaks, quantifying their height, width, center, area, left & right boundaries
* area under curve
"""
from typing import List, Union
import numpy as np
from ..regression.methods import fit_robust_lm
def find_elbow_point(x: np.ndarray, y: np.ndarray, max_iter=41) -> Union[int, float]:
"""
Finds the elbow point when plotting numeric entries in `x` vs numeric values in list `y`.
Returns the index into the vectors `x` and `y` [the vectors must have the same length], where
the elbow point occurs.
Using a robust linear fit, sorts the samples in X (independent variable)
and takes sample 1:5 from the left, and samples (end-5):end and fits two
linear regressions. Finds the angle between the two lines.
Adds a point to each regression, so (1:6) and (end-6:end) and repeats.
Finds the median angle, which is where it should stabilize.
Will probably not work well on few data points. If so, try fitting a spline
to the raw data and then repeat with the interpolated data.
"""
start = 5
# assert divmod(max_iter, 2)[1] # must be odd number; to ensure we calculate the median later
def calculate_line_length(
x1: float, y1: float, x2: np.ndarray, y2: np.ndarray
) -> Union[float, np.ndarray]:
"""Returns the length of the line between 2 points (x1, y1) and (x2, y2), defined as:
:math:`\\sqrt{(x2 - x1)^2 + (y2 - y1)^2}`
"""
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
# Ensure it is a Numpy array: pandas objects and lists are correctly handled.
x = np.array(x.copy())
y = np.array(y.copy())
assert len(x) == len(y)
# Stop if everything is missing:
    if np.isnan(np.nanmedian(x)):
        return np.nan  # assumed sentinel: nothing to detect when all of x is missing
"""Compares lift and drag curves for various approximations"""
import numpy as np
import functools
from scipy import interpolate
import matplotlib.pyplot as plt
f = np.loadtxt('SphereConeTable.txt')
alfalist = []
CLdata = []
CDdata = []
for line in f:
alfalist.append(line[0])
CLdata.append(line[1])
CDdata.append(line[2])
CLtck = interpolate.splrep(alfalist,CLdata) #(tck = knots,coefficients,order)
CDtck = interpolate.splrep(alfalist,CDdata)
def CLlookup(alfa, dc, rn, rc):
if np.iscomplex(alfa)==True:
deriv=interpolate.splev(np.real(alfa),CLtck,der=1)
return deriv*1j*1e-30
else:
return interpolate.splev(alfa,CLtck,der=0)
def CDlookup(alfa, dc, rn, rc):
if np.iscomplex(alfa)==True:
deriv=interpolate.splev(np.real(alfa),CDtck,der=1)
return deriv*1j*1e-30
else:
return interpolate.splev(alfa,CDtck,der=0)
def CLfunction(alfa, dc, rn, rc):
CN = (1-(rn/rc)**2*np.cos(dc)**2)*np.cos(dc)**2*np.sin(2*alfa)
CA = (1-np.sin(dc)**4)*(rn/rc)**2 + (2*np.sin(dc)**2*np.cos(alfa)**2+np.cos(dc)**2*np.sin(alfa)**2)*(1-(rn/rc)**2*np.cos(dc)**2)
return CN*np.cos(alfa) - CA*np.sin(alfa)
def CDfunction(alfa, dc, rn, rc):
CN = (1-(rn/rc)**2*np.cos(dc)**2)*np.cos(dc)**2*np.sin(2*alfa)
    CA = (1-np.sin(dc)**4)*(rn/rc)**2 + (2*np.sin(dc)**2*np.cos(alfa)**2+np.cos(dc)**2*np.sin(alfa)**2)*(1-(rn/rc)**2*np.cos(dc)**2)
    return CN*np.sin(alfa) + CA*np.cos(alfa) # drag from normal/axial coefficients (mirrors CLfunction)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Computes broadband power, offset and slope of power spectrum
Based on selected epochs (e.g. ASCIIS) in the list of files a power spectrum
is computed. Based on this power spectrum the broadband power is calculated,
followed by the offset and slope using the FOOOF algorithm.
Reference paper FOOOF: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2018) Parameterizing Neural
Power Spectra. bioRxiv, 299859. doi: https://doi.org/10.1101/299859
reference Github: https://fooof-tools.github.io/fooof/index.html
"""
__author__ = '<NAME>'
__contact__ = '<EMAIL>' # or <EMAIL>
__date__ = '2020/09/14' ### Date it was created
__status__ = 'Finished'
####################
# Review History #
####################
# Reviewed and Updated by Eduarda Centeno 20201030
####################
# Libraries #
####################
# Standard imports
import time
import os
import glob
import ast
from datetime import date
# Third party imports
import numpy as np # version 1.19.1
import matplotlib.pyplot as plt # version 3.3.0
import pandas as pd # version 1.1.0
from scipy import signal # version 1.4.1
from fooof import FOOOF # version 0.1.3
# Define Functions ------------------------------------------------------------
def find_paths(main_dir, subject, extension, **kwargs):
""" Flexible way to find files in subdirectories based on keywords
Parameters
----------
main_dir: str
Give the main directory where the subjects' folders are stored
subject: str
Give the name of the subject to be analyzed
extension: str
Give the extension type
**kwargs: str
Give keywords that will be used in the filtering of paths
!Important!
It is possible to use the kwargs 'start' & 'end' (int) OR
'selection' (list or str) for selecting epochs. The 'selection'
list should contain the exact way in which the Tr is written, e.g.
Tr01, or Tr_1, etc.
Examples
-------
Ex.1
find_paths(main_dir='/data/KNW/NO-cohorten/Scans/',
subject='sub-9690',
extension='.asc',
key1='T1',
key2='BNA',
key3='Tr_7')
This example will result in a list with a single path:
['.../T1/BNA/1_100_WITH_200_WITH_246_VE_89.643to102.750_Tr_7.asc']
Ex2.
find_paths(main_dir='/data/KNW/NO-cohorten/Scans/',
subject='sub-9690',
extension='.asc',
key1='T1',
key2='BNA',
start=20,
end=23)
This example will result in a list with several paths:
['.../T1/BNA/1_100_WITH_200_WITH_246_VE_260.037to273.143_Tr_20.asc',
'.../T1/BNA/1_100_WITH_200_WITH_246_VE_273.144to286.250_Tr_21.asc',
'.../T1/BNA/1_100_WITH_200_WITH_246_VE_286.251to299.358_Tr_22.asc',
'.../T1/BNA/1_100_WITH_200_WITH_246_VE_299.358to312.465_Tr_23.asc']
Ex3.
find_paths(main_dir='/data/doorgeefluik/',
subject='mumo_002',
extension='.asc',
key1='OD1',
selection=['Tr01', 'Tr04'])
Returns
-------
updatedfilter: list
List with path strings
Notes
-------
Be careful that final slicing for 'start' & 'end' is done assuming that
the sorting step was correct. Thus, it is based on index not on finding the
specific start-end values in the string. This was done because the tested
paths had various ways of using Tr (e.g. Tr_1 or Tr_01, or Tr1 or Tr_01) -
what caused inconsistencies in the output.
"""
# Check if arguments are in the correct type
assert isinstance(main_dir, str), 'Argument must be str'
assert isinstance(subject, str), 'Argument must be str'
assert isinstance(extension, str), 'Argument must be str'
# Filtering step based on keywords
firstfilter = glob.glob(main_dir + subject + '/**/*' + extension,
recursive=True)
updatedfilter = firstfilter
print('\n..............NaN keys will be printed.................')
start = None
end = None
selection = None
for key, value in kwargs.items():
# In case the key value is NaN (possible in subjects dataframe)
if not isinstance(value,list) and pd.isnull(value):
print(key + '= NaN')
continue
elif key == 'start':
assert isinstance(value, (int,str,float)), 'Argument must be int or number str'
start = int(value)
elif key == 'end':
assert isinstance(value, (int,str,float)), 'Argument must be int or number str'
end = int(value)
elif key == 'selection':
if isinstance(value, list):
selection = value
elif isinstance(value, str):
selection = value.replace(';',',') # Step that convert ; to , (used in example.csv)
selection = ast.literal_eval(selection)
assert isinstance(selection, list), 'Argument should end up being a list of Tr numbers strings'
assert all(isinstance(item, str) for item in selection), 'Argument must be a list of of Tr numbers strings'
else:
start = None
end = None
selection = None
# Update list accoring to key value
updatedfilter = list(filter(lambda path: value in path, updatedfilter))
# Check if too many arguments were passed!
print('\n..............Checking if input is correct!.................')
#print(start, end, selection)
if (start and end) != None and selection != None:
raise RuntimeError('User should use Start&End OR Selection')
else:
print('All good to continue! \n')
pass
# To find index of Tr (last appearance)
location = updatedfilter[0].rfind('Tr')
# Sort list according to Tr* ending (+1 was necessary to work properly)
updatedfilter.sort(key=lambda path:int(''.join(filter(str.isdigit, path[location+1 :]))))
# After the list is sorted, slice by index.
if (start and end) != None:
print('Start&End were given. \n' +
'-- Start is: ' + str(start) +
'\n--End is: ' + str(end))
updatedfilter = updatedfilter[start-1:end]
# for number in range(start, end):
# updatedfilter = [
# list(filter(lambda k: str(number) in k[location:],
# updatedfilter))[0] for number in range(start, end)
# ]
# After the list is sorted, interesect with selection.
elif selection != None:
print('\nA selection of values was given.' +
'\nThe selection was: ' + str(selection))
updatedlist=[]
for item in selection:
updatedlist += list(filter(lambda path: item + extension in path[location:], updatedfilter))
updatedfilter = updatedlist
return updatedfilter
def make_csv(csv_path, output_path, extension = '.asc'):
"""Function to insert the number of epochs to include in analysis into csv.
Number of epochs is calculated by comparing the number of epochs available
for each subject and including the minimum amount.
Parameters
----------
csv_path : str,
path to the csv containing information on subjects to include
output_path: str,
complete path to output new csv (e.g. '/path/to/folder/new_csv.csv')
extension : str,
file extension of meg files (e.g. '.asc')
default = '.asc'
Returns
-------
None
saves the extended csv to the same directory where found old csv
(i.e. overwrites old csv)
epochs_df: pandas DataFrame,
dataframe containing the filepaths to the epochs included for every subject
"""
df = pd.read_csv(csv_path, delimiter = ',', header =0)
nr_epochs = []
for index, row in df.iterrows():
asc_paths = find_paths(main_dir=row['Path'],
subject=row['Case_ID'],
extension=extension,
timepoint=row['MM'],
atlas=row['Atlas'])
#store nr of epochs available for each subject
nr_epochs.append(len(asc_paths))
#find smallest number of epochs available
min_nr_epochs = min(nr_epochs)
#add start and stop epochs to df
df['Start'] = np.repeat(1,len(df['Path']))
df['End'] = np.repeat(min_nr_epochs, len(df['Path']))
#save new csv file that includes the epochs to analyse
df.to_csv(output_path, index = False, sep = ',')
#load new csv file with start and end epochs
new_csv = pd.read_csv(output_path)
subs = []
paths = []
#search for asc files between start and end epoch range specified in csv
for index, row in new_csv.iterrows():
subs.append(row['Case_ID'])
asc_paths = find_paths(main_dir=row['Path'],
subject=row['Case_ID'],
extension=extension,
timepoint=row['MM'],
atlas=row['Atlas'],
start = row['Start'],
end = row['End'])
#append list of asc_paths for subject to list
paths.append(asc_paths)
#store lists of asc_paths (for every subject) in dataframe
epochs_df = pd.DataFrame(paths)
#index rows to subject IDs
epochs_df.set_index([pd.Index(subs)], 'Subs', inplace = True)
    return epochs_df
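# Illustrative use of make_csv (a sketch; the paths below are placeholders, not part of this module):
#   epochs_df = make_csv(csv_path='/path/to/subjects.csv',
#                        output_path='/path/to/subjects_with_epochs.csv',
#                        extension='.asc')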
def cal_power_spectrum(timeseries, nr_rois=np.arange(92), fs=1250,
window='hamming', nperseg=4096, scaling='spectrum',
plot_figure=False, title_plot='average power spectrum'):
""" Calculate (and plot) power spectrum of timeseries
Parameters
----------
timeseries: DataFrame with ndarrays
Rows are timepoints, columns are rois/electrodes
Give list with rois/electrodes you want to include,
default=np.arange(92)
fs: int, optional
Sample frequency, default=1250
window: str or tuple, optional
Type of window you want to use, check spectral.py for details,
default='hamming'
nperseg : int, optional
Length of each segment, default=4096
scaling : str, optional
'density' calculates the power spectral density (V**2/Hz), 'spectrum'
calculates the power spectrum (V**2), default='spectrum'
plot_figure: bool
Creates a figure of the mean + std over all rois/electrodes,
default=False
title_plot: str
Give title of the plot, default='average power spectrum'
Returns
-------
f: ndarray
Array with sample frequencies (x-axis of power spectrum plot)
pxx: ndarray
Columns of power spectra for each roi/VE
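    Examples
    --------
    Illustrative call (a sketch; the DataFrame name and ROI selection are assumptions):
    >>> f, pxx = cal_power_spectrum(timeseries_df, nr_rois=np.arange(92),
    ...                             fs=1250, plot_figure=False)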
"""
pxx = np.empty([int(nperseg/2+1), np.size(nr_rois)])
i = 0
for roi in nr_rois:
(f, pxx[:,i]) = signal.welch(timeseries[roi].values, fs, window,
nperseg, scaling=scaling)
i = i + 1
if plot_figure==True:
plt.figure()
plt.plot(f, np.mean(pxx,1), color='teal')
plt.plot(f, np.mean(pxx,1)+np.std(pxx,1), color='teal', linewidth=0.7)
plt.plot(f, np.mean(pxx,1)-np.std(pxx,1), color='teal', linewidth=0.7)
        plt.fill_between(f, np.mean(pxx,1)-np.std(pxx,1), np.mean(pxx,1)+np.std(pxx,1), color='teal', alpha=0.3) # shaded mean +/- std band (shading arguments are assumed)
        plt.title(title_plot)
    return f, pxx
# -*- coding: utf-8 -*-
"""\
Caelus Input File Pretty-printer
--------------------------------
"""
import sys
import re
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from contextlib import contextmanager
import numpy as np
from . import dtypes
from ..utils import osutils
from ..version import version
file_banner = r"""/*---------------------------------------------------------------------------*\
* Caelus (http://www.caelus-cml.com)
*
* Caelus Python Library (CPL) %(version)s
* Auto-generated on: %(timestamp)s
*
\*---------------------------------------------------------------------------*/
"""
header_separator = """\
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
"""
eof_separator = """\
// ************************************************************************* //
"""
@contextmanager
def foam_writer(filename, header=None):
"""Caelus/OpenFOAM file writer
Args:
header (CaelusDict): The FoamFile entries
Yields:
printer (DictPrinter): A dictionary printer for printing data
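    Example (an illustrative sketch; `header` and `contents` are assumed CaelusDict instances):
        with foam_writer("controlDict", header=header) as printer:
            printer(contents)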
"""
fh = None
try:
fh = open(filename, 'w')
fh.write(file_banner%{
'timestamp': osutils.timestamp(),
'version': version,
})
printer = DictPrinter(buf=fh)
if header:
printer.write_dict_item("FoamFile", header, True)
fh.write(header_separator)
yield printer
fh.write(eof_separator)
finally:
if fh:
fh.close()
class Indenter(object):
"""An indentation utility for use with DictPrinter"""
def __init__(self, tab_width=4):
"""
Args:
tab_width (int): Default indentation width
"""
#: Identation width
self.tab_width = tab_width
#: Current indentation column
self.curr_indent = 0
@property
def indent_str(self):
"""Return an indentation string"""
return ' '*self.curr_indent
def emit(self, fh):
"""Emit the leading identation"""
fh.write(self.indent_str)
def indent(self):
"""Indent the tab"""
self.curr_indent += self.tab_width
def dedent(self):
"""Dedent the tab"""
self.curr_indent -= self.tab_width
class DictPrinter(object):
"""Caelus Input File Pretty-printer
Given a CaelusDict instance, this class will emit formatted data suitable
for use with Caelus solvers
"""
#: Default width for keywords
keyword_fmt = "%-20s"
no_keywd_values = (
dtypes.Directive,
dtypes.MacroSubstitution,
)
def __init__(self, buf=sys.stdout, tab_width=4):
"""
Args:
buf (file handle): A valid buffer to output to
tab_width (int): Indentation width
"""
self.indenter = Indenter(tab_width)
self.buf = buf
def __call__(self, entries):
"""Pretty-print the dictionary
Args:
entries (CaelusDict): Contents dictionary for output
"""
if not entries:
return
tab_width = max(len(key) for key, value in entries.items()
if not isinstance(value, self.no_keywd_values))
tab_width += self.indenter.tab_width
curr_keywd_fmt = self.keyword_fmt
self.keyword_fmt = "%%-%ds"%tab_width
for key, value in entries.items():
self.write_dict_item(key, value)
self.keyword_fmt = curr_keywd_fmt
def write_dict_item(self, key, value, nested=False):
"""Pretty-print a dictionary entry
Args:
key (str): Keyword for the parameter
value (object): Value for the keyword
nested (bool): Flag indicating whether the entries are nested
"""
buf = self.buf
indenter = self.indenter
if isinstance(value, self.no_keywd_values):
value.write_value(buf, indenter.indent_str)
elif isinstance(value, dtypes.BoundaryList):
buf.write("%d"%len(value.value))
self.write_list(value.value)
elif isinstance(value, dtypes.FoamType):
buf.write(indenter.indent_str + self.keyword_fmt%key + " ")
value.write_value(buf, indenter.indent_str)
else:
buf.write(indenter.indent_str + self.keyword_fmt%key + " ")
self.write_value(value)
if not nested:
buf.write("\n")
def write_value(self, value, recursive=False, indented=False):
"""Pretty-print an RHS entry based on its type
Args:
value (object): Value to be printed
recursive (bool): Flag indicating whether the value is part of a
dictionary or a list
indented (bool): Flag indicating whether value must be indented
"""
buf = self.buf
if isinstance(value, Mapping):
self.write_dict(value)
elif isinstance(value, np.ndarray):
self.write_ndarray(value, recursive=recursive)
elif isinstance(value, list):
self.write_list(value, recursive=recursive)
elif isinstance(value, bool):
if indented:
buf.write(self.indenter.indent_str)
pvalue = "on" if value else "off"
buf.write(pvalue)
buf.write("\n" if recursive else ";\n")
else:
if indented:
buf.write(self.indenter.indent_str)
buf.write(str('' if value is None else value))
buf.write("\n" if recursive else ";\n")
def write_dict(self, value):
"""Pretty-print a Caelus dictionary type
Args:
value (Mapping): A valid python dict-like instance
"""
buf = self.buf
indenter = self.indenter
curr_keywd_fmt = self.keyword_fmt
tab_width = indenter.tab_width
if value:
tab_width = tab_width + max(
len(key) for key in value.keys())
self.keyword_fmt = "%%-%ds"%tab_width
buf.write("\n" + indenter.indent_str + "{\n")
indenter.indent()
for key, val in value.items():
self.write_dict_item(key, val, nested=True)
indenter.dedent()
buf.write(indenter.indent_str + "}\n\n")
self.keyword_fmt = curr_keywd_fmt
def write_ndarray(self, value, recursive=False):
"""Pretty-print a numeric list
Args:
value (np.ndarray): Array object
recursive (bool): Flag indicating whether it is part of a list or dict
"""
buf = self.buf
indent = self.indenter.curr_indent
indent_str = self.indenter.indent_str
ndim = value.ndim
arr_size = value.size
arr_str = None
# Ensure that numpy doesn't truncate the list
threshold = np.get_printoptions()['threshold']
try:
np.set_printoptions(threshold=arr_size+10)
arr_str = np.array_str(value, max_line_width=80-indent)
finally:
            np.set_printoptions(threshold=threshold)
from psisim import spectrum
import numpy as np
import scipy.interpolate as si
import scipy.integrate as integrate
import astropy.units as u
from scipy.ndimage import gaussian_filter
import copy
import matplotlib.pyplot as plt
import warnings
def simulate_observation(telescope,instrument,planet_table_entry,planet_spectrum,wvs,spectrum_R,
inject_noise=True,verbose=False,post_processing_gain = 1,return_noise_components=False,stellar_spec=None,
apply_lsf=False,integrate_delta_wv=False,no_speckle_noise=False,plot=False, sky_on=True):
'''
A function that simulates an observation
Inputs:
Telescope - A Telescope object
Instrument - An Instrument object
planet_table_entry - an entry/row from a Universe planet table
planet_spectrum - A planet spectrum from simulate spectrum given in contrast units
observing_configs - To be defined
Kwargs:
stellar_spectrum - an optional argument to pass if the user has already generated a stellar spectrum. Expected units are photons/s/cm^2/angstrom
post_processing_gain - optional argument by which to reduce the speckle noise (set to np.inf to replace "no_speckle_noise")
sky_on - optional bool to apply atmospheric transmission (default True to apply transmission). Replaces old _nosky function
Outputs:
F_lambda, F_lambda_error
'''
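    # Illustrative call (a sketch; object names and keyword values are assumptions):
    #   spec, err = simulate_observation(telescope, instrument, planet_row, planet_contrast_spectrum,
    #                                    wvs, spectrum_R=1e5, apply_lsf=True, post_processing_gain=10.)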
##### ALL UNITS NEED TO BE PROPERLY EXAMINED #####
#Some relevant planet properties
separation = planet_table_entry['AngSep'].to(u.arcsec)
star_aomag = planet_table_entry['StarAOmag']
star_spt = planet_table_entry['StarSpT']
if stellar_spec is None:
#Get the stellar spectrum at the wavelengths of interest.
#The stellar spectrum will be in units of photons/s/cm^2/angstrom
stellar_spectrum = spectrum.get_stellar_spectrum(planet_table_entry,wvs,instrument.current_R,
verbose=verbose)
else:
stellar_spectrum = copy.deepcopy(stellar_spec)
#TODO: Add check that the input stellar spectrum as the correct units
#Multiply the stellar spectrum by the collecting area and a factor of 10,000
#to convert from m^2 to cm^2 and get the stellar spectrum in units of photons/s
stellar_spectrum *= telescope.collecting_area.to(u.cm**2)
#Multiply by atmospheric transmission if requested
if sky_on:
stellar_spectrum *= telescope.get_atmospheric_transmission(wvs,R=instrument.current_R)
#Multiply by telescope throughput
stellar_spectrum *= telescope.get_telescope_throughput(wvs,band=instrument.current_filter)
#Multiply by filter transmission
stellar_spectrum *= instrument.get_filter_transmission(wvs,instrument.current_filter)
# Convert planet from contrast units back into spectrum units
# Thus planet spectrum now has throughputs above from stellar spectrum accounted for
#This assumes that you have properly carried around 'wvs'
#and that the planet_spectrum is given at the wvs wavelengths.
scaled_spectrum = planet_spectrum*stellar_spectrum
# Account for instrument throughput for stellar spectrum and planet spectrum individually
stellar_spectrum *= instrument.get_inst_throughput(wvs,planet_flag=False)
scaled_spectrum *= instrument.get_inst_throughput(wvs,planet_flag=True,planet_sep=separation)
#Multiply by quantum efficiency
stellar_spectrum *= instrument.qe #now units of e-/s/Angstrom
scaled_spectrum *= instrument.qe #now units of e-/s/Angstrom
#Get Sky thermal background in photons/s/Angstrom
thermal_sky = telescope.get_sky_background(wvs,R=instrument.current_R) #Assumes diffraction limited PSF had to multiply by solid angle of PSF.
thermal_sky *= telescope.collecting_area.to(u.cm**2) #Multiply by collecting area - units of photons/s/Angstom
thermal_sky *= telescope.get_telescope_throughput(wvs,band=instrument.current_filter) #Multiply by telescope throughput
thermal_sky *= instrument.get_inst_throughput(wvs) #Multiply by instrument throughput
#Get Telescope thermal background in photons/s/Angstrom
thermal_telescope = telescope.get_thermal_emission(wvs,band=instrument.current_filter)
thermal_telescope *= telescope.collecting_area.to(u.cm**2) #Now in units of photons/s/Angstrom
thermal_telescope *= instrument.get_inst_throughput(wvs) #Multiply by telescope throughput
#Get the Instrument thermal background in photons/s/Angstrom
diffraction_limit = (wvs/telescope.diameter.to(u.micron)*u.radian).to(u.arcsec)
solidangle = diffraction_limit**2 * 1.13
thermal_inst = instrument.get_instrument_background(wvs,solidangle) #In units of photons/s/Angstrom
thermal_flux = thermal_sky + thermal_telescope + thermal_inst
# thermal_flux *= instrument.get_filter_transmission(wvs, instrument.current_filter) # Comment this out for now. It should be part of get_instrument_background
thermal_flux *= instrument.qe #e-/s/Angstrom
#Apply the line-spread function if the user wants to.
if apply_lsf:
dwvs = np.abs(wvs - np.roll(wvs, 1))
dwvs[0] = dwvs[1]
dwv_mean = np.mean(dwvs)
lsf_fwhm = (instrument.lsf_width/dwv_mean).decompose() #Get the lsf_fwhm in units of current wavelength spacing
lsf_sigma = lsf_fwhm/(2*np.sqrt(2*np.log(2))) #Convert to sigma
stellar_spectrum = gaussian_filter(stellar_spectrum, lsf_sigma.value) * stellar_spectrum.unit
scaled_spectrum = gaussian_filter(scaled_spectrum, lsf_sigma.value) * scaled_spectrum.unit
#Downsample to instrument wavelength sampling
intermediate_spectrum = si.interp1d(wvs, scaled_spectrum,fill_value="extrapolate",bounds_error=False)
intermediate_stellar_spectrum = si.interp1d(wvs, stellar_spectrum,fill_value="extrapolate",bounds_error=False)
intermediate_thermal_spectrum = si.interp1d(wvs, thermal_flux,fill_value="extrapolate",bounds_error=False)
if integrate_delta_wv:
detector_spectrum = []
detector_stellar_spectrum = []
detector_thermal_flux = []
#Intergrate over the delta_lambda between each wavelength value.
for inst_wv, inst_dwv in zip(instrument.current_wvs, instrument.current_dwvs):
wv_start = inst_wv - inst_dwv/2.
wv_end = inst_wv + inst_dwv/2.
flux = 1e4*u.AA/u.micron*integrate.quad(intermediate_spectrum, wv_start.value, wv_end.value)[0]*scaled_spectrum.unit # detector spectrum now in e-/s (1e4 is for micron to angstrom conversion)
stellar_flux = 1e4*u.AA/u.micron*integrate.quad(intermediate_stellar_spectrum, wv_start.value, wv_end.value)[0]*stellar_spectrum.unit # detector spectrum now in e-/s
thermal_flux = 1e4*u.AA/u.micron*integrate.quad(intermediate_thermal_spectrum, wv_start.value, wv_end.value)[0]*thermal_flux.unit # detector spectrum now in e-/s
detector_spectrum.append(flux)
detector_stellar_spectrum.append(stellar_flux)
detector_thermal_flux.append(thermal_flux)
detector_spectrum = np.array(detector_spectrum)
detector_stellar_spectrum = np.array(detector_stellar_spectrum)
        detector_thermal_flux = np.array(detector_thermal_flux)
from __future__ import division
from warnings import warn
import numpy as np
from dipy.reconst.cache import Cache
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.csdeconv import csdeconv
from dipy.reconst.shm import real_sph_harm
from scipy.special import gamma, hyp1f1
from dipy.core.geometry import cart2sphere
from dipy.data import get_sphere
from dipy.reconst.odf import OdfModel, OdfFit
from scipy.optimize import leastsq
from dipy.utils.optpkg import optional_package
cvxpy, have_cvxpy, _ = optional_package("cvxpy")
class ForecastModel(OdfModel, Cache):
r"""Fiber ORientation Estimated using Continuous Axially Symmetric Tensors
(FORECAST) [1,2,3]_. FORECAST is a Spherical Deconvolution reconstruction
model for multi-shell diffusion data which enables the calculation of a
voxel adaptive response function using the Spherical Mean Tecnique (SMT)
[2,3]_.
With FORECAST it is possible to calculate crossing invariant parallel
diffusivity, perpendicular diffusivity, mean diffusivity, and fractional
anisotropy [2]_
References
----------
.. [1] <NAME>., "Measurement of Fiber Orientation Distributions
Using High Angular Resolution Diffusion Imaging", Magnetic
Resonance in Medicine, 2005.
.. [2] <NAME>. et al., "Quantitative Mapping of the Per-Axon Diffusion
Coefficients in Brain White Matter", Magnetic Resonance in
Medicine, 2016.
.. [3] <NAME>. et al., "A generalized SMT-based framework for
Diffusion MRI microstructural model estimation", MICCAI Workshop
on Computational DIFFUSION MRI (CDMRI), 2017.
Notes
-----
The implementation of FORECAST may require CVXPY (http://www.cvxpy.org/).
"""
def __init__(self,
gtab,
sh_order=8,
lambda_lb=1e-3,
dec_alg='CSD',
sphere=None,
lambda_csd=1.0):
r""" Analytical and continuous modeling of the diffusion signal with
respect to the FORECAST basis [1,2,3]_.
This implementation is a modification of the original FORECAST
model presented in [1]_ adapted for multi-shell data as in [2,3]_ .
The main idea is to model the diffusion signal as the combination of a
single fiber response function $F(\mathbf{b})$ times the fODF
$\rho(\mathbf{v})$
..math::
:nowrap:
\begin{equation}
E(\mathbf{b}) = \int_{\mathbf{v} \in \mathcal{S}^2} \rho(\mathbf{v}) F({\mathbf{b}} | \mathbf{v}) d \mathbf{v}
\end{equation}
where $\mathbf{b}$ is the b-vector (b-value times gradient direction)
and $\mathbf{v}$ is an unit vector representing a fiber direction.
In FORECAST $\rho$ is modeled using real symmetric Spherical Harmonics
(SH) and $F(\mathbf(b))$ is an axially symmetric tensor.
Parameters
----------
gtab : GradientTable,
gradient directions and bvalues container class.
sh_order : unsigned int,
an even integer that represent the SH order of the basis (max 12)
lambda_lb: float,
Laplace-Beltrami regularization weight.
dec_alg : str,
Spherical deconvolution algorithm. The possible values are Weighted Least Squares ('WLS'),
Positivity Constraints using CVXPY ('POS') and the Constraint
Spherical Deconvolution algorithm ('CSD'). Default is 'CSD'.
sphere : array, shape (N,3),
sphere points where to enforce positivity when 'POS' or 'CSD'
dec_alg are selected.
lambda_csd : float,
CSD regularization weight.
References
----------
.. [1] <NAME>., "Measurement of Fiber Orientation Distributions
Using High Angular Resolution Diffusion Imaging", Magnetic
Resonance in Medicine, 2005.
.. [2] <NAME> al., "Quantitative Mapping of the Per-Axon Diffusion
Coefficients in Brain White Matter", Magnetic Resonance in
Medicine, 2016.
.. [3] <NAME>. et al., "A generalized SMT-based framework for
Diffusion MRI microstructural model estimation", MICCAI Workshop
on Computational DIFFUSION MRI (CDMRI), 2017.
Examples
--------
In this example, where the data, gradient table and sphere tessellation
used for reconstruction are provided, we model the diffusion signal
with respect to the FORECAST and compute the fODF, parallel and
perpendicular diffusivity.
>>> from dipy.data import get_sphere, get_3shell_gtab
>>> gtab = get_3shell_gtab()
>>> from dipy.sims.voxel import MultiTensor
>>> mevals = np.array(([0.0017, 0.0003, 0.0003],
... [0.0017, 0.0003, 0.0003]))
>>> angl = [(0, 0), (60, 0)]
>>> data, sticks = MultiTensor(gtab,
... mevals,
... S0=100.0,
... angles=angl,
... fractions=[50, 50],
... snr=None)
>>> from dipy.reconst.forecast import ForecastModel
>>> fm = ForecastModel(gtab, sh_order=6)
>>> f_fit = fm.fit(data)
>>> d_par = f_fit.dpar
>>> d_perp = f_fit.dperp
>>> sphere = get_sphere('symmetric724')
>>> fodf = f_fit.odf(sphere)
"""
OdfModel.__init__(self, gtab)
# round the bvals in order to avoid numerical errors
self.bvals = np.round(gtab.bvals/100) * 100
self.bvecs = gtab.bvecs
if sh_order >= 0 and not(bool(sh_order % 2)) and sh_order <= 12:
self.sh_order = sh_order
else:
msg = "sh_order must be a non-zero even positive number "
msg += "between 2 and 12"
raise ValueError(msg)
if sphere is None:
sphere = get_sphere('repulsion724')
self.vertices = sphere.vertices[
0:int(sphere.vertices.shape[0]/2), :]
else:
self.vertices = sphere
self.b0s_mask = self.bvals == 0
self.one_0_bvals = np.r_[0, self.bvals[~self.b0s_mask]]
self.one_0_bvecs = np.r_[np.array([0, 0, 0]).reshape(
1, 3), self.bvecs[~self.b0s_mask, :]]
self.rho = rho_matrix(self.sh_order, self.one_0_bvecs)
# signal regularization matrix
self.srm = rho_matrix(4, self.one_0_bvecs)
self.lb_matrix_signal = lb_forecast(4)
self.b_unique = np.sort(np.unique(self.bvals[self.bvals > 0]))
self.wls = True
self.csd = False
self.pos = False
if dec_alg.upper() == 'POS':
if have_cvxpy:
self.wls = False
self.pos = True
else:
                msg = 'cvxpy is needed to enforce positivity constraints.'
raise ValueError(msg)
if dec_alg.upper() == 'CSD':
self.csd = True
self.lb_matrix = lb_forecast(self.sh_order)
self.lambda_lb = lambda_lb
self.lambda_csd = lambda_csd
self.fod = rho_matrix(sh_order, self.vertices)
@multi_voxel_fit
def fit(self, data):
data_b0 = data[self.b0s_mask].mean()
data_single_b0 = np.r_[data_b0, data[~self.b0s_mask]] / data_b0
# calculates the mean signal at each b_values
means = find_signal_means(self.b_unique,
data_single_b0,
self.one_0_bvals,
self.srm,
self.lb_matrix_signal)
# average diffusivity initialization
x = np.array([np.pi/4, np.pi/4])
x, status = leastsq(forecast_error_func, x,
args=(self.b_unique, means))
# transform to bound the diffusivities from 0 to 3e-03
d_par = np.cos(x[0])**2 * 3e-03
d_perp = np.cos(x[1])**2 * 3e-03
if d_perp >= d_par:
temp = d_par
d_par = d_perp
d_perp = temp
# round to avoid memory explosion
diff_key = str(int(np.round(d_par * 1e05))) + \
str(int(np.round(d_perp * 1e05)))
M_diff = self.cache_get('forecast_matrix', key=diff_key)
if M_diff is None:
M_diff = forecast_matrix(
self.sh_order, d_par, d_perp, self.one_0_bvals)
self.cache_set('forecast_matrix', key=diff_key, value=M_diff)
M = M_diff * self.rho
M0 = M[:, 0]
c0 = np.sqrt(1.0/(4*np.pi))
# coefficients vector initialization
n_c = int((self.sh_order + 1)*(self.sh_order + 2)/2)
coef = np.zeros(n_c)
coef[0] = c0
if int(np.round(d_par*1e05)) > int(np.round(d_perp*1e05)):
if self.wls:
data_r = data_single_b0 - M0*c0
Mr = M[:, 1:]
Lr = self.lb_matrix[1:, 1:]
pseudo_inv = np.dot(np.linalg.inv(
np.dot(Mr.T, Mr) + self.lambda_lb*Lr), Mr.T)
coef = np.dot(pseudo_inv, data_r)
coef = np.r_[c0, coef]
if self.csd:
coef, _ = csdeconv(data_single_b0, M, self.fod, tau=0.1,
convergence=50)
coef = coef / coef[0] * c0
if self.pos:
c = cvxpy.Variable(M.shape[1])
design_matrix = cvxpy.Constant(M)
objective = cvxpy.Minimize(
cvxpy.sum_squares(design_matrix * c - data_single_b0) +
self.lambda_lb * cvxpy.quad_form(c, self.lb_matrix))
constraints = [c[0] == c0, self.fod * c >= 0]
prob = cvxpy.Problem(objective, constraints)
try:
prob.solve(solver=cvxpy.OSQP, eps_abs=1e-05, eps_rel=1e-05)
coef = np.asarray(c.value).squeeze()
except Exception:
warn('Optimization did not find a solution')
coef = np.zeros(M.shape[1])
coef[0] = c0
return ForecastFit(self, data, coef, d_par, d_perp)
class ForecastFit(OdfFit):
def __init__(self, model, data, sh_coef, d_par, d_perp):
""" Calculates diffusion properties for a single voxel
Parameters
----------
model : object,
AnalyticalModel
data : 1d ndarray,
fitted data
sh_coef : 1d ndarray,
forecast sh coefficients
d_par : float,
parallel diffusivity
d_perp : float,
perpendicular diffusivity
"""
OdfFit.__init__(self, model, data)
self.model = model
self._sh_coef = sh_coef
self.gtab = model.gtab
self.sh_order = model.sh_order
self.d_par = d_par
self.d_perp = d_perp
self.rho = None
def odf(self, sphere, clip_negative=True):
r""" Calculates the fODF for a given discrete sphere.
Parameters
----------
sphere : Sphere,
the odf sphere
clip_negative : boolean, optional
if True clip the negative odf values to 0, default True
"""
if self.rho is None:
self.rho = rho_matrix(self.sh_order, sphere.vertices)
odf = np.dot(self.rho, self._sh_coef)
if clip_negative:
odf = np.clip(odf, 0, odf.max())
return odf
def fractional_anisotropy(self):
r""" Calculates the fractional anisotropy.
"""
fa = np.sqrt(0.5 * (2*(self.d_par - self.d_perp)**2) /
(self.d_par**2 + 2*self.d_perp**2))
return fa
def mean_diffusivity(self):
r""" Calculates the mean diffusivity.
"""
md = (self.d_par + 2*self.d_perp)/3.0
return md
def predict(self, gtab=None, S0=1.0):
r""" Calculates the fODF for a given discrete sphere.
Parameters
----------
gtab : GradientTable, optional
gradient directions and bvalues container class.
S0 : float, optional
the signal at b-value=0
"""
if gtab is None:
gtab = self.gtab
M_diff = forecast_matrix(self.sh_order,
self.d_par,
self.d_perp,
gtab.bvals)
rho = rho_matrix(self.sh_order, gtab.bvecs)
M = M_diff * rho
S = S0 * np.dot(M, self._sh_coef)
return S
@property
def sh_coeff(self):
"""The FORECAST SH coefficients
"""
return self._sh_coef
@property
def dpar(self):
"""The parallel diffusivity
"""
return self.d_par
@property
def dperp(self):
"""The perpendicular diffusivity
"""
return self.d_perp
def find_signal_means(b_unique, data_norm, bvals, rho, lb_matrix, w=1e-03):
r"""Calculate the mean signal for each shell.
Parameters
----------
b_unique : 1d ndarray,
unique b-values in a vector excluding zero
data_norm : 1d ndarray,
normalized diffusion signal
bvals : 1d ndarray,
the b-values
rho : 2d ndarray,
SH basis matrix for fitting the signal on each shell
lb_matrix : 2d ndarray,
Laplace-Beltrami regularization matrix
w : float,
weight for the Laplace-Beltrami regularization
Returns
-------
means : 1d ndarray
the average signal for each b-value
"""
lb = len(b_unique)
means = np.zeros(lb)
for u in range(lb):
ind = bvals == b_unique[u]
shell = data_norm[ind]
if np.sum(ind) > 20:
M = rho[ind, :]
pseudo_inv = np.dot(np.linalg.inv(
np.dot(M.T, M) + w*lb_matrix), M.T)
coef = np.dot(pseudo_inv, shell)
means[u] = coef[0] / np.sqrt(4*np.pi)
else:
means[u] = shell.mean()
return means
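# Note: for a real, symmetric SH expansion the spherical mean of the signal is
# carried entirely by the l=0 term, i.e. it equals c_00 * Y_00 = c_00 / sqrt(4*pi).
# That is why `coef[0] / np.sqrt(4*np.pi)` above recovers the shell average when
# the shell has enough samples to fit the SH coefficients reliably.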
def forecast_error_func(x, b_unique, E):
r""" Calculates the difference between the mean signal calculated using
the parameter vector x and the average signal E using FORECAST and SMT
"""
d_par = np.cos(x[0])**2 * 3e-03
d_perp = np.cos(x[1])**2 * 3e-03
if d_perp >= d_par:
temp = d_par
d_par = d_perp
d_perp = temp
E_reconst = 0.5 * np.exp(-b_unique * d_perp) * psi_l(0, (b_unique * (d_par - d_perp)))
v = E-E_reconst
return v
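# A minimal sketch of how this residual could be consumed (the use of
# scipy.optimize.leastsq and the starting point are assumptions; b_unique and
# the per-shell means, e.g. from find_signal_means above, are assumed given).
# The cos(x)**2 * 3e-03 parametrization keeps both diffusivities in [0, 3e-3]:
#
#   from scipy.optimize import leastsq
#   x0 = np.array([0.5, 0.5])
#   x_opt, _ = leastsq(forecast_error_func, x0, args=(b_unique, means))
#   d_par = np.cos(x_opt[0])**2 * 3e-03
#   d_perp = np.cos(x_opt[1])**2 * 3e-03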
def psi_l(l, b):
n = l//2
v = (-b)**n
v *= gamma(n + 1./2) / gamma(2*n + 3./2)
v *= hyp1f1(n + 1./2, 2*n + 3./2, -b)
return v
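# For l = 0 the expression reduces to the closed form
# psi_l(0, b) = sqrt(pi) * erf(sqrt(b)) / sqrt(b),
# which is the isotropically averaged attenuation used in the SMT-style
# residual above. A quick sanity check (scipy.special.erf assumed available):
#
#   from scipy.special import erf
#   b = 1.5
#   assert np.isclose(psi_l(0, b), np.sqrt(np.pi) * erf(np.sqrt(b)) / np.sqrt(b))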
def forecast_matrix(sh_order, d_par, d_perp, bvals):
r"""Compute the FORECAST radial matrix
"""
n_c = int((sh_order + 1) * (sh_order + 2) / 2)
M = np.zeros((bvals.shape[0], n_c))
counter = 0
for l in range(0, sh_order + 1, 2):
for m in range(-l, l + 1):
M[:, counter] = 2 * np.pi * \
np.exp(-bvals * d_perp) * psi_l(l, bvals * (d_par - d_perp))
counter += 1
return M
def rho_matrix(sh_order, vecs):
r"""Compute the SH matrix $\rho$
"""
r, theta, phi = cart2sphere(vecs[:, 0], vecs[:, 1], vecs[:, 2])
theta[np.isnan(theta)] = 0
n_c = int((sh_order + 1) * (sh_order + 2) / 2)
rho = np.zeros((vecs.shape[0], n_c))
import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
from ..utils import get_corners_in_world_coordinates
from ..utils import transform_points
from ..utils import transform_matrix
from ..utils import road_segment_color
CV2_SHIFT = 8
CV2_SHIFT_VALUE = 2 ** CV2_SHIFT
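# OpenCV drawing functions accept a `shift` argument that interprets point
# coordinates as fixed-point numbers with `shift` fractional bits. Multiplying
# float coordinates by CV2_SHIFT_VALUE (2 ** CV2_SHIFT) before rounding to int
# therefore preserves sub-pixel precision when rasterizing, e.g. with
# cv2.fillPoly(img, pts, color, shift=CV2_SHIFT).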
def rasterize(parsed):
"""
Parameters:
parsed: a parsed example
Returns:
batch_images: a np.ndarray of rasterized images with shape (B, 224, 448, 3) and dtype float32
"""
decoded_example = parsed
past_states = tf.stack([
decoded_example['state/past/x'],
decoded_example['state/past/y'],
decoded_example['state/past/length'],
decoded_example['state/past/width'],
decoded_example['state/past/bbox_yaw']
], -1)
cur_states = tf.stack([
decoded_example['state/current/x'],
decoded_example['state/current/y'],
decoded_example['state/current/length'],
decoded_example['state/current/width'],
decoded_example['state/current/bbox_yaw']
], -1)
states = tf.concat([past_states, cur_states], axis = 1)
past_is_valid = decoded_example['state/past/valid'] > 0
current_is_valid = decoded_example['state/current/valid'] > 0
is_valid = tf.concat([past_is_valid, current_is_valid], axis = 1)
is_valid = tf.reduce_any(is_valid, 1)
valid_states = tf.boolean_mask(states, is_valid)
tracks_to_predict = parsed['state/tracks_to_predict']
current_is_valid = tf.squeeze(current_is_valid, axis = 1)
orig_to_valid_map = (tf.cumsum(tf.cast(is_valid, dtype = tf.int32)) - 1).numpy()
tracks = tf.where(tracks_to_predict > 0)
tracks = tracks.numpy().reshape(-1)
current_is_valid = current_is_valid.numpy()
r_valid_states = tf.transpose(valid_states, perm = [1,0,2]) # (11,58,5)
r_valid_states = tf.reshape(r_valid_states, (-1,5))
corners = get_corners_in_world_coordinates(r_valid_states) # (58*11, 4, 2)
ego_info = {}
current_x = parsed['state/current/x'].numpy().reshape(-1)
current_y = parsed['state/current/y'].numpy().reshape(-1)
current_yaw = parsed['state/current/bbox_yaw'].numpy().reshape(-1)
# Prepare the road data
xyz_road = parsed['roadgraph_samples/xyz']
is_valid_road = parsed['roadgraph_samples/valid']
road_type = parsed['roadgraph_samples/type']
xy_road = xyz_road[:,:2]
is_valid_road = tf.squeeze(is_valid_road)
valid_xy_road = tf.boolean_mask(xy_road, is_valid_road)
dir_road = parsed['roadgraph_samples/dir']
dir_xy_road = dir_road[:, :2]
valid_dir_xy_road = tf.boolean_mask(dir_xy_road, is_valid_road)
valid_road_type = np.squeeze(tf.boolean_mask(road_type, is_valid_road).numpy())
road_ids = np.squeeze(tf.boolean_mask(parsed['roadgraph_samples/id'], is_valid_road).numpy())
valid_xy_plus_dir = valid_xy_road + valid_dir_xy_road
valid_xy_plus_dir = valid_xy_plus_dir.numpy()
valid_xy_road = valid_xy_road.numpy()
tl_state = parsed['traffic_light_state/current/state']
tl_ids = parsed['traffic_light_state/current/id']
tl_valid = parsed['traffic_light_state/current/valid']
valid_tl_states = tf.boolean_mask(tl_state, tl_valid).numpy()
valid_tl_ids = tf.boolean_mask(tl_ids, tl_valid).numpy()
batch_images = np.zeros((len(tracks), 224,448, 3), dtype=np.float32)
for track_index, track in enumerate(tracks):
if not current_is_valid[track]:
print("WARNING! Found a track that is not valid in current frame!")
batch_images[track_index] = None
continue
track_in_valid_index = orig_to_valid_map[track]
cx = current_x[track]
cy = current_y[track]
yaw = current_yaw[track]
# generate the transfer matrix
transform = transform_matrix(cx, cy, yaw)
transformed = transform_points(corners, transform)
tl_colors = [(1,1,1), # white Unknown = 0
(1,0,0), # red Arrow_Stop = 1
(1,1,0), # yellow Arrow_Caution = 2
(0,1,0), # green Arrow_go = 3
(1,0,0), # red stop = 4
(1,1,0), # yellow caution = 5
(0,1,0), # green go = 6
(1,115/255,0), # red flashing_stop = 7
(212/255,1,0)] # yellow flashing caution = 8
# Drawing the road
road_img = np.zeros((224,448,3), dtype = np.float32)
valid_xy_road_in_img = transform_points(valid_xy_road, transform)*CV2_SHIFT_VALUE
valid_xy_plus_dir_in_img = transform_points(valid_xy_plus_dir, transform)*CV2_SHIFT_VALUE
road_pts = np.stack([valid_xy_road_in_img, valid_xy_plus_dir_in_img], 1)
import os
import re
import random
import json
import copy
import tgt
import librosa
import numpy as np
import pyworld as pw
from scipy.stats import betabinom
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from pathlib import Path
from g2p_en import G2p
import audio as Audio
from model import PreDefinedEmbedder
from text import grapheme_to_phoneme
from utils.pitch_tools import get_pitch, get_cont_lf0, get_lf0_cwt
from utils.tools import get_phoneme_level_energy, plot_embedding, spec_f0_to_figure
class Preprocessor:
def __init__(self, preprocess_config, model_config, train_config):
random.seed(train_config['seed'])
self.preprocess_config = preprocess_config
self.multi_speaker = model_config["multi_speaker"]
# self.learn_alignment = model_config["duration_modeling"]["learn_alignment"]
self.corpus_dir = preprocess_config["path"]["corpus_path"]
self.in_dir = preprocess_config["path"]["raw_path"]
self.out_dir = preprocess_config["path"]["preprocessed_path"]
self.val_size = preprocess_config["preprocessing"]["val_size"]
self.sampling_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
self.hop_length = preprocess_config["preprocessing"]["stft"]["hop_length"]
self.filter_length = preprocess_config["preprocessing"]["stft"]["filter_length"]
self.trim_top_db = preprocess_config["preprocessing"]["audio"]["trim_top_db"]
self.beta_binomial_scaling_factor = preprocess_config["preprocessing"]["duration"]["beta_binomial_scaling_factor"]
self.with_f0 = preprocess_config["preprocessing"]["pitch"]["with_f0"]
self.with_f0cwt = preprocess_config["preprocessing"]["pitch"]["with_f0cwt"]
assert preprocess_config["preprocessing"]["energy"]["feature"] in [
"phoneme_level",
"frame_level",
]
self.energy_normalization = preprocess_config["preprocessing"]["energy"]["normalization"]
self.STFT = Audio.stft.TacotronSTFT(
preprocess_config["preprocessing"]["stft"]["filter_length"],
preprocess_config["preprocessing"]["stft"]["hop_length"],
preprocess_config["preprocessing"]["stft"]["win_length"],
preprocess_config["preprocessing"]["mel"]["n_mel_channels"],
preprocess_config["preprocessing"]["audio"]["sampling_rate"],
preprocess_config["preprocessing"]["mel"]["mel_fmin"],
preprocess_config["preprocessing"]["mel"]["mel_fmax"],
)
# self.STFT = Audio.stft.FastSpeechSTFT(
# preprocess_config["preprocessing"]["stft"]["filter_length"],
# preprocess_config["preprocessing"]["stft"]["hop_length"],
# preprocess_config["preprocessing"]["stft"]["win_length"],
# preprocess_config["preprocessing"]["mel"]["n_mel_channels"],
# preprocess_config["preprocessing"]["audio"]["sampling_rate"],
# preprocess_config["preprocessing"]["mel"]["mel_fmin"],
# preprocess_config["preprocessing"]["mel"]["mel_fmax"],
# )
self.val_unsup_prior = self.val_prior_names(os.path.join(self.out_dir, "val_unsup.txt"))
self.val_sup_prior = self.val_prior_names(os.path.join(self.out_dir, "val_sup.txt"))
self.speaker_emb = None
self.in_sub_dirs = [p for p in os.listdir(self.in_dir) if os.path.isdir(os.path.join(self.in_dir, p))]
if self.multi_speaker and preprocess_config["preprocessing"]["speaker_embedder"] != "none":
self.speaker_emb = PreDefinedEmbedder(preprocess_config)
self.speaker_emb_dict = self._init_spker_embeds(self.in_sub_dirs)
self.g2p = G2p()
def _init_spker_embeds(self, spkers):
spker_embeds = dict()
for spker in spkers:
spker_embeds[spker] = list()
return spker_embeds
def val_prior_names(self, val_prior_path):
val_prior_names = set()
if os.path.isfile(val_prior_path):
print("Load pre-defined validation set...")
with open(val_prior_path, "r", encoding="utf-8") as f:
for m in f.readlines():
val_prior_names.add(m.split("|")[0])
return list(val_prior_names)
else:
return None
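# A minimal sketch of the expected metadata format (an assumption based on the
# parsing above): one "|"-separated line per utterance with the basename as the
# first field, e.g.
#
#   utterance_0001|{speaker}|{phonemes}|{raw_text}
#
# Only the first field is used here to mark pre-defined validation utterances.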
def build_from_path(self):
embedding_dir = os.path.join(self.out_dir, "spker_embed")
os.makedirs((os.path.join(self.out_dir, "mel_unsup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "mel_sup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "f0_unsup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "f0_sup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "pitch_unsup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "pitch_sup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "cwt_spec_unsup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "cwt_spec_sup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "cwt_scales_unsup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "cwt_scales_sup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "f0cwt_mean_std_unsup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "f0cwt_mean_std_sup")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "energy_unsup_frame")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "energy_sup_frame")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "energy_sup_phone")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "duration")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "mel2ph")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "attn_prior")), exist_ok=True)
os.makedirs(embedding_dir, exist_ok=True)
print("Processing Data ...")
out_unsup = list()
out_sup = list()
filtered_out_unsup = set()
filtered_out_sup = set()
train_unsup = list()
val_unsup = list()
train_sup = list()
val_sup = list()
n_frames = 0
max_seq_len = -float('inf')
mel_unsup_min = np.ones(80) * float('inf')
mel_unsup_max = np.ones(80) * -float('inf')
"""
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks
from . import spectrum as sp
def gaussian(x, mean, sigma):
"""
Gaussian function.
Parameters
----------
x : numpy array.
x-values.
mean : float or int.
mean of distribution.
sigma : float or int.
standard deviation.
Returns
-------
numpy array.
Gaussian distribution.
"""
z = (x - mean) / sigma
return np.exp(-(z ** 2) / 2.0)
def gaussian_derivative(x, mean, sigma):
"""
First derivative of a Gaussian.
Parameters
----------
x : numpy array.
x-values.
mean : float or int.
mean of distribution.
sigma : float or int.
standard deviation.
Returns
-------
numpy array
first derivative of a Gaussian.
"""
z = x - mean
return -1 * z * gaussian(x, mean, sigma)
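# Minimal sketch (values are illustrative): both helpers are unnormalized, so
# gaussian() peaks at exactly 1.0 at x == mean and gaussian_derivative() is
# antisymmetric about the mean:
#
#   x = np.linspace(-5.0, 5.0, 101)
#   g = gaussian(x, 0.0, 1.0)               # g.max() == 1.0 at x == 0
#   dg = gaussian_derivative(x, 0.0, 1.0)   # zero at x == 0, sign flips there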
class PeakSearch:
def __init__(self, spectrum, ref_x, ref_fwhm, fwhm_at_0=1.0, min_snr=2):
"""
Find peaks in a Spectrum object and decompose specrum into components
using a Gaussian kernel deconvolution technique. Most of this
functionality was adapted from https://github.com/lbl-anp/becquerel
Parameters
----------
spectrum : Spectrum object.
previously initialized spectrum object.
ref_x : int
reference x-value (in channels) corresponding to ref_fwhm.
ref_fwhm : int or float.
fwhm value (in channels) corresponding to ref_x.
fwhm_at_0 : int or float, optional
fwhm value at channel = 0. The default is 1.0.
min_snr : int or float, optional
minimum SNR to look for relevant peaks. The default is 2.
Raises
------
Exception
'spectrum' must be a Spectrum object.
Returns
-------
None.
"""
if not isinstance(spectrum, sp.Spectrum):
raise Exception("'spectrum' must be a Spectrum object")
self.ref_x = ref_x
self.ref_fwhm = ref_fwhm
self.fwhm_at_0 = fwhm_at_0
self.spectrum = spectrum
self.min_snr = min_snr
self.snr = []
self.peak_plus_bkg = []
self.bkg = []
self.signal = []
self.noise = []
self.peaks_idx = []
self.fwhm_guess = []
self.calculate()
def fwhm(self, x):
"""
Calculate the expected FWHM at the given x value
Parameters
----------
x : numpy array
x-values.
Returns
-------
numpy array.
expected FWHM values.
"""
# f(x) = k * sqrt(x) + b
# b = f(0)
# k = f1/sqrt(x1)
f0 = self.fwhm_at_0
f1 = self.ref_fwhm
x1 = self.ref_x
# fwhm_sqr = np.sqrt(f0**2 + (f1**2 - f0**2) * (x / x1)**2)
fwhm_sqr = (f1 / np.sqrt(x1)) * np.sqrt(x) + f0
return fwhm_sqr
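# Illustrative example (numbers are assumptions): with ref_x=1000, ref_fwhm=30
# and fwhm_at_0=1 the model gives fwhm(250) = 30/sqrt(1000)*sqrt(250) + 1 = 16
# channels, i.e. the peak width grows with the square root of the channel number.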
def kernel(self, x, edges):
"""Generate the kernel for the given x value."""
fwhm1 = self.fwhm(x)
sigma = fwhm1 / 2.355
g1_x0 = gaussian_derivative(edges[:-1], x, sigma)
g1_x1 = gaussian_derivative(edges[1:], x, sigma)
kernel = g1_x0 - g1_x1
return kernel
def kernel_matrix(self, edges):
"""Build a matrix of the kernel evaluated at each x value."""
n_channels = len(edges) - 1
kern = np.zeros((n_channels, n_channels))
for i, x in enumerate(edges[:-1]):
kern[:, i] = self.kernel(x, edges)
kern_pos = +1 * kern.clip(0, np.inf)
kern_neg = -1 * kern.clip(-np.inf, 0)
# normalize negative part to be equal to the positive part
kern_neg *= kern_pos.sum(axis=0) / kern_neg.sum(axis=0)
kmat = kern_pos - kern_neg
return kmat
def convolve(self, edges, data):
"""Convolve kernel with the data."""
kern_mat = self.kernel_matrix(edges)
kern_mat_pos = +1 * kern_mat.clip(0, np.inf)
kern_mat_neg = -1 * kern_mat.clip(-np.inf, 0)
peak_plus_bkg = kern_mat_pos @ data
bkg = kern_mat_neg @ data
signal = kern_mat @ data
noise = (kern_mat ** 2) @ data
# print("other")
# noise = np.array([np.sqrt(x) for x in noise])
noise = np.sqrt(noise)
snr = np.zeros_like(signal)
import numpy as np
import copy
import scipy
from scipy.stats import norm
from scipy import io,signal
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from weighted_median import *
def check_str_bool(s):
return s in ['True' ,'true', '1', 't', 'y','YES' ,'Yes','yes', 'yeah','Yeah', 'yup', 'certainly', 'uh-huh']
class vec_properties:
def __init__(self,source,ws,time_unit,time_scale_to_seconds,length_unit,length_scale_to_meter):
self.source = source
self.ws = ws
self.time_unit = time_unit
self.time_scale_to_seconds = time_scale_to_seconds
self.length_unit = length_unit
self.length_scale_to_meter = length_scale_to_meter
self.velocity_units = length_unit+'/'+time_unit
def show(self):
print(
'source: ',self.source,'\n',
'window size: ',self.ws,'\n',
'dt: ',self.time_scale_to_seconds,'\n',
'pixel to meter: ',self.length_scale_to_meter,'\n',
'velocity units: ',self.velocity_units,'\n')
class field_properties:
def __init__(self,frame,time,images_path,source,time_unit,time_scale_to_seconds,length_unit,length_scale_to_meter):
self.frame = frame
self.time = time
self.images_path = images_path
self.source = source
self.history = ''
self.time_unit = time_unit
self.time_scale_to_seconds = time_scale_to_seconds
self.length_unit = length_unit
self.length_scale_to_meter = length_scale_to_meter
self.velocity_units = length_unit+'/'+time_unit
def show(self):
print(
'frame: ',self.frame,'\n',
'absolute time: ',self.time,'\n',
'images_path: ',self.images_path,'\n',
'source: ',self.source,'\n',
'dt: ',self.time_scale_to_seconds,'\n',
'pixel to meter: ',self.length_scale_to_meter,'\n',
'length units: ',self.length_unit,'\n',
'velocity units: ',self.velocity_units)
class run_properties:
pass
class vector:
def __init__(self,X,Y,U,V,S2N,properties):
self.X = X
self.Y = Y
self.U = U
self.V = V
self.S2N = S2N
self.properties = properties
def convert_units(self,output_length_unit,output_time_unit):
LS = {'mm':0.001, 'cm':0.01, 'm':1.0,'meter':1.0,'meters':1.0, 'km':1000.}
TS = {'ms':0.001, 's':1.0,'second':1.0,'seconds':1.0, 'min':60.,'mins':60.,'h':3600.,'hour':3600.,'hours':3600.}
LS[self.properties.length_unit]=float(self.properties.length_scale_to_meter)
TS[self.properties.time_unit]=float(self.properties.time_scale_to_seconds)
self.X = self.X*(LS[self.properties.length_unit]/LS[output_length_unit])
self.Y = self.Y*(LS[self.properties.length_unit]/LS[output_length_unit])
self.U = self.U*(LS[self.properties.length_unit]/LS[output_length_unit])*(TS[output_time_unit]/TS[self.properties.time_unit])
self.V = self.V*(LS[self.properties.length_unit]/LS[output_length_unit])*(TS[output_time_unit]/TS[self.properties.time_unit])
self.properties.length_unit = output_length_unit
self.properties.length_scale_to_meter = LS[output_length_unit]
self.properties.time_unit = output_time_unit
self.properties.time_scale_to_seconds = TS[output_time_unit]
self.properties.velocity_units = output_length_unit+'/'+output_time_unit
class field:
def __init__(self,field_properties):
self.data = {}
self.filtered = {}
self.properties = field_properties
def __add__(self,other):
check_list = []
check_list.append(self.properties.length_unit == other.properties.length_unit)
check_list.append(self.properties.length_scale_to_meter == other.properties.length_scale_to_meter)
check_list.append(self.properties.time_unit == other.properties.time_unit)
check_list.append(self.properties.time_scale_to_seconds == other.properties.time_scale_to_seconds)
check_list.append(self.properties.velocity_units == other.properties.velocity_units)
if all(check_list):
sum_properties = self.properties
sum_properties.source = 'Sum'
sum_properties.frame = self.properties.frame + ' & ' + other.properties.frame
sum_properties.time = self.properties.time + ' & ' + other.properties.time
sum_properties.images_path = self.properties.images_path + ' & ' + other.properties.images_path
sum_field = field(sum_properties)
for xy in list(self.data.keys()):
sum_field.add_vec(self.data[xy])
for xy in list(other.data.keys()):
sum_field.add_vec(other.data[xy])
return sum_field
else:
print( 'Field properties do not match')
def add_vec(self, vector):
self.data[vector.X,vector.Y] = vector
def check_if_grid_point_exists(self,x,y):
xy = list(self.data.keys())
return (x,y) in xy
def move_to_filtered(self,vector):
self.filtered[vector.X,vector.Y] = copy.deepcopy(vector)
vector.U = np.nan
vector.V = np.nan
vector.properties.source = 'filtered'
def transfer(self,other):
for xy in list(other.data.keys()):
self.add_vec(other.data[xy])
def convert_field_units(self,output_length_unit,output_time_unit):
XY = list(self.data.keys())
if self.properties.length_unit == None or self.properties.length_unit == '':
self.properties.length_unit = str(input('field length units'))
if self.properties.length_scale_to_meter== None or self.properties.length_scale_to_meter == '':
self.properties.length_scale_to_meter = str(input('field length units to meters'))
if self.properties.time_unit == None or self.properties.time_unit == '':
self.properties.time_unit = str(input('field time units'))
if self.properties.time_scale_to_seconds== None or self.properties.time_scale_to_seconds == '':
self.properties.time_scale_to_seconds = str(input('field time units to seconds'))
for xy in XY:
self.data[xy].properties.length_unit = self.properties.length_unit
self.data[xy].properties.length_scale_to_meter = self.properties.length_scale_to_meter
self.data[xy].properties.time_unit = self.properties.time_unit
self.data[xy].properties.time_scale_to_seconds = self.properties.time_scale_to_seconds
self.data[xy].convert_units(output_length_unit,output_time_unit)
self.add_vec(self.data[xy])
self.remove_vec(xy[0],xy[1])
XY0 = list(self.data.keys())[0]
self.properties.length_unit = self.data[XY0].properties.length_unit
self.properties.length_scale_to_meter = self.data[XY0].properties.length_scale_to_meter
self.properties.time_unit = self.data[XY0].properties.time_unit
self.properties.time_scale_to_seconds = self.data[XY0].properties.time_scale_to_seconds
self.properties.velocity_units = self.data[XY0].properties.velocity_units
def remove_vec(self,X,Y,vector=None):
if vector is not None:
del self.data[vector.X,vector.Y]
else:
del self.data[X,Y]
def return_vel(self,x,y):
u = self.data[x,y].U
v = self.data[x,y].V
return u,v
def return_n_closest_neighbors(self,x,y,n=4):
X,Y = self.return_grid()
dist = np.sqrt((X-x)**2+(Y-y)**2)
n_closest_neighbors = [ [(X[ind],Y[ind]),dist[ind]] for ind in dist.argsort()[:n]]
return n_closest_neighbors
def return_closest_neighbors_radius(self,x,y,radius):
X,Y = self.return_grid()
dist = np.sqrt((X-x)**2+(Y-y)**2)
indecies = np.where(dist<radius)
closest_neighbors = [[(X[indecies[0][i]],Y[indecies[0][i]]),dist[indecies[0][i]]] for i in range(len(indecies[0]))]
return closest_neighbors
def return_grid(self):
XY = list(self.data.keys())
X,Y = zip(*XY)
X = np.array(X)
Y = np.array(Y)
return X,Y
def return_all_velocities(self):
XY = list(self.data.keys())
U = np.array([self.data[xy[0],xy[1]].U for xy in XY])
V = np.array([self.data[xy[0],xy[1]].V for xy in XY])
return U,V
def sub_average(self):
XY = list(self.data.keys())
umean,ustd,vmean,vstd = self.mean_velocity()
for i in range(len(XY)):
self.data[XY[i]].U = self.data[XY[i]].U - umean
self.data[XY[i]].V = self.data[XY[i]].V - vmean
def create_mesh_grid(self):
X,Y = self.return_grid()
U,V = self.return_all_velocities()
X_mesh_grid = sorted(list(set(X)))
Y_mesh_grid = sorted(list(set(Y)))
X_mesh_grid,Y_mesh_grid = np.meshgrid(X_mesh_grid,Y_mesh_grid)
U_mesh_grid = np.empty(X_mesh_grid.shape)
U_mesh_grid.fill(np.nan)
V_mesh_grid = np.empty(X_mesh_grid.shape)
V_mesh_grid.fill(np.nan)
for vec_ind in range(len(X)):
x = X[vec_ind]
y = Y[vec_ind]
col = np.array(np.where(X_mesh_grid[0,:]==x))[0,0]
row = np.array(np.where(Y_mesh_grid[:,0]==y))[0,0]
U_mesh_grid[row,col] = U[vec_ind]
V_mesh_grid[row,col] = V[vec_ind]
return X_mesh_grid,Y_mesh_grid[::-1],U_mesh_grid[::-1],V_mesh_grid[::-1]
def s2n_filter(self,threshold):
XY = list(self.data.keys())
for xy in XY:
if self.data[xy].S2N < threshold:
self.move_to_filtered(self.data[xy])
def hist_filter(self,percentage):
def TrueXor(*args):
return sum(args) == 1
hist_u,hist_v,hist2d = self.velocity_histogram()
# stretch boundary edges
hist_u[1][0] = hist_u[1][0]-1
hist_u[1][-1] = hist_u[1][-1]+1
hist_v[1][0] = hist_v[1][0]-1
hist_v[1][-1] = hist_v[1][-1]+1
hist2d[1][0] = hist2d[1][0]-1
hist2d[1][-1] = hist2d[1][-1]+1
hist2d[2][0] = hist2d[2][0]-1
hist2d[2][-1] = hist2d[2][-1]+1
XY = list(self.data.keys())
number_of_vectors = len(XY)
for xy in XY:
u = self.data[xy].U
v = self.data[xy].V
if np.isfinite(u) and not np.isfinite(v):
if hist_u[0][np.digitize(u,hist_u[1])-1] / number_of_vectors > percentage/100:
u_iter,v_iter = self.inverse_distance_interpolation(xy[0],xy[1])
if np.isfinite(v_iter):
self.data[xy].V = v_iter
v = v_iter
else:
self.move_to_filtered(self.data[xy])
if np.isfinite(v) and not np.isfinite(u):
if hist_v[0][np.digitize(v,hist_v[1])-1] / number_of_vectors > percentage/100:
u_iter,v_iter = self.inverse_distance_interpolation(xy[0],xy[1])
if np.isfinite(u_iter):
self.data[xy].U = u_iter
u = u_iter
else:
self.move_to_filtered(self.data[xy])
if np.isfinite(v) and np.isfinite(u):
U_histpos = np.digitize(u,hist2d[1])-1
V_histpos = np.digitize(v,hist2d[2])-1
if hist2d[0][U_histpos,V_histpos] / number_of_vectors < percentage/100:
self.move_to_filtered(self.data[xy])
def Z_filter(self,threshold,neighbors=4,power=1):
XY = list(self.data.keys())
for xy in XY:
u = self.data[xy].U
v = self.data[xy].V
closest_neighbors = self.return_n_closest_neighbors(self.data[xy].X,self.data[xy].Y,neighbors+1)[1:]
neighbor_pos , dis = zip(*closest_neighbors)
weights = [(1/d)**power for d in dis]
U,V = zip(*[self.return_vel(pos[0],pos[1]) for pos in neighbor_pos])
median_U = weighted_median(U,weights)
median_V = weighted_median(V,weights)
median_absolute_deviation_U = weighted_median([np.abs(u_neighbor - median_U) for u_neighbor in U],weights)
median_absolute_deviation_V = weighted_median([np.abs(v_neighbor - median_V) for v_neighbor in V],weights)
if 0.6745*(u - median_U) / max(median_absolute_deviation_U,0.01) > threshold:
self.move_to_filtered(self.data[xy])
continue
if 0.6745*(v - median_V) / max(median_absolute_deviation_V,0.01) > threshold:
self.move_to_filtered(self.data[xy])
continue
def max_arg_filter(self,U_bound,V_bound):
XY = list(self.data.keys())
for xy in XY:
U_check = True
V_check = True
if self.data[xy].U > U_bound[1] or self.data[xy].U < U_bound[0]:
U_check=False
if self.data[xy].V > V_bound[1] or self.data[xy].V < V_bound[0]:
V_check=False
if U_check and not V_check:
u_itr,v_itr = self.inverse_distance_interpolation(xy[0],xy[1])
self.data[xy].V = v_itr
elif V_check and not U_check:
u_itr,v_itr = self.inverse_distance_interpolation(xy[0],xy[1])
self.data[xy].U = u_itr
elif not V_check and not U_check:
self.move_to_filtered(self.data[xy])
def mean_velocity(self):
U,V = self.return_all_velocities()
return np.nanmean(U),np.nanstd(U),np.nanmean(V),np.nanstd(V)
def velocity_histogram(self,bins=10):
def remove_nans(u,v):
u = list(u)
v = list(v)
nan_index=[]
for i in range(len(u)):
if not np.isfinite(u[i]) or not np.isfinite(v[i]):
nan_index.append(i)
for index in sorted(nan_index, reverse=True):
del u[index]
del v[index]
return np.array(u),np.array(v)
U,V = self.return_all_velocities()
hist_U = np.histogram(U[np.isfinite(U)],bins)
hist_V = np.histogram(V[np.isfinite(V)],bins)
U,V = remove_nans(U,V)
hist2d = np.histogram2d(U, V, bins)
return hist_U,hist_V,hist2d
def extract_area(self,x_boundry,y_boundry):
area = field(self.properties)
X,Y = self.return_grid()
for i in range(len(X)):
if x_boundry[0]<=X[i]<=x_boundry[1] and y_boundry[0]<=Y[i]<=y_boundry[1]:
area.add_vec(self.data[X[i],Y[i]])
return area
def vel_gradients(self):
X,Y,U,V = self.create_mesh_grid()
Udx,Udy = np.gradient(U)
Vdx,Vdy = np.gradient(V)
return X,Y,Udx,Udy,Vdx,Vdy
def vel_differntial(self):
def least_square_diff(field,grid,axis=0):
if axis==0:
shape = field.shape
dif = np.zeros(shape)
for row in range(shape[0]):
for col in range(2,shape[1]-2):
rs = 2*field[row,col+2]+field[row,col+1]
ls = -field[row,col-1]-2*field[row,col-2]
dis = 10*(grid[row,col+1]-grid[row,col])
dif[row,col] = (rs+ls)/dis
#dif[row,col] = (2*field[row,col+2]+field[row,col+1]-field[row,col-1]-2*field[row,col-2])/10*(grid[row,col+1]-grid[row,col])
return dif
elif axis==1:
shape = field.shape
dif = np.zeros(shape)
for row in range(2,shape[0]-2):
for col in range(shape[1]):
us = 2*field[row-2,col]+field[row-1,col]
ds = -field[row+1,col]-2*field[row+2,col]
dis = 10*(grid[row-1,col]-grid[row,col])
dif[row,col] = (us+ds)/dis
#dif[row,col] = (2*field[row-2,col]+field[row-1,col]-field[row+1,col]-2*field[row+2,col])/10*(grid[row-1,col]-grid[row,col])
return dif
X,Y,U,V = self.create_mesh_grid()
dU_x = least_square_diff(U,X)
dU_y = least_square_diff(U,Y,axis=1)
dV_x = least_square_diff(V,X)
dV_y = least_square_diff(V,Y,axis=1)
return dU_x,dU_y,dV_x,dV_y
def profile(self,axis='y'):
X,Y,U,V = self.create_mesh_grid()
if axis=='y' or axis=='Y':
U_profile = np.nanmean(U,axis=1)[::-1]
V_profile = np.nanmean(V,axis=1)[::-1]
Y_profile = Y[:,0]
return U_profile,V_profile,Y_profile
else:
U_profile = np.nanmean(U,axis=0)[::-1]
V_profile = np.nanmean(V,axis=0)[::-1]
X_profile = X[0,:]
return U_profile,V_profile,X_profile
def vorticity_field(self):
dU_x,dU_y,dV_x,dV_y = self.vel_differntial()
vort = dV_x-dU_y
return vort[2:-2,2:-2]
def inverse_distance_interpolation(self,x,y,number_of_neighbors=5,radius=None,inverse_power=2):
def weigted_velocity(neighbors_vels,weights):
weight_sum=0
weigted_vels=[]
for i in range(len(neighbors_vels)):
if not np.isnan(neighbors_vels[i]):
weight_sum += weights[i]
weigted_vels.append(weights[i]*neighbors_vels[i])
return np.nansum(weigted_vels)/weight_sum
if self.check_if_grid_point_exists(x,y):
if radius is not None:
indecies,distances = zip(*self.return_closest_neighbors_radius(x,y,radius))
else:
indecies,distances = zip(*self.return_n_closest_neighbors(x,y,n=number_of_neighbors+1))
weights = list(np.array(distances[1:])**-float(inverse_power))
neighbors_vel = [self.return_vel(ind[0],ind[1]) for ind in indecies[1:]]
u_vels,v_vels = zip(*neighbors_vel)
inter_u = weigted_velocity(u_vels,weights)
inter_v = weigted_velocity(v_vels,weights)
return inter_u,inter_v
else:
if radius is not None:
indecies,distances = zip(*self.return_closest_neighbors_radius(x,y,radius))
else:
indecies,distances = zip(*self.return_n_closest_neighbors(x,y,n=number_of_neighbors))
weights = list(np.array(distances)**-float(inverse_power))
neighbors_vel = [self.return_vel(ind[0],ind[1]) for ind in indecies]
u_vels,v_vels = zip(*neighbors_vel)
inter_u = weigted_velocity(u_vels,weights)
inter_v = weigted_velocity(v_vels,weights)
return inter_u,inter_v
def interpf(self):
X,Y = self.return_grid()
for ind in range(X.shape[0]):
pos = (X[ind],Y[ind])
u_cur,v_cur = self.return_vel(pos[0],pos[1])
if np.isnan(u_cur) and np.isnan(v_cur):
u_iter,v_iter = self.inverse_distance_interpolation(pos[0],pos[1])
vec = self.data[pos]
vec.U = u_iter
vec.V = v_iter
vec.properties.source = 'Interpolation'
elif np.isnan(u_cur):
u_iter,v_iter = self.inverse_distance_interpolation(pos[0],pos[1])
vec = self.data[pos]
vec.U = u_iter
vec.properties.source = 'Interpolation'
elif np.isnan(v_cur):
u_iter,v_iter = self.inverse_distance_interpolation(pos[0],pos[1])
vec = self.data[pos]
vec.V = v_iter
vec.properties.source = 'Interpolation'
def remap(self,X,Y,shape_of_new_grid=None):
new_feild = field(self.properties)
Xold,Yold = self.return_grid()
if shape_of_new_grid==None:
X = X.flatten()
Y = Y.flatten()
else:
X,Y = np.meshgrid(np.linspace(Xold.min(),Xold.max(),shape_of_new_grid[1]),np.linspace(Yold.min(),Yold.max(),shape_of_new_grid[0]))
X = X.flatten()
Y = Y.flatten()
vec_properties = self.data[Xold[0],Yold[0]].properties
vec_properties.source = 'Interpolation'
for ind in range(len(X)):
u,v = self.inverse_distance_interpolation(X[ind],Y[ind])
vec = vector(X[ind],Y[ind],u,v,0,vec_properties)
new_feild.add_vec(vec)
self.filtered = self.data
self.data = {}
self.transfer(new_feild)
def auto_spatial_correlation(self):
X,Y,U,V = self.create_mesh_grid()
Uc = scipy.signal.convolve2d(U,U[::-1])
Vc = scipy.signal.convolve2d(V,V[::-1])
Uc = Uc - Uc.min()
Vc = Vc - Vc.min()
s_cor = np.sqrt(Uc**2+Vc**2)
dX = X - np.mean(X[0,:])
dY = Y - np.mean(Y[:,0])
return dX,dY,s_cor
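# A minimal usage sketch (units, window size and the synthetic grid of vectors
# below are assumptions, not data from a real measurement):
#
#   props = vec_properties('openpiv', ws=32, time_unit='s',
#                          time_scale_to_seconds='1',
#                          length_unit='m', length_scale_to_meter='1')
#   fprops = field_properties('frame_0001', 0.0, 'images/', 'openpiv',
#                             's', '1', 'm', '1')
#   f = field(fprops)
#   for x, y in [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]:
#       f.add_vec(vector(x, y, 1.0, 0.5, 10.0, props))
#   f.s2n_filter(threshold=1.5)   # move low signal-to-noise vectors to filtered
#   f.interpf()                   # refill them by inverse-distance interpolation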
class run:
def __init__(self):
self.fields = {}
def add_field(self,field):
self.fields[field.properties.frame] = field
def frames(self):
return list(self.fields.keys())
def remove_field(self,frame,field=None):
if field is not None:
del self.fields[field.properties.frame]
else:
del self.fields[frame]
def remap_run(self,X,Y,shape_of_new_grid=None):
frames = self.frames()
for frame in frames:
self.fields[frame].remap(X,Y,shape_of_new_grid)
def convert_run_units(self,output_length_unit,output_time_unit,run_length_unit=None,run_length_scale_to_meter=None,run_time_unit=None,run_time_scale_to_seconds=None):
same_prop = check_str_bool(input('Do all frames in run have the same properties?'))
if same_prop:
''' After correcting the properties of run use this:
if self.properties.length_unit == None or self.properties.length_unit == '':
self.properties.length_unit = str(input('run length units: '))
if self.properties.length_scale_to_meter== None or self.properties.length_scale_to_meter == '':
self.properties.length_scale_to_meter = str(input('run length units to meters: '))
if self.properties.time_unit == None or self.properties.time_unit == '':
self.properties.time_unit = str(input('run time units: '))
if self.properties.time_scale_to_seconds== None or self.properties.time_scale_to_seconds == '':
self.properties.time_scale_to_seconds = str(input('run time units to seconds: '))
'''
if run_length_unit is None:
self.length_unit = str(input('run length units: '))
else:
self.length_unit = run_length_unit
if run_length_scale_to_meter is None:
self.length_scale_to_meter = str(input('run length units to meters: '))
else:
self.length_scale_to_meter = run_length_scale_to_meter
if run_time_unit is None:
self.time_unit = str(input('run time units: '))
else:
self.time_unit = run_time_unit
if run_time_scale_to_seconds is None:
self.time_scale_to_seconds = str(input('run time units to seconds: '))
else:
self.time_scale_to_seconds = run_time_scale_to_seconds
frames = self.frames()
for frame in frames:
if same_prop:
self.fields[frame].properties.length_unit = self.length_unit
self.fields[frame].properties.length_scale_to_meter = self.length_scale_to_meter
self.fields[frame].properties.time_unit = self.time_unit
self.fields[frame].properties.time_scale_to_seconds = self.time_scale_to_seconds
self.fields[frame].convert_field_units(output_length_unit,output_time_unit)
def check_same_grid_run(self):
frames = self.frames()
base_frame = frames[0]
for frame in frames:
X_base,Y_base = self.fields[base_frame].return_grid()
X_check,Y_check = self.fields[frame].return_grid()
if all(X_base == X_check) and all(Y_base == Y_check):
base_frame = frame
else:
return False
return True
def gp_exists_all_frames(self,x,y,show_missing_frames=False):
frames = self.frames()
gp_exists = [self.fields[f].check_if_grid_point_exists(x,y) for f in frames]
if all(gp_exists):
return True
else:
no_gp_frames = [x for x, y in zip(frames, gp_exists) if y == False]
frames_with_gp = [x for x, y in zip(frames, gp_exists) if y == True]
# allows checking of missing grid point frames
if show_missing_frames:
print('Frames without the requested grid point ','(',x,',',y,')',': ',no_gp_frames)
return frames_with_gp
def run_grid(self):
frames = self.frames()
Y_agp = []
X_agp =[]
for frame in frames:
X,Y = self.fields[frame].return_grid()
Y_agp += Y.tolist()
Y_agp = sorted(list(set(Y_agp)))
X_agp += X.tolist()
X_agp = sorted(list(set(X_agp)))
return np.meshgrid(np.array(X_agp),np.array(Y_agp))
def grid_point_velocity(self,x,y,frames=None):
if frames==None:
frames = self.frames()
if self.gp_exists_all_frames(x,y):
U = []
V = []
for f in frames:
u,v = self.fields[f].return_vel(x,y)
U.append(u)
V.append(v)
U = np.array(U)
V = np.array(V)
return U,V
else:
U = []
V = []
for f in frames:
u,v = self.fields[f].return_vel(x,y)
U.append(u)
V.append(v)
U = np.array(U)
V = np.array(V)
return U,V
def return_field(self,number_of_field,name_of_frame=None):
if name_of_frame is not None:
return self.fields[name_of_frame]
else:
return self.fields[self.frames()[number_of_field]]
def mean_gp_velocity(self,x,y):
for_all_frames = self.gp_exists_all_frames(x,y)
if for_all_frames==True:
U,V = self.grid_point_velocity(x,y)
U_rms = U - np.nanmean(U)
V_rms = V - np.nanmean(V)
return np.nanmean(U),U_rms,np.nanmean(V),V_rms
else:
U,V = self.grid_point_velocity(x,y,for_all_frames)
U_rms = U - np.nanmean(U)
V_rms = V - np.nanmean(V)
return np.nanmean(U),U_rms,np.nanmean(V),V_rms
def mean_velocity_properties(self):
frames = self.frames()
U_mean = []
V_mean = []
for f in frames:
u_mean,u_std,v_mean,v_std = self.fields[f].mean_velocity()
U_mean.append(u_mean)
V_mean.append(v_mean)
Um = np.mean(U_mean)
Vm = np.mean(V_mean)
U_rms = [(np.sqrt((u-Um)**2)) for u in U_mean]
V_rms = [(np.sqrt((v-Vm)**2)) for v in V_mean]
print('Max in mean U velocity occurs in frame: ',frames[U_mean.index(max(U_mean))])
print('Max in mean V velocity occurs in frame: ',frames[V_mean.index(max(V_mean))])
U_mean = np.array(U_mean)
V_mean = np.array(V_mean)
U_rms = np.array(U_rms)
V_rms = np.array(V_rms)
return U_mean,U_rms,V_mean,V_rms
def run_mean_velocities(self):
if self.check_same_grid_run():
X,Y = self.run_grid()
frames = self.frames()
shape = (X.shape[0],X.shape[1],len(frames))
U_mean = np.zeros(shape)
V_mean = np.zeros(shape)
for ind in range(len(frames)):
x,y,u,v = self.fields[frames[ind]].create_mesh_grid()
U_mean[:,:,ind] = u[::-1]
V_mean[:,:,ind] = v[::-1]
return np.nanmean(U_mean,axis=2),np.nanmean(V_mean,axis=2)
else:
X,Y = self.run_grid()
U_mean = np.zeros(X.shape)
import os
import fnmatch
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from netCDF4 import Dataset
from satpy import Scene, find_files_and_readers
from pyresample import create_area_def
from logger import logger
from colormap import chiljet_colormap
from helper import parseTime
plt.switch_backend('Agg')
PROJECTDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class Visualizer(object):
def __init__(self, file, *,
latRange=[20, 50], lonRange=[110, 130]):
self.file = file
self.latRange = latRange
self.lonRange = lonRange
try:
self.fd = Dataset(file, 'r')
except Exception as e:
raise e
def list_product(self):
"""
list all the products in the file.
"""
count = 1
for variable in self.fd.variables.keys():
logger.info('{0:2d}: {1:15s} {2}'.format(
count,
variable,
getattr(self.fd.variables[variable], 'long_name')))
count = count + 1
def load_data(self, product, mTime):
"""
load data to the workspace.
"""
lat = self.fd['latitude'][:]
lon = self.fd['longitude'][:]
mask_lat = np.logical_and(lat >= self.latRange[0],
lat <= self.latRange[1])
mask_lon = np.logical_and(lon >= self.lonRange[0],
lon <= self.lonRange[1])
self.data = self.fd.variables[product][:, mask_lon][mask_lat]
self.unit = getattr(self.fd.variables[product], 'units')
self.long_name = getattr(self.fd.variables[product], 'long_name')
self.lat = lat[mask_lat]
self.lon = lon[mask_lon]
self.mTime = mTime
def colorplot_with_RGB(self, imgFile, *args,
axLatRange=[20, 60], axLonRange=[90, 140],
cmap=None, pixels=100, **kwargs):
"""
load RGB data.
TODO:
Too time consuming in my local machine. (RAM > 10GB)
Wait till I get a better PC!
"""
pass
def colorplot_with_band(self, band, HSD_Dir, imgFile, *args,
axLatRange=[20, 60], axLonRange=[90, 140],
cmap=None, pixels=100, **kwargs):
"""
colorplot the variables together with radiance data.
Parameters
----------
band: int
band number [1-16]. See band specification in
`../doc/2018_A_Yamashita.md`
HSD_Dir: str
path for hosting the HSD files.
imgFile: str
filename of the exported image
Keywords
--------
axLatRange: list
latitude range of the plot (default: [20, 60]). [degree]
axLonRange: list
longitude range of the plot (default: [90, 140]). [degree]
cmap: str
colormap name.
pixels: int
resampled pixels of the band data (default: 100). Take care of
time consumption when pixels > 1000!
History
-------
2020-02-24 First version.
"""
files = find_files_and_readers(
start_time=(self.mTime - dt.timedelta(seconds=300)),
end_time=(self.mTime + dt.timedelta(seconds=300)),
base_dir=HSD_Dir,
reader='ahi_hsd'
)
matched_files = []
for file in files['ahi_hsd']:
if fnmatch.fnmatch(os.path.basename(file),
'HS_H08_*_B{0:02d}_FLDK_*_S0[1234]*DAT*'.format(band)):
matched_files.append(file)
h8_scene = Scene(filenames=matched_files,
reader='ahi_hsd', sensor='ahi')
band_label = 'B{0:02d}'.format(band)
h8_scene.load([band_label])
roi = create_area_def('roi',
{'proj': 'eqc', 'ellps': 'WGS84'},
width=pixels, height=pixels,
area_extent=[axLonRange[0], axLatRange[0],
axLonRange[1], axLatRange[1]],
units='degrees')
roi_scene = h8_scene.resample(roi)
# read China boundaries
with open(os.path.join(PROJECTDIR,
'include', 'CN-border-La.dat'), 'r') as fd:
context = fd.read()
blocks = [cnt for cnt in context.split('>') if len(cnt) > 0]
borders = [
np.fromstring(block, dtype=float, sep=' ') for block in blocks]
from copy import deepcopy
import numpy as np
from mushroom_rl.core import MDPInfo
from mdp.algo.model_free.reps_int import REPSInt
from mushroom_rl.policy import TDPolicy
from mushroom_rl.utils.parameters import Parameter
from mushroom_rl.utils.table import Table
from scipy.optimize import minimize
class REPS(REPSInt):
def __init__(self, mdp_info: MDPInfo, policy: TDPolicy, learning_rate: Parameter, eps=0.7):
self.eps = eps
self.Q = Table(mdp_info.size)
self.policy_table_base = Table(mdp_info.size)
policy.set_q(self.Q)
self.errors = np.zeros(mdp_info.size)
self.states = list()
super().__init__(mdp_info, policy, self.Q, learning_rate)
@staticmethod
def dual_function(eta_array, *args):
eta = eta_array.item()
eps, errors = args
max_error = np.nanmax(errors)
r = errors - max_error
sum1 = np.mean(np.exp(r / eta))
return eta * eps + eta * np.log(sum1) + max_error
# return eta * self.eps + eta * np.log(np.mean([np.exp(error / eta) for error in args[0][0]]))
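# The dual being minimized is g(eta) = eta*eps + eta*log( mean_i exp(delta_i/eta) ),
# with delta_i the advantage errors. Subtracting max_error before exponentiating
# is the usual log-sum-exp shift for numerical stability; it removes max_error
# inside the log, which is added back explicitly via `+ max_error`.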
@staticmethod
def _dual_function_diff(eta_array, *args):
eta = eta_array.item()
eps, errors = args
max_error = np.nanmax(errors)
r = errors - max_error
sum1 = np.mean(np.exp(r / eta))
sum2 = np.mean(np.exp(r / eta) * r)
gradient = eps + np.log(sum1) - sum2 / (eta * sum1)
return np.array([gradient])
def _update(self, state: np.ndarray, action: np.ndarray, reward: np.ndarray, next_state: np.ndarray,
absorbing: bool):
# Use SARSA for updating the q-table
q_current = self.Q[state, action]
self.next_action = self.draw_action(next_state)
q_next = self.Q[next_state, self.next_action] if not absorbing else 0.
self.Q[state, action] = q_current + self._alpha(state, action) * (
reward + self.mdp_info.gamma * q_next - q_current)
self.states.append(state)
#error: np.ndarray = reward + np.nanmax(self.Q[next_state, :]) - np.nanmax(self.Q[state, :])
#self.errors.append(error)
if absorbing:
# compute advantage over state action space
for state in self.states:
self.errors[state, :] = self.Q[state, :] - np.max(self.Q[state, :])
policy_table = deepcopy(self.policy_table_base)
eta_start = np.ones(1) # Must be larger than 0
# eta and v are obtained by minimizing the dual function
result = minimize(
fun=self.dual_function,
x0=eta_start, # Initial guess
jac=REPS._dual_function_diff, # gradient function
bounds=((np.finfo(np.float32).eps, np.inf),),
args=(self.eps, self.errors), # Additional arguments for the function
)
eta_optimal = result.x.item()
for state in self.states:
policy_table[state, :] = np.exp(self.errors[state, :] / eta_optimal)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 13:08:41 2019
@author: vkapoor
"""
from __future__ import print_function, unicode_literals, absolute_import, division
#import matplotlib.pyplot as plt
import numpy as np
import os
import collections
from tifffile import imread, imwrite
from skimage import morphology
from skimage.morphology import dilation, square
import cv2
from skimage.filters import gaussian
from six.moves import reduce
from matplotlib import cm
from skimage.filters import threshold_local, threshold_otsu
from skimage.morphology import remove_small_objects
from skimage.segmentation import find_boundaries
import matplotlib.pyplot as plt
from scipy.ndimage.morphology import binary_fill_holes
from skimage.segmentation import watershed
from pathlib import Path
from skimage.segmentation import relabel_sequential
from scipy.ndimage.measurements import find_objects
from scipy.ndimage.morphology import binary_dilation, binary_erosion
from skimage.util import invert as invertimage
from skimage import measure
from scipy.ndimage.filters import gaussian_filter
from skimage.measure import label
from csbdeep.utils import normalize
from skimage import filters
from skimage.util import random_noise
globalthreshold = 0.01
def SimplePrediction(x, UnetModel, StarModel, n_tiles = (2,2), UseProbability = True, min_size = 20):
Mask = UNETPrediction3D(x, UnetModel, n_tiles, 'YX')
SmartSeeds, _, StarImage = STARPrediction3D(x, StarModel, n_tiles, MaskImage = Mask, smartcorrection = None, UseProbability = UseProbability)
SmartSeeds = SmartSeeds.astype('uint16')
return SmartSeeds
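# A minimal usage sketch (file name and pre-loaded models are assumptions; the
# U-Net model is expected to expose a CSBDeep-style `predict(image, axes, n_tiles)`
# and the star model a StarDist-style `predict_instances`):
#
#   image = imread('raw_image.tif')
#   labels = SimplePrediction(image, unet_model, star_model,
#                             n_tiles=(2, 2), UseProbability=True, min_size=20)
#   imwrite('raw_image_labels.tif', labels)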
def crappify_flou_G_P(x, y, mu, sigma, savedirx, savediry, name):
x = x.astype('float32')
gaussiannoise = np.random.normal(mu, sigma*0.05, x.shape)
x = x + gaussiannoise
#add noise to original image
imwrite(savedirx + '/' + name + 'pg' + str(mu) + str(sigma) + '.tif', x.astype('float32'))
#keep the label the same
imwrite(savediry + '/' + name + 'pg' + str(mu) + str(sigma) + '.tif', y.astype('uint16'))
def _fill_label_holes(lbl_img, **kwargs):
lbl_img_filled = np.zeros_like(lbl_img)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
mask_filled = binary_fill_holes(mask,**kwargs)
lbl_img_filled[mask_filled] = l
return lbl_img_filled
def fill_label_holes(lbl_img, **kwargs):
"""Fill small holes in label image."""
# TODO: refactor 'fill_label_holes' and 'edt_prob' to share code
def grow(sl,interior):
return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
def shrink(interior):
return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
objects = find_objects(lbl_img)
lbl_img_filled = np.zeros_like(lbl_img)
for i,sl in enumerate(objects,1):
if sl is None: continue
interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
shrink_slice = shrink(interior)
grown_mask = lbl_img[grow(sl,interior)]==i
mask_filled = binary_fill_holes(grown_mask,**kwargs)[shrink_slice]
lbl_img_filled[sl][mask_filled] = i
return lbl_img_filled
def dilate_label_holes(lbl_img, iterations):
lbl_img_filled = np.zeros_like(lbl_img)
for l in (range(np.min(lbl_img), np.max(lbl_img) + 1)):
mask = lbl_img==l
mask_filled = binary_dilation(mask,iterations = iterations)
lbl_img_filled[mask_filled] = l
return lbl_img_filled
def remove_big_objects(ar, max_size=6400, connectivity=1, in_place=False):
out = ar.copy()
ccs = out
try:
component_sizes = np.bincount(ccs.ravel())
except ValueError:
raise ValueError("Negative value labels are not supported. Try "
"relabeling the input with `scipy.ndimage.label` or "
"`skimage.morphology.label`.")
too_big = component_sizes > max_size
too_big_mask = too_big[ccs]
out[too_big_mask] = 0
return out
def multiplotline(plotA, plotB, plotC, titleA, titleB, titleC, targetdir = None, File = None, plotTitle = None):
fig, axes = plt.subplots(1, 3, figsize=(15, 6))
ax = axes.ravel()
ax[0].plot(plotA)
ax[0].set_title(titleA)
ax[1].plot(plotB)
ax[1].set_title(titleB)
ax[2].plot(plotC)
ax[2].set_title(titleC)
plt.tight_layout()
if plotTitle is not None:
Title = plotTitle
else :
Title = 'MultiPlot'
if targetdir is not None and File is not None:
plt.savefig(targetdir + Title + File + '.png')
if targetdir is not None and File is None:
plt.savefig(targetdir + Title + '.png')
plt.show()
def BinaryDilation(Image, iterations = 1):
DilatedImage = binary_dilation(Image, iterations = iterations)
return DilatedImage
def CCLabels(fname, max_size = 15000):
BinaryImageOriginal = imread(fname)
Orig = normalizeFloatZeroOne(BinaryImageOriginal)
InvertedBinaryImage = invertimage(BinaryImageOriginal)
BinaryImage = normalizeFloatZeroOne(InvertedBinaryImage)
image = binary_dilation(BinaryImage)
image = invertimage(image)
IntegerImage = label(image)
labelclean = remove_big_objects(IntegerImage, max_size = max_size)
AugmentedLabel = dilation(labelclean, selem = square(3) )
AugmentedLabel = np.multiply(AugmentedLabel , Orig)
return AugmentedLabel
def SmartSeedPrediction3D( SaveDir, fname, UnetModel, StarModel, NoiseModel = None, min_size_mask = 100, min_size = 10,
n_tiles = (1,2,2), doMask = True, smartcorrection = None, threshold = 20, projection = False, UseProbability = True, filtersize = 0):
print('Generating SmartSeed results')
UNETResults = SaveDir + 'BinaryMask/'
SmartSeedsResults = SaveDir + 'SmartSeedsMask/'
StarDistResults = SaveDir + 'StarDist/'
Path(SaveDir).mkdir(exist_ok = True)
Path(SmartSeedsResults).mkdir(exist_ok = True)
Path(StarDistResults).mkdir(exist_ok = True)
Path(UNETResults).mkdir(exist_ok = True)
#Read Image
image = imread(fname)
sizeZ = image.shape[0]
sizeY = image.shape[1]
sizeX = image.shape[2]
SizedMask = np.zeros([sizeZ, sizeY, sizeX], dtype = 'uint16')
SizedSmartSeeds = np.zeros([sizeZ, sizeY, sizeX], dtype = 'uint16')
Name = os.path.basename(os.path.splitext(fname)[0])
if NoiseModel is not None:
image = NoiseModel.predict(image, axes='ZYX', n_tiles=n_tiles)
Mask = UNETPrediction3D(gaussian_filter(image, filtersize), UnetModel, n_tiles, 'ZYX')
for i in range(0, Mask.shape[0]):
Mask[i,:] = remove_small_objects(Mask[i,:].astype('uint16'), min_size = min_size)
SizedMask[:, :Mask.shape[1], :Mask.shape[2]] = Mask
SmartSeeds, _, StarImage = STARPrediction3D(gaussian_filter(image,filtersize), StarModel, n_tiles, MaskImage = Mask, UseProbability = UseProbability, smartcorrection = smartcorrection)
#Upsample images back to original size
for i in range(0, Mask.shape[0]):
SmartSeeds[i,:] = remove_small_objects(SmartSeeds[i,:].astype('uint16'), min_size = min_size)
SmartSeeds = RemoveLabels(SmartSeeds)
SizedSmartSeeds[:, :SmartSeeds.shape[1], :SmartSeeds.shape[2]] = SmartSeeds
imwrite((StarDistResults + Name+ '.tif' ) , StarImage.astype('uint16'))
imwrite((SmartSeedsResults + Name+ '.tif' ) , SizedSmartSeeds.astype('uint16'))
imwrite((UNETResults + Name+ '.tif' ) , SizedMask.astype('uint16'))
return SizedSmartSeeds, SizedMask
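# A minimal usage sketch (paths and pre-loaded models are assumptions; the U-Net
# and optional noise models are expected to follow the CSBDeep `predict`
# interface and the star model the StarDist `predict_instances` interface):
#
#   smart_seeds, mask = SmartSeedPrediction3D(
#       'results/', 'raw/movie.tif', unet_model, star_model,
#       NoiseModel=None, min_size=10, n_tiles=(1, 2, 2), UseProbability=True)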
def DownsampleData(image, DownsampleFactor):
if DownsampleFactor!=1:
print('Downsampling Image in XY by', DownsampleFactor)
scale_percent = int(100/DownsampleFactor) # percent of original size
width = int(image.shape[2] * scale_percent / 100)
height = int(image.shape[1] * scale_percent / 100)
dim = (width, height)
smallimage = np.zeros([image.shape[0], height,width])
for i in range(0, image.shape[0]):
# resize image
smallimage[i,:] = cv2.resize(image[i,:].astype('float32'), dim)
return smallimage
else:
return image
def UNETPrediction3D(image, model, n_tiles, axis):
Segmented = model.predict(image, axis, n_tiles = n_tiles)
try:
thresh = threshold_otsu(Segmented)
Binary = Segmented > thresh
except:
Binary = Segmented > 0
#Postprocessing steps
Filled = binary_fill_holes(Binary)
Finalimage = label(Filled)
Finalimage = fill_label_holes(Finalimage)
Finalimage = relabel_sequential(Finalimage)[0]
return Finalimage
def RemoveLabels(LabelImage, minZ = 2):
properties = measure.regionprops(LabelImage, LabelImage)
for prop in properties:
regionlabel = prop.label
sizeZ = abs(prop.bbox[0] - prop.bbox[3])
if sizeZ <= minZ:
LabelImage[LabelImage == regionlabel] = 0
return LabelImage
def STARPrediction3D(image, model, n_tiles, MaskImage = None, smartcorrection = None, UseProbability = True):
copymodel = model
image = normalize(image, 1, 99.8, axis = (0,1,2))
shape = [image.shape[1], image.shape[2]]
image = zero_pad_time(image, 64, 64)
grid = copymodel.config.grid
MidImage, details = model.predict_instances(image, n_tiles = n_tiles)
SmallProbability, SmallDistance = model.predict(image, n_tiles = n_tiles)
StarImage = MidImage[:image.shape[0],:shape[0],:shape[1]]
SmallDistance = MaxProjectDist(SmallDistance, axis=-1)
Probability = np.zeros([SmallProbability.shape[0] * grid[0],SmallProbability.shape[1] * grid[1], SmallProbability.shape[2] * grid[2] ])
Distance = np.zeros([SmallDistance.shape[0] * grid[0], SmallDistance.shape[1] * grid[1], SmallDistance.shape[2] * grid[2] ])
#We only allow for the grid parameter to be 1 along the Z axis
for i in range(0, SmallProbability.shape[0]):
Probability[i,:] = cv2.resize(SmallProbability[i,:], dsize=(SmallProbability.shape[2] * grid[2] , SmallProbability.shape[1] * grid[1] ))
Distance[i,:] = cv2.resize(SmallDistance[i,:], dsize=(SmallDistance.shape[2] * grid[2] , SmallDistance.shape[1] * grid[1] ))
if UseProbability:
Probability[Probability < globalthreshold ] = 0
MaxProjectDistance = Probability[:image.shape[0],:shape[0],:shape[1]]
else:
MaxProjectDistance = Distance[:image.shape[0],:shape[0],:shape[1]]
Watershed, Markers = WatershedwithMask3D(MaxProjectDistance.astype('uint16'), StarImage.astype('uint16'), MaskImage.astype('uint16'), grid )
Watershed = fill_label_holes(Watershed.astype('uint16'))
return Watershed, MaxProjectDistance, StarImage
def VetoRegions(Image, Zratio = 3):
Image = Image.astype('uint16')
properties = measure.regionprops(Image, Image)
for prop in properties:
LabelImage = prop.image
if LabelImage.shape[0] < Image.shape[0]/Zratio :
indices = zip(*np.where(LabelImage > 0))
for z, y, x in indices:
Image[z,y,x] = 0
return Image
#Default method that works well with cells which are below a certain shape and do not have weak edges
def iou3D(boxA, centroid):
ndim = len(centroid)
inside = False
Condition = [Conditioncheck(centroid, boxA, p, ndim) for p in range(0,ndim)]
inside = all(Condition)
return inside
def Conditioncheck(centroid, boxA, p, ndim):
condition = False
if centroid[p] >= boxA[p] and centroid[p] <= boxA[p + ndim]:
condition = True
return condition
def WatershedwithMask3D(Image, Label,mask, grid):
properties = measure.regionprops(Label, Image)
binaryproperties = measure.regionprops(label(mask), Image)
Coordinates = [prop.centroid for prop in properties]
BinaryCoordinates = [prop.centroid for prop in binaryproperties]
Binarybbox = [prop.bbox for prop in binaryproperties]
Coordinates = sorted(Coordinates , key=lambda k: [k[0], k[1], k[2]])
if len(Binarybbox) > 0:
for i in range(0, len(Binarybbox)):
box = Binarybbox[i]
inside = [iou3D(box, star) for star in Coordinates]
if not any(inside) :
Coordinates.append(BinaryCoordinates[i])
Coordinates.append((0,0,0))
Coordinates = np.asarray(Coordinates)
coordinates_int = np.round(Coordinates).astype(int)
markers_raw = np.zeros_like(Image)
markers_raw[tuple(coordinates_int.T)] = 1 + np.arange(len(Coordinates))
markers = morphology.dilation(markers_raw.astype('uint16'), morphology.ball(2))
watershedImage = watershed(-Image, markers, mask = mask.copy())
return watershedImage, markers
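# Note: seeds are placed at the StarDist centroids, plus the centroids of the
# connected components of the binary mask whose bounding boxes contain no
# StarDist centroid; the watershed then grows those seeds on -Image while
# being restricted to the binary mask.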
def Integer_to_border(Label, max_size = 6400):
SmallLabel = remove_big_objects(Label, max_size = max_size)
BoundaryLabel = find_boundaries(SmallLabel, mode='outer')
Binary = BoundaryLabel > 0
return Binary
def zero_pad(image, PadX, PadY):
sizeY = image.shape[1]
sizeX = image.shape[0]
sizeXextend = sizeX
sizeYextend = sizeY
while sizeXextend%PadX!=0:
sizeXextend = sizeXextend + 1
while sizeYextend%PadY!=0:
sizeYextend = sizeYextend + 1
extendimage = np.zeros([sizeXextend, sizeYextend])
extendimage[0:sizeX, 0:sizeY] = image
return extendimage
def zero_pad_color(image, PadX, PadY):
sizeY = image.shape[1]
sizeX = image.shape[0]
color = image.shape[2]
sizeXextend = sizeX
sizeYextend = sizeY
while sizeXextend%PadX!=0:
sizeXextend = sizeXextend + 1
while sizeYextend%PadY!=0:
sizeYextend = sizeYextend + 1
extendimage = np.zeros([sizeXextend, sizeYextend, color])
extendimage[0:sizeX, 0:sizeY, 0:color] = image
return extendimage
def zero_pad_time(image, PadX, PadY):
sizeY = image.shape[2]
sizeX = image.shape[1]
sizeXextend = sizeX
sizeYextend = sizeY
while sizeXextend%PadX!=0:
sizeXextend = sizeXextend + 1
while sizeYextend%PadY!=0:
sizeYextend = sizeYextend + 1
extendimage = np.zeros([image.shape[0], sizeXextend, sizeYextend])
extendimage[:,0:sizeX, 0:sizeY] = image
return extendimage
def BackGroundCorrection2D(Image, sigma):
Blur = gaussian(Image.astype(float), sigma)
Corrected = Image - Blur
return Corrected
def MaxProjectDist(Image, axis = -1):
MaxProject = np.amax(Image, axis = axis)
return MaxProject
def MidProjectDist(Image, axis = -1, slices = 1):
assert len(Image.shape) >=3
SmallImage = Image.take(indices = range(Image.shape[axis]//2 - slices, Image.shape[axis]//2 + slices), axis = axis)
MaxProject = np.amax(SmallImage, axis = axis)
return MaxProject
def multiplot(imageA, imageB, imageC, titleA, titleB, titleC, targetdir = None, File = None, plotTitle = None):
fig, axes = plt.subplots(1, 3, figsize=(15, 6))
ax = axes.ravel()
ax[0].imshow(imageA, cmap=cm.gray)
ax[0].set_title(titleA)
ax[0].set_axis_off()
ax[1].imshow(imageB, cmap=plt.cm.nipy_spectral)
ax[1].set_title(titleB)
ax[1].set_axis_off()
ax[2].imshow(imageC, cmap=plt.cm.nipy_spectral)
ax[2].set_title(titleC)
ax[2].set_axis_off()
plt.tight_layout()
plt.show()
for a in ax:
a.set_axis_off()
def doubleplot(imageA, imageB, titleA, titleB, targetdir = None, File = None, plotTitle = None):
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
ax = axes.ravel()
ax[0].imshow(imageA, cmap=cm.gray)
ax[0].set_title(titleA)
ax[0].set_axis_off()
ax[1].imshow(imageB, cmap=plt.cm.nipy_spectral)
ax[1].set_title(titleB)
ax[1].set_axis_off()
plt.tight_layout()
plt.show()
for a in ax:
a.set_axis_off()
def _check_dtype_supported(ar):
# Should use `issubdtype` for bool below, but there's a bug in numpy 1.7
if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):
raise TypeError("Only bool or integer image types are supported. "
"Got %s." % ar.dtype)
def normalizeFloatZeroOne(x, pmin = 3, pmax = 99.8, axis = None, eps = 1e-20, dtype = np.float32):
"""Percentile based Normalization
Normalize patches of image before feeding into the network
Parameters
----------
x : np array Image patch
pmin : minimum percentile value for normalization
pmax : maximum percentile value for normalization
axis : axis along which the normalization has to be carried out
eps : avoid dividing by zero
dtype: type of numpy array, float 32 default
"""
mi = np.percentile(x, pmin, axis = axis, keepdims = True)
ma = np.percentile(x, pmax, axis = axis, keepdims = True)
return normalizer(x, mi, ma, eps = eps, dtype = dtype)
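# Illustrative usage (values are assumptions): robust percentile normalization of
# a synthetic patch; values above the 99.8th percentile no longer dominate the
# scaling.
#
#   patch = (np.random.rand(128, 128) * 4096).astype(np.float32)
#   normed = normalizeFloatZeroOne(patch, pmin=1, pmax=99.8)
#   # normed values lie approximately in [0, 1]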
# https://docs.python.org/3/library/itertools.html#itertools-recipes
def move_image_axes(x, fr, to, adjust_singletons=False):
"""
x: ndarray
fr,to: axes string (see `axes_dict`)
"""
fr = axes_check_and_normalize(fr, length=x.ndim)
to = axes_check_and_normalize(to)
fr_initial = fr
x_shape_initial = x.shape
adjust_singletons = bool(adjust_singletons)
if adjust_singletons:
# remove axes not present in 'to'
slices = [slice(None) for _ in x.shape]
for i,a in enumerate(fr):
if (a not in to) and (x.shape[i]==1):
# remove singleton axis
slices[i] = 0
fr = fr.replace(a,'')
        x = x[tuple(slices)]
# add dummy axes present in 'to'
for i,a in enumerate(to):
if (a not in fr):
# add singleton axis
x = np.expand_dims(x,-1)
fr += a
if set(fr) != set(to):
_adjusted = '(adjusted to %s and %s) ' % (x.shape, fr) if adjust_singletons else ''
raise ValueError(
'image with shape %s and axes %s %snot compatible with target axes %s.'
% (x_shape_initial, fr_initial, _adjusted, to)
)
ax_from, ax_to = axes_dict(fr), axes_dict(to)
if fr == to:
return x
return np.moveaxis(x, [ax_from[a] for a in fr], [ax_to[a] for a in fr])
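# Illustrative usage (shape is an assumption): reorder a ZYX stack to YXZ.
#
#   stack = np.zeros((10, 256, 256))             # axes 'ZYX'
#   moved = move_image_axes(stack, 'ZYX', 'YXZ')
#   moved.shape                                  # -> (256, 256, 10)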
def consume(iterator):
collections.deque(iterator, maxlen=0)
def _raise(e):
raise e
def compose(*funcs):
return lambda x: reduce(lambda f,g: g(f), funcs, x)
def normalizeZeroOne(x):
x = x.astype('float32')
minVal = np.min(x)
maxVal = np.max(x)
x = ((x-minVal) / (maxVal - minVal + 1.0e-20))
return x
def normalizeZero255(x):
x = x.astype('float32')
minVal = np.min(x)
maxVal = np.max(x)
x = ((x-minVal) / (maxVal - minVal + 1.0e-20))
return x * 255
def normalizer(x, mi , ma, eps = 1e-20, dtype = np.float32):
"""
Number expression evaluation for normalization
Parameters
----------
x : np array of Image patch
mi : minimum input percentile value
ma : maximum input percentile value
eps: avoid dividing by zero
    dtype: type of numpy array, float 32 default
"""
if dtype is not None:
x = x.astype(dtype, copy = False)
        mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype, copy = False)
        ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype, copy = False)
        eps = dtype(eps)
    x = (x - mi) / (ma - mi + eps)
    return x
# wave_functions.py
# Copyright (c) 2013-2019 <NAME>
# See LICENSE for details
# pylint: disable=C0111
# Standard library imports
import copy
import math
import warnings
# PyPI imports
import numpy as np
with warnings.catch_warnings():
from _pytest.warning_types import PytestWarning
warnings.filterwarnings("ignore", category=PytestWarning)
import pytest
from pmisc import AE, AI, RE
# Intra-package imports
import peng
from peng.constants import FP_ATOL, FP_RTOL
from .support import cmp_vectors, std_wobj
###
# Helper functions
###
def barange(bmin, bmax, inc):
vector = np.arange(bmin, bmax + inc, inc)
vector = vector if np.isclose(bmax, vector[-1], FP_RTOL, FP_ATOL) else vector[:-1]
return vector
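# Illustrative check (not part of the original tests): unlike np.arange, barange
# keeps the endpoint when the increment divides the span exactly.
#
#   barange(0.0, 1.0, 0.25)    # -> array([0.  , 0.25, 0.5 , 0.75, 1.  ])
#   np.arange(0.0, 1.0, 0.25)  # -> array([0.  , 0.25, 0.5 , 0.75])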
def fft(dep_vector, npoints):
npoints = int(npoints)
if npoints < dep_vector.size:
vector = copy.copy(dep_vector[:npoints])
elif npoints > dep_vector.size:
vector = copy.copy(np.append(dep_vector, np.zeros(npoints - dep_vector.size)))
else:
vector = copy.copy(dep_vector)
ret_dep_vector = 0j + np.zeros(npoints)
nvector = np.arange(0, npoints)
for k in range(0, npoints):
ret_dep_vector[k] += np.sum(
np.multiply(vector, np.exp(-1j * 2 * math.pi * k * nvector / npoints))
)
return ret_dep_vector
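# Sanity-check sketch (assumption, not part of the original test suite): the
# naive O(N^2) DFT above uses the same sign and normalization convention as
# numpy's FFT, so both should agree for a small random vector.
#
#   v = np.random.rand(16)
#   assert np.allclose(fft(v, 16), np.fft.fft(v, 16))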
def full_fft():
"""FFT of waveform where independent axis is evenly spaced and a power of 2."""
wobj, fsample, finc = fft_wave()
# Compute reference solution
npoints = len(wobj.indep_vector)
ret_indep_vector = barange(-fsample / 2.0, +fsample / 2.0, finc)
ret_dep_vector = fft(wobj.dep_vector, npoints)
return npoints, wobj, ret_indep_vector, ret_dep_vector
def fft_wave():
"""Create waveform for FFT analysis."""
# Evenly spaced data
freqs = [1e3, 2.5e3]
# Capture 4 cycles of slowest sinusoid
tend = 4 / float(min(freqs))
# Get power of 2 number of points
tsample = 1 / (4.0 * max(freqs))
npoints = 2 ** (math.ceil(math.log(tend / tsample, 2)))
fsample = 1 / (tend / (npoints - 1))
tinc = 1 / float(fsample)
indep_vector = barange(0, tend, tinc)
finc = fsample / (npoints - 1)
dep_vector = np.zeros(indep_vector.size)
for freq in freqs:
dep_vector += np.cos(2 * math.pi * freq * indep_vector)
wobj = peng.Waveform(indep_vector, dep_vector, "wobj", "LINEAR", "LINEAR", "sec")
return wobj, fsample, finc
def padded_fft():
"""FFT of padded waveform."""
wobj, _, _ = fft_wave()
npoints = 3 * len(wobj.indep_vector)
tend = wobj.indep_vector[-1]
fsample = 1 / (tend / (npoints - 1))
finc = fsample / (npoints - 1)
ret_indep_vector = barange(-fsample / 2.0, +fsample / 2.0, finc)
ret_dep_vector = fft(wobj.dep_vector, int(npoints))
return npoints, wobj, ret_indep_vector, ret_dep_vector
def padded_ifft():
"""Inverse FFT of padded waveform."""
return np.array(
[
+0.666666666666667 + 0.000000000000000j,
+0.285737489530649 + 0.476869653582089j,
-0.015936365874249 + 0.045644791294382j,
+0.488007491912056 + 0.000000000000000j,
+0.345878195186864 + 0.581036411381697j,
-0.196612727816662 + 0.358585429905346j,
+0.095649904985154 + 0.000000000000000j,
+0.245719006995142 + 0.407555608588782j,
-0.241281786260967 + 0.435954508657145j,
-0.207829933952911 + 0.000000000000000j,
+0.079181848920218 + 0.119104789454885j,
-0.146310168931118 + 0.271458842164858j,
-0.228872176444977 + 0.000000000000000j,
-0.011850730150955 - 0.038568262640418j,
-0.030305831982073 + 0.070533436670771j,
-0.046815544995869 - 0.000000000000000j,
+0.028875171992763 + 0.031971069056580j,
-0.017035673802716 + 0.047548848479650j,
+0.074173644652105 + 0.000000000000000j,
+0.122884890067809 + 0.194800677167786j,
-0.117974777548011 + 0.222380504636967j,
-0.057882725888977 + 0.000000000000000j,
+0.134312498217841 + 0.214593875092629j,
-0.221139275228979 + 0.401066656157726j,
-0.374366875288802 + 0.000000000000000j,
+0.001477627100940 - 0.015482870698707j,
-0.196141728358334 + 0.357769634913184j,
-0.600645911934946 - 0.000000000000000j,
-0.199011485493909 - 0.362740200077384j,
-0.018804678195657 + 0.050612853967036j,
-0.504992239743005 + 0.000000000000000j,
-0.315562220585312 - 0.564611994915193j,
+0.199575724795823 - 0.327633099391573j,
-0.114472447329920 + 0.000000000000000j,
-0.255496699659903 - 0.460575460889293j,
+0.306243399925619 - 0.512386932241631j,
+0.300322955967473 - 0.000000000000000j,
-0.067815050107907 - 0.135501308216900j,
+0.239504419511774 - 0.396791627319508j,
+0.459228956407857 + 0.000000000000000j,
+0.100566497782449 + 0.156144087786288j,
+0.079659353050397 - 0.119931850869178j,
+0.313230873595303 - 0.000000000000000j,
+0.135224295204036 + 0.216173153798907j,
-0.027379823813015 + 0.065465441858601j,
+0.074173644652105 - 0.000000000000000j,
+0.052594605527829 + 0.073054333066068j,
-0.003442167409076 + 0.024004204752853j,
+0.008651163045441 + 0.000000000000000j,
-0.022708661791116 - 0.057374751906286j,
+0.093566379308805 - 0.144019526930936j,
+0.186578307706181 + 0.000000000000000j,
+0.020324257052154 + 0.017160449928245j,
+0.128367169714283 - 0.204296264056780j,
+0.415659867905822 - 0.000000000000000j,
+0.170855279505655 + 0.277887828933000j,
+0.019446769948462 - 0.015640597681664j,
+0.421957081169469 - 0.000000000000000j,
+0.301573440172975 + 0.504298324680751j,
-0.178506610796544 + 0.327224715298713j,
+0.110622317568560 - 0.000000000000000j,
+0.283482917437827 + 0.472964620167994j,
-0.317523097098001 + 0.568008332662539j,
-0.333333333333333 + 0.000000000000000j,
+0.105348171727558 + 0.164426190004446j,
-0.286476236544092 + 0.514233592767663j,
-0.598629809480615 - 0.000000000000000j,
-0.111709703327381 - 0.211529077773645j,
-0.110185192448478 + 0.208888547474695j,
-0.517606986154623 + 0.000000000000000j,
-0.218498262747203 - 0.396492288355865j,
+0.072772506195994 - 0.108003482213407j,
-0.207829933952912 + 0.000000000000000j,
-0.166535132858598 - 0.306489507268501j,
+0.133320489948777 - 0.212875666369082j,
+0.042293868738796 + 0.000000000000000j,
-0.044714646575287 - 0.095490235623059j,
+0.063998431485942 - 0.092806339026192j,
+0.038164381950427 + 0.000000000000000j,
+0.008478811996460 - 0.003356462746482j,
-0.021703296617485 + 0.055633408345397j,
-0.148347289304210 + 0.000000000000000j,
-0.064120194417011 - 0.129101630433633j,
-0.001003762486682 + 0.019780763537840j,
-0.255348147706326 + 0.000000000000000j,
-0.184757642912780 - 0.338051820523779j,
+0.138901732758987 - 0.222542662485744j,
-0.084862081119056 + 0.000000000000000j,
-0.213711831793107 - 0.388201946756452j,
+0.284175415551571 - 0.474164062085144j,
+0.300322955967473 + 0.000000000000000j,
-0.081768641475705 - 0.159669637413981j,
+0.296297182432551 - 0.495159578200508j,
+0.619464687072923 - 0.000000000000000j,
+0.139711287047343 + 0.223944851644661j,
+0.139711287047343 - 0.223944851644662j,
+0.619464687072923 + 0.000000000000000j,
+0.296297182432551 + 0.495159578200508j,
-0.081768641475705 + 0.159669637413981j,
+0.300322955967473 + 0.000000000000000j,
+0.284175415551570 + 0.474164062085143j,
-0.213711831793107 + 0.388201946756451j,
-0.084862081119055 - 0.000000000000000j,
+0.138901732758987 + 0.222542662485744j,
-0.184757642912780 + 0.338051820523779j,
-0.255348147706325 - 0.000000000000000j,
-0.001003762486681 - 0.019780763537839j,
-0.064120194417011 + 0.129101630433633j,
-0.148347289304210 + 0.000000000000000j,
-0.021703296617484 - 0.055633408345397j,
+0.008478811996460 + 0.003356462746483j,
+0.038164381950427 - 0.000000000000000j,
+0.063998431485942 + 0.092806339026191j,
-0.044714646575287 + 0.095490235623059j,
+0.042293868738796 + 0.000000000000000j,
+0.133320489948778 + 0.212875666369083j,
-0.166535132858598 + 0.306489507268501j,
-0.207829933952912 + 0.000000000000000j,
+0.072772506195994 + 0.108003482213406j,
-0.218498262747202 + 0.396492288355864j,
-0.517606986154623 + 0.000000000000000j,
-0.110185192448478 - 0.208888547474694j,
-0.111709703327381 + 0.211529077773645j,
-0.598629809480615 - 0.000000000000000j,
-0.286476236544093 - 0.514233592767664j,
+0.105348171727559 - 0.164426190004447j,
-0.333333333333331 - 0.000000000000000j,
-0.317523097097999 - 0.568008332662537j,
+0.283482917437826 - 0.472964620167993j,
+0.110622317568558 + 0.000000000000000j,
-0.178506610796544 - 0.327224715298713j,
+0.301573440172975 - 0.504298324680750j,
+0.421957081169468 - 0.000000000000000j,
+0.019446769948462 + 0.015640597681663j,
+0.170855279505655 - 0.277887828933000j,
+0.415659867905821 + 0.000000000000000j,
+0.128367169714282 + 0.204296264056778j,
+0.020324257052154 - 0.017160449928246j,
+0.186578307706181 + 0.000000000000000j,
+0.093566379308804 + 0.144019526930935j,
-0.022708661791115 + 0.057374751906285j,
+0.008651163045441 - 0.000000000000000j,
-0.003442167409076 - 0.024004204752854j,
+0.052594605527830 - 0.073054333066069j,
+0.074173644652106 + 0.000000000000000j,
-0.027379823813015 - 0.065465441858602j,
+0.135224295204037 - 0.216173153798908j,
+0.313230873595305 - 0.000000000000000j,
+0.079659353050397 + 0.119931850869179j,
+0.100566497782449 - 0.156144087786288j,
+0.459228956407857 + 0.000000000000000j,
+0.239504419511774 + 0.396791627319508j,
-0.067815050107907 + 0.135501308216900j,
+0.300322955967474 - 0.000000000000000j,
+0.306243399925620 + 0.512386932241633j,
-0.255496699659903 + 0.460575460889294j,
-0.114472447329920 - 0.000000000000000j,
+0.199575724795822 + 0.327633099391572j,
-0.315562220585311 + 0.564611994915193j,
-0.504992239743005 + 0.000000000000000j,
-0.018804678195657 - 0.050612853967036j,
-0.199011485493909 + 0.362740200077383j,
-0.600645911934945 - 0.000000000000000j,
-0.196141728358334 - 0.357769634913183j,
+0.001477627100940 + 0.015482870698707j,
-0.374366875288802 + 0.000000000000000j,
-0.221139275228979 - 0.401066656157725j,
+0.134312498217840 - 0.214593875092628j,
-0.057882725888977 + 0.000000000000000j,
-0.117974777548011 - 0.222380504636967j,
+0.122884890067809 - 0.194800677167786j,
+0.074173644652104 + 0.000000000000000j,
-0.017035673802716 - 0.047548848479650j,
+0.028875171992763 - 0.031971069056579j,
-0.046815544995869 - 0.000000000000000j,
-0.030305831982073 - 0.070533436670771j,
-0.011850730150955 + 0.038568262640418j,
-0.228872176444978 + 0.000000000000000j,
-0.146310168931118 - 0.271458842164858j,
+0.079181848920218 - 0.119104789454885j,
-0.207829933952912 + 0.000000000000000j,
-0.241281786260968 - 0.435954508657147j,
+0.245719006995142 - 0.407555608588783j,
+0.095649904985155 - 0.000000000000000j,
-0.196612727816663 - 0.358585429905346j,
+0.345878195186865 - 0.581036411381699j,
+0.488007491912058 - 0.000000000000000j,
-0.015936365874248 - 0.045644791294380j,
+0.285737489530649 - 0.476869653582088j,
+0.666666666666667 - 0.000000000000000j,
+0.154635505679479 + 0.249794356578788j,
+0.154635505679479 - 0.249794356578787j,
]
)
def strict_compare_waves(
dep_vector=None,
rfunc=None,
rdesc=None,
dep_units=None,
nobj=None,
indep_vector=None,
):
"""Strictly compare waveform objects."""
# pylint: disable=R0913
wobj = rfunc(
std_wobj(dep_name="wobj", indep_vector=indep_vector, dep_vector=dep_vector)
)
ref = std_wobj(
dep_name="{0}(wobj)".format(rdesc),
indep_vector=indep_vector,
dep_vector=(
copy.copy(wobj.dep_vector)
if dep_vector is None
else (nobj if isinstance(nobj, np.ndarray) else nobj(dep_vector))
),
dep_units=dep_units,
)
assert wobj == ref
assert wobj.dep_name == ref.dep_name
def trunc_fft():
"""FFT of truncated waveform."""
# pylint: disable=E1101
wobj, _, _ = fft_wave()
npoints = int(round(wobj.indep_vector.size / 2.0))
tend = wobj.indep_vector[-1]
fsample = 1 / (tend / (npoints - 1))
finc = fsample / (npoints - 1)
ret_indep_vector = barange(-fsample / 2.0, +fsample / 2.0, finc)
ret_dep_vector = fft(wobj.dep_vector, int(npoints))
return npoints, wobj, ret_indep_vector, ret_dep_vector
def trunc_ifft():
"""Return truncated inverse FFT values."""
return np.array(
[
+2.031250000000000 + 0.485562004170310j,
+0.318199714955461 + 1.820624205429553j,
-0.655366529334933 + 0.370897276393319j,
+0.253770933956314 + 0.472192494451151j,
-1.091850625866407 + 1.008883882566930j,
-1.483726719229016 - 1.218334195829139j,
+0.932218867902417 - 1.517542164627016j,
+0.970942620785909 + 0.377363442128093j,
+0.057203489136323 - 0.118012970393690j,
+1.278229603717467 + 0.024011318785796j,
+0.363116952705678 + 1.901659679959610j,
-1.764639428441845 + 0.669238126410875j,
-0.592239801858736 - 1.067929045507627j,
+0.145743145851282 - 0.087430310992305j,
-0.734794443118979 - 0.637320331737055j,
+0.932218867902418 - 1.558125493050408j,
+1.889644061218770 + 0.675774448583172j,
-0.223336243357166 + 1.440169104578383j,
-0.413791867912630 - 0.006765550628069j,
+0.158131606216389 + 0.709580268064139j,
-1.521570958463868 + 0.551739964037835j,
-0.968749999999992 - 1.749225282837794j,
+1.297121243508404 - 1.083512630798513j,
+0.590984923118542 + 0.443632461449772j,
+0.253770933956317 - 0.416986909397531j,
+1.408936869223572 + 0.463176291745721j,
-0.312167341989761 + 1.900930595536623j,
-1.770687735804836 - 0.057830535626995j,
-0.142398177666932 - 1.058366268101016j,
-0.109196634987608 - 0.046753215150384j,
-0.592239801858736 - 1.221105979700343j,
+1.495272475736174 - 1.470194679913401j,
]
)
###
# Test functions
###
def test_acos():
"""Test acos function behavior."""
dep_vector = np.array([0.1, 0.98, 0.5])
strict_compare_waves(dep_vector, peng.acos, "acos", "rad", np.arccos)
@pytest.mark.wave_functions
def test_acos_exceptions():
"""Test acos function exceptions."""
dep_vector = np.array([-1.01, 0.98, 0.5])
wobj_a = std_wobj(dep_name="wobj_a", dep_vector=dep_vector)
dep_vector = np.array([1.01, 0.98, 0.5])
wobj_b = std_wobj(dep_name="wobj_b", dep_vector=dep_vector)
for item in [wobj_a, wobj_b]:
AE(peng.acos, ValueError, "Math domain error", item)
def test_acosh():
"""Test acosh function behavior."""
    dep_vector = np.array([1.1, 1.98, 1.5])
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import LSTM, Dense, Input, Bidirectional, Conv2D, Flatten
from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Dropout, Reshape
from tensorflow.keras.models import Model
import config as cfg
import os
from data import read_sample
from utils import *
import random
import math
import numpy as np
import matplotlib.pyplot as plt
class BaseModel:
def __init__(self):
pass
def train(self):
''' Train the model '''
raise NotImplementedError
def predict(self, input):
''' Predict the activation and onset using the model
Returns:
activation, onset(optional, default=None)
'''
raise NotImplementedError
def evaluate(self):
''' Report the precision/recall/f-measure of the model'''
raise NotImplementedError
def save(self, path):
''' Save the model to the path'''
raise NotImplementedError
def load(self, path):
''' Load the model from the path'''
raise NotImplementedError
class BiLSTM(BaseModel):
def __init__(self):
self.checkpoint_path = "models/Bi-LSTM/{epoch:d}.h5"
self.checkpoint_dir = os.path.dirname(self.checkpoint_path)
self.model = None
def build_model(self, input_shape=(None, cfg.INPUT_SIZE)):
input = Input(shape=input_shape, dtype='float32')
x = Dense(128, activation='relu')(input)
x = Bidirectional(LSTM(128, return_sequences=True))(x)
x = Dense(128, activation='relu')(x)
preds = Dense(cfg.PITCH_NUM, activation='sigmoid')(x)
self.model = Model(input, preds)
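    # Illustrative usage (optimizer and loss are assumptions, not taken from the
    # original training script):
    #
    #   net = BiLSTM()
    #   net.build_model(input_shape=(None, cfg.INPUT_SIZE))
    #   net.model.compile(optimizer='adam', loss='binary_crossentropy')
    #   net.model.summary()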
def data_generator(self, filelist, batch_size=1):
while 1:
random.shuffle(filelist)
num = len(filelist)
cnt = 0
x, y = [], []
for i in range(num):
sample = read_sample(filelist[i])
x.append(sample['Frames'])
y.append(sample['Onset'])
cnt += 1
if cnt == batch_size or i == num-1:
                    # assumed continuation: emit the batch and reset the buffers
                    x = np.array(x)
                    y = np.array(y)
                    yield x, y
                    cnt = 0
                    x, y = [], []
'''
Extract physical features of airfoils or wing sections.
'''
import copy
import os
import numpy as np
from scipy.interpolate import interp1d
class PhysicalSec():
'''
Extracting flow features of a section (features on/near the wall)
'''
_i = 0 # index of the mesh point
_X = 0.0 # location of the feature location
_value = 0.0
#* Dictionary of flow features (identify the index and location)
xf_dict = {
'Cu': ['upper crest', _i, _X], # crest point on upper surface
'Cl': ['lower crest', _i, _X], # crest point on lower surface
'tu': ['upper highest', _i, _X], # highest point on upper surface
'tl': ['lower highest', _i, _X], # lowest point on lower surface
'tm': ['max thickness', _i, _X], # maximum thickness position
'L': ['upper LE', _i, _X], # suction peak near leading edge on upper surface
'T': ['upper TE', _i, _X], # trailing edge upper surface (98% chord length)
        'H': ['upper surface max Ma', _i, _X], # position of upper surface maximum Mach number
'S': ['separation start', _i, _X], # separation start position
'R': ['reattachment', _i, _X], # reattachment position
'Q': ['lower LE', _i, _X], # suction peak near leading edge on lower surface
'M': ['lower surface max Ma', _i, _X], # position of lower surface maximum Mach number
'mUy': ['min(du/dy)', _i, _X], # position of min(du/dy)
'F': ['shock foot', _i, _X], # shock foot position
'1': ['shock front', _i, _X], # shock wave front position
'3': ['shock hind', _i, _X], # position of just downstream the shock
'D': ['dent on plateau', _i, _X], # largest dent on the suction plateau
'U': ['local sonic', _i, _X], # local sonic position
'B': ['1st dent after L', _i, _X], # first dent after suction peak [X_L, X_L+0.1]
# # Note: for weak shock waves, may not reach Mw=1
# # define position of U as Mw minimal extreme point after shock foot
'A': ['maximum Mw after shock', _i, _X], # maximum wall Mach number after shock wave (or equal to 3)
'N': ['new flat boundary', _i, _X], # starting position of new flat boundary
# # most of the time, A == N
'Hi': ['maximum Hi', _i, _X], # position of maximum Hi
'Hc': ['maximum Hc', _i, _X], # position of maximum Hc
'L1U': ['length 1~U', _value], # XU-X1
'L13': ['length 1~3', _value], # X3-X1
'LSR': ['length S~R', _value], # XR-XS
'lSW': ['single shock', _value], # single shock wave flag
'DCp': ['shock strength', _value], # Cp change through shock wave
'Err': ['suc Cp area', _value], # Cp integral of suction plateau fluctuation
'FSp': ['fluctuation suc-plat', _value], # Mw fluctuation of suction plateau
'DMp': ['Mw dent on plateau', _value], # dMw of Mw dent on suction plateau
'CLU': ['upper CL', _value], # CL of upper surface
'CLL': ['lower CL', _value], # CL of lower surface
'CdU': ['upper Cd', _value], # Cdp of upper surface
'CdL': ['lower Cd', _value], # Cdp of lower surface
'CLw': ['windward CL', _value], # CL of windward surfaces (before crest point)
'Cdw': ['windward Cdp', _value], # Cdp of windward surfaces (before crest point)
'CLl': ['leeward CL', _value], # CL of leeward surfaces (behind crest point)
'Cdl': ['leeward Cdp', _value], # Cdp of leeward surfaces (behind crest point)
'kaf': ['slope aft', _value] # average Mw slope of the aft upper surface (3/N~T)
}
def __init__(self, Minf, AoA, Re):
'''
### Inputs:
```text
Minf: Free stream Mach number
AoA: Angle of attack (deg)
Re: Reynolds number per meter
```
'''
self.Minf = Minf
self.AoA = AoA
self.Re = Re
self.xf_dict = copy.deepcopy(PhysicalSec.xf_dict)
def setdata(self, x, y, Cp, Tw, Hi, Hc, dudy):
'''
Set the data of this foil or section.
Data: ndarray, start from lower surface trailing edge
'''
self.x = copy.deepcopy(x)
self.y = copy.deepcopy(y)
self.Cp = copy.deepcopy(Cp)
self.Mw = self.Cp2Mw()
self.Tw = copy.deepcopy(Tw)
self.Hi = copy.deepcopy(Hi)
self.Hc = copy.deepcopy(Hc)
self.dudy = copy.deepcopy(dudy)
iLE = np.argmin(self.x)
self.x -= self.x[iLE]
self.y -= self.y[iLE]
self.x[0] = 1.0
self.x[-1] = 1.0
fmw = interp1d(self.x[iLE:], self.Mw[iLE:], kind='cubic')
fhu = interp1d(self.x[iLE:], self.Hc[iLE:], kind='cubic')
gu = interp1d(self.x[iLE:], self.y [iLE:], kind='cubic')
x_ = np.append(self.x[iLE:0:-1], self.x[0])
y_ = np.append(self.y[iLE:0:-1], self.y[0])
gl = interp1d(x_, y_, kind='cubic')
self.xx = np.arange(0.0, 1.0, 0.001)
self.yu = gu(self.xx)
self.yl = gl(self.xx)
self.mu = fmw(self.xx)
self.hu = fhu(self.xx)
self.iLE = iLE
def set_Mw(self, x, Mw):
'''
Set the Mw distribution of this foil or section.
Data: ndarray, start from lower surface trailing edge
'''
self.x = copy.deepcopy(x)
self.Mw = copy.deepcopy(Mw)
iLE = np.argmin(self.x)
self.iLE = iLE
fmw = interp1d(self.x[iLE:], self.Mw[iLE:], kind='cubic')
self.xx = np.arange(0.0, 1.0, 0.001)
self.mu = fmw(self.xx)
@property
def n_point(self):
'''
Number of points in this section
'''
return self.x.shape[0]
@staticmethod
def IsentropicCp(Ma, Minf: float, g=1.4):
'''
Isentropic flow: Calculate Cp by Mach
### Inputs:
```text
Ma: float, or ndarray
Minf: free stream Mach number
g: γ=1.4, ratio of the specific heats
```
'''
X = (2.0+(g-1.0)*Minf**2)/(2.0+(g-1.0)*Ma**2)
X = X**(g/(g-1.0))
Cp = 2.0/g/Minf**2*(X-1.0)
return Cp
@staticmethod
def toMw(Cp: np.array, Minf: float, n_ref=100, M_max=2.0):
'''
Converting Cp to wall Mach number
'''
Ma_ref = np.linspace(0.0, M_max, n_ref)
Cp_ref = PhysicalSec.IsentropicCp(Ma_ref, Minf)
f = interp1d(Cp_ref, Ma_ref, kind='cubic')
Cp_ = Cp.copy()
Cp_ = np.clip(Cp_, Cp_ref[-1], Cp_ref[0])
return f(Cp_)
def Cp2Mw(self, n_ref=100, M_max=2.0):
'''
Converting Cp to wall Mach number
'''
Mw = PhysicalSec.toMw(self.Cp, self.Minf, n_ref=n_ref, M_max=M_max)
return Mw
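    # Illustrative usage (values assumed): convert a short Cp distribution to
    # wall Mach number at a free-stream Mach number of 0.76.
    #
    #   cp = np.array([-0.8, -0.5, 0.1])
    #   mw = PhysicalSec.toMw(cp, Minf=0.76)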
@staticmethod
def ShapeFactor(sS, VtS, Tw: float, iUe: int, neglect_error=False):
'''
        Calculate shape factor Hi & Hc by mesh points on a line perpendicular to the wall.
### Inputs:
```text
sS: ndarray [nMax], distance of mesh points to wall
VtS: ndarray [nMax], velocity component of mesh points (parallel to the wall)
Tw: wall temperature (K)
iUe: index of mesh point locating the outer velocity Ue
neglect_error: if True, set shape factor to 0 when error occurs
```
### Return:
```text
Hi: incompressible shape factor
Hc: compressible shape factor
```
### Note:
```text
        XR => reference point on the wall; the nMax data points are taken along the wall-normal direction nR starting from XR
        sS => distance of each data point to the wall
        VtS => velocity component of each data point parallel to the wall
        se: distance of boundary layer outer boundary to wall
        ds: 𝛿*, displacement thickness
        tt: θ, momentum loss thickness
        Ue: outer layer velocity component (parallel to the wall)
        Ue: tests show that taking the maximum Ue directly is reasonable; averaging over a range, or using a fixed grid index, works poorly
```
'''
nMax= sS.shape[0]
Ue = VtS[iUe]
se = sS[iUe]
ds = 0.0
tt = 0.0
if iUe>=nMax or iUe<=int(0.2*nMax):
if neglect_error:
return 0.0, 0.0
else:
print()
print('Vts: velocity component of mesh points')
print(VtS)
print()
raise Exception('Error [ShapeFactor]: iUe %d not reasonable (nMax=%d)'%(iUe, nMax))
for i in range(iUe-1):
a1 = Ue-VtS[i]
a2 = Ue-VtS[i+1]
ds += 0.5*(a1+a2)*(sS[i+1]-sS[i])
for i in range(iUe-1):
a1 = VtS[i ]*(Ue-VtS[i ])
a2 = VtS[i+1]*(Ue-VtS[i+1])
tt += 0.5*(a1+a2)*(sS[i+1]-sS[i])
Hi = ds/tt*Ue
Hc = Tw*Hi+Tw-1
return Hi, Hc
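    # Worked check (values assumed): for a linear velocity profile Vt = s*Ue, the
    # displacement and momentum thicknesses integrate to delta/2 and delta/6, so
    # the incompressible shape factor should come out close to 3.
    #
    #   s  = np.linspace(0.0, 1.0, 101)
    #   vt = s.copy()                    # linear profile, Ue = 1 at the edge
    #   Hi, Hc = PhysicalSec.ShapeFactor(s, vt, Tw=1.0, iUe=100)
    #   # Hi ~ 3.0, and Hc = Tw*Hi + Tw - 1 ~ 3.0 for Tw = 1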
@staticmethod
def getHi(X, Y, U, V, T, j0: int, j1: int, nHi: int, neglect_error=False):
'''
Calculate shape factor Hi & Hc from field data
### Inputs:
```text
Field data: ndarray (nj,nk), X, Y, U, V, T
j0: j index of the lower surface TE
j1: j index of the upper surface TE
nHi: maximum number of mesh points in k direction for boundary layer
neglect_error: if True, set shape factor to 0 when error occurs
```
### Return:
```text
Hi, Hc: ndarray (j1-j0)
info: tuple of ndarray (Tw, dudy)
```
### Note:
```text
Tw: wall temperature
dudy: du/dy
iUe: index of mesh point locating the outer velocity Ue
XR: reference position on the wall
```
        ### Field data (j,k) index
```text
j: 1 - nj from far field of lower surface TE to far field of upper surface TE
j: j0 - j1 from lower surface TE to upper surface TE
        k: 1 - nk from surface to far field (assuming perpendicular to the wall)
```
'''
iLE = int(0.5*(j0+j1))
nj = X.shape[0]
nk = X.shape[1]
nn = j1-j0
Hi = np.zeros(nn)
Hc = np.zeros(nn)
Tw = np.zeros(nn)
dudy = np.zeros(nn)
#* Locate boundary layer edge index iUe & calculate du/dy
sS = np.zeros([nn,nHi])
VtS = np.zeros([nn,nHi])
iUe = np.zeros(nn, dtype=int)
for j in range(nn):
jj = j0+j
XR = np.array([X[jj,0], Y[jj,0]])
tR = np.array([X[jj+1,0]-X[jj-1,0], Y[jj+1,0]-Y[jj-1,0]])
tR = tR/np.linalg.norm(tR)
if tR[0]<0.0:
tR = -tR
for i in range(nHi-1):
XS = np.array([X[jj,i+1], Y[jj,i+1]])
VS = np.array([U[jj,i+1], V[jj,i+1]])
sS [j,i+1] = np.linalg.norm(XR-XS)
VtS[j,i+1] = np.dot(tR,VS)
iUe[j] = np.argmax(np.abs(VtS[j,:]))
dudy[j] = VtS[j,1]/sS[j,1]
Tw[j] = T[jj,0]
#* Smooth iUe at shock wave foot
nspan = 4
for j in range(nn-2*nspan):
jj = j+nspan
r1 = 0.5*(iUe[jj-nspan]+iUe[jj+nspan])
r2 = abs(iUe[jj+nspan]-iUe[jj-nspan])
r3 = abs(iUe[jj]-iUe[jj-nspan]) + abs(iUe[jj]-iUe[jj+nspan])
if r3>r2:
iUe[jj] = int(r1)
#* Calculate Hi & Hc
for j in range(nn):
Hi[j], Hc[j] = PhysicalSec.ShapeFactor(sS[j,:], VtS[j,:],
Tw[j], iUe[j], neglect_error=neglect_error)
#* Limit leading edge Hi
r1 = 1.0
r2 = 1.0
r3 = 1.0
r4 = 1.0
for j in range(nn):
jj = j0+j
if (X[jj,0]-0.05)*(X[jj+1,0]-0.05)<=0.0 and jj<iLE:
r1 = Hi[j]
r3 = Hc[j]
if (X[jj,0]-0.05)*(X[jj+1,0]-0.05)<=0.0 and jj>=iLE:
r2 = Hi[j]
r4 = Hc[j]
for j in range(nn):
jj = j0+j
if X[jj,0]<0.05 and jj<iLE:
Hi[j] = r1
Hc[j] = r3
if X[jj,0]<0.05 and jj>=iLE:
Hi[j] = r2
Hc[j] = r4
return Hi, Hc, (Tw, dudy)
def getValue(self, feature: str, key='key') -> float:
'''
Get value of given feature.
### Inputs:
```text
feature: key of feature dictionary
key: 'i', 'X', 'Cp', 'Mw', 'Tw', 'Hi', 'Hc', 'dudy'
```
'''
if not feature in PhysicalSec.xf_dict.keys():
print(' Warning: feature [%s] not valid'%(feature))
return 0.0
aa = self.xf_dict[feature]
if len(aa)==2:
return aa[1]
if key == 'i':
return aa[1]
if key == 'X':
return aa[2]
if key == 'Cp':
yy = self.Cp
elif key == 'Mw':
yy = self.Mw
elif key == 'Tw':
yy = self.Tw
elif key == 'Hi':
yy = self.Hi
elif key == 'Hc':
yy = self.Hc
elif key == 'dudy':
yy = self.dudy
else:
raise Exception(' key %s not valid'%(key))
ii = aa[1]
xx = aa[2]
if xx <= 1e-6:
return 0.0
if ii >= self.iLE:
i0 = max(self.iLE, ii-4)
i1 = i0 + 7
else:
i1 = min(self.iLE, ii+4)
i0 = i1 - 7
X = self.x[i0:i1]
Y = yy[i0:i1]
f = interp1d(X, Y, kind='cubic')
return f(xx)
#TODO: locate the position of flow features
def locate_basic(self, dMwcri_L=1.0):
'''
Locate the index and position of basic flow features.
        ### Get value of: L, T, H, Q, M
'''
X = self.x
M = self.Mw
nn = X.shape[0]
iLE = self.iLE
#TODO: Basic features
#* L => suction peak near leading edge on upper surface
# 1: maximum extreme point
# 2: dMw/dx = 1
i_L = 0
for i in range(int(0.25*nn)):
ii = i + iLE
if X[ii] > 0.2:
break
if M[ii-1]<=M[ii] and M[ii]>=M[ii+1]:
i_L = ii
break
if i_L == 0:
dMw2 = 0.0
for i in range(int(0.25*nn)):
ii = i + iLE+1
dMw1 = dMw2
dMw2 = (M[ii+1]-M[ii])/(X[ii+1]-X[ii])
if dMw1>=dMwcri_L and dMw2<dMwcri_L:
i_L = ii
break
self.xf_dict['L'][1] = i_L
self.xf_dict['L'][2] = X[i_L]
#* T => trailing edge upper surface (98% chord length)
for i in range(int(0.2*nn)):
ii = nn-i-1
if X[ii]<=0.98 and X[ii+1]>0.98:
self.xf_dict['T'][1] = ii
self.xf_dict['T'][2] = 0.98
break
#* H => position of upper surface maximum Mach number
i_H = 0
max1 = -1.0
for i in np.arange(iLE, nn-2, 1):
if M[i-1]<=M[i] and M[i+1]<=M[i] and M[i]>max1:
max1 = M[i]
i_H = i
self.xf_dict['H'][1] = i_H
self.xf_dict['H'][2] = X[i_H]
#* Q => suction peak near leading edge on lower surface
for i in range(int(0.2*nn)):
ii = iLE - i
if M[ii-1]<=M[ii] and M[ii]>=M[ii+1]:
self.xf_dict['Q'][1] = ii
self.xf_dict['Q'][2] = X[ii]
break
#* M => position of lower surface maximum Mach number
i_M = 0
max1 = -1.0
for i in np.arange(1, iLE, 1):
if M[i-1]<=M[i] and M[i+1]<=M[i] and M[i]>max1:
max1 = M[i]
i_M = i
self.xf_dict['M'][1] = i_M
self.xf_dict['M'][2] = X[i_M]
def locate_sep(self):
'''
Locate the index and position of flow features about du/dy.
### Get value of: S, R, mUy
'''
X = self.x
dudy = self.dudy
nn = X.shape[0]
iLE = self.iLE
#* S => separation start position
#* R => reattachment position
#* mUy => position of min(du/dy)
min_Uy = 1e6
i_S = 0
for i in range(int(0.5*nn)):
ii = iLE + i
if X[ii]<0.02:
continue
if X[ii]>0.98:
break
if dudy[ii]>=0.0 and dudy[ii+1]<0.0 and i_S==0:
i_S = ii
self.xf_dict['S'][1] = ii
self.xf_dict['S'][2] = (0.0-dudy[ii])*(X[ii+1]-X[ii])/(dudy[ii+1]-dudy[ii])+X[ii]
if dudy[ii]<=0.0 and dudy[ii+1]>0.0 and i_S!=0:
self.xf_dict['R'][1] = ii
self.xf_dict['R'][2] = (0.0-dudy[ii])*(X[ii+1]-X[ii])/(dudy[ii+1]-dudy[ii])+X[ii]
if dudy[ii]<min_Uy and dudy[ii-1]>=dudy[ii] and dudy[ii+1]>=dudy[ii]:
min_Uy = dudy[ii]
self.xf_dict['mUy'][1] = ii
self.xf_dict['mUy'][2] = X[ii]
def locate_geo(self):
'''
Locate the index and position of geometry related flow features.\n
### Get value of: Cu, Cl, tu, tl, tm
'''
X = self.x
xx = self.xx
yu = self.yu
yl = self.yl
iLE = self.iLE
n0 = xx.shape[0]
#* tm => maximum thickness
#* tu => highest point on upper surface
#* tl => lowest point on lower surface
x_max = xx[np.argmax(yu-yl)]
x_mu = xx[np.argmax(yu)]
x_ml = xx[np.argmin(yl)]
self.xf_dict['tm'][1] = np.argmin(np.abs(X[iLE:]-x_max)) + iLE
self.xf_dict['tm'][2] = x_max
self.xf_dict['tu'][1] = np.argmin(np.abs(X[iLE:]-x_mu )) + iLE
self.xf_dict['tu'][2] = x_mu
self.xf_dict['tl'][1] = np.argmin(np.abs(X[:iLE]-x_ml ))
self.xf_dict['tl'][2] = x_ml
#* Cu => crest point on upper surface
aa = self.AoA/180.0*np.pi
x0 = np.array([0.0, 0.0])
x1 = np.array([np.cos(aa), np.sin(aa)])
ds = np.zeros(n0)
for i in range(n0):
xt = np.array([xx[i], yu[i]])
if xx[i] > 0.9:
continue
ds[i], _ = ratio_vec(x0, x1, xt)
ii = np.argmax(ds)
self.xf_dict['Cu'][1] = np.argmin(np.abs(X[iLE:]-xx[ii])) + iLE
self.xf_dict['Cu'][2] = xx[ii]
#* Cl => crest point on lower surface
ds = np.zeros(n0)
for i in range(n0):
if xx[i] > 0.9:
continue
xt = np.array([xx[i], yl[i]])
ds[i], _ = ratio_vec(x0, x1, xt)
ii = np.argmax(ds)
self.xf_dict['Cl'][1] = np.argmin(np.abs(X[:iLE]-xx[ii]))
self.xf_dict['Cl'][2] = xx[ii]
def locate_shock(self, dMwcri_1=-1.0, info=False):
'''
Locate the index and position of shock wave related flow features.
### Get value of: 1, 3, F, U, D, A, B
### Inputs:
```text
dMwcri_1: critical value locating shock wave front
```
'''
X = self.x # [n]
xx = self.xx # [1000]
mu = self.mu # [1000]
nn = xx.shape[0]
iLE = self.iLE
dMw = np.zeros(nn)
for i in range(nn-1):
if xx[i]<=0.02:
continue
if xx[i]>=0.98:
continue
dMw[i] = (mu[i+1]-mu[i])/(xx[i+1]-xx[i])
dMw[i] = min(dMw[i], 2)
d2Mw = np.zeros(nn)
for i in range(nn-1):
if xx[i]<0.02 or xx[i]>0.95:
continue
#d2Mw[i] = (dMw[i+2]+dMw[i+1]-dMw[i]-dMw[i-1])/2/(xx[i+1]-xx[i-1])
#d2Mw[i] = (dMw[i+1]-dMw[i-1])/(xx[i+1]-xx[i-1])
d2Mw[i] = (0.5*dMw[i+7]+0.5*dMw[i+4]+2*dMw[i+1]-
2*dMw[i]-0.5*dMw[i-3]-0.5*dMw[i-6])/4.5/(xx[i+1]-xx[i-1])
#* Check shock and shock properties
flag, i_F, i_1, i_U, i_3 = PhysicalSec.check_singleshock(xx, mu, dMw, d2Mw, dMwcri_1, info=info)
self.xf_dict['lSW'][1] = flag
if not flag==1:
return 0
#* F => shock foot position
self.xf_dict['F'][1] = np.argmin(np.abs(X[iLE:]-xx[i_F])) + iLE
self.xf_dict['F'][2] = xx[i_F]
#* 1 => shock wave front position
self.xf_dict['1'][1] = np.argmin(np.abs(X[iLE:]-xx[i_1])) + iLE
self.xf_dict['1'][2] = xx[i_1]
#* 3 => position of just downstream the shock
self.xf_dict['3'][1] = np.argmin(np.abs(X[iLE:]-xx[i_3])) + iLE
self.xf_dict['3'][2] = xx[i_3]
#* U => local sonic position
self.xf_dict['U'][1] = np.argmin(np.abs(X[iLE:]-xx[i_U])) + iLE
self.xf_dict['U'][2] = xx[i_U]
#* D => dent on the suction plateau
# maximum (linear Mw - actual Mw) between L and 1
x_1 = self.xf_dict['1'][2]
x_L = max(self.xf_dict['L'][2], 0.05)
m_1 = self.getValue('1','Mw')
m_L = self.getValue('L','Mw')
lL1 = x_1-x_L
i_D = 0
min_D = 0.0
for i in np.arange(2, i_1-1, 1):
if xx[i]<x_L:
continue
tt = (xx[i]-x_L)/lL1
ss = (1-tt)*m_L + tt*m_1
dM = ss - mu[i]
if dM > min_D:
i_D = i
min_D = dM
if i_D==0:
self.xf_dict['D'][1] = self.xf_dict['L'][1]
self.xf_dict['D'][2] = self.xf_dict['L'][2]
else:
self.xf_dict['D'][1] = np.argmin(np.abs(X[iLE:]-xx[i_D])) + iLE
self.xf_dict['D'][2] = xx[i_D]
#* B => first dent after suction peak [X_L, X_L+0.1]
# minimum Mw between L and L+0.1
x_L = self.xf_dict['L'][2]
i_B = 0
for i in np.arange(2, i_1-1, 1):
if xx[i]<x_L or xx[i]>x_L+0.1:
continue
if mu[i-1]>=mu[i] and mu[i]<=mu[i+1] and i_B==0:
i_B = i
if i_B == 0:
self.xf_dict['B'][1] = self.xf_dict['L'][1]
self.xf_dict['B'][2] = self.xf_dict['L'][2]
else:
self.xf_dict['B'][1] = np.argmin(np.abs(X[iLE:]-xx[i_B])) + iLE
self.xf_dict['B'][2] = xx[i_B]
#* A => maximum Mw after shock
# Find the maximum position of Mw in range [x_3, 0.9]
i_A = 0
max_A = 0.0
for i in np.arange(i_3, nn-1, 1):
if xx[i]>0.9:
break
if mu[i]>max_A:
i_A = i
max_A = mu[i]
elif mu[i]>=mu[i_3]*0.8 and mu[i]>mu[i-1] and mu[i]>mu[i+1]:
i_A = i
x_A = xx[i_A]
self.xf_dict['A'][1] = np.argmin(np.abs(X[iLE:]-x_A)) + iLE
self.xf_dict['A'][2] = x_A
return i_1
def locate_BL(self, i_1):
'''
Locate the index and position of boundary layer related flow features. \n
        i_1: index of the shock wave front position in self.xx
### Get value of: N, Hi, Hc
'''
X = self.x
xx = self.xx
hu = self.hu
nn = xx.shape[0]
iLE = self.iLE
#* Hi, Hc => position of maximum Hi, Hc after shock wave front
# For cases when shock wave is weak, and Hc just keeps growing, set 0
i_H = 0
max1 = 0.0
for i in np.arange(i_1, nn-2, 1):
if xx[i] > 0.95:
break
if hu[i-1]<=hu[i] and hu[i+1]<=hu[i] and hu[i]>max1:
max1 = hu[i]
i_H = i
x_H = xx[i_H]
self.xf_dict['Hc'][1] = np.argmin(np.abs(X[iLE:]-x_H)) + iLE
self.xf_dict['Hc'][2] = x_H
self.xf_dict['Hi'][1] = self.xf_dict['Hc'][1]
self.xf_dict['Hi'][2] = x_H
#* N => starting position of new flat boundary
# i.e., position of minimum Hc after shock wave front
# For cases when shock wave is weak, and Hc just keeps growing, set 0
i_N = 0
min1 = 1000.0
for i in np.arange(i_1, nn-1, 1):
if hu[i-1]>=hu[i] and hu[i+1]<=hu[i] and hu[i]<min1:
min1 = hu[i]
i_N = i
x_N = xx[i_N]
self.xf_dict['N'][1] = np.argmin(np.abs(X[iLE:]-x_N)) + iLE
self.xf_dict['N'][2] = x_N
@staticmethod
def shock_property(xu, mu, dMw, d2Mw, dMwcri_1):
'''
>>> i_F, i_1, i_U, i_3 = shock_property(xu, mu, dMw, d2Mw, dMwcri_1)
### Return:
```text
Index of xu for: F, 1, U, 3
```
'''
nn = xu.shape[0]
#* F => shock foot position
i_F = np.argmin(dMw)
x_F = xu[i_F]
#* 1 => shock wave front position
# Find the kink position of dMw in range [x_F-0.2, x_F], defined as dMw = -1
i_1 = 0
i_cri = 0
i_md2 = 0
for i in np.arange(i_F, 1, -1):
# 1. Within the range of [x_F-0.2, x_F]
if xu[i]<x_F-0.2:
break
# 2. Locate dMw = dMwcri_1 (tend to go too much upstream)
if dMw[i]>=dMwcri_1 and dMw[i+1]<dMwcri_1 and i_cri==0:
i_cri = i
# 3. Locate min d2Mw/dx2 (tend to go too much downstream)
if d2Mw[i]<=d2Mw[i-1] and d2Mw[i]>d2Mw[i+1] and i_md2==0:
i_md2 = i
if i_md2-i_cri > 2*(i_F-i_md2):
i_1 = i_md2
elif 2*(i_md2-i_cri) < i_F-i_md2:
i_1 = i_cri
else:
i_1 = int(0.5*(i_cri+i_md2))
'''
print(i_cri, i_md2, i_F, xu[i_cri], xu[i_md2], dMw[i_md2], dMw[i_F])
import matplotlib.pyplot as plt
plt.plot(xu, mu, 'b')
plt.plot(xu, d2Mw/1000, 'r')
plt.plot([xu[i_cri], xu[i_md2]], [mu[i_cri], mu[i_md2]], 'bo')
plt.plot([xu[i_1]], [mu[i_1]], 'ro')
plt.show()
'''
#* 3 => position of just downstream the shock
# Find the first flat position of Mw in range [x_F, x_F+0.2], defined as dMw = 0 or -1
i_3 = 0
i_cri = 0
i_md2 = 0
i_flat = 0
for i in np.arange(i_F, nn-1, 1):
# 1. Within the range of [x_F, x_F+0.2]
if xu[i]>x_F+0.2:
break
# 2. Locate dMw = dMwcri_1 (tend to go too much downstream)
if dMw[i]<=dMwcri_1 and dMw[i+1]>dMwcri_1 and i_cri==0:
i_cri = i
# 3. Locate min d2Mw/dx2 (tend to go too much upstream)
if d2Mw[i]<=d2Mw[i-1] and d2Mw[i]>d2Mw[i+1] and i_md2==0:
i_md2 = i
# 4. Locate the first flat position of Mw
if dMw[i]<=0.0 and dMw[i+1]>0.0:
i_flat = i
if i_flat!=0 and i_flat-i_F < 2*(i_cri-i_F):
i_3 = i_flat
elif i_cri-i_md2 > 2*(i_md2-i_F):
i_3 = i_md2
elif 2*(i_cri-i_md2) < i_md2-i_F:
i_3 = i_cri
else:
i_3 = int(0.5*(i_cri+i_md2))
'''
print('F %3d %.2f'%(i_F, xu[i_F]))
print('d2Mw %3d %.2f'%(i_md2, xu[i_md2]))
print('cri %3d %.2f'%(i_cri, xu[i_cri]))
print('dMw=0 %3d %.2f'%(i_flat,xu[i_flat]))
print('3 %3d %.2f'%(i_3, xu[i_3]))
print()
'''
#* U => local sonic position
i_U = 0
for i in np.arange(i_1, i_3, 1):
if mu[i]>=1.0 and mu[i+1]<1.0:
i_U = i
break
#* Neglect small Mw bump near leading edge
if xu[i_1]<0.1 and mu[i_1]<1.10:
i_1=0; i_U=0; i_3=0
return i_F, i_1, i_U, i_3
@staticmethod
def check_singleshock(xu, mu, dMw, d2Mw, dMwcri_1, info=False):
'''
        Check whether there is a single shock wave or not
>>> flag, i_F, i_1, i_U, i_3 = check_singleshock(xu, mu, dMw, d2Mw, dMwcri_1)
### Inputs:
```text
xu: ndarray, x location
mu: ndarray, wall Mach number of upper surface
dMw: ndarray, slope of wall Mach number
dMwcri_1: critical value locating shock wave front
```
### flag:
```text
1: single shock wave
0: shockless
-1: multiple shock waves
```
'''
#* Get 1st shock
i_F, i_1, i_U, i_3 = PhysicalSec.shock_property(xu, mu, dMw, d2Mw, dMwcri_1)
d_F = dMw[i_F]
#* Check if shockless
# Check if Mw jump exists and M1>1.0
if d_F>dMwcri_1 or mu[i_1]<1.0 or i_1==0:
if info:
print(' Shockless: XF=%.2f MF=%.2f dM/dX=%.2f'%(xu[i_F], mu[i_F], d_F))
return 0, 0, 0, 0, 0
#* Check if 2nd shock wave exists
# Remove first shock
dm = dMw.copy()
d2m = d2Mw.copy()
nn = xu.shape[0]
for i in np.arange(i_F, nn, 1, dtype=int):
if dm[i]<=0.0:
dm[i]=0.0
d2m[i]=0.0
else:
break
for i in np.arange(i_F, 0, -1, dtype=int):
if dm[i]<=0.0:
dm[i]=0.0
d2m[i]=0.0
else:
break
# Locate second shock
dMwcri_F = max(dMwcri_1, 0.5*d_F)
_iF, _i1, _iU, _i3 = PhysicalSec.shock_property(xu, mu, dm, d2m, dMwcri_1)
if dm[_iF]<dMwcri_F and _i1!=0 and _i3!=0:
# Locate sharp change of Mw
if mu[_i1]>1.0 and mu[_i3]<1.05:
# Check supersonic wave front and 'subsonic' wave hind
if info:
print(' Second shock: X1=%.2f M1=%.2f M2=%.2f'%(xu[_i1], mu[_i1], mu[_i3]))
return -1, 0, 0, 0, 0
return 1, i_F, i_1, i_U, i_3
def aux_features(self):
'''
Calculate auxiliary features based on basic, geo, and shock features.
### Get value of: Length, lSW, DCp, Err, DMp, FSp, kaf,
### CLU, CLL, CLw, Cdw, CLl, Cdl
'''
X = self.x
Y = self.y
x1 = self.xf_dict['1'][2]
n0 = len(X)
self.xf_dict['L1U'][1] = self.xf_dict['U'][2] - x1
self.xf_dict['L13'][1] = self.xf_dict['3'][2] - x1
self.xf_dict['LSR'][1] = self.xf_dict['R'][2] - self.xf_dict['S'][2]
self.xf_dict['DCp'][1] = self.getValue('3','Cp') - self.getValue('1','Cp')
cosA = np.cos(self.AoA/180.0*np.pi)
sinA = np.sin(self.AoA/180.0*np.pi)
#* Err => Cp integral of suction plateau fluctuation
#* DMp => Mw dent on suction plateau
#* FSp => Mw fluctuation of suction plateau
# If can not find suction peak, err = 0, DMp = 0.0, FSp = 0.0
Err = 0.0
DMp = 0.0
FSp = 0.0
iL = self.xf_dict['L'][1]
if iL!=0:
i1 = self.xf_dict['1'][1]
xL = self.xf_dict['L'][2]
Cp0 = np.array([xL, self.getValue('L','Cp')])
Cp1 = np.array([x1, self.getValue('1','Cp')])
Mw0 = self.getValue('L','Mw')
Mw1 = self.getValue('1','Mw')
lL1 = x1-xL
bump_ = 0.0
dent_ = 0.0
for i in np.arange(iL, i1, 1):
vec = np.array([X[i], self.Cp[i]])
s, _ = ratio_vec(Cp0, Cp1, vec)
Err += s*(X[i+1]-X[i])
tt = (X[i]-xL)/lL1
ss = (1-tt)*Mw0 + tt*Mw1
DMp = max(DMp, ss-self.Mw[i])
local_avg_mw = (self.Mw[i-2]+self.Mw[i]+self.Mw[i+2])/3.0
if self.Mw[i-4]>=local_avg_mw and local_avg_mw<=self.Mw[i+4] and dent_<=0.0:
if bump_>0.0:
FSp += bump_ - local_avg_mw
dent_ = local_avg_mw
bump_ = 0.0
elif self.Mw[i-4]<=local_avg_mw and local_avg_mw>=self.Mw[i+4] and bump_<=0.0:
if dent_>0.0:
FSp += local_avg_mw - dent_
bump_ = local_avg_mw
dent_ = 0.0
self.xf_dict['Err'][1] = abs(Err)*cosA
self.xf_dict['DMp'][1] = DMp
self.xf_dict['FSp'][1] = FSp
#* kaf => average Mw slope of the aft upper surface (3/N~T)
xN = self.xf_dict['N'][2]
mN = self.getValue('N','Mw')
xT = self.xf_dict['T'][2]
mT = self.getValue('T','Mw')
if xN < 0.1:
xN = self.xf_dict['3'][2]
mN = self.getValue('3','Mw')
self.xf_dict['kaf'][1] = (mT-mN)/(xT-xN)
#* CLU => CL of upper surface
# wall vector = [dx,dy]
# outward wall vector = [-dy,dx]
# outward pressure force vector = Cp*[dy,-dx]
PFy = 0.0 # y direction pressure force
PFx = 0.0 # x direction pressure force
for i in np.arange(self.iLE, n0-1, 1):
Cp_ = 0.5*(self.Cp[i]+self.Cp[i+1])
PFx += Cp_*(Y[i+1]-Y[i])
PFy += Cp_*(X[i]-X[i+1])
self.xf_dict['CLU'][1] = PFy*cosA - PFx*sinA
self.xf_dict['CdU'][1] = PFy*sinA + PFx*cosA
PFx = 0.0; PFy = 0.0
        # lower surface contribution (assumed to mirror the upper-surface loop above)
        for i in np.arange(0, self.iLE, 1):
            Cp_ = 0.5*(self.Cp[i]+self.Cp[i+1])
            PFx += Cp_*(Y[i+1]-Y[i])
            PFy += Cp_*(X[i]-X[i+1])
        self.xf_dict['CLL'][1] = PFy*cosA - PFx*sinA
        self.xf_dict['CdL'][1] = PFy*sinA + PFx*cosA
#!/usr/bin/python
import os, sys
import h5py
import tifffile as tiff
from PIL.TiffTags import TAGS
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, measure, morphology, segmentation, transform
from PIL import Image
import napari
def raw_parameters(rawFilePath):
'''Obtain raw image parameters: zSpacing, xResolution and yResolution from TIFF'''
rawImg = tiff.TiffFile(rawFilePath);
try:
zSpacing = rawImg.imagej_metadata['spacing'];
except Exception as e:
zSpacing = 1;
    if rawImg.imagej_metadata['unit'] == 'micron':
        measurementsInMicrons = True;
    else:
        measurementsInMicrons = False;
rawImg = Image.open(rawFilePath)
if TAGS[282] == 'XResolution':
xResolution = 1/rawImg.tag_v2[282];
if TAGS[283] == 'YResolution':
yResolution = 1/rawImg.tag_v2[283];
if measurementsInMicrons:
#To nanometers
zSpacing=zSpacing*1000;
xResolution=xResolution*1000;
yResolution=yResolution*1000;
return zSpacing, xResolution, yResolution
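# Illustrative usage (file name is an assumption):
#
#   zSpacing, xRes, yRes = raw_parameters('embryo_raw.tif')
#   # values are returned in nanometers when the TIFF metadata is in microns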
def load_raw_seg_images(rawFilePath, segFilePath, resize_img=True):
'''Read segmented image to remove first biggest label and obtain list of cell properties'''
#If TIFF
#rawImg = io.imread(rawFilePath);
#segmentedImg = io.imread(segFilePath)
#If HDF5
rawImgh5 = h5py.File(rawFilePath, 'r')
rawImg = np.array(rawImgh5.get('raw'))
segmentedImgh5 = h5py.File(segFilePath, 'r')
segmentedImg = np.array(segmentedImgh5.get('segmentation'))
segmentedImg = segmentedImg - 1;
if resize_img == True:
rawImg = transform.resize(rawImg, (rawImg.shape[0], 512, 512),
order=0, preserve_range=True, anti_aliasing=False).astype(np.uint32)
segmentedImg = transform.resize(segmentedImg, (segmentedImg.shape[0], 512, 512),
order=0, preserve_range=True, anti_aliasing=False).astype(np.uint32)
#uniqueIds=np.unique(segmentedImg)
#maxId = uniqueIds.max()
props = measure.regionprops(segmentedImg)
return rawImg, segmentedImg, props
def area_array(props):
'''Obtain array of cell areas from props'''
a = np.zeros((len(props),2))
for i in range(len(props)):
a[i,1] = props[i].area
a[i,0] = props[i].label
return a.astype(np.uint16)
def calculate_cell_heights(props, hist=False, bins=30):
'''Obtain array of cell heights (in slices) from props'''
cell_heights = np.zeros((len(props),2))
for i in range(len(props)):
cell_heights[i,0] = props[i].label
cell_heights[i,1] = props[i].bbox[3]-props[i].bbox[0]
#Plot histogram
if hist == True:
        plt.hist(cell_heights[:,1], bins=bins)
plt.xlabel('Z height (in slices)')
plt.ylabel('Frequency density')
plt.show()
return cell_heights
def remove_background(segmentedImg, props):
'''Remove background IDs'''
backgroundIds = np.zeros((0,1))
xdim = segmentedImg.shape[1]
ydim = segmentedImg.shape[2]
    #Background IDs are identified when their bounding box spans the full image in x and y
for i in range(len(props)):
if (props[i].bbox[4] == xdim) and (props[i].bbox[5] == ydim):
backgroundIds= np.append(backgroundIds, props[i].label)
for i in range(len(backgroundIds)):
Id = backgroundIds[i]
segmentedImg[segmentedImg==Id] = 0
new_props = measure.regionprops(segmentedImg)
segmentedImg = segmentedImg + 1
return segmentedImg, new_props
def threshold_segments(segmentedImg, props, lower_percentile, higher_percentile):
'''Remove cells outside specified min and max percentile'''
cell_heights = calculate_cell_heights(props)
smallThreshold = np.percentile(cell_heights[:,1], lower_percentile)
bigThreshold = np.percentile(cell_heights[:,1], higher_percentile)
IdsThresholded = np.empty((0,2))
IdsToRemove = np.empty((0,1))
for i in range(len(props)):
if cell_heights[i,1] > smallThreshold and cell_heights[i,1] < bigThreshold:
            IdsThresholded = np.append(IdsThresholded, np.array([cell_heights[i,:]]), axis=0)
import pandas as pd
import numpy as np
import networkx as nx
from random import randint
from tqdm import tqdm
class MultibindDriver(object):
def __init__(self, multibind):
if not type(multibind.states) is None and not type(multibind.graph) is None:
self.multibind = multibind
else:
raise ValueError(
"Multibind driver must be passed a Multibind object that has states and a graph file loaded.")
def create_tensor(self, pH_array):
num_states = self.multibind.states.name.shape[0]
self.tensor = np.zeros((num_states, num_states, len(pH_array)))
for i, p in enumerate(pH_array):
self.multibind.build_cycle(pH=p)
self.multibind.MLE()
for j in range(self.tensor.shape[1]):
self.tensor[j, :, i] = self.multibind.g_mle - self.multibind.g_mle[j]
class Multibind(object):
def __init__(self, states_filename=None, graph_filename=None):
# If states are specified in a CSV, may as well fill them in
# here. The same goes for the graph information
if states_filename:
self.read_states(states_filename)
else:
self.states = None
if graph_filename:
self.read_graph(graph_filename)
else:
self.graph = None
self.cycle = None
self.concentrations = {}
def build_cycle(self, pH=5):
"""Constructs the cycle used for calculation"""
# Determine if we have enough information to continue,
# ie states information and graph information
if type(self.states) is None or type(self.graph) is None:
msg = "Need to specify both the state and graph \
information. Try using `read_states` and `read_graph`."
raise RuntimeError(msg)
# Select all ligands that are not H+ and check if their concentrations
# have been defined in the concentrations dictionary
ligands = np.array(self.graph.ligand[(self.graph.ligand != "helm") & (self.graph.ligand != "H+") & (
self.graph.ligand != "h+")])
ligand_map = [x in self.concentrations.keys() for x in ligands]
# if there are undefined ligand concentrations, raise an error and
# warn the user
if not all(ligand_map):
missing_ligands = ligands[[not i for i in ligand_map]]
msg = "Missing ligand concentrations for: {0}\n".format(" ".join(missing_ligands))
msg += "Set them using the `concentrations` attribute"
raise RuntimeError(msg)
G = nx.DiGraph()
# All states are defined in the states dataframe, use them for nodes
G.add_nodes_from(self.states.name)
# iterate through all connections
for i in self.graph.index:
# unpack for readability
state1, state2, value, variance, ligand, standard_state = self.graph.iloc[i]
# if we have protons, it must be a pKa
if ligand.lower() == "h+":
energy = np.log(10) * (pH - value)
var = np.log(10) ** 2 * variance
# using a direct helmholtz free energy
elif ligand == "helm":
energy = value
var = variance
# dealing with binding energies
else:
energy = value - np.log(self.concentrations[ligand] / standard_state)
var = variance # already in kT!
# add a forward and direction energy
G.add_edge(state1, state2, energy=energy, weight=var)
G.add_edge(state2, state1, energy=-energy, weight=var)
self.cycle = G
def MLE(self):
"""Performs a maximum likelihood estimation on the current cycle"""
from scipy.optimize import root
N = len(self.states.name)
def kd(i, j):
return int(i == j)
def grad_log_likelihood(g_t):
"""Returns the gradient of the log likelihood function.
g_t : array of theoretical values for g
"""
# state vector [g1, g2, g3, ... , gn-1, gn]
state_vector = np.zeros(N)
# factor that will be added to one node and subtracted from another
def alphaij(gj, gi, deltaij, varij):
return ((gj - gi) - deltaij) / varij
# indices of state vector
# Iterate over all connections
for r in self.graph.index:
state1, state2, value, variance, ligand, standard_state = self.graph.iloc[r]
i = self.states[self.states.name == state1].index[0]
j = self.states[self.states.name == state2].index[0]
gj = g_t[j]
gi = g_t[i]
edge_attr = self.cycle.edges()[(state1, state2)]
deltaij = edge_attr['energy'] # measured difference
varij = edge_attr['weight'] # measured variance
shared_alpha = alphaij(gj, gi, deltaij, varij)
state_vector[i] += shared_alpha
state_vector[j] -= shared_alpha
return state_vector
def jacobian(g_t):
# g_t here is not used deliberately as it is actually not needed except to avoid throwing an error
J = np.zeros((N, N))
for n in range(N): # component of f
for m in range(N): # derivative with g_m
for k in self.graph.index: # sum over ij
state1, state2, value, variance, ligand, standard_state = self.graph.iloc[k]
i = self.states[self.states.name == state1].index[0]
j = self.states[self.states.name == state2].index[0]
kdelta_factor = kd(n, j) * kd(m, i) - kd(n, j) * kd(m, j) - kd(n, i) * kd(m, i) + kd(n, i) * kd(
m, j)
J[n, m] += 1 / variance * kdelta_factor
return J
# use dijkstra_path to get the initial guess
self.initial_guess = np.zeros(N)
for i in range(1, N):
edge_energies = nx.get_edge_attributes(self.cycle, 'energy')
# edge_var = nx.get_edge_attributes(self.cycle, 'weight')
path = nx.dijkstra_path(self.cycle, self.states.name[0], self.states.name[i])
linked = [(path[j], path[j + 1]) for j, _ in enumerate(path[:-1])]
self.initial_guess[i] = sum([edge_energies[x] for x in linked])
self.MLE_res = root(grad_log_likelihood, self.initial_guess, jac=jacobian)
self.g_mle = self.MLE_res.x - self.MLE_res.x[0]
self.mle_linear_distortion = self.g_mle - (self.initial_guess - self.initial_guess[0])
self.prob_mle = pd.DataFrame(np.exp(-self.g_mle) / np.sum(np.exp(-self.g_mle)), columns=["probability"])
self.prob_mle["name"] = self.states.name
return self.MLE_res
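    # Illustrative usage (file names and ligand concentration are assumptions):
    #
    #   mb = Multibind(states_filename='states.csv', graph_filename='graph.csv')
    #   mb.concentrations['Na+'] = 0.150        # molar
    #   mb.build_cycle(pH=7.4)
    #   mb.MLE()
    #   print(mb.prob_mle)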
def MLE_dist(self, N_steps=int(1e6), nt=1):
"""Run Monte-Carlo steps to assess quality of MLE results.
"""
def potential(g_t):
potential = 0
# factor that will be added to one node and subtracted from another
# indices of state vector
# Iterate over all connections
for r in self.graph.index:
state1, state2, value, variance, ligand, standard_state = self.graph.iloc[r]
i = self.states[self.states.name == state1].index[0]
j = self.states[self.states.name == state2].index[0]
gj = g_t[j]
gi = g_t[i]
edge_attr = self.cycle.edges()[(state1, state2)]
deltaij = edge_attr['energy'] # measured difference
varij = edge_attr['weight'] # measured variance
potential += - 1 / (2 * varij) * ((gj - gi) - deltaij) ** 2
return potential
def accept(ns, cs):
potential_ns = potential(ns)
potential_cs = potential(cs)
diff = potential_cs - potential_ns
prob = min([1, np.exp(-50 * diff)])
return np.random.random_sample() <= prob
def compute(self, N_steps=N_steps):
current_state = self.g_mle.copy()
new_state = current_state.copy()
step = 1
accepted = 0
rejected = 0
Nstates = len(new_state)
dist = np.zeros((N_steps - 1, Nstates))
pbar = tqdm(total=N_steps, position=0)
while step < N_steps:
# select random state to mutate
state = randint(0, Nstates - 1)
# mutate state
disp = np.random.normal(0, 0.01)
new_state[state] = new_state[state] + disp
# accept/reject change
if accept(new_state, current_state):
current_state = new_state.copy()
dist[step - 1] = current_state[:]
pbar.update(1)
step += 1
accepted += 1
else:
new_state = current_state.copy()
rejected += 1
pbar.close()
print("Accepted: ", accepted)
print("Rejected: ", rejected)
return dist
return compute(self)
def effective_energy_difference(self, macrostate_class, state1, state2):
"""Calculate the effective binding energy between two states.
Parameters
==========
macrostate_class : name of macrostate class (i.e. number of protons)
state1 : first, 'starting' state
state2 : second, 'destination' state
Returns
=======
float : binding free energy in kT
"""
macrostate_class = str(macrostate_class)
microstates_1_indices = self.states[self.states[macrostate_class] == state1].index
microstates_2_indices = self.states[self.states[macrostate_class] == state2].index
        energies_1 = np.array([self.g_mle[i] for i in microstates_1_indices])
import numpy as np
import onnx
# pylint: disable=no-member
def create_initializer_tensor(name: str, tensor_array: np.ndarray,
data_type: onnx.TensorProto = onnx.TensorProto.FLOAT) -> onnx.TensorProto:
initializer_tensor = onnx.helper.make_tensor(
name=name,
data_type=data_type,
dims=tensor_array.shape,
vals=tensor_array.flatten().tolist())
return initializer_tensor
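# Illustrative usage (shape and name are assumptions): wrap a small weight array
# as an ONNX initializer tensor.
#
#   w = np.ones((8, 3, 3, 3), dtype=np.float32)
#   w_init = create_initializer_tensor("W", w, onnx.TensorProto.FLOAT)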
class ONNXReferenceModel:
def __init__(self, onnx_model, graph_path):
self.onnx_model = onnx_model
self.path_ref_graph = graph_path
class LinearModel(ONNXReferenceModel):
def __init__(self):
model_input_name = "X"
X = onnx.helper.make_tensor_value_info(model_input_name,
onnx.TensorProto.FLOAT,
[1, 3, 32, 32])
model_output_name = "Y"
model_output_channels = 10
Y = onnx.helper.make_tensor_value_info(model_output_name,
onnx.TensorProto.FLOAT,
[1, model_output_channels, 1, 1])
conv1_output_node_name = "Conv1_Y"
conv1_in_channels, conv1_out_channels, conv1_kernel_shape = 3, 32, (3, 3)
conv1_W = np.ones(shape=(conv1_out_channels, conv1_in_channels, *conv1_kernel_shape)).astype(np.float32)
conv1_B = np.ones(shape=conv1_out_channels).astype(np.float32)
conv1_W_initializer_tensor_name = "Conv1_W"
conv1_W_initializer_tensor = create_initializer_tensor(
name=conv1_W_initializer_tensor_name,
tensor_array=conv1_W,
data_type=onnx.TensorProto.FLOAT)
conv1_B_initializer_tensor_name = "Conv1_B"
conv1_B_initializer_tensor = create_initializer_tensor(
name=conv1_B_initializer_tensor_name,
tensor_array=conv1_B,
data_type=onnx.TensorProto.FLOAT)
conv1_node = onnx.helper.make_node(
name="Conv1",
op_type="Conv",
inputs=[
model_input_name, conv1_W_initializer_tensor_name,
conv1_B_initializer_tensor_name
],
outputs=[conv1_output_node_name],
kernel_shape=conv1_kernel_shape,
)
bn1_output_node_name = "BN1_Y"
bn1_scale = np.random.randn(conv1_out_channels).astype(np.float32)
bn1_bias = np.random.randn(conv1_out_channels).astype(np.float32)
        bn1_mean = np.random.randn(conv1_out_channels).astype(np.float32)
from types import SimpleNamespace
import json
import scipy.optimize as optimize
import numpy as np
import noisyopt as nopt
from .misc import check_continue, term_name_key
from .molecule.molecule import Molecule
from .qm.qm_base import HessianOutput
from .molecule.terms import Terms
from .molecule.baseterms import TermABC
def nllsqfuncfloat(params: np.ndarray, qm: HessianOutput, qm_hessian: np.ndarray,
mol: Molecule, loss: list[float]=None) -> float:
"""Function to be minimized by regular non-linear optimizers.
Keyword arguments
-----------------
params : np.ndarray[float](sum of n_params for each term to be fit,)
stores all parameters for the terms
qm : HessianOutput
output from QM hessian file read
qm_hessian : np.ndarray[float]((3*n_atoms)(3*n_atoms + 1)/2,)
the flattened 1D QM hessian
mol : Molecule
the Molecule object
loss : list[float] (default None)
the list to keep track of the loss function over the optimization process
Returns
-------
the loss (float)
"""
    return 0.5 * np.sum(nllsqfunc(params, qm, qm_hessian, mol, loss)**2)
def nllsqfunc(params: np.ndarray, qm: HessianOutput, qm_hessian: np.ndarray, mol: Molecule,
loss: list[float]=None) -> np.ndarray:
"""Residual function for non-linear least-squares optimization based on the difference of MD
and QM hessians.
Keyword arguments
-----------------
params : np.ndarray[float](sum of n_params for each term to be fit,)
stores all parameters for the terms
qm : HessianOutput
output from QM hessian file read
qm_hessian : np.ndarray[float]((3*n_atoms)(3*n_atoms + 1)/2,)
the flattened 1D QM hessian
mol : Molecule
the Molecule object
loss : list[float] (default None)
the list to keep track of the loss function over the optimization process
Returns
-------
The np.ndarray[float]((3*n_atoms)(3*n_atoms + 1)/2,) of residuals
"""
hessian = []
non_fit = []
# print("Calculating the MD hessian matrix elements...")
full_md_hessian = calc_hessian_nl(qm.coords, mol, params)
# print("Fitting the MD hessian parameters to QM hessian values")
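    # Walk the lower triangle of the (3N x 3N) hessian: symmetrise each element, then split
    # it into the per-term contributions to be fitted (hes[:-1]) and the fixed, non-fitted
    # contribution (hes[-1]).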
for i in range(mol.topo.n_atoms * 3):
for j in range(i + 1):
hes = (full_md_hessian[i, j] + full_md_hessian[j, i]) / 2
hessian.append(hes[:-1])
non_fit.append(hes[-1])
hessian = np.array(hessian)
    agg_hessian = np.sum(hessian, axis=1)
# import standard packages
import time
import warnings
import numpy as np
from sklearn.neighbors import KDTree
from scipy.spatial.distance import cdist
from scipy.spatial.distance import pdist, squareform
# only for kmeans++
from sklearn.cluster import kmeans_plusplus
from sklearn.utils.extmath import row_norms
from sklearn.utils import check_random_state
# import package gurobi
try:
import gurobipy as gurobi
except ImportError:
warnings.warn('Gurobi is not available. Please set the argument '
'no_solver=True when calling gb21_mh(...).')
def gb21_mh(X, Q, q, p, t_total,
n_start, g_initial, init, n_target, l, t_local,
mip_gap_global=0.01, mip_gap_local=0.01,
np_seed=1, gurobi_seed=1,
no_local=False, no_solver=False):
# initialize timeStart
timeStart = time.time()
# set numpy random seed
np.random.seed(np_seed)
# setup nodes collection
nodes = np.empty(X.shape[0], dtype=NodeClass)
for idx in np.arange(X.shape[0]):
nodes[idx] = NodeClass(idx, X[idx], Q[idx], q[idx])
# setup inst
inst = InstanceClass(X.shape[0], p, X.shape[1],
np.arange(X.shape[0]), nodes, X)
# initialize help variables
initial_ofv = float('inf')
start_counter = 0
feas_solution_found = False
while start_counter < n_start and time.time() - timeStart < t_total:
# incease start_counter by 1
start_counter += 1
# initialize g
g = min(g_initial, inst.p)
# get initial medians with kmeans++
curr_solution = SolutionClass(inst)
curr_solution.medians = kmeans_pp(inst)
for median in curr_solution.medians:
curr_solution.assignments[median] = median
        # initialize curr_ofv
curr_ofv = float('inf')
# initialize help variables
iteration = 0
feasible_assignment_found = False
# set init="capacity-based" and no_local=True
if no_solver:
init = "capacity-based"
no_local = True
# initialize heuristic_assignment (global optimization phase)
if init == "capacity-based":
heuristic_assignment = True
else:
heuristic_assignment = False
# start global optimization phase
while time.time() - timeStart < t_total:
# increase iteration by 1
iteration += 1
# initialize new solution
new_solution = SolutionClass(inst)
new_solution.medians = np.array(curr_solution.medians)
for median in new_solution.medians:
new_solution.assignments[median] = median
# capacity-based initialization method
if heuristic_assignment:
# setup kdtree
tree = KDTree(inst.X[new_solution.medians], metric='euclidean')
dist, ind = tree.query(inst.X, k=g)
# determine order of assignment based on regret value
regret_values = dist[:, 1] - dist[:, 0]
regret_values[new_solution.medians] = 0
assignment_order = np.argsort(regret_values)[::-1]
                # iteratively assign nodes to medians
assigned_demand = dict.fromkeys(new_solution.medians, 0)
for median in new_solution.medians:
assigned_demand[median] = inst.nodes[median].q
for node in assignment_order:
time_limit_reached = False
if time.time() - timeStart < t_total:
if node not in new_solution.medians:
capacity_exceeded = True
for next_median in ind[node]:
median = new_solution.medians[next_median]
if assigned_demand[median] + \
inst.nodes[node].q <= inst.nodes[median].Q:
assigned_demand[median] += \
inst.nodes[node].q
new_solution.assignments[node] = median
capacity_exceeded = False
break
if capacity_exceeded:
break
else:
time_limit_reached = True
break
if time_limit_reached:
break
# if capacity_exceeded
if capacity_exceeded:
if g < inst.p:
g = min(g*2, inst.p)
feasible_assignment_found = False
else:
if not feas_solution_found and \
start_counter == n_start:
start_counter += -1
break
else:
feasible_assignment_found = True
# assign nodes using mip
else:
# setup and solve mip
model = setup_mip_assignment(g, t_total, mip_gap_global,
gurobi_seed, inst, new_solution,
timeStart)
model.optimize()
# if model is infeasible
if model.status == 3:
if g < inst.p:
g = min(g*2, inst.p)
feasible_assignment_found = False
else:
if not feas_solution_found and \
start_counter == n_start:
start_counter += -1
break
elif model.SolCount == 0: # if no solution has been found
break
else:
feasible_assignment_found = True
for var in model.getVars():
if var.X > 0.5:
var_name = var.VarName
indices_str = var_name[
var_name.find('[')+1:var_name.find(']')]
i, j = indices_str.split(',')
new_solution.assignments[int(i)] = int(j)
# if feasible assignment found
if feasible_assignment_found:
# feasible solution found
feas_solution_found = True
# recalculate medians
medians_changed = np.zeros(inst.p, dtype=bool)
for k in np.arange(inst.p):
# identify nodes assigned to median k
nodes_in = np.where(
new_solution.assignments ==
curr_solution.medians[k])[0]
# exact median-update step
if inst.n/inst.p <= 10000:
# calculate distances and argsort
dist_in = squareform(pdist(inst.X[nodes_in]))
dist_sum = dist_in.sum(axis=0)
dist_argsort = np.argsort(dist_sum)
# approximate median-update step
else:
# calculate center of gravity
mean_pos = inst.X[nodes_in].sum(
axis=0)/nodes_in.shape[0]
# setup kdtree
tree = KDTree(inst.X[nodes_in], metric='euclidean')
ind = tree.query(
mean_pos.reshape(1, -1), k=nodes_in.shape[0],
return_distance=False)
dist_argsort = ind[0]
# calculate total demand assigned to median k
demand_in = sum([node.q for node in inst.nodes[nodes_in]])
# find new median with sufficient capacity
counter = 0
while demand_in > inst.nodes[
nodes_in[dist_argsort[counter]]].Q:
counter += 1
median = nodes_in[dist_argsort[counter]]
if median != curr_solution.medians[k]:
medians_changed[k] = True
new_solution.medians[k] = median
# update indices of assignments to new medians
for k in np.arange(inst.p):
if medians_changed[k]:
nodes_in = np.where(
new_solution.assignments ==
curr_solution.medians[k])[0]
new_solution.assignments[nodes_in] = \
new_solution.medians[k]
# if improvement has been found
new_ofv = get_ofv(inst, new_solution)
if new_ofv + 0.1 < curr_ofv:
curr_ofv = new_ofv
curr_solution.assignments = \
np.array(new_solution.assignments)
curr_solution.medians = np.array(new_solution.medians)
# reset number of closest medians for assignment
g = min(g_initial, inst.p)
else:
if heuristic_assignment:
# reset number of closest medians for assignment
g = min(g_initial, inst.p)
heuristic_assignment = False
# return solution if no_solver=True
if no_solver:
break
else:
break
# store best solution
if curr_ofv < initial_ofv:
initial_ofv = curr_ofv
initial_solution = SolutionClass(inst)
initial_solution.medians = np.array(curr_solution.medians)
initial_solution.assignments = np.array(curr_solution.assignments)
# log to console
print('{:*^60}'.format(' Global optimization phase '))
print('Final objective: ' + '{: .4f}'.format(initial_ofv))
print('Running time (total): ' +
'{:.2f}s'.format(time.time() - timeStart))
print('{:*^60}'.format(''))
# end if t_total is exceeded
if time.time() - timeStart > t_total or no_local == True:
return initial_solution.medians, initial_solution.assignments
# initialize best_solution
best_ofv = initial_ofv
best_solution = SolutionClass(inst)
best_solution.medians = np.array(initial_solution.medians)
best_solution.assignments = np.array(initial_solution.assignments)
# initialize number of free medians
w = min(max(int(np.ceil(n_target*inst.p/inst.n)), 2), inst.p)
# initialize help variables
iteration = 0
full_model_flag = False
tabu_list = np.array([], dtype=int)
# start local optimization phase
while time.time() - timeStart < t_total:
# increase iteration by 1
iteration += 1
# select subset of medians
subset_medians = get_subset_of_medians(inst, best_solution,
tabu_list, w)
subset_medians_pos = np.where(np.isin(
best_solution.medians, subset_medians))[0]
subset_nodes = np.array([node.idx for node in inst.nodes
if best_solution.assignments[node.idx]
in subset_medians])
# setup and solve
model = setup_mip_improvement(inst, best_solution, subset_medians,
subset_nodes, timeStart, t_total,
t_local, l, mip_gap_local, gurobi_seed)
model.optimize()
# if full model has been solved (break after evaluation of solution)
if w == inst.p:
full_model_flag = True
# if improvement has been found
if model.objVal + 0.1 < get_ofv(
inst, best_solution, subset_medians):
median_counter = 0
for var in model.getVars():
if var.X > 0.5:
var_name = var.VarName
indices_str = \
var_name[var_name.find('[')+1:var_name.find(']')]
i, j = indices_str.split(',')
if i == j:
median_pos = subset_medians_pos[median_counter]
best_solution.medians[median_pos] = int(j)
median_counter += 1
best_solution.assignments[int(i)] = int(j)
best_ofv = get_ofv(inst, best_solution)
# update tabu_list
tabu_list = np.setdiff1d(tabu_list, subset_medians)
# if no improvement has been found
else:
# update tabu_list
tabu_list = np.union1d(tabu_list, subset_medians)
if np.setdiff1d(best_solution.medians, tabu_list).shape[0] == 0:
tabu_list = np.array([], dtype=int)
w = min(inst.p, w*2)
# break if full model has been solved
if full_model_flag:
break
# log to console
print('{:*^60}'.format(' Local optimization phase '))
print('Final objective: ' + '{:.4f}'.format(best_ofv))
print('Running time (total): ' + '{:.2f}s'.format(time.time() - timeStart))
print('{:*^60}'.format(''))
return best_solution.medians, best_solution.assignments
def kmeans_pp(inst):
random_state = check_random_state(None)
x_squared_norms = row_norms(inst.X, squared=True)
centers, indices = kmeans_plusplus(inst.X, inst.p,
random_state=random_state,
x_squared_norms=x_squared_norms)
return indices
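    # The kmeans++ seeding above returns the indices of `inst.p` well-spread nodes,
    # which are used as the initial medians for the global optimization phase.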
def setup_mip_assignment(g, t_total, mip_gap_global, gurobi_seed,
inst, solution, timeStart):
# setup kdtree
tree = KDTree(inst.X[solution.medians], metric='euclidean')
dist, ind = tree.query(inst.X, k=g)
# transform dist and ind to dicts (incl. keys)
ind = dict(zip(inst.I, solution.medians[ind]))
dist = dict(zip(inst.I, dist))
for node in inst.I:
dist[node] = dict(zip(ind[node], dist[node]))
# setup sets
    I = np.setdiff1d(inst.I, solution.medians)
"""
This file contains various utils for creating molecular embeddings and for
decoding synthetic trees.
"""
import numpy as np
import rdkit
from tqdm import tqdm
import torch
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from sklearn.neighbors import BallTree
from dgl.nn.pytorch.glob import AvgPooling
from dgl.nn.pytorch.glob import AvgPooling
from dgllife.model import load_pretrained
from dgllife.utils import mol_to_bigraph, PretrainAtomFeaturizer, PretrainBondFeaturizer
from tdc.chem_utils import MolConvert
from syn_net.models.mlp import MLP
from syn_net.utils.data_utils import SyntheticTree
# create a random seed for NumPy
np.random.seed(6)
# get a GIN pretrained model to use for creating molecular embeddings
model_type = 'gin_supervised_contextpred'
device = 'cpu'
gin_pretrained_model = load_pretrained(model_type).to(device) # used to learn embedding
gin_pretrained_model.eval()
# general functions
def can_react(state, rxns):
"""
Determines if two molecules can react using any of the input reactions.
Args:
state (np.ndarray): The current state in the synthetic tree.
rxns (list of Reaction objects): Contains available reaction templates.
Returns:
np.ndarray: The sum of the reaction mask tells us how many reactions are
viable for the two molecules.
np.ndarray: The reaction mask, which masks out reactions which are not
viable for the two molecules.
"""
mol1 = state.pop()
mol2 = state.pop()
reaction_mask = [int(rxn.run_reaction([mol1, mol2]) is not None) for rxn in rxns]
return sum(reaction_mask), reaction_mask
def get_action_mask(state, rxns):
"""
Determines which actions can apply to a given state in the synthetic tree
and returns a mask for which actions can apply.
Args:
state (np.ndarray): The current state in the synthetic tree.
rxns (list of Reaction objects): Contains available reaction templates.
Raises:
ValueError: There is an issue with the input state.
Returns:
np.ndarray: The action mask. Masks out unviable actions from the current
state using 0s, with 1s at the positions corresponding to viable
actions.
"""
# Action: (Add: 0, Expand: 1, Merge: 2, End: 3)
if len(state) == 0:
return np.array([1, 0, 0, 0])
elif len(state) == 1:
return np.array([1, 1, 0, 1])
elif len(state) == 2:
can_react_, _ = can_react(state, rxns)
if can_react_:
return np.array([0, 1, 1, 0])
else:
return np.array([0, 1, 0, 0])
else:
raise ValueError('Problem with state.')
def get_reaction_mask(smi, rxns):
"""
Determines which reaction templates can apply to the input molecule.
Args:
smi (str): The SMILES string corresponding to the molecule in question.
rxns (list of Reaction objects): Contains available reaction templates.
Raises:
ValueError: There is an issue with the reactants in the reaction.
Returns:
reaction_mask (list of ints, or None): The reaction template mask. Masks
out reaction templates which are not viable for the input molecule.
If there are no viable reaction templates identified, is simply None.
available_list (list of lists, or None): Contains available reactants if
at least one viable reaction template is identified. Else is simply
None.
"""
# Return all available reaction templates
# List of available building blocks if 2
# Exclude the case of len(available_list) == 0
reaction_mask = [int(rxn.is_reactant(smi)) for rxn in rxns]
if sum(reaction_mask) == 0:
return None, None
available_list = []
mol = rdkit.Chem.MolFromSmiles(smi)
for i, rxn in enumerate(rxns):
if reaction_mask[i] and rxn.num_reactant == 2:
if rxn.is_reactant_first(mol):
available_list.append(rxn.available_reactants[1])
elif rxn.is_reactant_second(mol):
available_list.append(rxn.available_reactants[0])
else:
raise ValueError('Check the reactants')
if len(available_list[-1]) == 0:
reaction_mask[i] = 0
else:
available_list.append([])
return reaction_mask, available_list
def graph_construction_and_featurization(smiles):
"""
Constructs graphs from SMILES and featurizes them.
Args:
smiles (list of str): Contains SMILES of molecules to embed.
Returns:
graphs (list of DGLGraph): List of graphs constructed and featurized.
success (list of bool): Indicators for whether the SMILES string can be
parsed by RDKit.
"""
graphs = []
success = []
for smi in tqdm(smiles):
try:
mol = Chem.MolFromSmiles(smi)
if mol is None:
success.append(False)
continue
g = mol_to_bigraph(mol, add_self_loop=True,
node_featurizer=PretrainAtomFeaturizer(),
edge_featurizer=PretrainBondFeaturizer(),
canonical_atom_order=False)
graphs.append(g)
success.append(True)
except:
success.append(False)
return graphs, success
def one_hot_encoder(dim, space):
"""
Create a one-hot encoded vector of length=`space`, with a non-zero element
at the index given by `dim`.
Args:
dim (int): Non-zero bit in one-hot vector.
space (int): Length of one-hot encoded vector.
Returns:
vec (np.ndarray): One-hot encoded vector.
"""
vec = np.zeros((1, space))
vec[0, dim] = 1
return vec
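# Example (illustrative): one_hot_encoder(2, 5) returns array([[0., 0., 1., 0., 0.]]).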
def mol_embedding(smi, device='cpu', readout=AvgPooling()):
"""
Constructs a graph embedding using the GIN network for an input SMILES.
Args:
smi (str): A SMILES string.
device (str): Indicates the device to run on ('cpu' or 'cuda:0'). Default 'cpu'.
Returns:
np.ndarray: Either a zeros array or the graph embedding.
"""
# get the embedding
if smi is None:
return np.zeros(300)
else:
mol = Chem.MolFromSmiles(smi)
# convert RDKit.Mol into featurized bi-directed DGLGraph
g = mol_to_bigraph(mol, add_self_loop=True,
node_featurizer=PretrainAtomFeaturizer(),
edge_featurizer=PretrainBondFeaturizer(),
canonical_atom_order=False)
bg = g.to(device)
nfeats = [bg.ndata.pop('atomic_number').to(device),
bg.ndata.pop('chirality_type').to(device)]
efeats = [bg.edata.pop('bond_type').to(device),
bg.edata.pop('bond_direction_type').to(device)]
with torch.no_grad():
node_repr = gin_pretrained_model(bg, nfeats, efeats)
return readout(bg, node_repr).detach().cpu().numpy().reshape(-1, ).tolist()
def get_mol_embedding(smi, model, device='cpu', readout=AvgPooling()):
"""
Computes the molecular graph embedding for the input SMILES.
Args:
smi (str): SMILES of molecule to embed.
model (dgllife.model, optional): Pre-trained NN model to use for
computing the embedding.
device (str, optional): Indicates the device to run on. Defaults to 'cpu'.
readout (dgl.nn.pytorch.glob, optional): Readout function to use for
computing the graph embedding. Defaults to readout.
Returns:
torch.Tensor: Learned embedding for the input molecule.
"""
mol = Chem.MolFromSmiles(smi)
g = mol_to_bigraph(mol, add_self_loop=True,
node_featurizer=PretrainAtomFeaturizer(),
edge_featurizer=PretrainBondFeaturizer(),
canonical_atom_order=False)
bg = g.to(device)
nfeats = [bg.ndata.pop('atomic_number').to(device),
bg.ndata.pop('chirality_type').to(device)]
efeats = [bg.edata.pop('bond_type').to(device),
bg.edata.pop('bond_direction_type').to(device)]
with torch.no_grad():
node_repr = model(bg, nfeats, efeats)
return readout(bg, node_repr).detach().cpu().numpy()[0]
def mol_fp(smi, _radius=2, _nBits=4096):
"""
Computes the Morgan fingerprint for the input SMILES.
Args:
smi (str): SMILES for molecule to compute fingerprint for.
_radius (int, optional): Fingerprint radius to use. Defaults to 2.
        _nBits (int, optional): Length of fingerprint. Defaults to 4096.
Returns:
features (np.ndarray): For valid SMILES, this is the fingerprint.
Otherwise, if the input SMILES is bad, this will be a zero vector.
"""
if smi is None:
return np.zeros(_nBits)
else:
mol = Chem.MolFromSmiles(smi)
features_vec = AllChem.GetMorganFingerprintAsBitVect(mol, _radius, _nBits)
return np.array(features_vec)
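# Example (illustrative): mol_fp("CCO") returns a (4096,)-shaped 0/1 array holding the
# radius-2 Morgan fingerprint bits of ethanol.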
def cosine_distance(v1, v2, eps=1e-15):
"""
    Computes the cosine distance (i.e. 1 - cosine similarity) between two vectors.
Args:
v1 (np.ndarray): First vector.
v2 (np.ndarray): Second vector.
eps (float, optional): Small value, for numerical stability. Defaults
to 1e-15.
Returns:
        float: The cosine distance.
"""
return (1 - np.dot(v1, v2)
            / (np.linalg.norm(v1, ord=2) * np.linalg.norm(v2, ord=2) + eps))
import copy
import numpy as np
import george
from george.kernels import ExpSquaredKernel, Matern52Kernel, \
ExpKernel, RationalQuadraticKernel, Matern32Kernel
import scipy.optimize as op
#Assert statements to guarantee the linter doesn't complain
assert ExpSquaredKernel
assert Matern52Kernel
assert ExpKernel
assert Matern32Kernel
assert RationalQuadraticKernel
class _pmesh_emulator(object):
"""
An emulator for particle mesh simulations. The emulator is trained
on a set of input power spectra at given locations in cosmological
parameter space. The power spectra are evaluated over a set of
redshifts and wavenumbers (h/Mpc com.).
Args:
parameters (array-like): locations in parameter space
of the input power spectra.
redshifts (float array-like): list of redshifts.
Can be a single number.
k (array-like): wavenumbers of the input power spectra.
power_spectra (array-like): 2D array of power spectra
evaluated at each location in parameter space.
"""
def __init__(self, parameters, redshifts, k, power_spectra,
number_of_principle_components=6, kernel=None):
parameters = np.asarray(parameters)
redshifts = np.asarray(redshifts)
k = np.asarray(k)
power_spectra = np.asarray(power_spectra)
if parameters.ndim != 2:
raise Exception("Parameters must be 2D array.")
if power_spectra.ndim != 2:
raise Exception("Power spectra must be a 2D array of dimensions "+
"N_parameters x (N_k*N_z).")
if len(parameters) != len(power_spectra):
raise Exception("Power spectra must be a 2D array of dimensions "+
"N_parameters x (N_k*N_z).")
if len(redshifts)*len(k) != len(power_spectra[0]):
raise Exception("Power spectra must be a 2D array of dimensions "+
"N_parameters x (N_k*N_z).")
self.parameters = parameters
self.redshifts = redshifts
self.k = k
self.power_spectra = power_spectra
self.Npars = len(self.parameters[0])
self.NPC = number_of_principle_components
metric_guess = np.std(self.parameters, 0)
if kernel is None:
kernel = 1.*ExpSquaredKernel(metric=metric_guess, ndim=self.Npars)
self.kernel = kernel
def train(self):
"""Train the emulator.
Args:
None
Return:
None
"""
zs = self.redshifts
k = self.k
p = self.power_spectra
k2p = copy.deepcopy(p)
Nk = len(k)
Nz = len(zs)
#Multiply each P(k) by k^2, but note the shapes
#of the power spectra array we have to deal with
for i in range(Nz):
lo = i*Nk
hi = (i+1)*Nk
k2p[:, lo:hi] *= k**2
#Take the log -- this reduces the dynamic range
lnk2p = np.log(k2p)
#Remove the mean and make it unit variance in each k bin
lnk2p_mean = np.mean(lnk2p)
lnk2p_std = np.std(lnk2p, 0)
lnk2p = (lnk2p - lnk2p_mean)/lnk2p_std
#Save what we have now
self.lnk2p = lnk2p
self.lnk2p_mean = lnk2p_mean
self.lnk2p_std = lnk2p_std
        #Do SVD to pull out principal components
u,s,v = np.linalg.svd(lnk2p, 0) #Do the PCA
s = np.diag(s)
N = len(s)
P = np.dot(v.T, s)/np.sqrt(N)
        Npc = self.NPC #number of principal components
phis = P.T[:Npc]
ws = np.sqrt(N) * u.T[:Npc]
#Save the weights and PCs
self.ws = ws
self.phis = phis
#Create the GPs and save them
gplist = []
for i in range(Npc):
ws = self.ws[i, :]
kern = copy.deepcopy(self.kernel)
gp = george.GP(kernel=kern, fit_kernel=True, mean=np.mean(ws))
gp.compute(self.parameters)
gplist.append(gp)
continue
self.gplist = gplist
#Train the GPs
for i, gp in enumerate(self.gplist):
ws = self.ws[i, :]
def nll(p):
gp.set_parameter_vector(p)
ll = gp.log_likelihood(ws, quiet=True)
return -ll if np.isfinite(ll) else 1e25
def grad_nll(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(ws, quiet=True)
p0 = gp.get_parameter_vector()
result = op.minimize(nll, p0, jac=grad_nll)
gp.set_parameter_vector(result.x)
continue
self.trained=True
return
def predict(self, params):
"""Predict the power spectrum at a set of cosmological parameters.
Args:
params (float or array-like): parameters of the requested
power spectra
Returns:
(array-like): length (Nz x Nk) 1D array with the predicted
power spectra for the requested cosmology
"""
if not self.trained:
raise Exception("Need to train the emulator first.")
params = np.atleast_1d(params)
if params.ndim > 1:
raise Exception("'params' must be a single point in parameter "+
"space; a 1D array at most.")
if len(params) != self.Npars:
raise Exception("length of 'params' does not match training "+\
"parameters.")
        #For higher dimensional training data, george requires a 2D array...
if len(params) > 1:
params = np.atleast_2d(params)
#Loop over d GPs and predict weights
wp = np.array([gp.predict(ws, params)[0] for ws, gp in\
zip(self.ws, self.gplist)])
#Multiply by the principle components to get predicted lnk2p
lnk2p_pred = wp[0]*self.phis[0]
for i in range(1, self.NPC):
lnk2p_pred += wp[i]*self.phis[i]
#Multiply on the stddev and add on the mean
lnk2p_pred = lnk2p_pred *self.lnk2p_std + self.lnk2p_mean
k2p_pred = np.exp(lnk2p_pred)
k = self.k
zs = self.redshifts
Nk = len(k)
Nz = len(zs)
P_pred = k2p_pred
#Multiply each P(k) by k^2, but note the shapes
#of the power spectra array we have to deal with
for i in range(Nz):
lo = i*Nk
hi = (i+1)*Nk
P_pred[lo:hi] /= k**2
return P_pred
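    # Example (illustrative, assuming training arrays `params`, `zs`, `k`, `pks`):
    #   emu = _pmesh_emulator(params, zs, k, pks)
    #   emu.train()
    #   P = emu.predict(params[0])  # length len(zs)*len(k) array of predicted P(k) values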
class pmesh_emulator(object):
def __init__(self, excluded_indices=None, number_of_principle_components=6):
import os, inspect
data_path = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))+"/"
self.number_of_principle_components = number_of_principle_components
        self.params = np.loadtxt(data_path+"training_points.txt")
# coding: utf-8
""" Simulate low level RFI
We are interested in the effects of RFI signals that cannot be detected in the visibility data. Therefore,
in our simulations we add attenuation selected to give SNR about 1 in the unaveraged time-frequency data.
This is about 180dB for a DTV station in Perth.
The scenario is:
* There is a TV station at a remote location (e.g. Perth), emitting a broadband signal (7MHz) of known power (50kW).
* The emission from the TV station arrives at LOW stations with phase delay and attenuation. Neither of these are
well known but they are probably static.
* The RFI enters LOW stations in a sidelobe of the station beam. Calculations by <NAME> indicate that this
provides attenuation of about 55 - 60dB for a source close to the horizon.
* The RFI enters each LOW station with fixed delay and zero fringe rate (assuming no e.g. ionospheric ducting or
reflection from a plane)
* In tracking a source on the sky, the signal from one station is delayed and fringe-rotated to stop the fringes for
one direction on the sky.
* The fringe rotation stops the fringe from a source at the phase tracking centre but phase rotates the RFI, which
now becomes time-variable.
* The correlation data are time- and frequency-averaged over a timescale appropriate for the station field of view.
This averaging decorrelates the RFI signal.
* We want to study the effects of this RFI on statistics of the visibilities, and on images made on source and
at the pole.
The simulate_low_rfi_visibility.py script averages the data producing baseline-dependent decorrelation.
The effect of averaging is not more than about -20dB but it does vary with baseline giving the radial
power spectrum we see. The 55-60 dB is part of the 180dB. To give a signal to noise on 1 or less, the
terrain propagation must be about 100dB.
The simulation is implemented in some functions in ARL, and the script simulate_low_rfi_visibility is available
in the SKA Github repository sim-lowlevel-rfi. Distributed processing is implemented via Dask. The outputs are
fits file and plots of the images: on signal channels and on pure noise channels, and for the source of
interest and the Southern Celestial Pole. The unaveraged MeasurementSets are also output, one per time chunk.
"""
import os
import pprint
import time
import matplotlib.pyplot as plt
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord, EarthLocation
import astropy.constants as const
from data_models.polarisation import PolarisationFrame
from processing_components.simulation.rfi import calculate_averaged_correlation, simulate_rfi_block
from processing_library.image.operations import create_image
from processing_library.util.array_functions import average_chunks
from processing_components.visibility.base import export_blockvisibility_to_ms
from workflows.arlexecute.imaging.imaging_arlexecute import invert_list_arlexecute_workflow, \
sum_invert_results_arlexecute
from wrappers.arlexecute.execution_support.arlexecute import arlexecute
from wrappers.arlexecute.execution_support.dask_init import get_dask_Client
from wrappers.arlexecute.image.operations import show_image, export_image_to_fits
from wrappers.arlexecute.simulation.configurations import create_named_configuration
from wrappers.arlexecute.visibility.base import create_blockvisibility
from wrappers.arlexecute.visibility.coalesce import convert_blockvisibility_to_visibility
def add_noise(bvis):
# The specified sensitivity (effective area / T_sys) is roughly 610 m ^ 2 / K in the range 160 - 200MHz
# sigma_vis = 2 k T_sys / (area * sqrt(tb)) = 2 k 512 / (610 * sqrt(tb)
sens = 610
bt = bvis.channel_bandwidth[0] * bvis.integration_time[0]
sigma = 2 * 1e26 * const.k_B.value / ((sens/512) * (numpy.sqrt(bt)))
sshape = bvis.vis.shape
bvis.data['vis'] += numpy.random.normal(0.0, sigma, sshape) + 1j * numpy.random.normal(0.0, sigma, sshape)
return bvis
def simulate_rfi_image(config, times, frequency, channel_bandwidth, phasecentre, polarisation_frame,
time_average, channel_average, attenuation, noise,
emitter_location, emitter_power, use_pole, waterfall, write_ms):
averaged_frequency = numpy.array(average_chunks(frequency, numpy.ones_like(frequency), channel_average))[0]
averaged_channel_bandwidth, wts = numpy.array(
average_chunks(channel_bandwidth, numpy.ones_like(frequency), channel_average))
averaged_channel_bandwidth *= wts
averaged_times = numpy.array(average_chunks(times, numpy.ones_like(times), time_average))[0]
s2r = numpy.pi / 43200.0
bvis = create_blockvisibility(config, s2r * times, frequency,
channel_bandwidth=channel_bandwidth,
phasecentre=phasecentre,
polarisation_frame=polarisation_frame,
zerow=False)
bvis = simulate_rfi_block(bvis, emitter_location=emitter_location,
emitter_power=emitter_power, attenuation=attenuation, use_pole=use_pole)
if noise:
bvis = add_noise(bvis)
if waterfall:
plot_waterfall(bvis)
if write_ms:
msname = "simulate_rfi_%.1f.ms" % (times[0])
export_blockvisibility_to_ms(msname, [bvis], "RFI")
averaged_bvis = create_blockvisibility(config, s2r * averaged_times, averaged_frequency,
channel_bandwidth=averaged_channel_bandwidth,
phasecentre=phasecentre,
polarisation_frame=polarisation_frame,
zerow=False)
npol = 1
for itime, _ in enumerate(averaged_times):
atime = itime * time_average
for ant2 in range(nants):
for ant1 in range(ant2, nants):
for ichan, _ in enumerate(averaged_frequency):
achan = ichan * channel_average
for pol in range(npol):
averaged_bvis.data['vis'][itime, ant2, ant1, ichan, pol] = \
calculate_averaged_correlation(
bvis.data['vis'][atime:(atime+time_average), ant2, ant1, achan:(achan+channel_average), pol],
time_average, channel_average)[0,0]
averaged_bvis.data['vis'][itime, ant1, ant2, ichan, pol] = \
numpy.conjugate(averaged_bvis.data['vis'][itime, ant2, ant1, ichan, pol])
achan += 1
atime += 1
del bvis
if noise:
averaged_bvis = add_noise(averaged_bvis)
return averaged_bvis
def plot_waterfall(bvis):
print(bvis.uvw.shape)
uvdist = numpy.hypot(bvis.uvw[0,:,:,0], bvis.uvw[0,:,:,1])
print(uvdist.shape)
uvdistmax = 0.0
max_ant1=0
max_ant2=0
for ant2 in range(bvis.nants):
for ant1 in range(ant2+1):
if uvdist[ant2, ant1] > uvdistmax:
uvdistmax = uvdist[ant2, ant1]
max_ant1 = ant1
max_ant2 = ant2
basename = os.path.basename(os.getcwd())
fig=plt.figure()
fig.suptitle('%s: Baseline [%d, %d], ha %.2f' % (basename, max_ant1, max_ant2, bvis.time[0]))
plt.subplot(121)
plt.gca().set_title("Amplitude")
plt.gca().imshow(numpy.abs(bvis.vis[: , max_ant1, max_ant2, :, 0]), origin='bottom')
plt.gca().set_xlabel('Channel')
plt.gca().set_ylabel('Time')
plt.subplot(122)
plt.gca().imshow(numpy.angle(bvis.vis[: , max_ant1, max_ant2, :, 0]), origin='bottom')
plt.gca().set_title("Phase")
plt.gca().set_xlabel('Channel')
plt.gca().set_ylabel('Time')
plt.savefig('waterfall_%d_%d_ha_%.2f.png' % (max_ant1, max_ant2, bvis.time[0]))
plt.show(block=False)
if __name__ == '__main__':
start_epoch = time.asctime()
print("\nSKA LOW RFI simulation using ARL\nStarted at %s\n" % start_epoch)
pp = pprint.PrettyPrinter()
import argparse
parser = argparse.ArgumentParser(description='Simulate DTV RFI')
parser.add_argument('--use_dask', type=str, default='True', help='Use Dask to distribute processing?')
parser.add_argument('--context', type=str, default='DTV', help='DTV')
parser.add_argument('--rmax', type=float, default=3e3, help='Maximum distance of station from centre (m)')
parser.add_argument('--seed', type=int, default=18051955, help='Random number seed')
parser.add_argument('--station_skip', type=int, default=33, help='Decimate stations by this factor')
parser.add_argument('--show', type=str, default='False', help='Show images?')
parser.add_argument('--attenuation', type=float, default=1.0, help='Attenuation factor')
parser.add_argument('--noise', type=str, default='False', help='Add noise?')
parser.add_argument('--ngroup_visibility', type=int, default=8, help='Process in visibility groups this large')
parser.add_argument('--do_psf', type=str, default="False", help='Make the PSF?')
parser.add_argument('--use_agg', type=str, default="False", help='Use Agg matplotlib backend?')
parser.add_argument('--write_fits', type=str, default="True", help='Write fits files?')
parser.add_argument('--declination', type=float, default=-45.0, help='Declination (degrees)')
parser.add_argument('--npixel', type=int, default=1025, help='Number of pixel per axis in image')
parser.add_argument('--nchannels_per_chunk', type=int, default=1024, help='Number of channels in a chunk')
parser.add_argument('--channel_average', type=int, default=16, help="Number of channels in a chunk to average")
parser.add_argument('--frequency_range', type=float, nargs=2, default=[170.5e6, 184.5e6],
help="Frequency range (Hz)")
parser.add_argument('--nintegrations_per_chunk', type=int, default=64,
help='Number of integrations in a time chunk')
parser.add_argument('--time_average', type=int, default=16, help="Number of integrations in a chunk to average")
parser.add_argument('--integration_time', type=float, default=0.25, help="Integration time (s)")
parser.add_argument('--time_range', type=float, nargs=2, default=[-6.0, 6.0], help="Hourangle range (hours)")
parser.add_argument('--emitter_longitude', type=float, default=115.8605, help="Emitter longitude")
parser.add_argument('--emitter_latitude', type=float, default=-31.9505, help="Emitter latitude")
parser.add_argument('--emitter_power', type=float, default=5e4, help="Emitter power (W)]")
parser.add_argument('--use_pole', type=str, default="False", help='Set RFI source at pole?')
parser.add_argument('--waterfall', type=str, default="False", help='Plot waterfalls?')
parser.add_argument('--write_ms', type=str, default="False", help='Write measurmentsets?')
args = parser.parse_args()
print("Starting LOW low level RFI simulation")
pp.pprint(vars(args))
write_ms = args.write_ms == "True"
numpy.random.seed(args.seed)
if args.use_dask == "True":
client = get_dask_Client(threads_per_worker=1,
processes=True,
memory_limit=32 * 1024 * 1024 * 1024,
n_workers=8)
arlexecute.set_client(client=client)
print(arlexecute.client)
else:
print("Running in serial mode")
arlexecute.set_client(use_dask=False)
emitter_location = EarthLocation(lon=args.emitter_longitude, lat=args.emitter_latitude, height=0.0)
emitter_power = args.emitter_power
print("Emitter is %.1f kW at location %s" % (1e-3 * emitter_power, emitter_location.geodetic))
if args.waterfall == "True":
waterfall = True
else:
waterfall = False
if args.noise == "True":
noise = True
print("Adding noise to simulated data")
else:
noise = False
if args.use_pole == "True":
print("Placing emitter at the southern celestial pole")
use_pole= True
else:
use_pole = False
rmax = args.rmax
low = create_named_configuration('LOWR3', rmax=rmax)
nants = len(low.names)
print("There are %d stations" % nants)
station_skip = args.station_skip
low.data = low.data[::station_skip]
nants = len(low.names)
print("There are %d stations after decimation" % nants)
npixel = args.npixel
declination = args.declination
phasecentre = SkyCoord(ra=+0.0 * u.deg, dec=declination * u.deg, frame='icrs', equinox='J2000')
pole = SkyCoord(ra=+0.0 * u.deg, dec=-90.0 * u.deg, frame='icrs', equinox='J2000')
# Number of integrations in a time chunk
nintegrations_per_chunk = args.nintegrations_per_chunk
# Integration time within a chunk
integration_time = args.integration_time
# Number of integrations to average
time_average = args.time_average
# Integration time after averaging
average_integration_time = time_average * integration_time
print("Each chunk has %d integrations of duration %.2f (s)" %
(args.nintegrations_per_chunk, integration_time))
frequency = numpy.linspace(args.frequency_range[0], args.frequency_range[1], args.nchannels_per_chunk)
channel_bandwidth = (frequency[-1] - frequency[0]) / (args.nchannels_per_chunk - 1)
channel_average = args.channel_average
print("Each chunk has %d frequency channels of width %.3f (MHz)" %
(args.nchannels_per_chunk, channel_bandwidth * 1e-6))
channel_bandwidth = numpy.ones_like(frequency) * channel_bandwidth
start_times = numpy.arange(args.time_range[0] * 3600.0, args.time_range[1] * 3600.0,
nintegrations_per_chunk * integration_time)
print("Start times", start_times)
results = list()
pole_results = list()
chunk_start_times = [start_times[i:i + args.ngroup_visibility]
for i in range(0, len(start_times), args.ngroup_visibility)]
print("Chunk start times", [c[0] for c in chunk_start_times])
dopsf = args.do_psf == "True"
# Find the average frequencies
    averaged_frequency = numpy.array(average_chunks(frequency, numpy.ones_like(frequency), channel_average))[0]
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 01 10:52:23 2018
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import PolynomialOrderStar as POS
#Plot #1: Trapezium Rule Order Star
def p(z):
return (1.0 + 0.5 * z) / (1.0 - 0.5 * z)
POS.polyOrderStar(p, -3, 3, -2, 2)
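# Note (added): p here is the trapezium-rule stability function; |p(z)| < 1 exactly when
# Re(z) < 0 and |p(iy)| = 1 on the imaginary axis, i.e. the method is A-stable.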
#Plot #2: Index Example
def p(z):
return 1.0 + z + z ** 2 /2.0 + z ** 3 / 6.0 + z ** 4 / 24.0
#Plot #3
x = np.linspace(0, np.pi, 1000)
c1 = 1 + np.exp(x * 1j) / 2.0
c2 = np.sqrt(2) / 2.0 + (np.sqrt(2) * 1j) / 2.0 + np.exp(x * 1j) / 2.0
c3 = 1j + np.exp(x * 1j) / 2.0
c4 = - np.sqrt(2) / 2.0 + (np.sqrt(2) * 1j) / 2.0 + np.exp(x * 1j) / 2.0
c5 = -1 + np.exp(x * 1j) / 2.0
#Initialize a Figure
fig = plt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
#plot first chain
ax.plot(np.real(c1), np.imag(c1), color = 'C2')
ax.plot(np.real(c1), - np.imag(c1), color = 'C2')
ax.fill_between(np.real(c1), np.imag(c1), -np.imag(c1), color = 'C2',
alpha = 0.1)
ax.plot(np.real(c2), np.imag(c2), color = 'C0')
ax.plot(np.real(c2), 2 * np.imag(c2[0]) - np.imag(c2), color = 'C0')
ax.fill_between(np.real(c2), np.imag(c2), 2 * np.imag(c2[0]) -
np.imag(c2), color = 'C0', alpha = 0.1)
ax.plot(np.real(c3), np.imag(c3), color = 'C0')
ax.plot(np.real(c3), 2 * np.imag(c3[0]) - np.imag(c3), color = 'C0')
ax.fill_between(np.real(c3), np.imag(c3), 2 * np.imag(c3[0]) -
np.imag(c3), color = 'C0', alpha = 0.1)
ax.plot(np.real(c4), np.imag(c4), color = 'C0')
ax.plot(np.real(c4), 2 * np.imag(c4[0]) - np.imag(c4), color = 'C0')
ax.fill_between(np.real(c4), np.imag(c4), 2 * np.imag(c4[0]) -
np.imag(c4), color = 'C0', alpha = 0.1)
ax.plot(np.real(c5), np.imag(c5), color = 'C3')
ax.plot(np.real(c5), - np.imag(c5), color = 'C3')
ax.fill_between(np.real(c5), np.imag(c5), -np.imag(c5), color = 'C3',
alpha = 0.1)
#plot second chain
ax.plot(np.real(c1), np.imag(c1), color = 'C2')
ax.plot(np.real(c1), - np.imag(c1), color = 'C2')
ax.fill_between(np.real(c1), np.imag(c1), -np.imag(c1), color = 'C2',
alpha = 0.1)
ax.plot(np.real(c2), - np.imag(c2), color = 'C1')
ax.plot(np.real(c2), - (2 * np.imag(c2[0]) - np.imag(c2)), color = 'C1')
ax.fill_between(np.real(c2), - np.imag(c2), - (2 * np.imag(c2[0])
- np.imag(c2)), color = 'C1', alpha = 0.1)
ax.plot(np.real(c3), - np.imag(c3), color = 'C1')
ax.plot(np.real(c3), - (2 * np.imag(c3[0]) - np.imag(c3)), color = 'C1')
ax.fill_between(np.real(c3), - np.imag(c3), - (2 * np.imag(c3[0]) -
np.imag(c3)), color = 'C1', alpha = 0.1)
ax.plot(np.real(c4), - np.imag(c4), color = 'C1')
ax.plot(np.real(c4), - (2 * np.imag(c4[0]) - np.imag(c4)), color = 'C1')
ax.fill_between(np.real(c4), - np.imag(c4), - (2 * np.imag(c4[0]) -
np.imag(c4)), color = 'C1', alpha = 0.1)
ax.plot(np.real(c5), np.imag(c5), color = 'C3')
ax.plot(np.real(c5), - np.imag(c5), color = 'C3')
ax.fill_between(np.real(c5), np.imag(c5), -np.imag(c5), color = 'C3',
alpha = 0.1)
#setup legend
omega_1 = mpatches.Rectangle((0, 0), 1, 1, fc="C0",alpha=0.1)
omega_2 = mpatches.Rectangle((0, 0), 1, 1, fc="C1",alpha=0.1)
omega_3 = mpatches.Rectangle((0, 0), 1, 1, fc="C2",alpha=0.2)
omega_4 = mpatches.Rectangle((0, 0), 1, 1, fc="C3",alpha=0.2)
handles = [omega_3, omega_4, omega_1, omega_2]
labels = [r'$\Omega$', r'$\Omega_*$', r'$\Omega_i$', r'$\widetilde{\Omega}_i$']
ax.legend(handles, labels, fontsize = 14)
#setup plot window
ax = plt.gca()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
# moving left spine to the right to position x == 0:
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))
ax.grid(b = 'on')
plt.xlim(-1.6, 2.6)
plt.ylim(-1.6, 1.6)
plt.axes().set_aspect('equal')
#Plot #4
def func1(z):
return (2.0 + np.sqrt(1 + 2 * z)) / (3.0 - 2.0 * z)
def func2(z):
return (2.0 - np.sqrt(1 + 2 * z)) / (3.0 - 2.0 * z)
#setup grid for function evaluations
A = np.linspace(-5, 5, 1000)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
from PIL import Image
import numpy as np
from scipy.misc import comb
import cv2
from pycocotools import mask
def bernstein_poly(i, n, t):
"""
The Bernstein polynomial of n, i as a function of t
"""
return comb(n, i) * ( t**(n-i) ) * (1 - t)**i
def bezier_curve(points, nTimes=1000):
"""
Given a set of control points, return the
bezier curve defined by the control points.
points should be a list of lists, or list of tuples
such as [ [1,1],
[2,3],
[4,5], ..[Xn, Yn] ]
nTimes is the number of time steps, defaults to 1000
See http://processingjs.nihongoresources.com/bezierinfo/
"""
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
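# Example (illustrative): xvals, yvals = bezier_curve([[0, 0], [1, 2], [3, 3]], nTimes=100)
# samples 100 points along the quadratic Bezier defined by the three control points.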
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
'--dataset', help="cocostuff, cityscapes", default="cityscapes_instance_only", type=str)
parser.add_argument(
'--outdir', help="output dir for json files", default="output", type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted", default="input", type=str)
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
return parser.parse_args()
# for Cityscapes
def getLabelID(self, instID):
if (instID < 1000):
return instID
else:
return int(instID / 1000)
def getBezierPoint(polyPoint):
xvals, yvals = bezier_curve(polyPoint, nTimes=5*len(polyPoint))
point = []
for x, y in zip(xvals, yvals):
point.append([x, y])
return point[::-1]
def getPointByPoly2d(poly2d):
ann = []
curve = []
for p in poly2d:
if p[2] == "C":
curve.append([p[0], p[1]])
else:
if len(curve) > 0:
ann.extend(getBezierPoint(curve))
curve = []
ann.append([p[0], p[1]])
if poly2d[-1] == poly2d[0]:
pass
else:
return []
# np_ann = np.array(ann)
# np_ann[:, 0] -= 5
# np_ann = np_ann.tolist()
# repair = np.array(ann[::-1])
# repair[:, 0] += 5
# repair = repair.tolist()
# np_ann.extend(repair)
# ann.extend(np_ann)
return ann
def getBoxByObj(obj):
    if "box2d" in obj:
box2d = obj["box2d"]
return [box2d["x1"], box2d["y1"],
box2d["x2"] - box2d["x1"],
box2d["y2"] - box2d["y1"]]
else:
return []
def getPointByObj(obj):
ann = []
box2d = []
    if "box2d" in obj:
ann.append([[obj["box2d"]["x1"], obj["box2d"]["y1"]], [obj["box2d"]["x2"], obj["box2d"]["y2"]]])
return ann
elif obj.has_key("poly2d"):
area = getPointByPoly2d(obj["poly2d"])
if len(area) > 0:
ann.append(area)
return ann
elif obj.has_key("segments2d"):
for poly in obj["segments2d"]:
ann.append(getPointByPoly2d(poly))
return ann
def getAreaByObj(polygon_points_array, h, w, category_id):
line_type = 1 # cv2.CV_AA
color = category_id
sum = 0
for poly_points in polygon_points_array:
points = poly_points
seg = []
for j in range(len(points)):
coordx = points[j][0]
coordy = points[j][1]
point = []
point.append(int(coordx))
point.append(int(coordy))
seg.append(point)
labelMask = np.zeros((h, w))
cv2.fillPoly(labelMask, np.array([seg], dtype=np.int32), color, line_type)
mask_new, contours, hierarchy = cv2.findContours((labelMask).astype(np.uint8), cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
##----------------------------------------------
polygons = []
# In practice, only one element.
for contour in contours:
contour = contour.flatten().tolist()
polygons.append(contour)
labelMask[:, :] = labelMask == color
        labelMask = np.expand_dims(labelMask, axis=2)
import numpy as np
import scipy.sparse as sparse
from pyamg.gallery import poisson, load_example
from pyamg.graph import maximal_independent_set, vertex_coloring,\
bellman_ford, lloyd_cluster, connected_components,\
bellman_ford_reference
from pyamg import amg_core
from numpy.testing import TestCase, assert_equal
def canonical_graph(G):
# convert to expected format
# - remove diagonal entries
# - all nonzero values = 1
G = sparse.coo_matrix(G)
mask = G.row != G.col
G.row = G.row[mask]
G.col = G.col[mask]
G.data = G.data[mask]
G.data[:] = 1
return G
def assert_is_mis(G, mis):
G = canonical_graph(G)
# no MIS vertices joined by an edge
if G.nnz > 0:
assert((mis[G.row] + mis[G.col]).max() <= 1)
# all non-set vertices have set neighbor
assert((mis + G*mis).min() == 1)
def assert_is_vertex_coloring(G, c):
G = canonical_graph(G)
# no colors joined by an edge
assert((c[G.row] != c[G.col]).all())
# all colors up to K occur at least once
assert((np.bincount(c) > 0).all())
class TestGraph(TestCase):
def setUp(self):
cases = []
np.random.seed(651978631)
for i in range(5):
A = np.random.rand(8, 8) > 0.5
cases.append(canonical_graph(A + A.T).astype(float))
cases.append(np.zeros((1, 1)))
cases.append(np.zeros((2, 2)))
cases.append(np.zeros((8, 8)))
cases.append(np.ones((2, 2)) - np.eye(2))
cases.append(poisson((5,)))
cases.append(poisson((5, 5)))
cases.append(poisson((11, 11)))
cases.append(poisson((5, 5, 5)))
for name in ['airfoil', 'bar', 'knot']:
cases.append(load_example(name)['A'])
cases = [canonical_graph(G) for G in cases]
self.cases = cases
def test_maximal_independent_set(self):
# test that method works with diagonal entries
assert_equal(maximal_independent_set(np.eye(2)), [1, 1])
for algo in ['serial', 'parallel']:
for G in self.cases:
mis = maximal_independent_set(G, algo=algo)
assert_is_mis(G, mis)
for G in self.cases:
for k in [1, 2, 3, 4]:
mis = maximal_independent_set(G, k=k)
if k > 1:
G = (G + np.eye(G.shape[0]))**k
G = canonical_graph(G)
assert_is_mis(G, mis)
def test_vertex_coloring(self):
# test that method works with diagonal entries
        assert_equal(vertex_coloring(np.eye(1)), [0])
from collections.abc import Iterable
import os
from random import randint
from astropy.io import fits
import cv2
from lacosmic import lacosmic
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
from scipy import interpolate
from skimage import filters
from skimage.morphology import disk
from xspectre.settings.settings import settings_default
from xspectre.utils import errors
class BaseImage:
"""
BaseImage class creates an object with all of the general image manipulation techniques
"""
def __init__(self, bad_pixel_map=None):
self.image = None
self.image_hdu = 0
self.header = fits.Header()
self.bad_pixel_map = bad_pixel_map
def generate_bad_pixel_map(self):
if self.bad_pixel_map is None:
self.bad_pixel_map = np.zeros(self.image.shape)
else:
self.bad_pixel_map = image_file_or_array_to_array(self.bad_pixel_map)
def show(self):
"""
Displays the current image using pyplot
"""
plt.figure(randint(0, 256))
plt.imshow(self.image,)
plt.xticks([]), plt.yticks([])
plt.show()
def save(self, filename, hdu=None):
"""
saves image and header to fits file
Parameters
----------
filename : str
file path to save to
hdu : int
image hdu number
Returns
-------
"""
# TODO: fix header saving issue
file_dir = os.path.dirname(filename)
if hdu is None:
hdu = self.image_hdu
if not os.path.exists(file_dir):
os.makedirs(file_dir)
print(self.header)
# hdu_primary = fits.PrimaryHDU(header=self.header)
hdu_primary = fits.PrimaryHDU()
if hdu == 0:
hdu_primary.data = self.image
hdu_list = fits.HDUList([hdu_primary])
else:
hdu_image = fits.ImageHDU(self.image)
hdu_list = fits.HDUList([hdu_primary, hdu_image])
hdu_list.writeto(filename, overwrite=True)
def histogram(
self, bin_width='auto', histogram_range=(), x_range=(), y_range=(), show_plot=False, save_plot=False,
save_filepath="histogram.txt"
):
"""
creates intensity histogram of the image
Parameters
----------
bin_width : int or sequence or str, optional
width of histogram bins
histogram_range :
x_range :
y_range :
show_plot :
save_plot :
save_filepath :
Returns
-------
"""
if histogram_range == ():
histogram_range = (self.image.min(), self.image.max())
y_max, x_max = self.image.shape
if x_range == ():
x_range = (0, x_max-1)
if y_range == ():
y_range = (0, y_max-1)
a = self.image[y_range[0]:y_range[1]][x_range[0]:x_range[1]]
a = a.flatten()
histogram = np.histogram(a, bin_width, histogram_range)
if show_plot or save_plot:
plt.figure(randint(0, 256))
plt.hist(a, bin_width, histogram_range)
plt.title('Image Intensity Histogram')
            plt.xlabel('Intensity')
if show_plot:
plt.show()
if save_plot:
plt.savefig(save_filepath)
return histogram
def masked_interpolation(self, method='cubic'):
"""
Interpolates over masked locations
Parameters
----------
method : str
Returns
-------
"""
bad_pixel_mask = self.bad_pixel_map > 0
x = np.arange(0, self.image.shape[1])
y = np.arange(0, self.image.shape[0])
self.image[bad_pixel_mask] = np.nan
self.image = np.ma.masked_invalid(self.image)
xx, yy = np.meshgrid(x, y)
x1 = xx[~self.image.mask]
y1 = yy[~self.image.mask]
newarr = self.image[~self.image.mask]
assert isinstance(x1, np.ndarray)
assert isinstance(y1, np.ndarray)
self.image = interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method=method)
def median_filter(self, disk_radius=2):
"""
Replaces pixel value with median of neighborhood
Parameters
----------
disk_radius : int
Returns
-------
alters self.image
"""
# TODO: figure out why this isn't working, consider using astropy.convolution instead
self.image = filters.median(self.image, selem=disk(disk_radius))
def mean_filter(self, disk_radius=2):
"""
Replaces pixel value with mean of neighborhood
Parameters
----------
disk_radius : int
Returns
-------
alters self.image
"""
# TODO: figure out why this isn't working, consider using astropy.convolution instead
self.image = filters.rank.mean(self.image, selem=disk(disk_radius))
def cosmic_filter(
self, contrast, cr_threshold, neighbor_threshold, error=None, mask=None, background=None,
effective_gain=None, readnoise=None, maxiter=4, border_mode=u'mirror'
):
"""
Cleans cosmic rays from image using lacosmic algorithm
Parameters
----------
contrast :
cr_threshold :
neighbor_threshold :
error :
mask :
background :
effective_gain :
readnoise :
maxiter :
border_mode :
Returns
-------
"""
# TODO: figure out which parameters work best for this. Ask JWST folks, Dale or Bernie, what they used for this
self.image = lacosmic(
self.image, contrast=contrast, cr_threshold=cr_threshold, neighbor_threshold=neighbor_threshold,
error=error, mask=mask, background=background, effective_gain=effective_gain, readnoise=readnoise,
maxiter=maxiter, border_mode=border_mode
)
def slice(self, x_range=(), y_range=()):
"""
Selects part of the image
Parameters
----------
x_range : Iterable
min and max for slice
y_range : Iterable
min and max for slice
Returns
-------
"""
# TODO: determine if we actually want it to replace the image, or return another array
y_max, x_max = self.image.shape
if x_range == ():
x_range = (0, x_max)
if y_range == ():
y_range = (0, y_max)
self.image = self.image[y_range[0]:y_range[1], x_range[0]:x_range[1]]
def border(self, x_left=0, x_right=0, y_top=0, y_bottom=0, border_value=0):
"""
adds a border around image
Parameters
----------
x_left : int
adds border before 0th index along axis=1
x_right : int
adds border after maximum index along axis=1
y_top : int
adds border after maximum index along axis=0
y_bottom : int
adds border before 0th index along axis=0
border_value : int or float
pixel value of added border
Returns
-------
"""
y_max, x_max = self.image.shape
border_array = np.zeros((y_top+y_max+y_bottom, x_left+x_max+x_right)) + border_value
border_array[y_bottom:y_bottom+y_max, x_left:x_left+x_max] = self.image
self.image = border_array
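    # Example (illustrative, for a BaseImage instance `img`):
    #   img.border(x_left=2, x_right=2, y_top=2, y_bottom=2, border_value=0)
    # pads the current image with a 2-pixel frame of zeros on every side.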
def resize(self, width_scale_factor=1, height_scale_factor=1): # , resample=0):
if width_scale_factor < 0:
self.image = np.flip(self.image, 1)
            width_scale_factor = np.abs(width_scale_factor)
from make_tree_from_parent_vec import make_tree_from_parent_vec
from collections import OrderedDict
from auxilliary import Aux
import numpy as np
import cell
from file_io import *
from get_parent_from_neuron import get_parent_from_neuron
import scipy.io as sio
from io import StringIO
import csv
import math
# input_dict = clean_creat_aux_3_mat(load_creat_aux_3_mat('/home/devloop0/inputCreatAux3.mat'))
# A = input_dict['A']
# Parent = input_dict['Parent']
# cmVec = input_dict['cmVec']
# NSeg = input_dict['NSeg']
# N = input_dict['N']
# nrn = create_neuron(input_dict)
# FN_TopoList = './64TL.csv'
fmatrixFN = './Fmatrix.csv'
def create_auxilliary_data_3(A, N, NSeg, Parent, cmVec,parent_seg,bool_model,seg_start,n_segs,seg_to_comp,data_dir):
bool_model = np.array(bool_model)
FTYPESTR = 'float'
FatherBase = [0 for i in range(N - 1)]
    for i in range(N - 1, 0, -1):  # iterate over the matrix from the last row/column backwards
        if A[i - 1, i] != 0:  # element i-1 couples directly to i, so its parent index k is i
            k = i
        else:  # otherwise take the first later row with a nonzero entry in column i-1
            k = np.where(A[i:, i - 1] != 0)[0] + i + 1
            k = k[0]
        FatherBase[i - 1] = k
FatherBase = np.array(FatherBase)
d = np.diag(A).T
e, f = [0 for i in range(N)], [0 for i in range(N)]
for i in range(1, N-1):
f[i-1] = A[i-1, FatherBase[i-1]-1]
e[i] = A[FatherBase[i-1]-1, i-1]
f[-1] = 0
f[-2] = A[-2,-1]
e[-1] = A[-1,-2]
f = np.array(f)
e = np.array(e)
[e,f] = readEFDirectly(fmatrixFN)
Ksx = np.array(parent_seg)
Ks = [0]
for i in range(2, Ksx.size + 1):
print(str(i) + ',' + str(N + 2 - i - 1))
Ks.append(N + 1 - Ksx[N + 2 - i - 1])
Ks = np.array(Ks)
aux = Aux()
aux.Ks = Ks.astype(np.int)
FatherBase = Ks[1:]
Father = np.append(FatherBase, [FatherBase.size + 2, FatherBase.size + 2])
FIdxsX = []
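    # Build ancestor lookup tables by pointer jumping: row i of FIdxsX holds, for every
    # node, its 2**(i-1)-th ancestor in the Father vector (used for the tree traversal below).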
for i in range(1, int(np.ceil(np.log2(N)) + 3 + 1)):
CurF = np.array(list(range(1, Father.size + 1)))
for j in range(1, 2 ** (i - 1) + 1):
CurF = Father[np.subtract(CurF, 1)].astype(np.int)
FIdxsX.append(CurF)
FIdxsX = np.array(FIdxsX)
ind = np.where(np.all(FIdxsX == FIdxsX[-1], 1))[0][0] + 1
if ind != 0:
FIdxsX = FIdxsX[:ind - 1,:]
LognDepth = FIdxsX.shape[0]
FIdxsX = FIdxsX[:,:N]
aux.FIdxsX = FIdxsX
aux.LognDepth = LognDepth
Nx = N
SonNoVec, ParentUsed = np.zeros(Nx), np.zeros(Nx)
for seg in range(1, Nx + 1):
if seg == 1:
parentIndex = 1
else:
parentIndex = Nx + 1 - aux.Ks[Nx + 2 - seg - 1]
ParentUsed[parentIndex - 1] = ParentUsed[parentIndex - 1] + 1
SonNoVec[seg - 1] = ParentUsed[parentIndex - 1]
SonNoVec[0] = 0
aux.SonNoVec = SonNoVec
if np.max(SonNoVec) > 2:
raise ValueError('error np.max(SonNoVec) > 2')
tree_dict = make_tree_from_parent_vec(aux, Ks, N)
Depth = tree_dict['Depth']
Level = tree_dict['Level']
FLevel = tree_dict['FLevel']
SegStartI = tree_dict['SegStartI']
SegEndI = tree_dict['SegEndI']
Fathers = tree_dict['Fathers']
aux.Depth = Depth
aux.Level = Level
aux.FLevel = FLevel
aux.SegStartI = SegStartI
aux.SegEndI = SegEndI
aux.Fathers = Fathers
RelVec = tree_dict['RelVec']
RelStarts = tree_dict['RelStarts']
RelEnds = tree_dict['RelEnds']
aux.RelVec = np.add(RelVec,1)
aux.RelStarts = np.add(RelStarts,1)
aux.RelEnds = np.add(RelEnds,1)
LastLevelsI = np.where(Level == np.max(Level))[0][0] + 1
EndLastLevelsI = SegEndI[LastLevelsI - 1]
KsB = Ks
KsB = np.append(KsB, [EndLastLevelsI])
aux.KsB = KsB
FN = data_dir + '/BasicConst' + str(N) + 'Seg.mat'
FNP = data_dir + '/BasicConst' + str(N) + 'SegP.mat'
FNM = data_dir + '/ParamsMat' + str(N) + '.mat'
FN_csv = data_dir + '/BasicConst' + 'Seg.csv'
FNP_csv = data_dir + '/BasicConst' + 'SegP.csv'
FN_uint16 = data_dir + '/BasicConst' + str(N) + 'Seg_uint16.mat'
FN_double = data_dir + '/BasicConst' + str(N) + 'Seg_double.mat'
FNP_uint16 = data_dir + '/BasicConst' + str(N) + 'SegP_uint16.mat'
FNP_double = data_dir + '/BasicConst' + str(N) + 'SegP_double.mat'
aux.d = d
aux.e = e
aux.f = f
aux.Cms = cmVec
FN_dict = OrderedDict()
FN_dict['N'] = np.array([np.uint16(N)])
FN_dict['e'] = np.double(e)
FN_dict['f'] = np.double(f)
FN_dict['Ks'] = np.uint16(Ks)
FN_dict['auxCms'] = np.double(aux.Cms)
FN_dict['nrnHasHH'] = np.uint16(bool_model)
FN_data = ''
for k in FN_dict:
s = StringIO()
np.savetxt(s, FN_dict[k].flatten(), fmt='%.9f', newline=',')
st = s.getvalue()
FN_data += st + '\n'
with open(FN_csv, 'w') as fn_f:
fn_f.write(FN_data)
sio.savemat(FN, FN_dict)
FN_dict_uint16 = {}
FN_dict_uint16['N'] = np.uint16(N)
FN_dict_uint16['Ks'] = np.uint16(Ks)
FN_dict_uint16['nrnHasHH'] = np.uint16(bool_model)
sio.savemat(FN_uint16, FN_dict_uint16)
FN_dict_double = {}
FN_dict_double['e'] = np.double(e)
FN_dict_double['f'] = np.double(f)
FN_dict_double['auxCms'] = np.double(aux.Cms)
sio.savemat(FN_double, FN_dict_double)
CompByLevel32 = np.zeros((0, 32))
CompByFLevel32 = np.zeros((0, 32))
nFComps, nComps = np.array([]), np.array([])
LRelated, FLRelated = [], []
nRoundForThisLevel = np.array([])
for CurLevel in range(Depth + 1):
CurComps = np.add(np.where(Level == CurLevel)[0], 1)
nComps = np.append(nComps, [CurComps.size])
Longer = np.multiply(np.ones(int(np.ceil(CurComps.size / 32.0) * 32)), CurComps[-1])
Longer[:CurComps.size] = CurComps
StuffToAdd = Longer.reshape((int(Longer.size / 32), 32))
StartPoint = CompByLevel32.shape[0] + 1
CompByLevel32 = np.vstack((CompByLevel32, StuffToAdd))
EndPoint = CompByLevel32.shape[0]
LRelated.append(list(range(StartPoint, EndPoint + 1)))
nRoundForThisLevel = np.append(nRoundForThisLevel, [CompByLevel32.shape[0]])
if CurLevel < Depth:
CurComps = np.add(np.where(FLevel == CurLevel + 1)[0], 1)
nFComps = np.append(nFComps, [CurComps.size])
Longer = np.multiply(np.ones(int(np.ceil(CurComps.size / 32.0) * 32)), CurComps[-1])
Longer[:CurComps.size] = CurComps
StuffToAdd = Longer.reshape((int(Longer.size / 32), 32))
StartPoint = CompByFLevel32.shape[0] + 1
CompByFLevel32 = np.vstack((CompByFLevel32, StuffToAdd))
EndPoint = CompByFLevel32.shape[0]
FLRelated.append(list(range(StartPoint, EndPoint + 1)))
LRelated = np.array(LRelated)
FLRelated = np.array(FLRelated).astype(object)
LRelStarts, LRelEnds, LRelCN, LRelVec = cell.cell_2_vec(LRelated)
LRelStarts = np.add(LRelStarts, 1)
LRelEnds = np.add(LRelEnds, 1)
if Depth == 0:
FLRelStarts, FLRelEnds, FLRelCN, FLRelVec = [], [], [], []
else:
FLRelStarts, FLRelEnds, FLRelCN, FLRelVec = cell.cell_2_vec(FLRelated)
FLRelStarts = np.add(FLRelStarts, 1)
"""
Function that partitions the domain
"""
import numpy as np
import math
from find_parametric_intersect import find_parametric_intersect
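# Illustrative call (hypothetical values, not taken from the original project):
# act_frac_sys is an (n, 4) array of segment endpoints [x0, y0, x1, y1] and
# frac_order_vec holds one value per segment. Splitting such a system over a
# 4 x 4 grid of subdomains could look like:
#   partition_domain(act_frac_sys, frac_order_vec, 1e-9, 4, 4)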
def partition_domain(act_frac_sys, frac_order_vec, tolerance_intersect, number_partitions_x, number_partitions_y):
"""
:param frac_order_vec:
:param act_frac_sys:
:param tolerance_intersect:
:param number_partitions_x:
:param number_partitions_y:
:return:
"""
# Define lowest and highest possible x and y values.
xmin = act_frac_sys[:, [0,2]].min(axis=None)
xmax = act_frac_sys[:, [0,2]].max(axis=None)
ymin = act_frac_sys[:, [1,3]].min(axis=None)
ymax = act_frac_sys[:, [1,3]].max(axis=None)
interval_x = (xmax - xmin) / number_partitions_x
interval_y = (ymax - ymin) / number_partitions_y
# Assume the maximum values found above define the domain.
# Then, to partition the domain:
partitions_x = np.zeros((number_partitions_x - 1, 4))
for i in range(1, number_partitions_x):
x_part = xmin + interval_x * i
partitions_x[i - 1, :] = np.array([x_part, ymin, x_part, ymax])
partitions_y = np.zeros((number_partitions_y - 1, 4))
for j in range(1, number_partitions_y):
y_part = ymin + interval_y * j
partitions_y[j - 1, :] = np.array([xmin, y_part, xmax, y_part])
# This array contains the partitioning lines, which can be used to find intersections.
# [x0, y0, x1, y1]
partitions = np.vstack((partitions_x, partitions_y))
# We use this little trick to make sure the subdomains are determined correctly.
act_frac_sys[np.where(act_frac_sys == xmax)] = xmax - 0.01
act_frac_sys[np.where(act_frac_sys == ymax)] = ymax - 0.01
# Variables used to store information in an array later in the program.
old_index = 0
new_index = 0
subdomain_sys = np.transpose([np.floor((act_frac_sys[:, 0] - xmin) / interval_x),
np.floor((act_frac_sys[:, 1] - ymin) / interval_y),
np.floor((act_frac_sys[:, 2] - xmin) / interval_x),
np.floor((act_frac_sys[:, 3] - ymin) / interval_y)])
# Change back what we did to get the right subdomains
act_frac_sys[np.where(act_frac_sys == xmax - 0.01)] = xmax
act_frac_sys[np.where(act_frac_sys == ymax - 0.01)] = ymax
fracs_to_part_x = np.where(subdomain_sys[:, 0] != subdomain_sys[:, 2])[0]
fracs_to_part_y = np.where(subdomain_sys[:, 1] != subdomain_sys[:, 3])[0]
# An array of indices referring to fractures that must be split due to partitioning.
fracs_to_part = np.union1d(fracs_to_part_x, fracs_to_part_y)
part_frac_sys = act_frac_sys[fracs_to_part]
part_frac_subdomains = subdomain_sys[fracs_to_part]
# CHECK
tot_new_fracs = np.sum(np.abs(subdomain_sys[fracs_to_part, 2] - subdomain_sys[fracs_to_part, 0]) + \
np.abs(subdomain_sys[fracs_to_part, 3] - subdomain_sys[fracs_to_part, 1]), dtype=int) + len(fracs_to_part)
# Array where all newly found partitioned fractures will be stored. The number of rows is pretty arbitrary.
part_fracs = np.zeros((tot_new_fracs, 5))
# Arrays where all information is stored to, in the end, form frac_order_vec_list.
part_frac_order_vec = np.zeros(tot_new_fracs)
# To clear some memory, the subdomains which are in part_frac_subdomains can now be deleted from the original array.
subdomain_sys = np.delete(subdomain_sys, fracs_to_part, axis=0)
ii = -1
for ii_frac in part_frac_sys:
ii += 1
# The subdomains of points in this fracture
ii_subdomains = part_frac_subdomains[ii, :]
# Number of partition lines this fracture crosses, derived from its subdomain indices.
num_ints = int(abs(ii_subdomains[2] - ii_subdomains[0]) + abs(ii_subdomains[3] - ii_subdomains[1]))
part_int = np.zeros((num_ints, 2))
# Counts the amount of intersections between the given ii fracture and all partitioning lines.
int_counter = 0
# Partition IDs. [subdomain xmin, subdomain xmax, subdomain ymin, subdomain ymax]
# (an offset was added to subdomains of y to establish the difference between x and y)
partition_ids = [int(min(ii_subdomains[0], ii_subdomains[2])),
int(max(ii_subdomains[0], ii_subdomains[2])),
int(number_partitions_x - 1 + min(ii_subdomains[1], ii_subdomains[3])),
int(number_partitions_x - 1 + max(ii_subdomains[1], ii_subdomains[3]))]
# x partitions
for jj_part in partitions[partition_ids[0]:partition_ids[1]]:
t, s, int_coord = find_parametric_intersect(ii_frac, jj_part)
if (t >= (0 - tolerance_intersect) and t <= (1 + tolerance_intersect)) and \
(s >= (0 - tolerance_intersect) and s <= (1 + tolerance_intersect)):
# Only store intersections of segments that don't already share a node:
if not (np.linalg.norm(ii_frac[:2] - jj_part[:2]) < tolerance_intersect or
np.linalg.norm(ii_frac[:2] - jj_part[2:]) < tolerance_intersect or
np.linalg.norm(ii_frac[2:] - jj_part[:2]) < tolerance_intersect or
np.linalg.norm(ii_frac[2:] - jj_part[2:]) < tolerance_intersect):
# Store the intersection coordinates in part_int
part_int[int_counter, :] = np.array([int_coord[0], int_coord[1]])
int_counter += 1
# y partitions
for jj_part in partitions[partition_ids[2]:partition_ids[3]]:
t, s, int_coord = find_parametric_intersect(ii_frac, jj_part)
if (t >= (0 - tolerance_intersect) and t <= (1 + tolerance_intersect)) and \
(s >= (0 - tolerance_intersect) and s <= (1 + tolerance_intersect)):
# Only store intersections of segments that don't already share a node:
if not (np.linalg.norm(ii_frac[:2] - jj_part[:2]) < tolerance_intersect or
np.linalg.norm(ii_frac[:2] - jj_part[2:]) < tolerance_intersect or
np.linalg.norm(ii_frac[2:] - jj_part[:2]) < tolerance_intersect or
np.linalg.norm(ii_frac[2:] - jj_part[2:]) < tolerance_intersect):
# Store the intersection coordinates in part_int
part_int[int_counter, :] = np.array([int_coord[0], int_coord[1]])
int_counter += 1
# Add x0 and y0 of fracture ii to start of part_int, and x1 and y1 to the end of it.
part_int = np.vstack((np.vstack((ii_frac[:2], part_int)), ii_frac[2:]))
# Sort on x values
part_int = part_int[np.lexsort((part_int[:, 1], part_int[:, 0]))]
# Initialization of the array that will contain the information about the new fractures.
new_fracs = np.zeros((num_ints+1, 5))
for mm in range(0, num_ints + 1):
x0, y0, x1, y1 = part_int[mm, 0], part_int[mm, 1], part_int[mm + 1, 0], part_int[mm + 1, 1]
# This is how we find out in which subdomain the fracture will be. We add this ID to new_fracs
subdomain_id = math.floor((((x0 + x1) / 2) - xmin) / interval_x) + \
math.floor((((y0 + y1) / 2) - ymin) / interval_y) * number_partitions_x
new_fracs[mm, :] = np.array([x0, y0, x1, y1, subdomain_id])
import os
import sys
import h5py
import numpy as np
from datetime import datetime
import gfunc2d.gridtools as gt
from gfunc2d.marg_mu import marginalise_mu as margm
from gfunc2d.marg_mu import marginalise_mu_simple as margm2
from gfunc2d.gplot import loglik_save, contour_save, hr_save
from gfunc2d.gstats import print_age_stats
from gfunc2d.utilities import is_color
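# Example of the fitparams format described in the docstring below (names and
# numbers are placeholders; valid parameter names are columns of the chosen grid):
#   fitparams = {'param_name': (value, uncertainty), ...}
#   g2D, ages, fehs = gfunc2d('/path/to/grid.h5', fitparams, alpha=0.0)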
def gfunc2d(isogrid, fitparams, alpha, isodict=None, margm_fast=True):
'''
Python version of the MATLAB script gFunc2D.m
<NAME>, 2017 Oct 27 (translated to Python)
<NAME>, Lund Observatory, 2016 Sep 15 (adapted to YY and plx)
<NAME>, 2016 Oct 4 (returning 2D G function)
- based on gFunc.m by <NAME>.
Calculates, for a single star, a 2D array of the G-function as a function
of age and metallicity.
Parameters
----------
isogrid : str
Name of the isochrone hdf5 grid (including the full path).
fitparams : dict
Dictionary consisting of the parameters to be fitted in the format
fitparams = {'param_name': (value, uncertainty), ...}.
alpha : float
Value of [alpha/Fe]. Must exist in the grid.
isodict : dict, optional
The isochrone hdf5 grid loaded as a dictionary using
gridtools.load_as_dict(). Supplying this dictionary is optional but it
speeds up the code significantly since the data has already been loaded
into the memory (very useful when looping this function over several
stars).
margm_fast : bool, optional
If fitting to the parallax ('plx' in fitparams), one can choose a fast
method for the marginalisation over the distance modulus by setting this
value to True. A slower (but slightly more exact) method is used otherwise.
Default value is True.
Returns
-------
g2D : array of float
2D array of the G-function as a function of age (rows) and metallicity
(columns).
ages : array of float
Array of ages in the rows of g2D.
fehs : array of float
Array of metallicities in the columns of g2D.
'''
### Some fixed parameters which could be given as input instead
# Exponent for power-law IMF
beta = 2.35
# Prior on the distance modulus
mu_prior = 10
# Statistical weight on mu_prior
mu_prior_w = 0.0
###
with h5py.File(isogrid, 'r') as gridfile:
# Get arrays of alpha, metallicities, and ages
alphas, fehs, ages = gt.get_afa_arrays(gridfile)
# Check that the chosen [alpha/Fe] is available in the grid
if alpha not in alphas:
raise ValueError('[alpha/Fe] = ' + str(alpha) +\
' not found in grid. ' +\
'Change alpha to one of the following: '+\
str(alphas))
# Check that the chosen fitparams are accommodated by the grid
# and add metadata (attribute) used in the fitting process.
fitparams, app_mag = gt.prepare_fitparams(gridfile, fitparams)
# The hdf5 grid is loaded into a python dictionary if the dictionary
# has not been loaded (and passed to this function) in advance.
if isodict is None:
isodict = gt.load_as_dict(gridfile, (alpha, alpha))
# Initialize g-function
g2D = np.zeros((len(ages), len(fehs)))
for i_feh, feh in enumerate(fehs):
for i_age, age in enumerate(ages):
g2D_i = 0
# Get the hdf5 path to the desired isochrone and pick out the
# isochrone
isopath = gt.get_isopath(alpha, feh, age)
iso_i = isodict[isopath]
# Get mass array and calculate the change in mass for each
# entry based on the two surrounding entries
masses = iso_i['Mini']
dm = (masses[2:] - masses[:-2]) / 2
# Pick out the values for which the change in mass is positive
pdm = dm > 0
masses = masses[1:-1][pdm]
dm = dm[pdm]
# Calculate total chi2 for all parameters but the distance modulus
# The parallax is skipped explicitly, and any magnitudes are
# skipped due to their attribute being 'mag' which is not handled.
chi2 = np.zeros(len(masses))
for param in fitparams:
if param == 'plx':
continue
obs_val, obs_unc, attr = fitparams[param]
if attr == 'none':
if param == 'logT' or param == 'logL':
iso_val = 10**iso_i[param][1:-1][pdm]
else:
iso_val = iso_i[param][1:-1][pdm]
chi2 += ((obs_val - iso_val)/obs_unc)**2
elif attr == 'color':
colors = param.split('-')
m1 = iso_i[colors[0]][1:-1][pdm]
m2 = iso_i[colors[1]][1:-1][pdm]
iso_val = m1 - m2
chi2 += ((obs_val - iso_val)/obs_unc)**2
# The value of the G-function is calculated based on all models
# with chi2 < 100 (if any)
low_chi2 = chi2 < 100
if any(low_chi2):
# Initial mass function (prior on mass)
phi_M = masses[low_chi2]**(-beta)
# chi2 and mass change for models with low X2
chi2 = chi2[low_chi2]
dm = dm[low_chi2]
# The array to be summed in order to marginalise over the
# mass
marg_m = dm * phi_M * np.exp(-0.5*chi2)
# At this step the marginalisation over the distance
# modulus is carried out for each model individually (i.e.
# for each mass) if an apparent magnitude is in fitparams.
if app_mag is not None:
lik_int_mu = np.ones(len(chi2))
# Get data
obs_mag, obs_unc = fitparams[app_mag][:2]
iso_mags = iso_i[app_mag][1:-1][pdm][low_chi2]
plx_obs, plx_unc = fitparams['plx'][:2]
# Define 3-sigma interval of distance modulus based on
# observed parallax
plx_int = [plx_obs-3*plx_unc, plx_obs+3*plx_unc]
mu_plx_int = [-5*np.log10(plx_int[1]/100),
-5*np.log10(plx_int[0]/100)]
import unittest
import math
import numpy as np
class TestUnittestAssertions(unittest.TestCase):
def test_slices(self):
print('--- 2D ARRAYS ---')
M = 7
N = 9
v = np.array(list(range(M * N)))
A = v.reshape((M, N))
A0 = v.reshape((M * N))
print('--- SINGLETON DIM 0 ---')
# slice = [0:M:1, 0:N:1], shape = [M, N], array_shape = [M, N]
# slice = [j*N:(j+1)*N:1], shape = [N], array_shape = [M*N]
for j in range(M):
X = A0[j*N:(j+1)*N:1]
assert np.all(X == A[j, :])
assert np.shape(X) == (N,)
print('--- SINGLETON DIM 0 WITH SLICE---')
# p = ceil((b-a)/c)
# slice = [0:M:1, a:b:c], shape = [M, p], array_shape = [M, N]
# slice = [a+j*N:b+j*N:c], shape = [p], array_shape = [M*N]
b = M - 1
c = 3
for a in range(1,b):
p = math.ceil((b-a)/c)
for j in range(M):
X = A0[a+j*N:b+j*N:c]
assert np.all(X == A[:, a:b:c][j, :])
assert np.shape(X) == (p,)
print('--- SINGLETON DIM 1 ---')
# slice = [0:M:1, 0:N:1], shape = [M, N], array_shape = [M, N]
# slice = [j:j+M*N:N], shape = [M], array_shape = [M*N]
for j in range(N):
X = A0[j:j+M*N:N]
assert np.all(X == A[:, j])
assert np.shape(X) == (M,)
print('--- SINGLETON DIM 1 WITH SLICE ---')
# P = ceil((b-a)/c)
# slice = [a:b:c, 0:N:1], shape = [P, N], array_shape = [M, N]
# slice = [a*N+j:b*N+j:c*N], shape = [P], array_shape = [M*N]
b = M - 1
c = 3
a = 1
p = math.ceil((b-a)/c)
for j in range(N):
X = A0[a*N+j:b*N+j:c*N]
assert np.all(X == A[a:b:c, :][:, j])
assert np.shape(X) == (p,)
"""
author: <NAME>
"""
import numpy as np
import time
import copy
from numba import njit
from numba.typed import List
from gglasso.solver.ggl_helper import phiplus, prox_od_1norm, prox_2norm, prox_rank_norm
from gglasso.helper.ext_admm_helper import check_G
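# Minimal call sketch (hypothetical data; G must be built with the helpers in
# gglasso.helper.ext_admm_helper and S needs integer keys 0,...,K-1):
#   Omega_0 = {k: np.eye(p[k]) for k in range(K)}
#   sol, info = ext_ADMM_MGL(S, lambda1=0.05, lambda2=0.01, reg='GGL',
#                            Omega_0=Omega_0, G=G)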
def ext_ADMM_MGL(S, lambda1, lambda2, reg , Omega_0, G,\
X0 = None, X1 = None, tol = 1e-5 , rtol = 1e-4, stopping_criterion = 'boyd',\
rho= 1., max_iter = 1000, verbose = False, measure = False, latent = False, mu1 = None):
"""
This is an ADMM algorithm for solving the Group Graphical Lasso problem
where not all instances have the same number of dimensions, i.e. some variables are present in some instances and not in others.
A group sparsity penalty is applied to all pairs of variables present in multiple instances.
IMPORTANT: As the arrays are non-conforming in dimensions here, we operate on dictionaries with keys 0,..,K-1 (as int) and each value is an array of shape :math:`(p_k,p_k)`.
If ``latent=False``, this function solves
.. math::
\min_{\Omega,\Theta,\Lambda} \sum_{k=1}^K - \log \det(\Omega^{(k)}) + \mathrm{Tr}(S^{(k)}\Omega^{(k)}) + \sum_{k=1}^K \lambda_1 ||\Theta^{(k)}||_{1,od}
+ \sum_{l} \lambda_2 \\beta_l ||\Lambda_{[l]}||_2
s.t. \quad \Omega^{(k)} = \Theta^{(k)} \quad k=1,\dots,K
\quad \quad \Lambda^{(k)} = \Theta^{(k)} \quad k=1,\dots,K
where l indexes the groups of overlapping variables and :math:`\Lambda_{[l]}` is the array of all respective components.
To account for differing group sizes we multiply with :math:`\\beta_l`, the square root of the group size.
If ``latent=True``, this function solves
.. math::
\min_{\Omega,\Theta,\Lambda,L} \sum_{k=1}^K - \log \det(\Omega^{(k)}) + \mathrm{Tr}(S^{(k)}\Omega^{(k)}) + \sum_{k=1}^K \lambda_1 ||\Theta^{(k)}||_{1,od}
+ \sum_{l} \lambda_2 \\beta_l ||\Lambda_{[l]}||_2 +\sum_{k=1}^{K} \mu_{1,k} \|L^{(k)}\|_{\star}
s.t. \quad \Omega^{(k)} = \Theta^{(k)} - L^{(k)} \quad k=1,\dots,K
\quad \quad \Lambda^{(k)} = \Theta^{(k)} \quad k=1,\dots,K
Note:
* Typically, ``sol['Omega']`` is positive definite and ``sol['Theta']`` is sparse.
* We use scaled ADMM, i.e. X0 and X1 are the scaled (with 1/rho) dual variables for the equality constraints.
Parameters
----------
S : dict
empirical covariance matrices. S should have keys 0,..,K-1 (as integers) and S[k] contains the :math:`(p_k,p_k)`-array of the empirical cov. matrix of the k-th instance.
Each S[k] needs to be symmetric and positive semidefinite.
lambda1 : float, positive
sparsity regularization parameter.
lambda2 : float, positive
group sparsity regularization parameter.
reg : str
so far only Group Graphical Lasso is available, hence choose 'GGL'.
Omega_0 : dict
starting point for the Omega variable. Should be of same form as S. If no better starting point is available, choose
Omega_0[k] = np.eye(p_k) for k=0,...,K-1
G : array
bookkeeping array which contains information about where the respective entries for each group can be found.
X0 : dict, optional
starting point for the X0 variable. If not specified, it is set to zeros.
X1 : dict, optional
starting point for the X1 variable. If not specified, it is set to zeros.
rho : float, positive, optional
step size parameter for the augmented Lagrangian in ADMM. The default is 1. Tune this parameter for optimal performance.
max_iter : int, optional
maximum number of iterations. The default is 1000.
tol : float, positive, optional
tolerance for the primal residual. See "Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers", Boyd et al. for details.
The default is 1e-5.
rtol : float, positive, optional
tolerance for the dual residual. The default is 1e-4.
stopping_criterion : str, optional
* 'boyd': Stopping criterion after Boyd et al.
* 'kkt': KKT residual is chosen as stopping criterion. This is computationally expensive to compute.
The default is 'boyd'.
verbose : boolean, optional
verbosity of the solver. The default is False.
measure : boolean, optional
turn on/off measurements of runtime per iteration. The default is False.
latent : boolean, optional
Solve the GGL problem with or without latent variables (see above for the exact formulations).
The default is False.
mu1 : float, positive, optional
low-rank regularization parameter, possibly different for each instance k=1,..,K. Only needs to be specified if latent=True.
Returns
-------
sol : dict
contains the solution, i.e. Omega, Theta, X0, X1 (and L if latent=True) after termination. All elements are dictionaries with keys 0,..,K-1 and (p_k,p_k)-arrays as values.
info : dict
status and measurement information from the solver.
"""
K = len(S.keys())
p = np.zeros(K, dtype= int)
for k in np.arange(K):
p[k] = S[k].shape[0]
if type(lambda1) == np.float64 or type(lambda1) == float:
lambda1 = lambda1*np.ones(K)
if latent:
if type(mu1) == np.float64 or type(mu1) == float:
mu1 = mu1*np.ones(K)
assert mu1 is not None
assert np.all(mu1 > 0)
assert min(lambda1.min(), lambda2) > 0
assert reg in ['GGL']
check_G(G, p)
assert rho > 0, "ADMM penalization parameter must be positive."
# initialize
Omega_t = Omega_0.copy()
Theta_t = Omega_0.copy()
L_t = dict()
for k in np.arange(K):
L_t[k] = np.zeros((p[k],p[k]))
# helper and dual variables
Lambda_t = Omega_0.copy()
Z_t = dict()
if X0 is None:
X0_t = dict()
for k in np.arange(K):
X0_t[k] = np.zeros((p[k],p[k]))
else:
X0_t = X0.copy()
if X1 is None:
X1_t = dict()
for k in np.arange(K):
X1_t[k] = np.zeros((p[k],p[k]))
else:
X1_t = X1.copy()
runtime = np.zeros(max_iter)
residual = np.zeros(max_iter)
status = ''
if verbose:
print("------------ADMM Algorithm for Multiple Graphical Lasso----------------")
if stopping_criterion == 'boyd':
hdr_fmt = "%4s\t%10s\t%10s\t%10s\t%10s"
out_fmt = "%4d\t%10.4g\t%10.4g\t%10.4g\t%10.4g"
print(hdr_fmt % ("iter", "r_t", "s_t", "eps_pri", "eps_dual"))
elif stopping_criterion == 'kkt':
hdr_fmt = "%4s\t%10s"
out_fmt = "%4d\t%10.4g"
print(hdr_fmt % ("iter", "kkt residual"))
##################################################################
### MAIN LOOP STARTS
##################################################################
for iter_t in np.arange(max_iter):
if measure:
start = time.time()
# Omega Update
Omega_t_1 = Omega_t.copy()
for k in np.arange(K):
W_t = Theta_t[k] - L_t[k] - X0_t[k] - (1/rho) * S[k]
eigD, eigQ = np.linalg.eigh(W_t)
Omega_t[k] = phiplus(beta = 1/rho, D = eigD, Q = eigQ)
# Theta Update
for k in np.arange(K):
V_t = (Omega_t[k] + L_t[k] + X0_t[k] + Lambda_t[k] - X1_t[k]) * 0.5
Theta_t[k] = prox_od_1norm(V_t, lambda1[k]/(2*rho))
#L Update
if latent:
for k in np.arange(K):
C_t = Theta_t[k] - X0_t[k] - Omega_t[k]
C_t = (C_t.T + C_t)/2
eigD, eigQ = np.linalg.eigh(C_t)
L_t[k] = prox_rank_norm(C_t, mu1[k]/rho, D = eigD, Q = eigQ)
# Lambda Update
Lambda_t_1 = Lambda_t.copy()
for k in np.arange(K):
Z_t[k] = Theta_t[k] + X1_t[k]
Lambda_t = prox_2norm_G(Z_t, G, lambda2/rho)
# X Update
for k in np.arange(K):
# -*- coding: utf-8 -*-
import os
import sys
import h5py
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from presto.utils import rotate_opt
rcParams['figure.dpi'] = 108.8
if len(sys.argv) == 2:
load_filename = sys.argv[1]
print(f"Loading: {os.path.realpath(load_filename)}")
else:
load_filename = None
def load(load_filename):
with h5py.File(load_filename, "r") as h5f:
num_averages = h5f.attrs["num_averages"]
control_freq = h5f.attrs["control_freq"]
control_if = h5f.attrs["control_if"]
readout_freq = h5f.attrs["readout_freq"]
readout_duration = h5f.attrs["readout_duration"]
control_duration = h5f.attrs["control_duration"]
readout_amp = h5f.attrs["readout_amp"]
control_amp_90 = h5f.attrs["control_amp_90"]
control_amp_180 = h5f.attrs["control_amp_180"]
sample_duration = h5f.attrs["sample_duration"]
nr_delays = h5f.attrs["nr_delays"]
dt_delays = h5f.attrs["dt_delays"]
wait_delay = h5f.attrs["wait_delay"]
readout_sample_delay = h5f.attrs["readout_sample_delay"]
t_arr = h5f["t_arr"][()]
store_arr = h5f["store_arr"][()]
source_code = h5f["source_code"][()]
print(f"Control frequency: {control_freq / 1e9:.2f} GHz")
t_low = 1500 * 1e-9
t_high = 2000 * 1e-9
t_span = t_high - t_low
idx_low = np.argmin(np.abs(t_arr - t_low))
idx_high = np.argmin(np.abs(t_arr - t_high))
idx = np.arange(idx_low, idx_high)
nr_samples = len(idx)
# Plot raw store data for first iteration as a check
fig1, ax1 = plt.subplots(2, 1, sharex=True, tight_layout=True)
ax11, ax12 = ax1
ax11.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
ax12.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
ax11.plot(1e9 * t_arr, np.abs(store_arr[0, 0, :]))
ax12.plot(1e9 * t_arr, np.angle(store_arr[0, 0, :]))
ax12.set_xlabel("Time [ns]")
fig1.show()
# Analyze T2
resp_arr = np.mean(store_arr[:, 0, idx], axis=-1)
data = rotate_opt(resp_arr)
delay_arr = dt_delays * np.arange(nr_delays)
# SPDX-FileCopyrightText: Copyright 2021, <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
import numpy
import scipy.optimize
from functools import partial
from .._utilities.plot_utilities import * # noqa: F401, F403
from .._utilities.plot_utilities import load_plot_settings, save_plot, plt
# =================
# Direct Likelihood
# =================
class DirectLikelihood(object):
# ==============
# log likelihood
# ==============
@staticmethod
def log_likelihood(z, X, K_mixed, sign_switch, hyperparam):
"""
Here we use the direct parameters, sigma and sigma0.
sign_switch changes the sign of the output from lp to -lp. When True,
this is used for minimizing (instead of maximizing) the negative of the
log-likelihood function.
"""
# hyperparameters
sigma = hyperparam[0]
sigma0 = hyperparam[1]
n, m = X.shape
# S is the (sigma**2) * K + (sigma0**2) * I, but we don't construct it.
# Instead, we construct Kn = K + eta I, where eta = (sigma0 / sigma)**2
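# Since S = sigma**2 * Kn, it follows that
# logdet(S) = n*log(sigma**2) + logdet(Kn), which is what is computed below.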
tol = 1e-8
if numpy.abs(sigma) < tol:
# Ignore (sigma**2 * K) compared to (sigma0**2 * I) term.
logdet_S = n * numpy.log(sigma0**2)
Y = X / sigma0**2
else:
eta = (sigma0 / sigma)**2
logdet_Kn = K_mixed.logdet(eta)
logdet_S = n * numpy.log(sigma**2) + logdet_Kn
"""
Prepare data for Part-GPNN model.
Need:
Node feature at different scales
Edge feature for valid edges
Adjacency matrix GT (parse graph GT)
Edge weight (corresponds to node level)
Edge label GT
"""
import json
import os
import pickle
import warnings
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import cv2
import feature_model
import metadata
import skimage.io
import torch
import torchvision
part_ids = {'Torso': [1, 2],
'Right Hand': [3],
'Left Hand': [4],
'Left Foot': [5],
'Right Foot': [6],
'Upper Leg Right': [7, 9],
'Upper Leg Left': [8, 10],
'Lower Leg Right': [11, 13],
'Lower Leg Left': [12, 14],
'Upper Arm Left': [15, 17],
'Upper Arm Right': [16, 18],
'Lower Arm Left': [19, 21],
'Lower Arm Right': [20, 22],
'Head': [23, 24],
'Upper Body': [1, 2, 3, 4, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
'Lower Body': [5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
'Left Arm': [4, 15, 17, 19, 21],
'Right Arm': [3, 16, 18, 20, 22],
'Left Leg': [5, 8, 10, 12, 14],
'Right Leg': [6, 7, 9, 11, 13],
'Full Body': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
}
__PART_WEIGHT_L1 = 0.1 # hand
__PART_WEIGHT_L2 = 0.3 # arm
__PART_WEIGHT_L3 = 0.5 # upper body
__PART_WEIGHT_L4 = 1.0 # human
part_weights = {'Torso': __PART_WEIGHT_L1,
'Right Hand': __PART_WEIGHT_L1,
'Left Hand': __PART_WEIGHT_L1,
'Left Foot': __PART_WEIGHT_L1,
'Right Foot': __PART_WEIGHT_L1,
'Upper Leg Right': __PART_WEIGHT_L1,
'Upper Leg Left': __PART_WEIGHT_L1,
'Lower Leg Right': __PART_WEIGHT_L1,
'Lower Leg Left': __PART_WEIGHT_L1,
'Upper Arm Left': __PART_WEIGHT_L1,
'Upper Arm Right': __PART_WEIGHT_L1,
'Lower Arm Left': __PART_WEIGHT_L1,
'Lower Arm Right': __PART_WEIGHT_L1,
'Head': __PART_WEIGHT_L1,
'Upper Body': __PART_WEIGHT_L3,
'Lower Body': __PART_WEIGHT_L3,
'Left Arm': __PART_WEIGHT_L2,
'Right Arm': __PART_WEIGHT_L2,
'Left Leg': __PART_WEIGHT_L2,
'Right Leg': __PART_WEIGHT_L2,
'Full Body': __PART_WEIGHT_L4
}
part_names = list(part_ids.keys())
part_graph = {'Torso': [],
'Right Hand': [],
'Left Hand': [],
'Left Foot': [],
'Right Foot': [],
'Upper Leg Right': [],
'Upper Leg Left': [],
'Lower Leg Right': [],
'Lower Leg Left': [],
'Upper Arm Left': [],
'Upper Arm Right': [],
'Lower Arm Left': [],
'Lower Arm Right': [],
'Head': [],
'Upper Body': ['Head', 'Torso', 'Left Arm', 'Right Arm'],
'Lower Body': ['Left Leg', 'Right Leg'],
'Left Arm': ['Upper Arm Left', 'Lower Arm Left', 'Left Hand'],
'Right Arm': ['Upper Arm Right', 'Lower Arm Right', 'Right Hand'],
'Left Leg': ['Upper Leg Left', 'Lower Leg Left', 'Left Foot'],
'Right Leg': ['Upper Leg Right', 'Lower Leg Right', 'Right Foot'],
'Full Body': ['Head', 'Torso', 'Upper Body', 'Lower Body']
}
def get_intersection(box1, box2):
return np.hstack((np.maximum(box1[:2], box2[:2]), np.minimum(box1[2:], box2[2:])))
def compute_area(box):
side1 = box[2]-box[0]
side2 = box[3]-box[1]
if side1 > 0 and side2 > 0:
return side1 * side2
else:
return 0.0
def compute_iou(box1, box2):
intersection_area = compute_area(get_intersection(box1, box2))
iou = intersection_area / (compute_area(box1) + compute_area(box2) - intersection_area)
return iou
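# Worked example (illustrative): for box1 = [0, 0, 2, 2] and box2 = [1, 0, 3, 2],
# the intersection is [1, 0, 2, 2] with area 2 and both boxes have area 4, so
# compute_iou(box1, box2) = 2 / (4 + 4 - 2) = 1/3.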
def get_node_index(bbox, det_boxes):
bbox = np.array(bbox, dtype=np.float32)
max_iou = 0.5 # Use 0.5 as a threshold for evaluation
max_iou_index = -1
for i_node in range(len(det_boxes)):
# check bbox overlap
iou = compute_iou(bbox, det_boxes[i_node])
if iou > max_iou:
max_iou = iou
max_iou_index = i_node
return max_iou_index
def combine_box(box1, box2):
return np.hstack((np.minimum(box1[:2], box2[:2]), np.maximum(box1[2:], box2[2:])))
#! usr/bin/env python
import copy
from itertools import product as cartesian
import warnings
import numpy as np
import numexpr as ne
import calib.gen as gen
import calib.data as data
import calib.misc as misc
from calib.misc import default, push_random_state
from calib.misc import multiget as get
def transition_cx_change(prev_cs, prev_cf, next_cs, next_cf):
"""
Computes the capacitors that changed from a feedback position to a
feedforward position, the ones that did the opposite during a
configuration transition and the ones that kept their positions.
:param prev_cs: Feedforward capacitor indexes of the previous
configuration.
:type prev_cs: :class:`numpy.array`
:param prev_cf: Feedback capacitor indexes of the previous configuration.
:type prev_cf: :class:`numpy.array`
:param next_cs: Feedforward capacitor indexes of the next configuration.
:type next_cs: :class:`numpy.array`
:param next_cf: Feedback capacitor indexes of the next configuration.
:type next_cf: :class:`numpy.array`
:returns: The indexes of the capacitors that changed from the feedforward to
feedback position (cs_cf). The indexes of the capacitors that
changed from the feedback to feedforward position (cf_cs). The indexes
of the feedforward capacitors that kept their positions (cs_cs) and the
indexes of the feedback capacitors that kept their positions (cf_cf).
The result order is (cs_cf, cf_cs, cs_cs, cf_cf,).
The shape of each variable is:
* shape(cs_cf) = (n_conf, n_cs_prev, n_diff)
* shape(cf_cs) = (n_conf, n_cs_next, n_diff)
* shape(cs_cs) = (n_conf, n_cs_next, n_diff)
* shape(cf_cf) = (n_conf, n_cf_next, n_diff)
:rtype: (:class:`numpy.ndarray`, :class:`numpy.ndarray`,
:class:`numpy.ndarray`, :class:`numpy.ndarray`,)
"""
def intersect(source, reference, axis):
result = np.copy(source)
ogrid = tuple(np.ogrid[tuple(slice(ss) for ss in np.shape(source))])
def ogrid_idx(idx):
idx = np.reshape(idx, (1,)*len(np.shape(source)))
return ogrid[:axis] + (idx,) + ogrid[(axis+1):]
for ii in range(np.size(source, axis)):
idx = ogrid_idx(ii)
mask = source[idx] == reference
mask = np.any(mask, axis=axis, keepdims=True)
mask = np.logical_not(mask) # Elements not in the row
sub_view = result[idx]
sub_view[mask] = -1
result[idx] = sub_view
return result
cs_cf = intersect(prev_cs, next_cf, -2)
cf_cs = intersect(next_cs, prev_cf, -2)
cs_cs = intersect(next_cs, prev_cs, -2)
cf_cf = intersect(next_cf, prev_cf, -2)
return cs_cf, cf_cs, cs_cs, cf_cf
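# Minimal usage sketch added for illustration (toy index arrays, not taken from
# the original test suite). Shapes follow the docstring: (n_conf, n_cx, n_diff).
def _demo_transition_cx_change():
    prev_cs = np.array([[[0], [1]]])  # capacitors 0 and 1 feed forward
    prev_cf = np.array([[[2]]])       # capacitor 2 is in feedback
    next_cs = np.array([[[1], [2]]])  # capacitor 2 moves to feedforward
    next_cf = np.array([[[0]]])       # capacitor 0 moves to feedback
    cs_cf, cf_cs, cs_cs, cf_cf = transition_cx_change(prev_cs, prev_cf,
                                                      next_cs, next_cf)
    # Indexes that do not match the transition described by each output are
    # masked with -1, e.g. cs_cf == [[[0], [-1]]] and cf_cs == [[[-1], [2]]].
    return cs_cf, cf_cs, cs_cs, cf_cf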
class StageTestbench(data.JsonData):
@property
def stages(self):
return self._stages
@property
def ins(self):
return self._ins
@property
def shape(self):
return self._shape
@property
def conf_shape(self):
return np.shape(self.configuration_sequence.data)
@property
def is_scalar(self):
return len(self.shape) + len(self.conf_shape) == 0
@property
def configuration_sequence(self):
return self._configuration_sequence
@classmethod
def Scalar(cls, stage, ins, configuration_sequence, data_location=None):
return cls(stage, ins, configuration_sequence, shape=tuple(), data_location=data_location)
def __init__(self, stages, ins, configuration_sequence, shape=None, data_location=None):
super().__init__(data_location)
NestedStages = data.nested_lists_of(gen.StageParameters)
NestedSequences = data.nested_lists_of(gen.ConfigurationSequence)
if not isinstance(configuration_sequence, NestedSequences):
conf_shape = np.shape(configuration_sequence)
configuration_sequence = NestedSequences(configuration_sequence, len(conf_shape))
conf_shape = np.shape(configuration_sequence.data)
if not isinstance(stages, NestedStages):
shape = default(shape, np.shape(stages))
dims = len(shape)
cur = stages
root_arr = stages
# Check a valid 0,0,0...
for dd in range(dims):
assert isinstance(cur, (tuple, list,))
cur = cur[0]
# Check elements
for idx in cartesian(*tuple(range(ss) for ss in shape)):
assert isinstance(misc.getitem(stages, idx), gen.StageParameters)
# Check regularity
def rec_check(lst, shape):
valid = (not isinstance(lst, list) and len(shape) == 0) or len(lst) == shape[0]
if len(shape) > 1:
sub_shape = shape[1:]
valid = valid and all([rec_check(llst, sub_shape) for llst in lst])
return valid
assert rec_check(stages, shape)
else:
stages_shape = np.shape(stages.data)
shape = default(shape, stages_shape)
assert shape == stages_shape
dims = len(shape)
root_arr = stages.data
ref_element = misc.getitem(root_arr, (0,)*dims)
ins = data.at_least_ndarray(ins)
if len(np.shape(ins)) == 1:
ins = ins[..., np.newaxis]
# Broadcast ins
if len(np.shape(ins)) == 2:
ins = ins[(np.newaxis,)*dims + (Ellipsis,)]
ins = np.tile(ins, shape + (1, 1,))
assert len(np.shape(ins)) == dims + 2
if np.size(ins, -1) != ref_element.meta.n_diff:
cm = ref_element.meta.common_mode
ins = np.concatenate((cm-ins, cm+ins,), axis=1)
# All meta the same
for idx in cartesian(*tuple(range(ss) for ss in shape)):
c_element = misc.getitem(root_arr, idx)
assert c_element.meta == ref_element.meta
self._stages = NestedStages.EnsureIsInstance(stages)
self._shape = shape
self._ins = ins
self._configuration_sequence = configuration_sequence
def _to_json_dict(self, path_context, memo=None):
dct = {}
dct["stages"] = self.stages.save(path_context, memo=memo)
if np.size(self.ins) == 0:
dct["ins_shape"] = np.shape(self.ins)
dct["ins"] = data.at_least_numpydata(self.ins).save(path_context, memo=memo)
dct["configuration_sequence"] = self.configuration_sequence.save(path_context, memo=memo)
dct["shape"] = self.shape
return dct
@classmethod
def _JsonDictToArgs(cls, path_context, data_location, dct, memo=None):
_, args, kwargs = super()._JsonDictToArgs(path_context, data_location, dct, memo=memo)
StagesType = data.nested_lists_of(gen.StageParameters)
ConfType = data.nested_lists_of(gen.ConfigurationSequence)
args.append(StagesType.Load(path_context, dct["stages"], memo=memo))
ins = data.NumpyData.Load(path_context, dct["ins"], memo=memo)
try:
shape = dct["ins_shape"]
ins = np.reshape(ins, shape)
except KeyError:
pass
args.append(ins)
args.append(ConfType.Load(path_context, dct["configuration_sequence"], memo=memo))
kwargs["shape"] = None if dct["shape"] is None else tuple(dct["shape"])
return cls, args, kwargs
def iter_idx(self):
return cartesian(self.iter_conf_idx(), self.iter_stages_idx())
def iter_conf_idx(self):
if len(self.conf_shape) == 0:
yield tuple()
else:
for idx in cartesian(*tuple(range(ss) for ss in self.conf_shape)):
yield idx
def iter_stages_idx(self):
if len(self.shape) == 0:
yield tuple()
else:
for idx in cartesian(*tuple(range(ss) for ss in self.shape)):
yield idx
def as_scalars(self):
result = np.zeros(self.conf_shape + self.shape, dtype=object)
for idx_conf, idx_stage in self.iter_idx():
ins = self.ins[idx_stage + (Ellipsis,)]
stage = self.stages[idx_stage]
conf = self.configuration_sequence[idx_conf]
result[idx_conf + idx_stage] = StageTestbench.Scalar(stage, ins, conf)
ResultType = data.nested_lists_of(StageTestbench)
return ResultType(result.tolist(), dims=len(np.shape(result)))
def simulation_args(self, conf_idx):
shape = self.shape
sample_idx = next(self.iter_stages_idx())
sample_stage = self.stages[sample_idx]
eff_shape = np.shape(sample_stage.eff)
caps_shape = np.shape(sample_stage.caps)
refs_shape = np.shape(sample_stage.refs)
thres_shape = np.shape(sample_stage.thres)
cm_shape = np.shape(sample_stage.common_mode)
eff = np.zeros(shape + eff_shape)
caps = np.zeros(shape + caps_shape)
refs = np.zeros(shape + refs_shape)
thres = np.zeros(shape + thres_shape)
cm = np.zeros(shape + cm_shape)
for idx in self.iter_stages_idx():
stage = self.stages[idx]
eff[idx + (Ellipsis,)] = stage.eff
caps[idx + (Ellipsis,)] = stage.caps
refs[idx + (Ellipsis,)] = stage.refs
thres[idx + (Ellipsis,)] = stage.thres
cm[idx + (Ellipsis,)] = stage.common_mode
ins = self.ins
cal_seq = self.configuration_sequence[conf_idx]
return eff, caps, refs, thres, ins, cm, cal_seq
def simulate(self, simulator, raise_=False):
conf_shape = self.conf_shape
if len(conf_shape) == 0:
return simulator.simulate(*self.simulation_args(tuple()), raise_=raise_)
else:
codes = np.full(conf_shape, None)
us = np.full(conf_shape, None)
for conf_idx in self.iter_conf_idx():
code, u = simulator.simulate(*self.simulation_args(conf_idx), raise_=raise_)
codes[conf_idx] = code
us[conf_idx] = u
codes = np.array(codes.tolist(), dtype=int)
us = np.array(us.tolist())
transpose_idx = len(conf_shape)
transpose_idx = ( (transpose_idx,)
+ tuple(range(0, transpose_idx))
+ tuple(range(transpose_idx + 1, len(np.shape(codes)))) )
codes = np.transpose(codes, transpose_idx)
us = np.transpose(us, transpose_idx)
return codes, us
def sweep_parameters(self, sweep_dicts):
def sweep(dct):
sw_type = dct.get("type", "linear")
if sw_type == "linear":
values = np.linspace(dct["start"], dct["end"], dct["samples"])
elif sw_type == "log":
values = np.logspace(dct["start"], dct["end"], dct["samples"])
else:
raise ValueError("sweep type {} not recognized.".format(sw_type))
def gen_dict(value):
copy_keys = ("parameter", "index",)
result = {key: dct[key] for key in copy_keys}
result["value"] = value
return result
return [gen_dict(value) for value in values]
values_axes = tuple(sweep(dct) for dct in sweep_dicts)
shape = tuple(len(axis) for axis in values_axes)
ins = np.zeros(shape + self.shape, dtype=int)
stages = np.zeros(shape + self.shape, dtype=object)
for idx in cartesian(*tuple(range(ss) for ss in shape)):
val = tuple(vals[ii] for ii, vals in zip(idx, values_axes))
this_stages = copy.deepcopy(self.stages)
in_ = np.array(self.ins)
new_val = tuple()
for vall in val:
if vall["parameter"] == "test":
in_[(Ellipsis, vall["index"], slice(None),)] = vall["value"]
else:
new_val = (vall,) + new_val
for sub_idx in this_stages.iter_idx():
this_stages[idx] = this_stages[idx].create_modified(new_val)
ins[idx + (Ellipsis,)] = in_
stages[idx + (Ellipsis,)] = this_stages
return values_axes, StageTestbench(stages.tolist(), ins,
self.configuration_sequence, shape=shape + self.shape)
class Simulator(data.JsonData):
@property
def seed(self):
return self._seed
@property
def ref_snr(self):
return self._ref_snr
@property
def thres_snr(self):
return self._thres_snr
@property
def in_snr(self):
return self._in_snr
@property
def u_history(self):
return self._u_history
def __init__(self, seed, ref_snr=0, thres_snr=0, in_snr=0, u_history=True, data_location=None):
super().__init__(data_location)
self._seed = seed
self._u_history = u_history
self._ref_snr = ref_snr
self._thres_snr = thres_snr
self._in_snr = in_snr
with push_random_state() as state_store:
np.random.seed(self._seed)
self._random_state = state_store
def _to_json_dict(self, path_context, memo=None):
dct = {}
dct["u_history"] = self.u_history
dct["seed"] = self.seed
dct["in_snr"] = data.at_least_numpydata(self.in_snr).save(path_context, memo=memo)
dct["ref_snr"] = data.at_least_numpydata(self.ref_snr).save(path_context, memo=memo)
dct["thres_snr"] = data.at_least_numpydata(self.thres_snr).save(path_context, memo=memo)
return dct
@classmethod
def _JsonDictToArgs(cls, path_context, data_location, dct, memo=None):
_, args, kwargs = super()._JsonDictToArgs(path_context, data_location, dct, memo=memo)
args.append(dct["seed"])
for attr in ("in_snr", "ref_snr", "thres_snr",):
kwargs[attr] = data.NumpyData.Load(path_context, dct[attr], memo=memo)
kwargs["u_history"] = dct["u_history"]
return cls, args, kwargs
def _standard_deviations(self, meta, data):
# Noise magnitude, computed using https://en.wikipedia.org/wiki/Signal-to-noise_ratio
ref_snr = self.ref_snr
in_snr = self.in_snr
thres_snr = self.thres_snr
fsr = meta.fsr
fsr = fsr[1] - fsr[0]
s_ref = 0 if ref_snr == 0 else np.sqrt(np.power(fsr, 2)/ref_snr)
s_thres = 0 if thres_snr == 0 else np.sqrt(np.power(fsr, 2)/thres_snr)
s_in = 0 if in_snr == 0 else np.sqrt(np.power(fsr, 2)/in_snr)
s_dict = { "s_ref": s_ref,
"s_thres": s_thres,
"s_in": s_in}
data["std"] = s_dict
@staticmethod
def simulate_setup(eff, caps, refs, thres, ins, common_mode, conf_seq, scalar):
if scalar:
eff = eff[np.newaxis, ...]
caps = caps[np.newaxis, ...]
refs = refs[np.newaxis, ...]
thres = thres[np.newaxis, ...]
ins = ins[np.newaxis, ...]
common_mode = common_mode[np.newaxis, ...]
base_shape = np.shape(eff)
base_len = len(base_shape)
assert len(np.shape(common_mode)) == base_len, "Expected same dimensions as eff."
assert len(np.shape(caps)) == base_len + 2, "Expected 2 extra dimensions."
assert len(np.shape(refs)) == base_len + 3, "Expected 3 extra dimensions."
assert len(np.shape(thres)) == base_len + 1, "Expected 1 extra dimensions."
assert len(np.shape(ins)) == base_len + 2, "Expected 2 extra dimensions."
common_mode = np.reshape(common_mode, np.shape(common_mode) + (1, 1,))
meta = conf_seq.meta
n_diff = meta.n_diff
n_conf = conf_seq.n_conf
assert n_diff == np.size(caps, -1), "Inconsistent data with meta."
assert n_diff == np.size(refs, -1), "Inconsistent data with meta."
assert n_diff == np.size(ins, -1), "Inconsistent data with meta."
n_caps = meta.n_caps
n_refs = meta.n_refs
# n_ins = np.size(refs, -2)
n_thres = np.size(thres, -1)
n_codes = n_thres + 1
assert n_caps == np.size(caps, -2), "Inconsistent data with meta."
assert n_refs == np.size(refs, -2), "Inconsistent data with meta."
assert n_caps == np.size(refs, -3), "Inconsistent data with meta."
# Create extended versions
zl = np.zeros_like
ins_shape = np.shape(ins)
ins_extra_shape = ins_shape[:-2] + (1, ins_shape[-1],)
eff = eff[..., np.newaxis, np.newaxis]
ins = np.concatenate((ins, np.zeros(ins_extra_shape),), axis=-2)
caps = np.concatenate((caps, zl(caps[..., 0:1, :]),), axis=-2)
refs = np.concatenate((refs, zl(refs[..., 0:1, :]),), axis=-2)
refs = np.concatenate((refs, zl(refs[..., 0:1, :, :]),), axis=-3)
diff_ii = misc.ogrid(2, n_diff, 3)
diff_ii_ext = misc.ogrid(base_len + 2, n_diff, base_len + 3)
base_ii = tuple(misc.ogrid(ii, ss, base_len + 3) for ii, ss in enumerate(base_shape))
cap_axis = base_len + 1
ext_dct = { "eff": eff,
"caps": caps,
"refs": refs,
"thres": thres,
"ins": ins,
"cm": common_mode }
n_dct = { "n_conf": n_conf,
"n_caps": n_caps,
"n_refs": n_refs,
"n_thres": n_thres,
"n_codes": n_codes,
"n_diff": n_diff }
idx_dct = { "base_shape": base_shape,
"base_len": base_len,
"diff_ii": diff_ii,
"diff_ii_ext": diff_ii_ext,
"base_ii": base_ii,
"cap_axis": cap_axis }
s_dict = {"s_ref": 0, "s_thes": 0, "s_in": 0}
data_dict = { "extended": ext_dct,
"n": n_dct,
"indexing": idx_dct,
"std": s_dict,
"ds_offset": 0 }
return data_dict
@staticmethod
def init_seq_idx(conf_seq, data):
idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
base_shape, diff_ii = get(idx_dct, "base_shape", "diff_ii")
caps, refs, cm = get(ext_dct, "caps", "refs", "cm")
n_conf, n_diff = get(n_dct, "n_conf", "n_diff")
ic = conf_seq.initial_conditions
# Set initial condition
# shape (n_conf, n_cf, n_diff,)
ic_ii = np.array(list(iic.ref_ii for iic in ic), dtype=int)
# shape (n_conf, n_cf, n_diff,)
cf_ii = conf_seq.configuration_sets[0].cf
m_refs_idx = (Ellipsis, cf_ii, ic_ii, diff_ii,)
m_caps_idx = (Ellipsis, cf_ii, diff_ii,)
init_idxs = {
"m_refs_idx": m_refs_idx,
"m_caps_idx": m_caps_idx }
return init_idxs
@staticmethod
def init_seq(indexes, data):
idx_dct, ext_dct, n_dct, std = get(
data, "indexing", "extended", "n", "std")
base_shape = get(idx_dct, "base_shape")[0]
caps, refs, cm = get(ext_dct, "caps", "refs", "cm")
n_conf, n_diff = get(n_dct, "n_conf", "n_diff")
s_ref = get(std, "s_ref")[0]
m_refs_idx, m_caps_idx = get(indexes, "m_refs_idx", "m_caps_idx")
u = np.zeros(base_shape + (n_conf, n_diff,))
# shape (base_shape, n_conf, n_cf, n_diff,)
ic_refs = refs[m_refs_idx]
if s_ref > 0:
ic_refs = np.random.normal(ic_refs, s_ref, size=np.shape(ic_refs))
ic_cf = caps[m_caps_idx]
ic_g = ic_cf / np.sum(ic_cf, axis=-2, keepdims=True)
u += np.sum(ic_g * ic_refs, axis=-2)
if n_diff == 2:
u += cm - np.mean(u, axis=-1, keepdims=True)
u = u[np.newaxis, ...]
return u
@staticmethod
def init_set(conf_set, data, prev_set_data, prev_du_idx):
idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
n_refs, n_codes = get(n_dct, "n_refs", "n_codes")
base_len, diff_ii = get(idx_dct, "base_len", "diff_ii")
n_cs = conf_set.n_cs
cs_ii = conf_set.cs
cf_ii = conf_set.cf
cs_ii_base = cs_ii[(np.newaxis,)*base_len + (Ellipsis,)]
meta = conf_set.meta
ds_map = gen.ds_map(n_cs, n_refs, n_codes, meta.differential)
if prev_du_idx is None:
prev_dct = None
else:
prev_dct = {"r_ref": prev_du_idx["r_ref"],
"cs_ii": prev_set_data["indexing"]["cs_ii"],
"cf_ii": prev_set_data["indexing"]["cf_ii"] }
return { "indexing" : {
"cs_ii" : cs_ii,
"cf_ii": cf_ii,
"cs_ii_base": cs_ii_base, },
"n": {"n_cs": n_cs },
"previous": prev_dct,
"ds_map": ds_map}
@staticmethod
def transition_step_idx(conf_set, set_data, data, code):
assert np.size(code, 0) == 1
idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
idx_set, pre_set, n_set, ds_map = get(set_data,
"indexing", "previous", "n", "ds_map")
cs_ii, cf_ii, cs_ii_base = get(idx_set, "cs_ii", "cf_ii", "cs_ii_base")
prev_cs_ii, prev_cf_ii = get(pre_set, "cs_ii", "cf_ii")
prev_r_ref = get(pre_set, "r_ref")[0][-1, ...]
diff_ii, diff_ii_ext, cap_axis = get(idx_dct,
"diff_ii", "diff_ii_ext", "cap_axis")
base_shape, base_len, base_ii = get(idx_dct,
"base_shape", "base_len", "base_ii")
eff, caps, refs, thres, ins, cm = get(ext_dct,
"eff", "caps", "refs", "thres", "ins", "cm")
n_conf, n_diff = get(n_dct, "n_conf", "n_diff")
# Compute transition
cs_cf_ii, cf_cs_ii, cs_cs_ii, cf_cf_ii = transition_cx_change(
prev_cs_ii, prev_cf_ii, cs_ii, cf_ii)
# shape(n_cs, ..., n_conf, n_diff)
this_ref_ii = ds_map[:, code, :]
# shape(..., n_conf, n_cs, n_diff)
ds_map_transpose = tuple(range(1, base_len + 1 + 2)) + (0, -1,)
# shape = (n_samples, base_shape, n_conf, n_cs, n_diff) (before idx)
this_ref_ii = np.transpose(this_ref_ii, ds_map_transpose)[0, ...]
ds_offset = data["ds_offset"]
in_ref_ii, in_ins_ii = conf_set.generate_in(1, ds_offset)
data["ds_offset"] = ds_offset + 1
m_this_ref_idx = base_ii + (cs_ii_base, this_ref_ii, diff_ii_ext,)
r_this_ref_idx = np.ravel_multi_index(m_this_ref_idx, refs.shape)
transition_idx = {
"m_cf_cs_idx": (Ellipsis, cf_cs_ii, diff_ii,),
"m_cs_cf_idx": (Ellipsis, cs_cf_ii, diff_ii,),
"m_cs_cs_idx": (Ellipsis, cs_cs_ii, diff_ii,),
"m_cf_cf_idx": (Ellipsis, cf_cf_ii, diff_ii,),
"r_this_r_ref": r_this_ref_idx,
"r_prev_r_ref": prev_r_ref,
"m_this_in_ref": (Ellipsis, cs_ii, in_ref_ii[0, ...], diff_ii,),
"m_this_in_ins": (Ellipsis, in_ins_ii[0, ...], diff_ii,) }
return transition_idx
@staticmethod
def transition_step(indexes, u, set_data, data):
idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
cap_axis = get(idx_dct, "cap_axis")[0]
n_diff = get(n_dct, "n_diff")[0]
eff, caps, refs, thres, ins, cm = get(ext_dct,
"eff", "caps", "refs", "thres", "ins", "cm")
m_cf_cs_idx, m_cs_cf_idx, m_cs_cs_idx, m_cf_cf_idx = get(indexes,
"m_cf_cs_idx", "m_cs_cf_idx", "m_cs_cs_idx", "m_cf_cf_idx")
r_this_r_ref, r_prev_r_ref = get(indexes,
"r_this_r_ref", "r_prev_r_ref")
m_this_in_ref, m_this_in_ins = get(indexes,
"m_this_in_ref", "m_this_in_ins")
std = get(data, "std")[0]
s_ref, s_in = get(std, "s_ref", "s_in")
cf_cs = caps[m_cf_cs_idx]
cs_cf = caps[m_cs_cf_idx]
cs_cs = caps[m_cs_cs_idx] # used in du
cf_cf = caps[m_cf_cf_idx]
prev_ref = refs.ravel().take(r_prev_r_ref)
if s_ref > 0:
prev_ref = np.random.normal(prev_ref, s_ref, size=np.shape(prev_ref))
this_ref = refs.ravel().take(r_this_r_ref)
if s_ref > 0:
this_ref = np.random.normal(this_ref, s_ref, size=np.shape(this_ref))
this_in_ref = refs[m_this_in_ref] # used in du
if s_ref > 0:
this_in_ref = np.random.normal(this_in_ref, s_ref, size=np.shape(this_in_ref))
this_in_ins = ins[m_this_in_ins] # used in du
if s_in > 0:
this_in_ins = np.random.normal(this_in_ins, s_in, size=np.shape(this_in_ins))
# used in du
u_gain = (np.sum(cf_cf, axis=-2) + np.sum(cf_cs, axis=-2)
) / (np.sum(cf_cf, axis=-2) + np.sum(cs_cf, axis=-2))
# Sum on next_cs shaped
du_stmt = ("sum(cs_cs*(this_in_ref + this_in_ins)"
" - (cf_cs + cs_cs)*(this_ref), axis={})").format(cap_axis)
du = ne.evaluate(du_stmt)
# Sum on prev_cs shaped
du_stmt = ("sum(cs_cf*prev_ref, axis={})").format(cap_axis)
du += ne.evaluate(du_stmt)
assert np.size(u, 0) == 1, "Only one sample."
CF = np.sum(cf_cf, axis=cap_axis) + np.sum(cs_cf, axis=cap_axis)
# Apply gain and charge loss
u = ne.evaluate("u*u_gain*eff + (1-eff)*cm + du/CF")
# common mode feedback
if n_diff == 2:
u += cm - np.mean(u, axis=-1, keepdims=True)
import numpy as np
from numpy.testing import assert_almost_equal as aae
from pytest import raises
from functools import reduce
from .. import utils
def test_prime_radius_bad_semiaxes():
'major semiaxis lower than minor semiaxis'
latitude = np.ones(100)
raises(AssertionError, utils.prime_vertical_curv, 10, 23, latitude)
def test_prime_radius_negative_semiaxes():
'negative semiaxes'
latitude = np.ones(100)
raises(AssertionError, utils.prime_vertical_curv, -10, -23, latitude)
def test_meridian_radius_bad_semiaxes():
'major semiaxis lower than minor semiaxis'
latitude = np.ones(100)
raises(AssertionError, utils.meridian_curv, 10, 23, latitude)
def test_meridian_radius_negative_semiaxes():
'negative semiaxes'
latitude = np.ones(100)
raises(AssertionError, utils.meridian_curv, -10, -23, latitude)
def test_relationship_curvatures():
'verify relationship between the curvatures'
latitude = np.linspace(-90, 90, 181)
a = 6378137.0
f = 1.0/298.257223563
b = a*(1-f)
N = utils.prime_vertical_curv(a, b, latitude)
M = utils.meridian_curv(a, b, latitude)
e2 = (a*a - b*b)/(a*a)
sin2lat = np.sin(np.deg2rad(latitude))
sin2lat *= sin2lat
M_relationship = ((1 - e2)/(1 - e2*sin2lat))*N
aae(M, M_relationship, decimal=8)
def test_prime_radius_known_input():
'verify results obtained for known input'
a = 6378137.0
f = 1.0/298.257223563
b = a*(1-f)
e2 = (a*a - b*b)/(a*a)
N_true_0 = a
N_true_90 = a/np.sqrt(1 - e2)
N_calc_0 = utils.prime_vertical_curv(a, b, 0)
N_calc_90 = utils.prime_vertical_curv(a, b, 90)
aae(N_true_0, N_calc_0, decimal=15)
aae(N_true_90, N_calc_90, decimal=15)
def test_prime_radius_known_input_2():
'verify results obtained for known input'
# true geodetic coordinates
lat_true = np.array([0, -15, 22.5, -30, 45, -60, 75, -90])
sinlat = np.array([0, -(np.sqrt(6) - np.sqrt(2))/4,
(np.sqrt(2 - np.sqrt(2)))/2, -0.5,
np.sqrt(2)/2, -np.sqrt(3)/2,
(np.sqrt(6) + np.sqrt(2))/4, -1])
# major semiaxis, flattening, minor semiaxis and squared first eccentricity
a = 6378137.0
f = 1.0/298.257223563
b = a*(1-f)
# squared first eccentricity
e2 = (a**2. - b**2.)/(a**2.)
# true prime vertical radius of curvature
N_true = a/np.sqrt(1 - e2*sinlat*sinlat)
# computed prime vertical radius of curvature
N = utils.prime_vertical_curv(a, b, lat_true)
aae(N_true, N, decimal=15)
def test_meridian_radius_known_input():
'verify results obtained for known input'
a = 6378137.0
f = 1.0/298.257223563
b = a*(1-f)
e2 = (a*a - b*b)/(a*a)
M_true_0 = a*(1 - e2)
M_true_90 = a/np.sqrt(1 - e2)
M_calc_0 = utils.meridian_curv(a, b, 0)
M_calc_90 = utils.meridian_curv(a, b, 90)
aae(M_true_0, M_calc_0, decimal=15)
aae(M_true_90, M_calc_90, decimal=8)
def test_meridian_radius_known_input_2():
'verify results obtained for known input'
# true geodetic coordinates
lat_true = np.array([0, -15, 22.5, -30, 45, -60, 75, -90])
sinlat = np.array([0, -(np.sqrt(6) - np.sqrt(2))/4,
import os
import pickle as pkl
import pandas as pd
import xarray as xr
import SimpleITK as sitk
import numpy as np
import glob
class DicomToXArray:
def __init__(self, patient_dir):
self.dir = patient_dir
self.SAs = glob.glob(patient_dir+'/SA*')
self.raw_image_dict = {}
self.image_list = []
self.mask_list = []
self.metadata_dict = {}
self.timestamp_dict = {}
self.weirdness_dict = {}
self.loc_dict = {}
self.slice_dict = {}
self.shape_dict = {}
self.spacing_dict = {}
self.direction_dict = {}
self.bad_SAs = set()
self.good_SAs = set()
#print(self.SAs)
self.get_images_and_metadata()
self.make_xarray()
def get_reader(self, SA):
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(SA)
reader.SetFileNames(dicom_names)
reader.MetaDataDictionaryArrayUpdateOn() # Get DICOM Info
reader.LoadPrivateTagsOn() # Get DICOM Info
return reader
def get_reader_and_image(self, SA):
reader = self.get_reader(SA)
image = reader.Execute()
return reader, image
def load_metadata(self, SA, reader, slices):
# load metadata for first timestamp
# (except the timestamp value, need to get that for all)
vals = []
for k in reader.GetMetaDataKeys(0):
vals.append(reader.GetMetaData(0, k))
self.metadata_dict[SA] = pd.Series(vals, reader.GetMetaDataKeys(0))
locs = [float(a) for a in self.metadata_dict[SA]['0020|0032'].split('\\')]
self.loc_dict[SA] = locs
self.slice_dict[SA] = float(self.metadata_dict[SA]['0020|1041'])
spacing = [float(a) for a in self.metadata_dict[SA]['0028|0030'].split('\\')]
self.spacing_dict[SA] = spacing
self.timestamp_dict[SA] = []
for i in range(slices):
self.timestamp_dict[SA].append(int(reader.GetMetaData(i, '0020|0013')))
def calc_weirdness(self):
for SA in self.SAs:
weirdness = 0
for SA_test in self.SAs:
if SA != SA_test:
weirdness += np.sum(~self.metadata_dict[SA].eq(self.metadata_dict[SA_test]))
self.weirdness_dict[SA] = weirdness
def mark_bads(self):
# mark slices as bad if any exist
for SA in self.SAs:
for SA_test in self.SAs:
if SA_test == SA or (SA_test in self.bad_SAs):
continue
too_close = np.isclose(self.loc_dict[SA][-1], self.loc_dict[SA_test][-1], atol=0.5)
too_close_slice = np.isclose(self.slice_dict[SA],
self.slice_dict[SA_test], atol=0.5)
if too_close or too_close_slice:
if self.weirdness_dict[SA] > self.weirdness_dict[SA_test]:
self.bad_SAs.add(SA)
else:
self.bad_SAs.add(SA_test)
print('bad slices:', self.bad_SAs)
self.good_SAs = self.bad_SAs.symmetric_difference(self.SAs)
def get_ideal_params(self):
x_shapes = [self.shape_dict[SA][0] for SA in self.good_SAs]
y_shapes = [self.shape_dict[SA][1] for SA in self.good_SAs]
x_spacings = [self.spacing_dict[SA][0] for SA in self.good_SAs]
y_spacings = [self.spacing_dict[SA][1] for SA in self.good_SAs]
x_origins = [self.loc_dict[SA][0] for SA in self.good_SAs]
y_origins = [self.loc_dict[SA][1] for SA in self.good_SAs]
directions = [self.direction_dict[SA] for SA in self.good_SAs]
self.ideal_x_shape = int(np.mean(x_shapes))
self.ideal_y_shape = int(np.mean(y_shapes))
self.ideal_x_spacing = np.mean(x_spacings)
self.ideal_y_spacing = np.mean(y_spacings)
self.ideal_x_origin = np.mean(x_origins)
self.ideal_y_origin = np.mean(y_origins)
directions = np.asarray(directions)
self.ideal_directions = directions.mean(axis=0)
def get_images_and_metadata(self):
for i, SA in enumerate(self.SAs):
reader, image = self.get_reader_and_image(SA)
self.shape_dict[SA] = image.GetSize()
self.direction_dict[SA] = image.GetDirection()
self.load_metadata(SA, reader, image.GetSize()[-1])
self.raw_image_dict[SA] = image
self.calc_weirdness()
self.mark_bads()
self.get_ideal_params()
#print(self.good_SAs)
for SA in self.good_SAs:
mask = self.get_mask(SA)
image, mask = self.resample_images(SA, mask)
self.image_list.append(sitk.GetArrayFromImage(image))
self.mask_list.append(sitk.GetArrayFromImage(mask))
def resample_images(self, SA, mask):
image = self.raw_image_dict[SA]
ref_image = sitk.Image((int(self.ideal_x_shape), int(self.ideal_y_shape),
int(image.GetSize()[-1])), 2)
ref_image.SetSpacing((self.ideal_x_spacing, self.ideal_y_spacing, 1))
ref_image.SetOrigin((self.ideal_x_origin, self.ideal_y_origin, image.GetOrigin()[-1]))
ref_image.SetDirection(self.ideal_directions)
ref_image = sitk.Cast(ref_image, image.GetPixelIDValue())
center = sitk.CenteredTransformInitializer(
ref_image, image, sitk.AffineTransform(3),
sitk.CenteredTransformInitializerFilter.GEOMETRY
)
new_image = sitk.Resample(image, ref_image, center,
sitk.sitkNearestNeighbor)
new_mask = sitk.Resample(mask, ref_image, center,
sitk.sitkNearestNeighbor)
return new_image, new_mask
def get_mask(self, SA):
image = self.raw_image_dict[SA]
png_names = SA + '/*.png'
tmp_mask_list = []
for i, fn in enumerate(sorted(glob.glob(png_names))):
tmp_mask = sitk.GetArrayFromImage(sitk.ReadImage(fn))
tmp_mask_list.append(tmp_mask[:, :, 0])
mask_array = np.zeros((len(tmp_mask_list), tmp_mask_list[0].shape[0],
tmp_mask_list[0].shape[1]), dtype=np.float32)
for i, m in enumerate(tmp_mask_list):
mask_array[i, :, :] = m
mask = sitk.GetImageFromArray(mask_array)
mask.SetDirection(image.GetDirection())
mask.SetOrigin(image.GetOrigin())
mask.SetSpacing(image.GetSpacing())
return mask
def make_xarray(self):
xs = np.arange(self.ideal_x_origin,
self.ideal_x_origin + (self.ideal_x_spacing * self.ideal_x_shape),
self.ideal_x_spacing)
ys = np.arange(self.ideal_y_origin,
self.ideal_y_origin + (self.ideal_y_spacing * self.ideal_y_shape),
self.ideal_y_spacing)
zs = [self.loc_dict[SA][-1] for SA in self.good_SAs]
self.ds = xr.Dataset({'image': (['z', 't', 'y', 'x'], self.image_list),
'mask': (['z', 't', 'y', 'x'], self.mask_list)},
coords={'t': self.timestamp_dict[list(self.good_SAs)[0]],
'x': xs,
'y': ys,
'z': zs})
self.ds = self.ds.sortby(['t', 'z'])
def generate_3D_nifti(self, t_slice=0):
"Write out a nifti images of the 3D volume and 3d mask for a particular time slice."
xr_3D_slice = self.ds.isel(t=t_slice)
# sitk must get numpy array by [z,y,x]
nifti_image = sitk.GetImageFromArray(xr_3D_slice.image.transpose('z', 'y', 'x').values)
nifti_mask = sitk.GetImageFromArray(xr_3D_slice.mask.transpose('z', 'y', 'x').values)
self.set_sitk_metadata(nifti_image)
self.set_sitk_metadata(nifti_mask)
sitk.WriteImage(nifti_image, os.path.join(self.dir, f'CT_tslice_{t_slice}.nii'))
sitk.WriteImage(nifti_mask, os.path.join(self.dir, f'mask_tslice_{t_slice}.nii'))
def set_sitk_metadata(self, image):
image.SetOrigin((self.ds.x.values[0], self.ds.y.values[0], self.ds.z.values[0]))
image.SetDirection(self.ideal_directions)
# Cheating a bit with z-spacing....
image.SetSpacing((self.ideal_x_spacing, self.ideal_y_spacing, np.mean(self.ds.z.values)))
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
def random2():
x, y = make_blobs(n_samples=160, n_features=3, centers=[[3, 3, 3], [0, 0, 0], [1, 1, 1], [2, 2, 2]], cluster_std=[0.2, 0.1, 0.2, 0.2])
return x
def random():
a = [[1, 1, 1],
[1, -1, 1],
[-1, -1, 1],
[-1, 1, 1],
[1, 1, -1],
[1, -1, -1],
[-1, -1, -1],
[-1, 1, -1]]
r = None
for i in range(20):
if r is None:
r = a*np.random.randint(0, 10, (1, 3))
#!/usr/bin/env python
# coding: utf-8
import numpy as np
from sklearn.neighbors import NearestNeighbors
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
def wraptopi(phi):
return np.mod(phi + np.pi, 2*np.pi) - np.pi
def best_fit_transform(A, B):
'''
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Nxm numpy array of corresponding points
B: Nxm numpy array of corresponding points
Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
'''
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
# rotation matrix
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# special reflection case
if np.linalg.det(R) < 0:
Vt[m-1,:] *= -1
R = np.dot(Vt.T, U.T)
# translation
t = centroid_B.T - np.dot(R,centroid_A.T)
# homogeneous transformation
T = np.identity(m+1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
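# Illustrative self-check (added for clarity, not part of the original script):
# recover a known 2-D rotation/translation with best_fit_transform(). The point
# count, angle and the helper name `_demo_best_fit_transform` are arbitrary.
def _demo_best_fit_transform():
    theta = np.deg2rad(30.0)
    R_true = np.array([[np.cos(theta), -np.sin(theta)],
                       [np.sin(theta), np.cos(theta)]])
    t_true = np.array([0.5, -1.0])
    A = np.random.rand(50, 2)
    B = A @ R_true.T + t_true
    T, R_est, t_est = best_fit_transform(A, B)
    # With noiseless correspondences the estimate matches the ground truth
    assert np.allclose(R_est, R_true, atol=1e-6)
    assert np.allclose(t_est, t_true, atol=1e-6)
    return T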
def nearest_neighbor(src, dst):
'''
Find the nearest (Euclidean) neighbor in dst for each point in src
Input:
src: Nxm array of points
dst: Nxm array of points
Output:
distances: Euclidean distances of the nearest neighbor
indices: dst indices of the nearest neighbor
'''
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
def icp(A, B, init_pose=None, max_iterations=25, tolerance=0.0001):
'''
The Iterative Closest Point method: finds best-fit transform that maps points A on to points B
Input:
A: Nxm numpy array of source mD points
B: Nxm numpy array of destination mD point
init_pose: (m+1)x(m+1) homogeneous transformation
max_iterations: exit algorithm after max_iterations
tolerance: convergence criteria
Output:
T: final homogeneous transformation that maps A on to B
distances: Euclidean distances (errors) of the nearest neighbor
i: number of iterations to converge
'''
# get number of dimensions
m = A.shape[1]
# make points homogeneous, copy them to maintain the originals
src = np.ones((m+1,A.shape[0]))
dst = np.ones((m+1,B.shape[0]))
src[:m,:] = np.copy(A.T)
dst[:m,:] = np.copy(B.T)
# apply the initial pose estimation
if init_pose is not None:
src = np.dot(init_pose, src)
prev_error = 0
for i in range(max_iterations):
# find the nearest neighbors between the current source and destination points
distances, indices = nearest_neighbor(src[:m,:].T, dst[:m,:].T)
# compute the transformation between the current source and nearest destination points
T,_,_ = best_fit_transform(src[:m,:].T, dst[:m,indices].T)
# update the current source
src = np.dot(T, src)
# check error
mean_error = np.max(distances)
#if mean_error < tolerance:#np.abs(prev_error - mean_error) < tolerance:
# break
prev_error = mean_error
# calculate final transformation
_,R,t = best_fit_transform(A, src[:m,:].T)
return R, t, distances, i
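# Illustrative sketch (not part of the original module): align a 2-D point
# cloud with a slightly rotated and shifted copy of itself using icp(). The
# cloud size, pose offset and helper name are arbitrary example choices.
def _demo_icp():
    rng = np.random.default_rng(0)
    A = rng.random((100, 2))
    phi = np.deg2rad(5.0)
    R_true = np.array([[np.cos(phi), -np.sin(phi)],
                       [np.sin(phi), np.cos(phi)]])
    B = A @ R_true.T + np.array([0.05, -0.02])
    R, t, distances, n_iter = icp(A, B, max_iterations=50)
    # R, t should approximately undo the synthetic offset
    return R, t, distances, n_iter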
class ScanICP(object):
phi = np.linspace(-2*np.pi/3, 2*np.pi/3, 682)
def __init__(self, r):
mask = (r < 1.5) & (r > 0.1)
self.r = r[mask]
self.phi = ScanICP.phi[mask]
self.m = len(self.r)
self.x = self.r*np.cos(self.phi)
self.y = self.r*np.sin(self.phi)
self.P = np.array([self.x, self.y]).T
def icp_match(self, prev_scan):
if prev_scan.m > self.m:
P_prev = prev_scan.P[np.random.randint(prev_scan.m, size=self.m), :]
P_new = self.P
elif prev_scan.m < self.m:
P_new = self.P[np.random.randint(self.m, size=prev_scan.m), :]
P_prev = prev_scan.P
else:
P_prev = prev_scan.P
P_new = self.P
Ricp, Ticp, d, i = icp(P_prev, P_new)
while np.any(d >= 0.025):
#!/usr/bin/python
'''
Improved Minima Controlled Recursive Averaging (IMCRA) single channel
noise estmation after
[1] Israel Cohen, Noise Spectrum estimation in Adverse Environments:
Improved Minima Controlled Recursive Averaging. IEEE. Trans. Acoust.
Speech Signal Process. VOL. 11, NO. 5, Sep 2003.
<NAME> Feb2015
'''
import numpy as np
import sys
import os
# Add the path of the toolbox root
# from ns import MMSE_LSA
# For debugging purposes
#import ipdb
#np.seterr(divide='ignore',invalid='raise')
def post_speech_prob(Y_l, q, Gamma, xi):
'''
Posterior speech probability given prior speech absence and the complex
Gaussian model of speech distortion
Input: Y_l [K, 1] STFT frame
Input: q [K, 1] a priori speech presence
Input: Gamma [K, 1] A posteriori SNR
Input: xi [K, 1] A priori SNR
'''
nu = Gamma*xi/(1+xi)
p = np.zeros(Y_l.shape)
p[q < 1] = 1./(1+(q[q < 1]/(1-q[q < 1]))*(1+xi[q < 1])*np.exp(-nu[q < 1]))
return p
def sym_hanning(n):
'''
Same Hanning as the matlab default
'''
# to float
n = float(n)
    if np.mod(n, 2) == 0:
        # Even length window: mirror the first half
        half = n/2
        w = .5*(1 - np.cos(2*np.pi*np.arange(1, half+1)/(n+1)))
        return np.concatenate((w, w[::-1]))
    # Odd length window: mirror the first half, excluding the centre sample
    half = (n+1)/2
    w = .5*(1 - np.cos(2*np.pi*np.arange(1, half+1)/(n+1)))
    return np.concatenate((w, w[-2::-1]))
# Default buffer size
L_MAX = 1000
class imcra_se():
'''
Simple class for enhancement using IMCRA
'''
def __init__(self, nfft, Lambda_D=None, Bmin=3.2, alpha =0.92, xi_min=10**(-25./20), IS=10):
# Decision directed smoothing factor
self.alpha = alpha
# Decision directed a priori SNR floor
self.xi_min = xi_min
self.nfft = int(nfft/2+1)
#
self.store = {}
self.store['Lambda_D'] = np.zeros((self.nfft, L_MAX))
self.store['p'] = np.zeros((self.nfft, L_MAX))
self.store['xi'] = np.zeros((self.nfft, L_MAX))
self.store['MSE'] = np.zeros((self.nfft, L_MAX))
self.l = 0
# IMCRA initial background segment (frames)
# Initialization
self.imcra = imcra(nfft, IS=IS, Bmin=Bmin)
self.G = 1
self.p = np.zeros([self.nfft, 1])
# Initial noise estimate
if Lambda_D is None:
self.Lambda_D = 1e-6*np.ones([self.nfft, 1])
else:
self.Lambda_D = Lambda_D
def update(self, Y):
hat_X = np.zeros(Y.shape, dtype=complex)
"""
Proto
Contains the following library code useful for prototyping robotic algorithms:
- YAML
- TIME
- PROFILING
- MATHS
- LINEAR ALGEBRA
- GEOMETRY
- LIE
- TRANSFORM
- MATPLOTLIB
- CV
- DATASET
- FILTER
- STATE ESTIMATION
- CALIBRATION
- SIMULATION
- UNITTESTS
"""
import os
import sys
import glob
import math
import time
import copy
import random
import pickle
import json
import signal
from datetime import datetime
from pathlib import Path
from enum import Enum
from dataclasses import dataclass
from collections import namedtuple
from types import FunctionType
from typing import Optional
import cv2
import yaml
import numpy as np
import scipy
import scipy.sparse
import scipy.sparse.linalg
import scipy.optimize
import pandas
import cProfile
from pstats import Stats
###############################################################################
# YAML
###############################################################################
def load_yaml(yaml_path):
""" Load YAML and return a named tuple """
assert yaml_path is not None
assert yaml_path != ""
# Load yaml_file
yaml_data = None
with open(yaml_path, "r") as stream:
yaml_data = yaml.safe_load(stream)
# Convert dict to named tuple
data = json.dumps(yaml_data) # Python dict to json
data = json.loads(
data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
return data
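# Illustrative usage sketch (added for clarity): load_yaml() returns nested
# namedtuples, so YAML keys become attributes. The YAML content below is a
# made-up example, written to a temporary file just for the demonstration.
def _demo_load_yaml():
    import tempfile
    text = "camera:\n  resolution: [640, 480]\n  rate_hz: 30\n"
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
        f.write(text)
        yaml_path = f.name
    config = load_yaml(yaml_path)
    return config.camera.resolution  # -> [640, 480]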
###############################################################################
# TIME
###############################################################################
def sec2ts(time_s):
""" Convert time in seconds to timestamp """
return int(time_s * 1e9)
def ts2sec(ts):
""" Convert timestamp to seconds """
return ts * 1e-9
###############################################################################
# PROFILING
###############################################################################
def profile_start():
""" Start profile """
prof = cProfile.Profile()
prof.enable()
return prof
def profile_stop(prof, **kwargs):
""" Stop profile """
key = kwargs.get('key', 'cumtime')
N = kwargs.get('N', 10)
stats = Stats(prof)
stats.strip_dirs()
stats.sort_stats(key).print_stats(N)
###############################################################################
# MATHS
###############################################################################
from math import pi
from math import isclose
from math import sqrt
# from math import floor
from math import cos
from math import sin
from math import tan
from math import acos
from math import atan
def rmse(errors):
""" Root Mean Squared Error """
return np.sqrt(np.mean(errors**2))
###############################################################################
# LINEAR ALGEBRA
###############################################################################
from numpy import rad2deg
from numpy import deg2rad
from numpy import sinc
from numpy import zeros
from numpy import ones
from numpy import eye
from numpy import trace
from numpy import diagonal as diag
from numpy import cross
from numpy.linalg import norm
from numpy.linalg import inv
from numpy.linalg import pinv
from numpy.linalg import matrix_rank as rank
from numpy.linalg import eig
from numpy.linalg import svd
from numpy.linalg import cholesky as chol
def normalize(v):
""" Normalize vector v """
n = np.linalg.norm(v)
if n == 0:
return v
return v / n
def full_rank(A):
""" Check if matrix A is full rank """
return rank(A) == A.shape[0]
def skew(vec):
""" Form skew-symmetric matrix from vector `vec` """
assert vec.shape == (3,) or vec.shape == (3, 1)
x, y, z = vec
return np.array([[0.0, -z, y], [z, 0.0, -x], [-y, x, 0.0]])
def skew_inv(A):
""" Form skew symmetric matrix vector """
assert A.shape == (3, 3)
return np.array([A[2, 1], A[0, 2], A[1, 0]])
def fwdsubs(L, b):
"""
Solving a lower triangular system by forward-substitution
Input matrix L is an n by n lower triangular matrix
Input vector b is n by 1
Output vector x is the solution to the linear system
L x = b
"""
assert L.shape[1] == b.shape[0]
n = b.shape[0]
x = zeros((n, 1))
for j in range(n):
if L[j, j] == 0:
raise RuntimeError('Matrix is singular!')
x[j] = b[j] / L[j, j]
        b[j:n] = b[j:n] - L[j:n, j] * x[j]
    return x
def bwdsubs(U, b):
"""
Solving an upper triangular system by back-substitution
Input matrix U is an n by n upper triangular matrix
Input vector b is n by 1
Output vector x is the solution to the linear system
U x = b
"""
assert U.shape[1] == b.shape[0]
n = b.shape[0]
x = zeros((n, 1))
for j in range(n):
if U[j, j] == 0:
raise RuntimeError('Matrix is singular!')
        x[j] = b[j] / U[j, j]
        b[0:j] = b[0:j] - U[0:j, j] * x[j]
    return x
def solve_svd(A, b):
"""
Solve Ax = b with SVD
"""
# compute svd of A
U, s, Vh = svd(A)
# U diag(s) Vh x = b <=> diag(s) Vh x = U.T b = c
c = np.dot(U.T, b)
# diag(s) Vh x = c <=> Vh x = diag(1/s) c = w (trivial inversion of a diagonal matrix)
w = np.dot(np.diag(1 / s), c)
# Vh x = w <=> x = Vh.H w (where .H stands for hermitian = conjugate transpose)
x = np.dot(Vh.conj().T, w)
return x
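# Illustrative sketch (added for clarity): solve a small well-conditioned
# linear system with solve_svd() and verify the residual. Values are arbitrary.
def _demo_solve_svd():
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([9.0, 8.0])
    x = solve_svd(A, b)   # expected solution ~ [2.0, 3.0]
    assert np.allclose(A @ x, b)
    return x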
def schurs_complement(H, g, m, r, precond=False):
""" Shurs-complement """
assert H.shape[0] == (m + r)
# H = [Hmm, Hmr
# Hrm, Hrr];
Hmm = H[0:m, 0:m]
Hmr = H[0:m, m:]
Hrm = Hmr.T
Hrr = H[m:, m:]
# g = [gmm, grr]
gmm = g[:m]
grr = g[m:]
# Precondition Hmm
if precond:
Hmm = 0.5 * (Hmm + Hmm.T)
# Invert Hmm
assert rank(Hmm) == Hmm.shape[0]
(w, V) = eig(Hmm)
W_inv = np.diag(1.0 / w)
Hmm_inv = V @ W_inv @ V.T
# Schurs complement
H_marg = Hrr - Hrm @ Hmm_inv @ Hmr
g_marg = grr - Hrm @ Hmm_inv @ gmm
return (H_marg, g_marg)
def is_pd(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = chol(B)
return True
except np.linalg.LinAlgError:
return False
def nearest_pd(A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if is_pd(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrixes with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrixes of small dimension, be on
# othe order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I = np.eye(A.shape[0])
k = 1
while not is_pd(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
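# Illustrative sketch (added for clarity): project an indefinite matrix onto
# the nearest positive-definite one. The example matrix has eigenvalues 3, -1.
def _demo_nearest_pd():
    A = np.array([[1.0, 2.0], [2.0, 1.0]])
    assert not is_pd(A)
    A_pd = nearest_pd(A)
    assert is_pd(A_pd)
    return A_pd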
def matrix_equal(A, B, tol=1e-8, verbose=False):
""" Compare matrices `A` and `B` """
diff = A - B
if len(diff.shape) == 1:
for i in range(diff.shape[0]):
if abs(diff[i]) > tol:
if verbose:
print("A - B:")
print(diff)
elif len(diff.shape) == 2:
for i in range(diff.shape[0]):
for j in range(diff.shape[1]):
if abs(diff[i, j]) > tol:
if verbose:
print("A - B:")
print(diff)
return False
return True
def plot_compare_matrices(title_A, A, title_B, B):
""" Plot compare matrices """
plt.matshow(A)
plt.colorbar()
plt.title(title_A)
plt.matshow(B)
plt.colorbar()
plt.title(title_B)
diff = A - B
plt.matshow(diff)
plt.colorbar()
plt.title(f"{title_A} - {title_B}")
print(f"max_coeff({title_A}): {np.max(np.max(A))}")
print(f"max_coeff({title_B}): {np.max(np.max(B))}")
print(f"min_coeff({title_A}): {np.min(np.min(A))}")
print(f"min_coeff({title_B}): {np.min(np.min(B))}")
print(f"max_diff: {np.max(np.max(np.abs(diff)))}")
plt.show()
def check_jacobian(jac_name, fdiff, jac, threshold, verbose=False):
""" Check jacobians """
# Check if numerical diff is same as analytical jacobian
if matrix_equal(fdiff, jac, threshold):
if verbose:
print(f"Check [{jac_name}] passed!")
return True
# Failed - print differences
if verbose:
fdiff_minus_jac = fdiff - jac
print(f"Check [{jac_name}] failed!")
print("-" * 60)
print("J_fdiff - J:")
print(np.round(fdiff_minus_jac, 4))
print()
print("J_fdiff:")
print(np.round(fdiff, 4))
print()
print("J:")
print(np.round(jac, 4))
print()
print("-" * 60)
return False
###############################################################################
# GEOMETRY
###############################################################################
def lerp(x0, x1, t):
""" Linear interpolation """
return (1.0 - t) * x0 + t * x1
def lerp2d(p0, p1, t):
""" Linear interpolation 2D """
assert len(p0) == 2
assert len(p1) == 2
assert t <= 1.0 and t >= 0.0
x = lerp(p0[0], p1[0], t)
y = lerp(p0[1], p1[1], t)
return np.array([x, y])
def lerp3d(p0, p1, t):
""" Linear interpolation 3D """
assert len(p0) == 3
assert len(p1) == 3
assert t <= 1.0 and t >= 0.0
x = lerp(p0[0], p1[0], t)
y = lerp(p0[1], p1[1], t)
z = lerp(p0[2], p1[2], t)
return np.array([x, y, z])
def circle(r, theta):
""" Circle """
x = r * cos(theta)
y = r * sin(theta)
return np.array([x, y])
def sphere(rho, theta, phi):
"""
Sphere
Args:
rho (float): Sphere radius
theta (float): longitude [rad]
phi (float): Latitude [rad]
Returns:
Point on sphere
"""
x = rho * sin(theta) * cos(phi)
y = rho * sin(theta) * sin(phi)
z = rho * cos(theta)
return np.array([x, y, z])
def circle_loss(c, x, y):
"""
Calculate the algebraic distance between the data points and the mean
circle centered at c=(xc, yc)
"""
xc, yc = c
# Euclidean dist from center (xc, yc)
Ri = np.sqrt((x - xc)**2 + (y - yc)**2)
return Ri - Ri.mean()
def find_circle(x, y):
"""
Find the circle center and radius given (x, y) data points using least
squares. Returns `(circle_center, circle_radius, residual)`
"""
x_m = np.mean(x)
y_m = np.mean(y)
center_init = x_m, y_m
center, _ = scipy.optimize.leastsq(circle_loss, center_init, args=(x, y))
xc, yc = center
radii = np.sqrt((x - xc)**2 + (y - yc)**2)
radius = radii.mean()
residual = np.sum((radii - radius)**2)
return (center, radius, residual)
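# Illustrative sketch (added for clarity): fit a circle to noiseless samples of
# a known circle; assumes scipy.optimize is importable. Values are arbitrary.
def _demo_find_circle():
    theta = np.linspace(0.0, 2.0 * np.pi, 50)
    x = 1.0 + 2.0 * np.cos(theta)
    y = -0.5 + 2.0 * np.sin(theta)
    center, radius, residual = find_circle(x, y)
    # center ~ (1.0, -0.5), radius ~ 2.0, residual ~ 0.0
    return center, radius, residual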
def bresenham(p0, p1):
"""
Bresenham's line algorithm is a line drawing algorithm that determines the
points of an n-dimensional raster that should be selected in order to form
a close approximation to a straight line between two points. It is commonly
used to draw line primitives in a bitmap image (e.g. on a computer screen),
as it uses only integer addition, subtraction and bit shifting, all of
which are very cheap operations in standard computer architectures.
Args:
p0 (np.array): Starting point (x, y)
p1 (np.array): End point (x, y)
Returns:
A list of (x, y) intermediate points from p0 to p1.
"""
x0, y0 = p0
x1, y1 = p1
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1.0 if x0 < x1 else -1.0
sy = 1.0 if y0 < y1 else -1.0
err = dx - dy
line = []
while True:
line.append([x0, y0])
if x0 == x1 and y0 == y1:
return line
e2 = 2 * err
if e2 > -dy:
# overshot in the y direction
err = err - dy
x0 = x0 + sx
if e2 < dx:
# overshot in the x direction
err = err + dx
y0 = y0 + sy
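# Illustrative sketch (added for clarity): rasterize a short line segment.
def _demo_bresenham():
    line = bresenham(np.array([0, 0]), np.array([4, 2]))
    # -> [[0, 0], [1, 0], [2, 1], [3, 1], [4, 2]] (later entries as floats)
    return line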
###############################################################################
# LIE
###############################################################################
def Exp(phi):
""" Exponential Map """
assert phi.shape == (3,) or phi.shape == (3, 1)
if norm(phi) < 1e-3:
C = eye(3) + skew(phi)
return C
phi_norm = norm(phi)
phi_skew = skew(phi)
phi_skew_sq = phi_skew @ phi_skew
C = eye(3)
C += (sin(phi_norm) / phi_norm) * phi_skew
C += ((1 - cos(phi_norm)) / phi_norm**2) * phi_skew_sq
return C
def Log(C):
""" Logarithmic Map """
assert C.shape == (3, 3)
# phi = acos((trace(C) - 1) / 2);
# u = skew_inv(C - C') / (2 * sin(phi));
# rvec = phi * u;
C00, C01, C02 = C[0, :]
C10, C11, C12 = C[1, :]
C20, C21, C22 = C[2, :]
tr = np.trace(C)
rvec = None
if tr + 1.0 < 1e-10:
if abs(C22 + 1.0) > 1.0e-5:
x = np.array([C02, C12, 1.0 + C22])
rvec = (pi / np.sqrt(2.0 + 2.0 * C22)) * x
elif abs(C11 + 1.0) > 1.0e-5:
x = np.array([C01, 1.0 + C11, C21])
rvec = (pi / np.sqrt(2.0 + 2.0 * C11)) * x
else:
x = np.array([1.0 + C00, C10, C20])
rvec = (pi / np.sqrt(2.0 + 2.0 * C00)) * x
else:
tr_3 = tr - 3.0 # always negative
if tr_3 < -1e-7:
theta = acos((tr - 1.0) / 2.0)
magnitude = theta / (2.0 * sin(theta))
else:
# when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
# use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
# see https://github.com/borglab/gtsam/issues/746 for details
magnitude = 0.5 - tr_3 / 12.0
rvec = magnitude * np.array([C21 - C12, C02 - C20, C10 - C01])
return rvec
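# Illustrative sketch (added for clarity): Exp() and Log() should be inverses
# for a small rotation vector. The test vector is arbitrary.
def _demo_exp_log_roundtrip():
    phi = np.array([0.1, -0.2, 0.3])
    C = Exp(phi)
    assert np.allclose(Log(C), phi, atol=1e-6)
    return C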
def Jr(theta):
"""
Right jacobian
Forster, Christian, et al. "IMU preintegration on manifold for efficient
visual-inertial maximum-a-posteriori estimation." Georgia Institute of
Technology, 2015.
[Page 2, Equation (8)]
"""
theta_norm = norm(theta)
theta_norm_sq = theta_norm * theta_norm
theta_norm_cube = theta_norm_sq * theta_norm
theta_skew = skew(theta)
theta_skew_sq = theta_skew @ theta_skew
J = eye(3)
J -= ((1 - cos(theta_norm)) / theta_norm_sq) * theta_skew
J += (theta_norm - sin(theta_norm)) / (theta_norm_cube) * theta_skew_sq
return J
def Jr_inv(theta):
""" Inverse right jacobian """
theta_norm = norm(theta)
theta_norm_sq = theta_norm * theta_norm
theta_skew = skew(theta)
theta_skew_sq = theta_skew @ theta_skew
A = 1.0 / theta_norm_sq
B = (1 + cos(theta_norm)) / (2 * theta_norm * sin(theta_norm))
J = eye(3)
J += 0.5 * theta_skew
J += (A - B) * theta_skew_sq
return J
def boxplus(C, alpha):
""" Box plus """
# C_updated = C [+] alpha
C_updated = C @ Exp(alpha)
return C_updated
def boxminus(C_a, C_b):
""" Box minus """
# alpha = C_a [-] C_b
alpha = Log(inv(C_b) @ C_a)
return alpha
###############################################################################
# TRANSFORM
###############################################################################
def homogeneous(p):
""" Turn point `p` into its homogeneous form """
return np.array([*p, 1.0])
def dehomogeneous(hp):
""" De-homogenize point `hp` into `p` """
return hp[0:3]
def rotx(theta):
""" Form rotation matrix around x axis """
row0 = [1.0, 0.0, 0.0]
row1 = [0.0, cos(theta), -sin(theta)]
row2 = [0.0, sin(theta), cos(theta)]
return np.array([row0, row1, row2])
def roty(theta):
""" Form rotation matrix around y axis """
row0 = [cos(theta), 0.0, sin(theta)]
row1 = [0.0, 1.0, 0.0]
row2 = [-sin(theta), 0.0, cos(theta)]
return np.array([row0, row1, row2])
def rotz(theta):
""" Form rotation matrix around z axis """
row0 = [cos(theta), -sin(theta), 0.0]
row1 = [sin(theta), cos(theta), 0.0]
row2 = [0.0, 0.0, 1.0]
return np.array([row0, row1, row2])
def aa2quat(angle, axis):
"""
Convert angle-axis to quaternion
Source:
<NAME>. "Quaternion kinematics for the error-state Kalman filter." arXiv
preprint arXiv:1711.02508 (2017).
[Page 22, eq (101), "Quaternion and rotation vector"]
"""
ax, ay, az = axis
qw = cos(angle / 2.0)
qx = ax * sin(angle / 2.0)
qy = ay * sin(angle / 2.0)
qz = az * sin(angle / 2.0)
return np.array([qw, qx, qy, qz])
def rvec2rot(rvec):
""" Rotation vector to rotation matrix """
# If small rotation
theta = sqrt(rvec @ rvec) # = norm(rvec), but faster
eps = 1e-8
if theta < eps:
return skew(rvec)
# Convert rvec to rotation matrix
rvec = rvec / theta
x, y, z = rvec
c = cos(theta)
s = sin(theta)
C = 1 - c
xs = x * s
ys = y * s
zs = z * s
xC = x * C
yC = y * C
zC = z * C
xyC = x * yC
yzC = y * zC
zxC = z * xC
row0 = [x * xC + c, xyC - zs, zxC + ys]
row1 = [xyC + zs, y * yC + c, yzC - xs]
row2 = [zxC - ys, yzC + xs, z * zC + c]
return np.array([row0, row1, row2])
def vecs2axisangle(u, v):
""" From 2 vectors form an axis-angle vector """
angle = math.acos(np.dot(u, v))
ax = normalize(np.cross(u, v))
return ax * angle
def euler321(yaw, pitch, roll):
"""
Convert yaw, pitch, roll in radians to a 3x3 rotation matrix.
Source:
Kuipers, <NAME>. Quaternions and Rotation Sequences: A Primer with
Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J:
Princeton University Press, 1999. Print.
[Page 85-86, "The Aerospace Sequence"]
"""
psi = yaw
theta = pitch
phi = roll
cpsi = cos(psi)
spsi = sin(psi)
ctheta = cos(theta)
stheta = sin(theta)
cphi = cos(phi)
sphi = sin(phi)
C11 = cpsi * ctheta
C21 = spsi * ctheta
C31 = -stheta
C12 = cpsi * stheta * sphi - spsi * cphi
C22 = spsi * stheta * sphi + cpsi * cphi
C32 = ctheta * sphi
C13 = cpsi * stheta * cphi + spsi * sphi
C23 = spsi * stheta * cphi - cpsi * sphi
C33 = ctheta * cphi
return np.array([[C11, C12, C13], [C21, C22, C23], [C31, C32, C33]])
def euler2quat(yaw, pitch, roll):
"""
Convert yaw, pitch, roll in radians to a quaternion.
Source:
Kuipers, <NAME>. Quaternions and Rotation Sequences: A Primer with
Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J:
Princeton University Press, 1999. Print.
[Page 166-167, "Euler Angles to Quaternion"]
"""
psi = yaw # Yaw
theta = pitch # Pitch
phi = roll # Roll
c_phi = cos(phi / 2.0)
c_theta = cos(theta / 2.0)
c_psi = cos(psi / 2.0)
s_phi = sin(phi / 2.0)
s_theta = sin(theta / 2.0)
s_psi = sin(psi / 2.0)
qw = c_psi * c_theta * c_phi + s_psi * s_theta * s_phi
qx = c_psi * c_theta * s_phi - s_psi * s_theta * c_phi
qy = c_psi * s_theta * c_phi + s_psi * c_theta * s_phi
qz = s_psi * c_theta * c_phi - c_psi * s_theta * s_phi
mag = sqrt(qw**2 + qx**2 + qy**2 + qz**2)
return np.array([qw / mag, qx / mag, qy / mag, qz / mag])
def quat2euler(q):
"""
Convert quaternion to euler angles (yaw, pitch, roll).
Source:
Kuipers, <NAME>. Quaternions and Rotation Sequences: A Primer with
Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J:
Princeton University Press, 1999. Print.
[Page 168, "Quaternion to Euler Angles"]
"""
qw, qx, qy, qz = q
m11 = (2 * qw**2) + (2 * qx**2) - 1
m12 = 2 * (qx * qy + qw * qz)
m13 = 2 * qx * qz - 2 * qw * qy
m23 = 2 * qy * qz + 2 * qw * qx
m33 = (2 * qw**2) + (2 * qz**2) - 1
psi = math.atan2(m12, m11)
theta = math.asin(-m13)
phi = math.atan2(m23, m33)
ypr = np.array([psi, theta, phi])
return ypr
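# Illustrative sketch (added for clarity): euler2quat() and quat2euler() form
# a consistent round-trip for a 3-2-1 (yaw-pitch-roll) sequence.
def _demo_euler_quat_roundtrip():
    ypr = np.array([0.3, -0.2, 0.1])   # yaw, pitch, roll [rad]
    q = euler2quat(*ypr)
    assert np.allclose(quat2euler(q), ypr, atol=1e-6)
    return q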
def quat2rot(q):
"""
Convert quaternion to 3x3 rotation matrix.
Source:
<NAME>. "A tutorial on se (3) transformation parameterizations
and on-manifold optimization." University of Malaga, Tech. Rep 3 (2010): 6.
[Page 18, Equation (2.20)]
"""
assert len(q) == 4
qw, qx, qy, qz = q
qx2 = qx**2
qy2 = qy**2
qz2 = qz**2
qw2 = qw**2
# Homogeneous form
C11 = qw2 + qx2 - qy2 - qz2
C12 = 2.0 * (qx * qy - qw * qz)
C13 = 2.0 * (qx * qz + qw * qy)
C21 = 2.0 * (qx * qy + qw * qz)
C22 = qw2 - qx2 + qy2 - qz2
C23 = 2.0 * (qy * qz - qw * qx)
C31 = 2.0 * (qx * qz - qw * qy)
C32 = 2.0 * (qy * qz + qw * qx)
C33 = qw2 - qx2 - qy2 + qz2
return np.array([[C11, C12, C13], [C21, C22, C23], [C31, C32, C33]])
def rot2euler(C):
"""
Convert 3x3 rotation matrix to euler angles (yaw, pitch, roll).
"""
assert C.shape == (3, 3)
q = rot2quat(C)
return quat2euler(q)
def rot2quat(C):
"""
Convert 3x3 rotation matrix to quaternion.
"""
assert C.shape == (3, 3)
m00 = C[0, 0]
m01 = C[0, 1]
m02 = C[0, 2]
m10 = C[1, 0]
m11 = C[1, 1]
m12 = C[1, 2]
m20 = C[2, 0]
m21 = C[2, 1]
m22 = C[2, 2]
tr = m00 + m11 + m22
if tr > 0:
S = sqrt(tr + 1.0) * 2.0
# S=4*qw
qw = 0.25 * S
qx = (m21 - m12) / S
qy = (m02 - m20) / S
qz = (m10 - m01) / S
elif ((m00 > m11) and (m00 > m22)):
S = sqrt(1.0 + m00 - m11 - m22) * 2.0
# S=4*qx
qw = (m21 - m12) / S
qx = 0.25 * S
qy = (m01 + m10) / S
qz = (m02 + m20) / S
elif m11 > m22:
S = sqrt(1.0 + m11 - m00 - m22) * 2.0
# S=4*qy
qw = (m02 - m20) / S
qx = (m01 + m10) / S
qy = 0.25 * S
qz = (m12 + m21) / S
else:
S = sqrt(1.0 + m22 - m00 - m11) * 2.0
# S=4*qz
qw = (m10 - m01) / S
qx = (m02 + m20) / S
qy = (m12 + m21) / S
qz = 0.25 * S
return quat_normalize(np.array([qw, qx, qy, qz]))
# QUATERNION ##################################################################
def quat_norm(q):
""" Returns norm of a quaternion """
qw, qx, qy, qz = q
return sqrt(qw**2 + qx**2 + qy**2 + qz**2)
def quat_normalize(q):
""" Normalize quaternion """
n = quat_norm(q)
qw, qx, qy, qz = q
return np.array([qw / n, qx / n, qy / n, qz / n])
def quat_conj(q):
""" Return conjugate quaternion """
qw, qx, qy, qz = q
q_conj = np.array([qw, -qx, -qy, -qz])
return q_conj
def quat_inv(q):
""" Invert quaternion """
return quat_conj(q)
def quat_left(q):
""" Quaternion left product matrix """
qw, qx, qy, qz = q
row0 = [qw, -qx, -qy, -qz]
row1 = [qx, qw, -qz, qy]
row2 = [qy, qz, qw, -qx]
row3 = [qz, -qy, qx, qw]
return np.array([row0, row1, row2, row3])
def quat_right(q):
""" Quaternion right product matrix """
qw, qx, qy, qz = q
row0 = [qw, -qx, -qy, -qz]
row1 = [qx, qw, qz, -qy]
row2 = [qy, -qz, qw, qx]
row3 = [qz, qy, -qx, qw]
return np.array([row0, row1, row2, row3])
def quat_lmul(p, q):
""" Quaternion left multiply """
assert len(p) == 4
assert len(q) == 4
lprod = quat_left(p)
return lprod @ q
def quat_rmul(p, q):
""" Quaternion right multiply """
assert len(p) == 4
assert len(q) == 4
rprod = quat_right(q)
return rprod @ p
def quat_mul(p, q):
""" Quaternion multiply p * q """
return quat_lmul(p, q)
def quat_omega(w):
""" Quaternion omega matrix """
return np.block([[-1.0 * skew(w), w], [w.T, 0.0]])
def quat_delta(dalpha):
""" Form quaternion from small angle rotation vector dalpha """
half_norm = 0.5 * norm(dalpha)
scalar = cos(half_norm)
vector = sinc(half_norm) * 0.5 * dalpha
dqw = scalar
dqx, dqy, dqz = vector
dq = np.array([dqw, dqx, dqy, dqz])
return dq
def quat_integrate(q_k, w, dt):
"""
<NAME>. "Quaternion kinematics for the error-state Kalman filter." arXiv
preprint arXiv:1711.02508 (2017).
[Section 4.6.1 Zeroth-order integration, p.47]
"""
w_norm = norm(w)
q_scalar = 0.0
q_vec = np.array([0.0, 0.0, 0.0])
if w_norm > 1e-5:
q_scalar = cos(w_norm * dt * 0.5)
q_vec = w / w_norm * sin(w_norm * dt * 0.5)
else:
q_scalar = 1.0
q_vec = [0.0, 0.0, 0.0]
q_kp1 = quat_mul(q_k, np.array([q_scalar, *q_vec]))
return q_kp1
def quat_slerp(q_i, q_j, t):
""" Quaternion Slerp `q_i` and `q_j` with parameter `t` """
assert len(q_i) == 4
assert len(q_j) == 4
assert t >= 0.0 and t <= 1.0
# Compute the cosine of the angle between the two vectors.
dot_result = q_i @ q_j
# If the dot product is negative, slerp won't take
# the shorter path. Note that q_j and -q_j are equivalent when
# the negation is applied to all four components. Fix by
# reversing one quaternion.
if dot_result < 0.0:
q_j = -q_j
dot_result = -dot_result
DOT_THRESHOLD = 0.9995
if dot_result > DOT_THRESHOLD:
# If the inputs are too close for comfort, linearly interpolate
# and normalize the result.
return q_i + t * (q_j - q_i)
# Since dot is in range [0, DOT_THRESHOLD], acos is safe
theta_0 = acos(dot_result) # theta_0 = angle between input vectors
theta = theta_0 * t # theta = angle between q_i and result
sin_theta = sin(theta) # compute this value only once
sin_theta_0 = sin(theta_0) # compute this value only once
# == sin(theta_0 - theta) / sin(theta_0)
s0 = cos(theta) - dot_result * sin_theta / sin_theta_0
s1 = sin_theta / sin_theta_0
return (s0 * q_i) + (s1 * q_j)
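# Illustrative sketch (added for clarity): slerp halfway between identity and a
# 90 degree yaw rotation should give roughly a 45 degree yaw rotation.
def _demo_quat_slerp():
    q0 = euler2quat(0.0, 0.0, 0.0)
    q1 = euler2quat(np.deg2rad(90.0), 0.0, 0.0)
    q_half = quat_slerp(q0, q1, 0.5)
    return quat2euler(quat_normalize(q_half))   # -> yaw ~ pi/4, pitch ~ 0, roll ~ 0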
# TF ##########################################################################
def tf(rot, trans):
"""
Form 4x4 homogeneous transformation matrix from rotation `rot` and
translation `trans`. Where the rotation component `rot` can be a rotation
matrix or a quaternion.
"""
C = None
if rot.shape == (4,) or rot.shape == (4, 1):
C = quat2rot(rot)
elif rot.shape == (3, 3):
C = rot
else:
raise RuntimeError("Invalid rotation!")
T = np.eye(4, 4)
T[0:3, 0:3] = C
T[0:3, 3] = trans
return T
def tf_rot(T):
""" Return rotation matrix from 4x4 homogeneous transform """
assert T.shape == (4, 4)
return T[0:3, 0:3]
def tf_quat(T):
""" Return quaternion from 4x4 homogeneous transform """
assert T.shape == (4, 4)
return rot2quat(tf_rot(T))
def tf_trans(T):
""" Return translation vector from 4x4 homogeneous transform """
assert T.shape == (4, 4)
return T[0:3, 3]
def tf_inv(T):
""" Invert 4x4 homogeneous transform """
assert T.shape == (4, 4)
return np.linalg.inv(T)
def tf_point(T, p):
""" Transform 3d point """
assert T.shape == (4, 4)
assert p.shape == (3,) or p.shape == (3, 1)
hpoint = np.array([p[0], p[1], p[2], 1.0])
return (T @ hpoint)[0:3]
def tf_hpoint(T, hp):
""" Transform 3d point """
assert T.shape == (4, 4)
assert hp.shape == (4,) or hp.shape == (4, 1)
return (T @ hp)[0:3]
def tf_decompose(T):
""" Decompose into rotation matrix and translation vector"""
assert T.shape == (4, 4)
C = tf_rot(T)
r = tf_trans(T)
return (C, r)
def tf_lerp(pose_i, pose_j, t):
""" Interpolate pose `pose_i` and `pose_j` with parameter `t` """
assert pose_i.shape == (4, 4)
assert pose_j.shape == (4, 4)
assert t >= 0.0 and t <= 1.0
# Decompose start pose
r_i = tf_trans(pose_i)
q_i = tf_quat(pose_i)
# Decompose end pose
r_j = tf_trans(pose_j)
q_j = tf_quat(pose_j)
# Interpolate translation and rotation
r_lerp = lerp(r_i, r_j, t)
q_lerp = quat_slerp(q_i, q_j, t)
return tf(q_lerp, r_lerp)
def tf_perturb(T, i, step_size):
""" Perturb transformation matrix """
assert T.shape == (4, 4)
assert i >= 0 and i <= 5
# Setup
C = tf_rot(T)
r = tf_trans(T)
if i >= 0 and i <= 2:
# Perturb translation
r[i] += step_size
elif i >= 3 and i <= 5:
# Perturb rotation
rvec = np.array([0.0, 0.0, 0.0])
rvec[i - 3] = step_size
q = rot2quat(C)
dq = quat_delta(rvec)
q_diff = quat_mul(q, dq)
q_diff = quat_normalize(q_diff)
C = quat2rot(q_diff)
return tf(C, r)
def tf_update(T, dx):
""" Update transformation matrix """
assert T.shape == (4, 4)
q = tf_quat(T)
r = tf_trans(T)
dr = dx[0:3]
dalpha = dx[3:6]
dq = quat_delta(dalpha)
return tf(quat_mul(q, dq), r + dr)
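# Illustrative sketch (added for clarity): build a body pose from a yaw angle
# and a translation, then map a body-frame point into the world frame.
def _demo_tf_point():
    C_WB = euler321(np.deg2rad(90.0), 0.0, 0.0)
    r_WB = np.array([1.0, 2.0, 3.0])
    T_WB = tf(C_WB, r_WB)
    p_B = np.array([1.0, 0.0, 0.0])
    p_W = tf_point(T_WB, p_B)   # ~ [1.0, 3.0, 3.0]
    return p_W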
###############################################################################
# MATPLOTLIB
###############################################################################
import matplotlib.pylab as plt
def plot_set_axes_equal(ax):
"""
Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def plot_tf(ax, T, **kwargs):
"""
Plot 4x4 Homogeneous Transform
Args:
ax (matplotlib.axes.Axes): Plot axes object
T (np.array): 4x4 homogeneous transform (i.e. Pose in the world frame)
Keyword args:
size (float): Size of the coordinate-axes
linewidth (float): Thickness of the coordinate-axes
name (str): Frame name
name_offset (np.array or list): Position offset for displaying the frame's name
fontsize (float): Frame font size
fontweight (float): Frame font weight
"""
assert T.shape == (4, 4)
size = kwargs.get('size', 1)
# linewidth = kwargs.get('linewidth', 3)
name = kwargs.get('name', None)
name_offset = kwargs.get('name_offset', [0, 0, -0.01])
fontsize = kwargs.get('fontsize', 10)
fontweight = kwargs.get('fontweight', 'bold')
colors = kwargs.get('colors', ['r-', 'g-', 'b-'])
origin = tf_trans(T)
lx = tf_point(T, np.array([size, 0.0, 0.0]))
ly = tf_point(T, np.array([0.0, size, 0.0]))
lz = tf_point(T, np.array([0.0, 0.0, size]))
# Draw x-axis
px = [origin[0], lx[0]]
py = [origin[1], lx[1]]
pz = [origin[2], lx[2]]
ax.plot(px, py, pz, colors[0])
# Draw y-axis
px = [origin[0], ly[0]]
py = [origin[1], ly[1]]
pz = [origin[2], ly[2]]
ax.plot(px, py, pz, colors[1])
# Draw z-axis
px = [origin[0], lz[0]]
py = [origin[1], lz[1]]
pz = [origin[2], lz[2]]
ax.plot(px, py, pz, colors[2])
# Draw label
if name is not None:
x = origin[0] + name_offset[0]
y = origin[1] + name_offset[1]
z = origin[2] + name_offset[2]
ax.text(x, y, z, name, fontsize=fontsize, fontweight=fontweight)
def plot_xyz(title, data, key_time, key_x, key_y, key_z, ylabel):
"""
Plot XYZ plot
Args:
title (str): Plot title
data (Dict[str, pandas.DataFrame]): Plot data
key_time (str): Dictionary key for timestamps
key_x (str): Dictionary key x-axis
key_y (str): Dictionary key y-axis
key_z (str): Dictionary key z-axis
ylabel (str): Y-axis label
"""
axis = ['x', 'y', 'z']
colors = ["r", "g", "b"]
keys = [key_x, key_y, key_z]
line_styles = ["--", "-", "x"]
# Time
time_data = {}
for label, series_data in data.items():
ts0 = series_data[key_time][0]
time_data[label] = ts2sec(series_data[key_time].to_numpy() - ts0)
# Plot subplots
plt.figure()
for i in range(3):
plt.subplot(3, 1, i + 1)
for (label, series_data), line in zip(data.items(), line_styles):
line_style = colors[i] + line
x_data = time_data[label]
y_data = series_data[keys[i]].to_numpy()
plt.plot(x_data, y_data, line_style, label=label)
plt.xlabel("Time [s]")
plt.ylabel(ylabel)
plt.legend(loc=0)
plt.title(f"{title} in {axis[i]}-axis")
plt.subplots_adjust(hspace=0.65)
###############################################################################
# CV
###############################################################################
# UTILS #######################################################################
def lookat(cam_pos, target_pos, **kwargs):
""" Form look at matrix """
up_axis = kwargs.get('up_axis', np.array([0.0, -1.0, 0.0]))
assert len(cam_pos) == 3
assert len(target_pos) == 3
assert len(up_axis) == 3
# Note: If we were using OpenGL the cam_dir would be the opposite direction,
# since in OpenGL the camera forward is -z. In robotics however our camera is
# +z forward.
cam_z = normalize(target_pos - cam_pos)
cam_x = normalize(cross(up_axis, cam_z))
cam_y = cross(cam_z, cam_x)
T_WC = zeros((4, 4))
T_WC[0:3, 0] = cam_x.T
T_WC[0:3, 1] = cam_y.T
T_WC[0:3, 2] = cam_z.T
T_WC[0:3, 3] = cam_pos
T_WC[3, 3] = 1.0
return T_WC
# GEOMETRY ####################################################################
def linear_triangulation(P_i, P_j, z_i, z_j):
"""
Linear triangulation
This function is used to triangulate a single 3D point observed by two
camera frames (be it in time with the same camera, or two different cameras
with known extrinsics).
Args:
P_i (np.array): First camera 3x4 projection matrix
P_j (np.array): Second camera 3x4 projection matrix
z_i (np.array): First keypoint measurement
z_j (np.array): Second keypoint measurement
Returns:
p_Ci (np.array): 3D point w.r.t first camera
"""
# First three rows of P_i and P_j
P1T_i = P_i[0, :]
P2T_i = P_i[1, :]
P3T_i = P_i[2, :]
P1T_j = P_j[0, :]
P2T_j = P_j[1, :]
P3T_j = P_j[2, :]
# Image point from the first and second frame
x_i, y_i = z_i
x_j, y_j = z_j
# Form the A matrix of AX = 0
A = zeros((4, 4))
A[0, :] = x_i * P3T_i - P1T_i
A[1, :] = y_i * P3T_i - P2T_i
A[2, :] = x_j * P3T_j - P1T_j
A[3, :] = y_j * P3T_j - P2T_j
# Use SVD to solve AX = 0
(_, _, Vh) = svd(A.T @ A)
hp = Vh.T[:, -1] # Get the best result from SVD (last column of V)
hp = hp / hp[-1] # Normalize the homogeneous 3D point
p = hp[0:3] # Return only the first three components (x, y, z)
return p
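# Illustrative sketch (added for clarity): triangulate a known 3D point from
# two synthetic pinhole views. The intrinsics and baseline are arbitrary, and
# the projection matrices are formed directly as K [C | r].
def _demo_linear_triangulation():
    K = np.array([[320.0, 0.0, 320.0], [0.0, 320.0, 240.0], [0.0, 0.0, 1.0]])
    P0 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
    P1 = K @ np.hstack([np.eye(3), np.array([[-0.2], [0.0], [0.0]])])
    p_W = np.array([0.1, -0.05, 2.0])
    hp = np.array([*p_W, 1.0])
    z0 = (P0 @ hp)[0:2] / (P0 @ hp)[2]
    z1 = (P1 @ hp)[0:2] / (P1 @ hp)[2]
    p_est = linear_triangulation(P0, P1, z0, z1)
    assert np.allclose(p_est, p_W, atol=1e-6)
    return p_est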
# PINHOLE #####################################################################
def focal_length(image_width, fov_deg):
"""
Estimated focal length based on `image_width` and field of fiew `fov_deg`
in degrees.
"""
return (image_width / 2.0) / tan(deg2rad(fov_deg / 2.0))
def pinhole_K(params):
""" Form camera matrix K """
fx, fy, cx, cy = params
return np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
def pinhole_P(params, T_WC):
""" Form 3x4 projection matrix P """
K = pinhole_K(params)
T_CW = inv(T_WC)
C = tf_rot(T_CW)
r = tf_trans(T_CW)
P = zeros((3, 4))
P[0:3, 0:3] = C
P[0:3, 3] = r
P = K @ P
return P
def pinhole_project(proj_params, p_C):
""" Project 3D point onto image plane using pinhole camera model """
assert len(proj_params) == 4
assert len(p_C) == 3
# Project
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Scale and center
fx, fy, cx, cy = proj_params
z = np.array([fx * x[0] + cx, fy * x[1] + cy])
return z
def pinhole_params_jacobian(x):
""" Form pinhole parameter jacobian """
return np.array([[x[0], 0.0, 1.0, 0.0], [0.0, x[1], 0.0, 1.0]])
def pinhole_point_jacobian(proj_params):
""" Form pinhole point jacobian """
fx, fy, _, _ = proj_params
return np.array([[fx, 0.0], [0.0, fy]])
# RADTAN4 #####################################################################
def radtan4_distort(dist_params, p):
""" Distort point with Radial-Tangential distortion """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, p1, p2 = dist_params
# Point
x, y = p
# Apply radial distortion
x2 = x * x
y2 = y * y
r2 = x2 + y2
r4 = r2 * r2
radial_factor = 1.0 + (k1 * r2) + (k2 * r4)
x_dash = x * radial_factor
y_dash = y * radial_factor
# Apply tangential distortion
xy = x * y
x_ddash = x_dash + (2.0 * p1 * xy + p2 * (r2 + 2.0 * x2))
y_ddash = y_dash + (p1 * (r2 + 2.0 * y2) + 2.0 * p2 * xy)
return np.array([x_ddash, y_ddash])
def radtan4_point_jacobian(dist_params, p):
""" Radial-tangential point jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, p1, p2 = dist_params
# Point
x, y = p
# Apply radial distortion
x2 = x * x
y2 = y * y
r2 = x2 + y2
r4 = r2 * r2
# Point Jacobian
# Let u = [x; y] normalized point
# Let u' be the distorted u
# The jacobian of u' w.r.t. u (or du'/du) is:
J_point = zeros((2, 2))
J_point[0, 0] = k1 * r2 + k2 * r4 + 2.0 * p1 * y + 6.0 * p2 * x
J_point[0, 0] += x * (2.0 * k1 * x + 4.0 * k2 * x * r2) + 1.0
J_point[1, 0] = 2.0 * p1 * x + 2.0 * p2 * y
J_point[1, 0] += y * (2.0 * k1 * x + 4.0 * k2 * x * r2)
J_point[0, 1] = J_point[1, 0]
J_point[1, 1] = k1 * r2 + k2 * r4 + 6.0 * p1 * y + 2.0 * p2 * x
J_point[1, 1] += y * (2.0 * k1 * y + 4.0 * k2 * y * r2) + 1.0
# Above is generated using sympy
return J_point
def radtan4_undistort(dist_params, p0):
""" Un-distort point with Radial-Tangential distortion """
assert len(dist_params) == 4
assert len(p0) == 2
# Undistort
p = p0
max_iter = 5
for _ in range(max_iter):
# Error
p_distorted = radtan4_distort(dist_params, p)
J = radtan4_point_jacobian(dist_params, p)
err = (p0 - p_distorted)
# Update
# dp = inv(J' * J) * J' * err
dp = pinv(J) @ err
p = p + dp
# Check threshold
if (err.T @ err) < 1e-15:
break
return p
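# Illustrative sketch (added for clarity): distorting and then undistorting a
# normalized image point should be (numerically) an identity for mild
# distortion coefficients. All values are arbitrary.
def _demo_radtan4_roundtrip():
    dist_params = np.array([0.01, 0.001, 0.001, 0.001])
    p = np.array([0.1, 0.2])
    p_dist = radtan4_distort(dist_params, p)
    p_undist = radtan4_undistort(dist_params, p_dist)
    assert np.allclose(p, p_undist, atol=1e-6)
    return p_dist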
def radtan4_params_jacobian(dist_params, p):
""" Radial-Tangential distortion parameter jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Point
x, y = p
# Setup
x2 = x * x
y2 = y * y
xy = x * y
r2 = x2 + y2
r4 = r2 * r2
# Params Jacobian
J_params = zeros((2, 4))
J_params[0, 0] = x * r2
J_params[0, 1] = x * r4
J_params[0, 2] = 2.0 * xy
J_params[0, 3] = 3.0 * x2 + y2
J_params[1, 0] = y * r2
J_params[1, 1] = y * r4
J_params[1, 2] = x2 + 3.0 * y2
J_params[1, 3] = 2.0 * xy
return J_params
# EQUI4 #######################################################################
def equi4_distort(dist_params, p):
""" Distort point with Equi-distant distortion """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, k3, k4 = dist_params
# Distort
x, y = p
r = sqrt(x * x + y * y)
th = math.atan(r)
th2 = th * th
th4 = th2 * th2
th6 = th4 * th2
th8 = th4 * th4
thd = th * (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8)
s = thd / r
x_dash = s * x
y_dash = s * y
return np.array([x_dash, y_dash])
def equi4_undistort(dist_params, p):
""" Undistort point using Equi-distant distortion """
thd = sqrt(p[0] * p[0] + p[1] * p[1])
# Distortion parameters
k1, k2, k3, k4 = dist_params
th = thd # Initial guess
for _ in range(20):
th2 = th * th
th4 = th2 * th2
th6 = th4 * th2
th8 = th4 * th4
th = thd / (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8)
scaling = tan(th) / thd
return np.array([p[0] * scaling, p[1] * scaling])
def equi4_params_jacobian(dist_params, p):
""" Equi-distant distortion params jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Jacobian
x, y = p
r = sqrt(x**2 + y**2)
th = atan(r)
J_params = zeros((2, 4))
J_params[0, 0] = x * th**3 / r
J_params[0, 1] = x * th**5 / r
J_params[0, 2] = x * th**7 / r
J_params[0, 3] = x * th**9 / r
J_params[1, 0] = y * th**3 / r
J_params[1, 1] = y * th**5 / r
J_params[1, 2] = y * th**7 / r
J_params[1, 3] = y * th**9 / r
return J_params
def equi4_point_jacobian(dist_params, p):
""" Equi-distant distortion point jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, k3, k4 = dist_params
# Jacobian
x, y = p
r = sqrt(x**2 + y**2)
th = math.atan(r)
th2 = th**2
th4 = th**4
th6 = th**6
th8 = th**8
thd = th * (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8)
th_r = 1.0 / (r * r + 1.0)
thd_th = 1.0 + 3.0 * k1 * th2
thd_th += 5.0 * k2 * th4
thd_th += 7.0 * k3 * th6
thd_th += 9.0 * k4 * th8
s = thd / r
s_r = thd_th * th_r / r - thd / (r * r)
r_x = 1.0 / r * x
r_y = 1.0 / r * y
J_point = zeros((2, 2))
J_point[0, 0] = s + x * s_r * r_x
J_point[0, 1] = x * s_r * r_y
J_point[1, 0] = y * s_r * r_x
J_point[1, 1] = s + y * s_r * r_y
return J_point
# PINHOLE RADTAN4 #############################################################
def pinhole_radtan4_project(proj_params, dist_params, p_C):
""" Pinhole + Radial-Tangential project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Distort
x_dist = radtan4_distort(dist_params, x)
# Scale and center to image plane
fx, fy, cx, cy = proj_params
z = np.array([fx * x_dist[0] + cx, fy * x_dist[1] + cy])
return z
def pinhole_radtan4_backproject(proj_params, dist_params, z):
""" Pinhole + Radial-Tangential back-project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
# Convert image pixel coordinates to normalized retinal coordinates
fx, fy, cx, cy = proj_params
x = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
# Undistort
x = radtan4_undistort(dist_params, x)
# 3D ray
p = np.array([x[0], x[1], 1.0])
return p
def pinhole_radtan4_undistort(proj_params, dist_params, z):
""" Pinhole + Radial-Tangential undistort """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
# Back project and undistort
fx, fy, cx, cy = proj_params
p = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
p_undist = radtan4_undistort(dist_params, p)
# Project undistorted point to image plane
return np.array([p_undist[0] * fx + cx, p_undist[1] * fy + cy])
def pinhole_radtan4_project_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Radial-Tangential project jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project 3D point
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Jacobian
J_proj = zeros((2, 3))
J_proj[0, :] = [1 / p_C[2], 0, -p_C[0] / p_C[2]**2]
J_proj[1, :] = [0, 1 / p_C[2], -p_C[1] / p_C[2]**2]
J_dist_point = radtan4_point_jacobian(dist_params, x)
J_proj_point = pinhole_point_jacobian(proj_params)
return J_proj_point @ J_dist_point @ J_proj
def pinhole_radtan4_params_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Radial-Tangential params jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Project 3D point
x_dist = radtan4_distort(dist_params, x) # Distort point
J_proj_point = pinhole_point_jacobian(proj_params)
J_dist_params = radtan4_params_jacobian(dist_params, x)
J = zeros((2, 8))
J[0:2, 0:4] = pinhole_params_jacobian(x_dist)
J[0:2, 4:8] = J_proj_point @ J_dist_params
return J
# PINHOLE EQUI4 ###############################################################
def pinhole_equi4_project(proj_params, dist_params, p_C):
""" Pinhole + Equi-distant project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Distort
x_dist = equi4_distort(dist_params, x)
# Scale and center to image plane
fx, fy, cx, cy = proj_params
z = np.array([fx * x_dist[0] + cx, fy * x_dist[1] + cy])
return z
def pinhole_equi4_backproject(proj_params, dist_params, z):
""" Pinhole + Equi-distant back-project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
# Convert image pixel coordinates to normalized retinal coordinates
fx, fy, cx, cy = proj_params
x = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
# Undistort
x = equi4_undistort(dist_params, x)
# 3D ray
p = np.array([x[0], x[1], 1.0])
return p
def pinhole_equi4_undistort(proj_params, dist_params, z):
""" Pinhole + Equi-distant undistort """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
# Back project and undistort
fx, fy, cx, cy = proj_params
p = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
p_undist = equi4_undistort(dist_params, p)
# Project undistorted point to image plane
return np.array([p_undist[0] * fx + cx, p_undist[1] * fy + cy])
def pinhole_equi4_project_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Equi-distant project jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project 3D point
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Jacobian
J_proj = zeros((2, 3))
J_proj[0, :] = [1 / p_C[2], 0, -p_C[0] / p_C[2]**2]
J_proj[1, :] = [0, 1 / p_C[2], -p_C[1] / p_C[2]**2]
J_dist_point = equi4_point_jacobian(dist_params, x)
J_proj_point = pinhole_point_jacobian(proj_params)
return J_proj_point @ J_dist_point @ J_proj
def pinhole_equi4_params_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Equi-distant params jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Project 3D point
x_dist = equi4_distort(dist_params, x) # Distort point
J_proj_point = pinhole_point_jacobian(proj_params)
J_dist_params = equi4_params_jacobian(dist_params, x)
J = zeros((2, 8))
J[0:2, 0:4] = pinhole_params_jacobian(x_dist)
J[0:2, 4:8] = J_proj_point @ J_dist_params
return J
# CAMERA GEOMETRY #############################################################
@dataclass
class CameraGeometry:
""" Camera Geometry """
cam_idx: int
resolution: tuple
proj_model: str
dist_model: str
proj_params_size: int
dist_params_size: int
project_fn: FunctionType
backproject_fn: FunctionType
undistort_fn: FunctionType
J_proj_fn: FunctionType
J_params_fn: FunctionType
def get_proj_params_size(self):
""" Return projection parameter size """
return self.proj_params_size
def get_dist_params_size(self):
""" Return distortion parameter size """
return self.dist_params_size
def get_params_size(self):
""" Return parameter size """
return self.get_proj_params_size() + self.get_dist_params_size()
def proj_params(self, params):
""" Extract projection parameters """
return params[:self.proj_params_size]
def dist_params(self, params):
""" Extract distortion parameters """
return params[-self.dist_params_size:]
def project(self, params, p_C):
""" Project point `p_C` with camera parameters `params` """
# Project
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
z = self.project_fn(proj_params, dist_params, p_C)
# Make sure point is infront of camera
if p_C[2] < 0.0:
return False, z
# Make sure image point is within image bounds
x_ok = z[0] >= 0.0 and z[0] <= self.resolution[0]
y_ok = z[1] >= 0.0 and z[1] <= self.resolution[1]
if x_ok and y_ok:
return True, z
return False, z
def backproject(self, params, z):
""" Back-project image point `z` with camera parameters `params` """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
return self.backproject_fn(proj_params, dist_params, z)
def undistort(self, params, z):
""" Undistort image point `z` with camera parameters `params` """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
return self.undistort_fn(proj_params, dist_params, z)
def J_proj(self, params, p_C):
""" Form Jacobian w.r.t. p_C """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
return self.J_proj_fn(proj_params, dist_params, p_C)
def J_params(self, params, p_C):
""" Form Jacobian w.r.t. camera parameters """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
return self.J_params_fn(proj_params, dist_params, p_C)
def pinhole_radtan4_setup(cam_idx, cam_res):
""" Setup Pinhole + Radtan4 camera geometry """
return CameraGeometry(
cam_idx, cam_res, "pinhole", "radtan4", 4, 4, pinhole_radtan4_project,
pinhole_radtan4_backproject, pinhole_radtan4_undistort,
pinhole_radtan4_project_jacobian, pinhole_radtan4_params_jacobian)
def pinhole_equi4_setup(cam_idx, cam_res):
""" Setup Pinhole + Equi camera geometry """
return CameraGeometry(cam_idx, cam_res, "pinhole", "equi4", 4, 4,
pinhole_equi4_project, pinhole_equi4_backproject,
pinhole_equi4_undistort, pinhole_equi4_project_jacobian,
pinhole_equi4_params_jacobian)
def camera_geometry_setup(cam_idx, cam_res, proj_model, dist_model):
""" Setup camera geometry """
if proj_model == "pinhole" and dist_model == "radtan4":
return pinhole_radtan4_setup(cam_idx, cam_res)
elif proj_model == "pinhole" and dist_model == "equi4":
return pinhole_equi4_setup(cam_idx, cam_res)
else:
raise RuntimeError(f"Unrecognized [{proj_model}]-[{dist_model}] combo!")
################################################################################
# DATASET
################################################################################
# TIMELINE######################################################################
@dataclass
class CameraEvent:
""" Camera Event """
ts: int
cam_idx: int
image: np.array
@dataclass
class ImuEvent:
""" IMU Event """
ts: int
imu_idx: int
acc: np.array
gyr: np.array
@dataclass
class Timeline:
""" Timeline """
def __init__(self):
self.data = {}
def num_timestamps(self):
""" Return number of timestamps """
return len(self.data)
def num_events(self):
""" Return number of events """
nb_events = 0
for _, events in self.data.items():
nb_events += len(events)
return nb_events
def get_timestamps(self):
""" Get timestamps """
return sorted(list(self.data.keys()))
def add_event(self, ts, event):
""" Add event """
if ts not in self.data:
self.data[ts] = [event]
else:
self.data[ts].append(event)
def get_events(self, ts):
""" Get events """
return self.data[ts]
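# Illustrative sketch (added for clarity): group an IMU event and a camera
# event under the same timestamp on a Timeline. Measurement values are dummies.
def _demo_timeline():
    timeline = Timeline()
    ts = sec2ts(0.0)
    timeline.add_event(ts, ImuEvent(ts, 0, np.array([0.0, 0.0, 9.81]), np.zeros(3)))
    timeline.add_event(ts, CameraEvent(ts, 0, np.zeros((480, 640))))
    return timeline.get_timestamps(), timeline.get_events(ts)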
# EUROC ########################################################################
class EurocSensor:
""" Euroc Sensor """
def __init__(self, yaml_path):
# Load yaml file
config = load_yaml(yaml_path)
# General sensor definitions.
self.sensor_type = config.sensor_type
self.comment = config.comment
# Sensor extrinsics wrt. the body-frame.
self.T_BS = np.array(config.T_BS.data).reshape((4, 4))
# Camera specific definitions.
if config.sensor_type == "camera":
self.rate_hz = config.rate_hz
self.resolution = config.resolution
self.camera_model = config.camera_model
self.intrinsics = config.intrinsics
self.distortion_model = config.distortion_model
self.distortion_coefficients = config.distortion_coefficients
elif config.sensor_type == "imu":
self.rate_hz = config.rate_hz
self.gyro_noise_density = config.gyroscope_noise_density
self.gyro_random_walk = config.gyroscope_random_walk
self.accel_noise_density = config.accelerometer_noise_density
self.accel_random_walk = config.accelerometer_random_walk
class EurocImuData:
""" Euroc Imu data """
def __init__(self, data_dir):
self.imu_dir = Path(data_dir, 'mav0', 'imu0')
self.config = EurocSensor(Path(self.imu_dir, 'sensor.yaml'))
self.timestamps = []
self.acc = {}
self.gyr = {}
# Load data
df = pandas.read_csv(Path(self.imu_dir, 'data.csv'))
df = df.rename(columns=lambda x: x.strip())
# -- Timestamp
timestamps = df['#timestamp [ns]'].to_numpy()
# -- Accelerometer measurement
acc_x = df['a_RS_S_x [m s^-2]'].to_numpy()
acc_y = df['a_RS_S_y [m s^-2]'].to_numpy()
acc_z = df['a_RS_S_z [m s^-2]'].to_numpy()
# -- Gyroscope measurement
gyr_x = df['w_RS_S_x [rad s^-1]'].to_numpy()
gyr_y = df['w_RS_S_y [rad s^-1]'].to_numpy()
gyr_z = df['w_RS_S_z [rad s^-1]'].to_numpy()
# -- Load
for i, ts in enumerate(timestamps):
self.timestamps.append(ts)
self.acc[ts] = np.array([acc_x[i], acc_y[i], acc_z[i]])
self.gyr[ts] = np.array([gyr_x[i], gyr_y[i], gyr_z[i]])
class EurocCameraData:
""" Euroc Camera data """
def __init__(self, data_dir, cam_idx):
self.cam_idx = cam_idx
self.cam_dir = Path(data_dir, 'mav0', 'cam' + str(cam_idx))
self.config = EurocSensor(Path(self.cam_dir, 'sensor.yaml'))
self.timestamps = []
self.image_paths = {}
# Load image paths
cam_data_dir = str(Path(self.cam_dir, 'data', '*.png'))
for img_file in sorted(glob.glob(cam_data_dir)):
ts_str, _ = os.path.basename(img_file).split('.')
ts = int(ts_str)
self.timestamps.append(ts)
self.image_paths[ts] = img_file
def get_image_path_list(self):
""" Return list of image paths """
    return [img_path for _, img_path in self.image_paths.items()]
class EurocGroundTruth:
""" Euroc ground truth """
def __init__(self, data_dir):
self.timestamps = []
self.T_WB = {}
self.v_WB = {}
self.w_WB = {}
self.a_WB = {}
# Load data
dir_name = 'state_groundtruth_estimate0'
data_csv = Path(data_dir, 'mav0', dir_name, 'data.csv')
df = pandas.read_csv(data_csv)
df = df.rename(columns=lambda x: x.strip())
# -- Timestamp
timestamps = df['#timestamp'].to_numpy()
# -- Body pose in world frame
rx_list = df['p_RS_R_x [m]'].to_numpy()
ry_list = df['p_RS_R_y [m]'].to_numpy()
rz_list = df['p_RS_R_z [m]'].to_numpy()
qw_list = df['q_RS_w []'].to_numpy()
qx_list = df['q_RS_x []'].to_numpy()
qy_list = df['q_RS_y []'].to_numpy()
qz_list = df['q_RS_z []'].to_numpy()
# -- Body velocity in world frame
vx_list = df['v_RS_R_x [m s^-1]'].to_numpy()
vy_list = df['v_RS_R_y [m s^-1]'].to_numpy()
vz_list = df['v_RS_R_z [m s^-1]'].to_numpy()
# -- Add to class
for i, ts in enumerate(timestamps):
r_WB = np.array([rx_list[i], ry_list[i], rz_list[i]])
      q_WB = np.array([qw_list[i], qx_list[i], qy_list[i], qz_list[i]])
import numpy as np
import os
import cohere.utilities.utils as ut
import math
from typing import Union
from skimage.restoration import unwrap_phase  # used by post_process below
# tensorflow will use cpu
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow for trained model
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.activations import sigmoid, tanh
class Mymodel:
__model = None
__amp_layer_model = None
__ph_layer_model = None
@staticmethod
def get_model(model_file):
""" Static access method. """
if Mymodel.__model == None:
Mymodel(model_file)
return Mymodel.__amp_layer_model, Mymodel.__ph_layer_model
def __init__(self, model_file):
""" Virtually private constructor. """
if Mymodel.__model != None:
raise Exception("This class is a singleton!")
else:
# load trained network
Mymodel.__model = load_model(
model_file,
custom_objects={
'tf': tf,
'loss_comb2_scale': loss_comb2_scale,
'sigmoid': sigmoid,
'tanh': tanh,
'math': math,
'combine_complex': combine_complex,
'get_mask': get_mask,
'ff_propagation': ff_propagation
})
model = Mymodel.__model
# get the outputs from amplitude and phase layers
Mymodel.__amp_layer_model = Model(inputs=model.input,
outputs=model.get_layer('amp').output)
Mymodel.__ph_layer_model = Model(inputs=model.input,
outputs=model.get_layer('phi').output)
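# Illustrative usage (the model file name is hypothetical; get_model returns the
# amplitude and phase sub-models, e.g. callable via their Keras predict method):
#   amp_model, ph_model = Mymodel.get_model('trained_model.hdf5')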
def threshold_by_edge(fp: np.ndarray) -> np.ndarray:
# threshold by left edge value
mask = np.ones_like(fp, dtype=bool)
mask[tuple([slice(1, None)] * fp.ndim)] = 0
zero = 1e-6
cut = np.max(fp[mask])
binary = np.zeros_like(fp)
binary[(np.abs(fp) > zero) & (fp > cut)] = 1
return binary
def select_central_object(fp: np.ndarray) -> np.ndarray:
import scipy.ndimage as ndimage
zero = 1e-6
binary = np.abs(fp)
binary[binary > zero] = 1
binary[binary <= zero] = 0
# cluster by connectivity
struct = ndimage.morphology.generate_binary_structure(fp.ndim,
1).astype("uint8")
label, nlabel = ndimage.label(binary, structure=struct)
# select largest cluster
select = np.argmax(np.bincount(np.ravel(label))[1:]) + 1
binary[label != select] = 0
fp[binary == 0] = 0
return fp
def get_central_object_extent(fp: np.ndarray) -> list:
fp_cut = threshold_by_edge(np.abs(fp))
need = select_central_object(fp_cut)
# get extend of cluster
extent = [np.max(s) + 1 - np.min(s) for s in np.nonzero(need)]
return extent
def get_oversample_ratio(fp: np.ndarray) -> np.ndarray:
""" get oversample ratio
fp = diffraction pattern
"""
# autocorrelation
acp = np.fft.fftshift(np.fft.ifftn(np.abs(fp)**2.))
aacp = np.abs(acp)
# get extent
blob = get_central_object_extent(aacp)
# correct for underestimation due to thresholding
correction = [0.025, 0.025, 0.0729][:fp.ndim]
extent = [
min(m, s + int(round(f * aacp.shape[i], 1)))
for i, (s, f, m) in enumerate(zip(blob, correction, aacp.shape))
]
# oversample ratio
oversample = [
2. * s / (e + (1 - s % 2)) for s, e in zip(aacp.shape, extent)
]
return np.round(oversample, 3)
def Resize(IN, dim):
ft = np.fft.fftshift(np.fft.fftn(IN)) / np.prod(IN.shape)
pad_value = np.array(dim) // 2 - np.array(ft.shape) // 2
pad = [[pad_value[0], pad_value[0]], [pad_value[1], pad_value[1]],
[pad_value[2], pad_value[2]]]
ft_resize = ut.adjust_dimensions(ft, pad)
output = np.fft.ifftn(np.fft.ifftshift(ft_resize)) * np.prod(dim)
return output
def match_oversample_diff(
diff: np.ndarray,
fr: Union[list, np.ndarray, None] = None,
to: Union[list, np.ndarray, None] = None,
shape: Union[list, np.ndarray, None] = [64, 64, 64],
):
""" resize diff to match oversample ratios
diff = diffraction pattern
fr = from oversample ratio
to = to oversample ratio
shape = output shape
"""
# adjustment needed to match oversample ratio
change = [np.round(f / t).astype('int32') for f, t in zip(fr, to)]
change = [np.max([1, c]) for c in change]
diff = ut.binning(diff, change)
# crop diff to match output shape
shape_arr = np.array(shape)
diff_shape_arr = np.array(diff.shape)
pad_value1 = shape_arr // 2 - diff_shape_arr // 2
    pad_value2 = shape_arr - diff_shape_arr - pad_value1
pad = [[pad_value1[0], pad_value2[0]], [pad_value1[1], pad_value2[1]],
[pad_value1[2], pad_value2[2]]]
output = ut.adjust_dimensions(diff, pad)
return output, diff.shape
def shift_com(amp, phi):
from scipy.ndimage.measurements import center_of_mass as com
from scipy.ndimage.interpolation import shift
h, w, t = 64, 64, 64
coms = com(amp)
deltas = (int(round(h / 2 - coms[0])), int(round(w / 2 - coms[1])),
int(round(t / 2 - coms[2])))
amp_shift = shift(amp, shift=deltas, mode='wrap')
phi_shift = shift(phi, shift=deltas, mode='wrap')
return amp_shift, phi_shift
def post_process(amp, phi, th=0.1, uw=0):
if uw == 1:
# phi = np.unwrap(np.unwrap(np.unwrap(phi,0),1),2)
phi = unwrap_phase(phi)
mask = np.where(amp > th, 1, 0)
amp_out = mask * amp
phi_out = mask * phi
    mean_phi = np.sum(phi_out)
import numpy as np
from tqdm import tqdm
from ..utils.simulation import rollout
from torch.utils.data import Dataset, DataLoader
import torch
class Camelid_Dataset:
def __init__(self):
pass
# draw n_sample (x,y) pairs drawn from n_func functions
# returns (x,y) where each has size [n_func, n_samples, x/y_dim]
def sample(self, n_funcs, n_samples):
raise NotImplementedError
class TorchDatasetWrapper(Dataset):
"""
This provides a torch wrapper on the existing camelid dataset classes.
It is used for dataloaders in training of torch models.
We are doing our own batching (since this is how camelid datasets were implemented)
Therefore we can't grab data according to indices and it is sampled randomly
Thus, epochs do not make sense.
"""
def __init__(self, camelid_dataset, traj_len=50):
self.dataset = camelid_dataset
self.batch_size = 1
self.traj_len = traj_len
def __len__(self):
# returns the number of trajectories
if hasattr(self.dataset, 'N'):
return self.dataset.N
else:
return int(1e5)
def __getitem__(self,idx):
# will return a trajectory (corresponding to a particular task)
# return a batch
xu,xp = self.dataset.sample(self.batch_size, self.traj_len)
# split x/u
o_dim = xp.shape[-1]
x = xu[0,:,:o_dim]
u = xu[0,:,o_dim:]
xp = xp[0,...]
# map to tensors
x_torch = torch.from_numpy(x)
u_torch = torch.from_numpy(u)
xp_torch = torch.from_numpy(xp)
sample = {
'x': x_torch,
'u': u_torch,
'xp': xp_torch
}
# print(xp)
return sample
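# Illustrative usage (dataset construction and loader settings are hypothetical):
#   ds = PresampledDataset(filename='episodes.npz')
#   loader = DataLoader(TorchDatasetWrapper(ds, traj_len=50), batch_size=8)
#   batch = next(iter(loader))  # dict with 'x', 'u', 'xp' tensors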
class PresampledTrajectoryDataset(Camelid_Dataset):
def __init__(self, trajs, controls):
self.trajs = trajs
self.controls = controls
self.o_dim = trajs[0].shape[-1]
self.u_dim = controls[0].shape[-1]
self.N = len(trajs)
def sample(self, n_funcs, n_samples):
o_dim = self.o_dim
u_dim = self.u_dim
x_dim = o_dim + u_dim
y_dim = o_dim
x = np.zeros((n_funcs, n_samples, x_dim))
y = np.zeros((n_funcs, n_samples, y_dim))
for i in range(n_funcs):
j = np.random.randint(self.N)
T = self.controls[j].shape[0]
if n_samples > T:
raise ValueError('You are requesting more samples than are in this trajectory.')
start_ind = 0
if T > n_samples:
start_ind = np.random.randint(T-n_samples)
inds_to_keep = np.arange(start_ind, start_ind+n_samples)
x[i,:,:self.o_dim] = self.trajs[j][inds_to_keep]
x[i,:,self.o_dim:] = self.controls[j][inds_to_keep]
y[i,:,:] = self.trajs[j][inds_to_keep+1]
return x,y
class PresampledDataset(Camelid_Dataset):
def __init__(self, X=None, Y=None, whiten=False, shuffle=False, filename=None, x_check=None, y_check=None):
if (X is not None) and (Y is not None):
# TODO: implement load from file functionality
self.X = X
self.Y = Y
elif filename is not None:
data = np.load(filename)
self.X = data["X"]
self.Y = data["Y"]
else:
raise Exception
self.shuffle = shuffle
self.x_check = x_check
self.y_check = y_check
self.x_dim = self.X.shape[-1]
self.y_dim = self.Y.shape[-1]
self.N = self.X.shape[0]
self.T = self.X.shape[1]
self.input_scaling = np.ones([1,1,self.X.shape[-1]])
self.output_scaling = np.ones([1,1,self.Y.shape[-1]])
deltas = (self.Y.T - self.X[:,:,:self.y_dim].T).reshape([self.y_dim,-1]).T
#to filter episodes with 3 sigma events
self.means = np.mean(deltas, axis=0)
self.stds = np.std(deltas, axis=0)
if whiten:
self.input_scaling = np.std(self.X, axis=(0,1), keepdims=True)
self.output_scaling = np.std(self.Y, axis=(0,1), keepdims=True)
def sample(self, n_funcs, n_samples):
x = np.zeros((n_funcs, n_samples, self.x_dim))
y = np.zeros((n_funcs, n_samples, self.y_dim))
for i in range(n_funcs):
j = np.random.randint(self.N)
if n_samples > self.T:
raise ValueError('You are requesting %d samples but there are only %d in the dataset.'%(n_samples, self.T))
inds_to_keep = np.random.choice(self.T, n_samples)
x[i,:,:] = self.X[j,inds_to_keep,:] / self.input_scaling
y[i,:,:] = self.Y[j,inds_to_keep,:] / self.output_scaling
if self.shuffle:
inds_to_keep = np.random.choice(self.T, n_samples)
x[i,:,:] = self.X[j,inds_to_keep,:] / self.input_scaling
y[i,:,:] = self.Y[j,inds_to_keep,:] / self.output_scaling
else:
start_idx = 0 if (self.T == n_samples) else np.random.randint(self.T - n_samples)
x[i,:,:] = self.X[j,start_idx:start_idx+n_samples,:]
y[i,:,:] = self.Y[j,start_idx:start_idx+n_samples,:]
return x,y
def append(self,X,Y):
self.X = np.concatenate([self.X, X], axis=0)
self.Y = np.concatenate([self.Y, Y], axis=0)
self.N = self.X.shape[0]
def prune(self):
"""
removes functions with values Y - X[:,:,:ydim] exceeding k*sigma to remove crazy data
"""
X = self.X
Y = self.Y
good_eps = np.ones_like(self.X[:,0,0])
if self.x_check is not None:
good_eps = np.logical_and(good_eps, self.x_check(X))
if self.y_check is not None:
            good_eps = np.logical_and(good_eps, self.y_check(Y))
self.X = X[good_eps,:,:]
self.Y = Y[good_eps,:,:]
self.N = self.X.shape[0]
def save(self, filename):
        np.savez(filename, X=self.X, Y=self.Y)
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import copy
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
find_layout, HEAD_SIZE_DEFAULT)
from mne.channels.layout import (_box_size, _find_topomap_coords,
generate_2d_layout)
from mne.utils import run_tests_if_main
from mne import pick_types, pick_info
from mne.io import read_raw_kit, _empty_info, read_info
from mne.io.constants import FIFF
from mne.utils import _TempDir
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')
lout_path = op.join(io_dir, 'tests', 'data')
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')
def _get_test_info():
"""Make test info."""
test_info = _empty_info(1000)
loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
dtype=np.float32)
test_info['chs'] = [
        {'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,
'unit': -1, 'unit_mul': 0},
        {'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,
'unit': -1, 'unit_mul': 0},
{'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,
'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,
'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]
test_info._update_redundant()
test_info._check_consistency()
return test_info
def test_io_layout_lout():
"""Test IO with .lout files."""
tempdir = _TempDir()
layout = read_layout('Vectorview-all', scale=False)
layout.save(op.join(tempdir, 'foobar.lout'))
layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
print(layout) # test repr
def test_io_layout_lay():
"""Test IO with .lay files."""
tempdir = _TempDir()
layout = read_layout('CTF151', scale=False)
layout.save(op.join(tempdir, 'foobar.lay'))
layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
def test_find_topomap_coords():
"""Test mapping of coordinates in 3D space to 2D."""
info = read_info(fif_fname)
picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
# Remove extra digitization point, so EEG digitization points match up
# with the EEG channels
del info['dig'][85]
# Use channel locations
kwargs = dict(ignore_overlap=False, to_sphere=True,
sphere=HEAD_SIZE_DEFAULT)
l0 = _find_topomap_coords(info, picks, **kwargs)
# Remove electrode position information, use digitization points from now
# on.
for ch in info['chs']:
ch['loc'].fill(np.nan)
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1, l0, atol=1e-3)
for z_pt in ((HEAD_SIZE_DEFAULT, 0., 0.),
(0., HEAD_SIZE_DEFAULT, 0.)):
info['dig'][-1]['r'] = z_pt
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1[-1], z_pt[:2], err_msg='Z=0 point moved', atol=1e-6)
# Test plotting mag topomap without channel locations: it should fail
mag_picks = pick_types(info, meg='mag')
with pytest.raises(ValueError, match='Cannot determine location'):
_find_topomap_coords(info, mag_picks, **kwargs)
# Test function with too many EEG digitization points: it should fail
info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Test function with too little EEG digitization points: it should fail
info['dig'] = info['dig'][:-2]
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Electrode positions must be unique
info['dig'].append(info['dig'][-1])
with pytest.raises(ValueError, match='overlapping positions'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without EEG digitization points: it should fail
info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
with pytest.raises(RuntimeError, match='Did not find any digitization'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without any digitization points, it should fail
info['dig'] = None
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
info['dig'] = []
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
def test_make_eeg_layout():
"""Test creation of EEG layout."""
tempdir = _TempDir()
tmp_name = 'foo'
lout_name = 'test_raw'
lout_orig = read_layout(kind=lout_name, path=lout_path)
info = read_info(fif_fname)
info['bads'].append(info['ch_names'][360])
layout = make_eeg_layout(info, exclude=[])
assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
if ch.startswith('EE')]))
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
assert_array_equal(lout_new.kind, tmp_name)
assert_allclose(layout.pos, lout_new.pos, atol=0.1)
assert_array_equal(lout_orig.names, lout_new.names)
# Test input validation
pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, radius=0.6)
pytest.raises(ValueError, make_eeg_layout, info, width=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, width=1.1)
pytest.raises(ValueError, make_eeg_layout, info, height=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, height=1.1)
def test_make_grid_layout():
"""Test creation of grid layout."""
tempdir = _TempDir()
tmp_name = 'bar'
lout_name = 'test_ica'
lout_orig = read_layout(kind=lout_name, path=lout_path)
layout = make_grid_layout(_get_test_info())
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir)
assert_array_equal(lout_new.kind, tmp_name)
assert_array_equal(lout_orig.pos, lout_new.pos)
assert_array_equal(lout_orig.names, lout_new.names)
# Test creating grid layout with specified number of columns
layout = make_grid_layout(_get_test_info(), n_col=2)
# Vertical positions should be equal
assert layout.pos[0, 1] == layout.pos[1, 1]
# Horizontal positions should be unequal
assert layout.pos[0, 0] != layout.pos[1, 0]
# Box sizes should be equal
assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
def test_find_layout():
"""Test finding layout."""
pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')
sample_info = read_info(fif_fname)
grads = pick_types(sample_info, meg='grad')
sample_info2 = pick_info(sample_info, grads)
mags = pick_types(sample_info, meg='mag')
sample_info3 = pick_info(sample_info, mags)
# mock new convention
sample_info4 = copy.deepcopy(sample_info)
for ii, name in enumerate(sample_info4['ch_names']):
new = name.replace(' ', '')
sample_info4['chs'][ii]['ch_name'] = new
eegs = pick_types(sample_info, meg=False, eeg=True)
sample_info5 = pick_info(sample_info, eegs)
lout = find_layout(sample_info, ch_type=None)
assert lout.kind == 'Vectorview-all'
assert all(' ' in k for k in lout.names)
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
# test new vector-view
lout = find_layout(sample_info4, ch_type=None)
assert_equal(lout.kind, 'Vectorview-all')
assert all(' ' not in k for k in lout.names)
lout = find_layout(sample_info, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2)
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3)
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5)
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
# no common layout, 'meg' option not supported
lout = find_layout(read_info(fname_ctf_raw))
assert_equal(lout.kind, 'CTF-275')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
lout = find_layout(read_info(fname_bti_raw))
assert_equal(lout.kind, 'magnesWH3600')
raw_kit = read_raw_kit(fname_kit_157)
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_kit.info['bads'] = ['MEG 13', 'MEG 14', 'MEG 15', 'MEG 16']
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
# fallback for missing IDs
raw_kit.info['kit_system_id'] = 35
lout = find_layout(raw_kit.info)
assert lout.kind == 'custom'
raw_umd = read_raw_kit(fname_kit_umd)
lout = find_layout(raw_umd.info)
assert_equal(lout.kind, 'KIT-UMD-3')
# Test plotting
lout.plot()
lout.plot(picks=np.arange(10))
plt.close('all')
def test_box_size():
"""Test calculation of box sizes."""
# No points. Box size should be 1,1.
assert_allclose(_box_size([]), (1.0, 1.0))
# Create one point. Box size should be 1,1.
point = [(0, 0)]
assert_allclose(_box_size(point), (1.0, 1.0))
# Create two points. Box size should be 0.5,1.
points = [(0.25, 0.5), (0.75, 0.5)]
assert_allclose(_box_size(points), (0.5, 1.0))
# Create three points. Box size should be (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points), (0.5, 0.5))
# Create a grid of points. Box size should be (0.1, 0.1).
x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
x, y = x.ravel(), y.ravel()
assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
# Create a random set of points. This should never break the function.
rng = np.random.RandomState(42)
points = rng.rand(100, 2)
width, height = _box_size(points)
assert width is not None
assert height is not None
# Test specifying an existing width.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
# Test specifying an existing width that has influence on the calculated
# height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
# Test specifying an existing height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
# Test specifying an existing height that has influence on the calculated
# width.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
# Test specifying both width and height. The function should simply return
# these.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
# Test specifying a width that will cause unfixable horizontal overlap and
# essentially breaks the function (height will be 0).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=1), (1, 0))
# Test adding some padding.
# Create three points. Box size should be a little less than (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
def test_generate_2d_layout():
"""Test creation of a layout from 2d points."""
snobg = 10
sbg = 15
side = range(snobg)
bg_image = np.random.RandomState(42).randn(sbg, sbg)
w, h = [.2, .5]
# Generate fake data
xy = np.array([(i, j) for i in side for j in side])
lt = generate_2d_layout(xy, w=w, h=h)
# Correct points ordering / minmaxing
comp_1, comp_2 = [(5, 0), (7, 0)]
assert lt.pos[:, :2].max() == 1
assert lt.pos[:, :2].min() == 0
with np.errstate(invalid='ignore'): # divide by zero
assert_allclose(xy[comp_2] / float(xy[comp_1]),
lt.pos[comp_2] / float(lt.pos[comp_1]))
    assert_allclose(lt.pos[0, [2, 3]], [w, h])
import math
import os.path
import os
import itertools
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.signal import savgol_filter
from sklearn.metrics import roc_curve, auc
import settings
OUTLIER_LIMIT = 60
FLOAT_ERROR = 0.000001
def movingaverage(interval, window_size):
window = np.ones(int(window_size))/float(window_size)
return np.vstack((
np.convolve(interval[:,0], window, 'same'),
np.convolve(interval[:,1], window, 'same'),
)).T
def get_list_string(l):
return ','.join([str(e) for e in l])
def compute_auc(y1, y2):
fpr, tpr, thresholds = roc_curve(y1, y2)
roc_auc = auc(fpr, tpr)
return roc_auc
def view_rides(*rides):
colors = ['b', 'r', 'g', 'm', 'y', 'c', 'k']
for i, ride in enumerate(rides):
plt.plot([p[0] for p in ride], [p[1] for p in ride], '%s-' % colors[i % len(colors)])
plt.show()
def euclidian_distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def euclidian_distances(ride):
    return [euclidian_distance(ride[i], ride[i+1]) for i in range(len(ride) - 1)]
def view_ride_speed(ride):
sm_ride = savgol_filter(np.array(ride).T, 7, 2).T
distances = euclidian_distances(ride)
#smoothed = [np.mean(distances[max(0, i-1):min(i+2, len(distances))]) for i in range(len(distances))]
#smoothed = np.array(smoothed)
smoothed = euclidian_distances(sm_ride)
acc = np.hstack((smoothed, [0])) - np.hstack(([0], smoothed))
acc = acc[1:-1]
plt.plot(range(len(distances)), distances, 'b-')
plt.plot(range(len(smoothed)), smoothed, 'r-')
plt.plot(range(len(acc)), acc, 'g-')
plt.plot(range(len(distances)), [0] * len(distances), 'm-')
plt.show()
def get_ride_histograms(distances, normalized=False, version=1):
numbers1 = np.array(distances)
numbers2 = (np.hstack((numbers1, [0])) - np.hstack(([0], numbers1)))[1:-1]
if version == 1:
hists = [
np.histogram(numbers1, bins=range(0, 50, 4))[0],
np.histogram(numbers1[numbers1 < 20], bins=range(0, 20, 2))[0],
            np.histogram(numbers2[(numbers2 > -4) & (numbers2 < 3)], bins=[-4 + i * 0.7 for i in range(10)])[0],
]
else:
hists = [
np.histogram(numbers1, bins=range(0, 40, 4))[0],
np.histogram(numbers1, bins=range(0, 20, 1))[0],
np.histogram(numbers2, bins=[-100] + [-4 + i * 0.6 for i in range(14)] + [100])[0],
]
if normalized:
hists = [
hists[0] / (len(numbers1) + 1.0),
hists[1] / (len(numbers1) + 1.0),
hists[2] / (len(numbers2) + 1.0),
]
return list(itertools.chain(*hists))
def get_g_forces(ride, distances=None):
if distances is None:
distances = np.array(euclidian_distances(ride))
angles = [get_angle(ride[i-2], ride[i-1], ride[i]) for i in range(2, len(ride))]
g_forces = [(180-angles[i-1]) * (distances[i-1] + distances[i]) for i in range(1, len(distances))]
return np.array(g_forces)
def get_g_forces_v2(ride):
distances = np.array(euclidian_distances(ride))
lateral_g_forces = get_g_forces(ride, distances=distances)
acc = np.hstack((distances, [0])) - np.hstack(([0], distances))
acc = acc[1:-1]
distances = distances[1:]
forward_g_forces = distances * acc
LAT_TH = [1, 5, 10, 30, 70, 110, 150]
FW_TH = [-30, -15, -7, -3, -1, 1, 3, 7, 15, 30]
DIST_TH = [1, 3, 8, 13, 20, 35]
# print np.percentile(forward_g_forces, [1, 5, 25, 75, 95, 99])
# print ''
lateral_g_forces = np.digitize(lateral_g_forces, LAT_TH)
forward_g_forces = np.digitize(forward_g_forces, FW_TH)
distances = np.digitize(distances, DIST_TH)
g_forces = np.vstack((distances, lateral_g_forces, forward_g_forces)).transpose()
g_force_string = ' '.join(['%s_%s_%s' % (m[0], m[1], m[2]) for m in g_forces])
return g_force_string
def get_g_forces_v3(ride, step=5):
ride2 = np.array(ride)
ride1 = np.roll(ride2, step, axis=0)
ride0 = np.roll(ride1, step, axis=0)
ride0 = ride0[step*2:]
ride1 = ride1[step*2:]
ride2 = ride2[step*2:]
a1 = ride1 - ride0
a2 = ride2 - ride1
distances1 = np.linalg.norm(a1, axis=1)
distances2 = np.linalg.norm(a2, axis=1)
distances = distances1 + distances2
np.seterr(all='ignore')
angles = np.arccos((a1 * a2).sum(1) / (distances1 * distances2))
np.seterr(all='print')
angles[distances1 < 0.5] = 0
angles[distances2 < 0.5] = 0
angles = angles * 180 / math.pi
lateral_g_forces = angles * distances
acc = distances2 - distances1
forward_g_forces = acc * distances
LAT_TH = [2, 33, 88, 164, 524, 1275, 1693, 2615, 3996]
FW_TH = [-3952, -1963, -1081, -576, 0, 652, 1034, 1718, 3279]
DIST_TH = [1, 47, 108, 146, 200, 250]
lateral_g_forces = np.digitize(lateral_g_forces, LAT_TH)
forward_g_forces = np.digitize(forward_g_forces, FW_TH)
distances = np.digitize(distances, DIST_TH)
g_forces = np.vstack((distances, lateral_g_forces, forward_g_forces)).transpose()
g_force_string = ' '.join(['%s_%s' % (m[0], m[1]) for m in g_forces])
return g_force_string
def get_g_forces_v4(ride, version=1):
ride = np.array(ride)
ride = savgol_filter(ride.T, 7, 3).T
# http://stackoverflow.com/questions/28269379/curve-curvature-in-numpy
dx_dt = np.gradient(ride[:, 0])
dy_dt = np.gradient(ride[:, 1])
velocity = np.vstack((dx_dt, dy_dt)).T
ds_dt = np.linalg.norm(velocity, axis=1)
np.seterr(all='ignore')
tangent = np.array([1/ds_dt] * 2).T
np.seterr(all='print')
tangent = np.nan_to_num(tangent)
tangent = tangent * velocity
tangent_x = tangent[:, 0]
tangent_y = tangent[:, 1]
deriv_tangent_x = np.gradient(tangent_x)
deriv_tangent_y = np.gradient(tangent_y)
dT_dt = np.vstack((deriv_tangent_x, deriv_tangent_y)).T
length_dT_dt = np.linalg.norm(dT_dt, axis=1)
np.seterr(all='ignore')
normal = np.array([1/length_dT_dt] * 2).T
np.seterr(all='print')
normal = np.nan_to_num(normal)
normal = normal * dT_dt
d2s_dt2 = np.gradient(ds_dt)
d2x_dt2 = np.gradient(dx_dt)
d2y_dt2 = np.gradient(dy_dt)
np.seterr(all='ignore')
curvature = np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt)**1.5
np.seterr(all='print')
curvature = np.nan_to_num(curvature)
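    # Decompose the acceleration into a tangential component (rate of change of
    # speed) and a normal / centripetal component (curvature * speed^2).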
t_comp = d2s_dt2
n_comp = curvature * ds_dt * ds_dt
# t_component = np.array([t_comp] * 2).T
# n_component = np.array([n_comp] * 2).T
# acceleration = t_component * tangent + n_component * normal
N_TH = [0.001, 0.01, 0.1, 0.5, 1]
T_TH = [-1.5, -1, -0.5, -0.1, 0.1, 0.5, 1]
D_TH = [1, 3, 8, 15, 30]
C_TH = [0.001, 0.1, 0.8]
if version == 1:
n_comp = np.digitize(n_comp, N_TH)
t_comp = np.digitize(t_comp, T_TH)
acc_vectors = np.vstack((n_comp, t_comp)).transpose()
else:
d_comp = np.digitize(ds_dt, D_TH)
c_comp = np.digitize(curvature, C_TH)
acc_vectors = np.vstack((d_comp, c_comp)).transpose()
acc_string = ' '.join(['%s_%s' % (m[0], m[1]) for m in acc_vectors])
return acc_string
def get_distance_acc_words(ride, step=5):
ride = np.array(ride)
ride1 = savgol_filter(ride.T, 7, 2).T
ride0 = np.roll(ride1, step, axis=0)[step:]
ride1 = ride1[step:]
distance_vectors = ride1 - ride0
acc_vectors = np.vstack((distance_vectors, [0,0])) - \
np.vstack(([0,0], distance_vectors))
acc_vectors = acc_vectors[1:-1]
distance_vectors = distance_vectors[:-1]
distances = np.linalg.norm(distance_vectors, axis=1)
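    # Split acceleration into a longitudinal part (projection onto the direction of
    # motion) and a lateral part (the perpendicular "rejection" component).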
acc_projection = (distance_vectors[:,0] * acc_vectors[:,0] + \
distance_vectors[:,1] * acc_vectors[:,1]) / np.maximum(distances, 0.01)
acc = np.linalg.norm(acc_vectors, axis=1)
acc_rejection = np.sqrt(np.maximum(acc**2 - acc_projection**2,0))
DIST_TH = np.array([0.5, 3, 8, 12, 22, 30]) * step
PROJ_TH = [-8, -4, -1, -0.1, 0.1, 1, 3, 5]
REJ_TH = [0.1, 0.8, 3, 6, 10]
features = np.vstack((
np.digitize(distances, DIST_TH),
np.digitize(acc_projection, PROJ_TH),
np.digitize(acc_rejection, REJ_TH)
)).T
features = ' '.join(['%s_%s_%s' % (f[0], f[1], f[2]) for f in features])
return features
def get_acc4acc_words(ride, step=5, version=1):
ride = np.array(ride)
ride1 = savgol_filter(ride.T, 7, 2).T
ride0 = np.roll(ride1, step, axis=0)[step:]
ride1 = ride1[step:]
distance_vectors = ride1 - ride0
acc_vectors = distance_vectors[1:] - distance_vectors[:-1]
acc4acc_vectors = acc_vectors[1:] - acc_vectors[:-1]
acc_vectors = acc_vectors[:-1]
acc = np.linalg.norm(acc_vectors, axis=1)
acc4acc = np.linalg.norm(acc4acc_vectors, axis=1)
ACC_TH = [0.1, 0.3, 0.7, 1.1, 1.6, 2.3, 3.5, 5, 6.5, 9]
ACC4ACC_TH = [0.1, 0.3, 0.7, 1.2, 2, 2.8]
if version == 1:
features = np.vstack((
np.digitize(acc, ACC_TH),
np.digitize(acc4acc, ACC4ACC_TH),
)).T
features = ' '.join(['%s_%s' % (f[0], f[1]) for f in features])
else:
features = ' '.join(['a%s' % f for f in np.digitize(acc, ACC_TH)])
return features
def build_features_acc(ride, version=1):
IS_MOVING_TH = 0.7 if version == 1 else 0.3
distances = euclidian_distances(ride)
if version == 1:
smoothed = [np.mean(distances[max(0, i-1):min(i+2, len(distances))] or [0]) for i in range(len(distances))]
smoothed = np.array(smoothed)
else:
smoothed = np.array(distances)
acc = np.hstack((smoothed, [0])) - np.hstack(([0], smoothed))
acc = acc[1:-1]
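    # Label each sample as accelerating (+1), decelerating (-1) or roughly constant
    # speed (0) using a small state machine over a 3-sample moving average of acc.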
windows = []
current_window = []
current_window_type = 0
for i in range(len(acc)):
current_window.append(acc[i])
current_window = current_window[-3:]
t = np.mean(current_window)
if current_window_type == 0:
if np.abs(t) > IS_MOVING_TH:
current_window_type = np.sign(t)
else:
if np.sign(current_window[-1]) != current_window_type:
current_window_type = 0
windows.append(current_window_type)
windows[0] = windows[1]
for i in range(1, len(windows) - 1):
if windows[i] != windows[i-1] and windows[i] != windows[i+1]:
windows[i] = windows[i+1]
features = []
# features to compute:
# - percent accelerating, contant, decelerating
# features.extend(np.histogram(windows, [-1, 0, 1, 2])[0] / (1.0 * len(windows))) # eventual normalizat
# - average acceleration, deceleration
mean_acc = np.mean([acc[i] for i in range(len(acc)) if windows[i] == 1] or [0])
mean_dec = np.mean([acc[i] for i in range(len(acc)) if windows[i] == -1] or [0])
features.extend([mean_acc, mean_dec])
# - average acceleration, deceleration relative to speed
SPEED_TH = list(range(0, 50, 3)) + [10000]
for sp in range(len(SPEED_TH)-1):
mean_acc = np.mean([acc[i] for i in range(len(acc)) if windows[i] == 1 and SPEED_TH[sp] <= smoothed[i] < SPEED_TH[sp+1]] or [0])
mean_dec = np.mean([acc[i] for i in range(len(acc)) if windows[i] == -1 and SPEED_TH[sp] <= smoothed[i] < SPEED_TH[sp+1]] or [0])
features.extend([mean_acc, mean_dec])
# - average number of acc/dec changes in a trip
changes = 0
current_type = 1
for w in windows:
if w == -current_type:
changes += 1
current_type = w
features.append(changes) # eventual normalizat
features.append(1.0 * changes / len(windows))
# - the maximum, minimum, and average values of speed multiplied by acceleration
# - their standard deviations
speed_times_acc = np.hstack((acc, [0])) * smoothed
if version == 1:
sta_hist = np.histogram(speed_times_acc, bins=range(-400, 400, 40))[0]
else:
sta_hist = np.histogram(speed_times_acc, bins=range(-500, 500, 20))[0]
if version == 1:
features.extend(sta_hist * 1.0 / len(speed_times_acc))
else:
features.extend(sta_hist)
if version != 1:
features.extend(np.percentile(speed_times_acc, [1, 3, 5, 7, 25, 50, 75, 93, 95, 97, 99]))
features.append(np.std(speed_times_acc))
# max acceleration per window
max_windows = []
current_max = 0
is_accelerating = 0
for i in range(len(acc)):
if windows[i] == 1:
is_accelerating = 1
current_max = max(current_max, acc[i])
else:
if current_max:
max_windows.append(current_max)
current_max = 0
is_accelerating = 0
features.append(np.mean(max_windows or [0]))
acc_for_acc = (np.hstack((acc, [0])) - np.hstack(([0], acc)))[1:-1]
acc_for_acc_hist = np.histogram(acc_for_acc, bins=[-3 + i * 0.3 for i in range(21)])[0]
if version == 1:
features.extend(acc_for_acc_hist * 1.0 / len(acc_for_acc))
else:
features.extend(acc_for_acc_hist)
# #standing start
# standing_starts = []
# for i in range(1, len(windows) - 4):
# if not (windows[i] == 1 and windows[i-1] == 0):
# continue
# if distances[i-1] > 1.5:
# continue
# d = sum(distances[i:i+5])
# standing_starts.append(d)
# features.append(np.max(standing_starts or [0]))
csw_lengths = []
current_window_lenght = 0
tbs_lengths = []
current_stop_length = 0
for i in range(1, len(windows)):
# time at constant speed
if windows[i] == 0 and smoothed[i] > 4:
current_window_lenght += 1
else:
if current_window_lenght:
csw_lengths.append(current_window_lenght)
current_window_lenght = 0
# time between stops
if windows[i] == 0 and smoothed[i] < 3:
current_stop_length += 1
else:
if current_stop_length:
tbs_lengths.append(current_stop_length)
current_stop_length = 0
if version == 1:
features.append(np.mean(csw_lengths or [0]))
features.append(np.std(csw_lengths or [0]))
features.append(np.mean(tbs_lengths or [0]))
if version == 1:
csw_length_hist = np.histogram(csw_lengths, bins=[0, 5, 15, 35, 70, 200, 10000])[0]
features.extend(csw_length_hist * 1.0 / (len(csw_lengths) + 1))
return features
def build_features(ride, normalized=False, version=1):
if version == 3:
ride = savgol_filter(np.array(ride).T, 7, 2).T
distances = np.array(euclidian_distances(ride))
#ride_length = distances.sum()
#ride_speed = ride_length / len(ride)
distances_no_stops = distances[distances > 1.5]
#stops_ratio = len(distances[distances < 1.5]) / (len(distances) + 1.0)
ride_length_no_stops = distances_no_stops.sum()
ride_speed_no_stops = ride_length_no_stops / (len(distances_no_stops) + 1)
features = [
#ride_length,
#ride_speed,
ride_length_no_stops,
#stops_ratio,
euclidian_distance(ride[0], ride[-1]),
]
if version == 1:
features.append(ride_speed_no_stops)
features.extend(get_ride_histograms(distances, normalized=normalized, version=version))
g_forces = get_g_forces(ride, distances=distances)
if version == 1:
h_g_forces = np.histogram(g_forces, bins=range(0, 600, 50))[0]
else:
h_g_forces = np.histogram(g_forces, bins=range(0, 600, 10))[0]
features.extend(h_g_forces)
return np.array(features)
def build_features_big(ride_orig):
ride = savgol_filter(np.array(ride_orig).T, 7, 2).T
distances = np.linalg.norm(
(np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1],
axis=1
)
acc = (np.hstack((distances, [0])) - np.hstack(([0], distances)))[1:-1]
ride_length = distances.sum()
ride_speed = ride_length / len(ride)
distances_no_stops = distances[distances > 1.5]
stops_ratio = len(distances[distances < 1.5]) / (len(distances) + 1.0)
ride_length_no_stops = distances_no_stops.sum()
ride_speed_no_stops = ride_length_no_stops / (len(distances_no_stops) + 1)
features = [
ride_length,
ride_speed,
ride_length_no_stops,
stops_ratio,
euclidian_distance(ride[0], ride[-1]),
]
move_vectors = (np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1]
m1 = move_vectors[1:]
m2 = move_vectors[:-1]
distances1 = np.linalg.norm(m1, axis=1)
distances2 = np.linalg.norm(m2, axis=1)
dot_product = (m1 * m2).sum(1)
denominator = np.maximum(distances1 * distances2, 0.01)
angles = np.arccos(np.maximum(np.minimum(dot_product / denominator, 1.0), -1.0))
angles = angles * 180 / math.pi
g_forces = angles * (distances1 + distances2)
features.extend(np.percentile(angles, [25, 50, 75, 90, 95, 99]))
acc_for_acc = (np.hstack((acc, [0])) - np.hstack(([0], acc)))[1:-1]
hists = [
np.histogram(distances, bins=range(0, 50, 4))[0] / (len(distances) + 1.0),
np.histogram(distances[distances < 20], bins=range(0, 20, 2))[0],
np.histogram(acc, bins=[-4 + i * 0.7 for i in range(10)])[0] / (len(acc) + 1.0),
np.histogram(g_forces, bins=range(0, 600, 10))[0],
np.histogram(acc * distances2, bins=range(-500, 500, 20))[0],
np.histogram(acc_for_acc, bins=[-2.1 + i * 0.3 for i in range(15)])[0] / (len(acc_for_acc) + 1.0),
]
features.extend(list(itertools.chain(*hists)))
return np.array(features)
def build_features_big_v2(ride_orig):
ride_orig = np.array(ride_orig)
ride = savgol_filter(ride_orig.T, 11, 2).T
distances_orig = np.linalg.norm(
(np.vstack((ride_orig, [0,0])) - np.vstack(([0,0], ride_orig)))[1:-1],
axis=1
)
acc_orig = (np.hstack((distances_orig, [0])) - np.hstack(([0], distances_orig)))[1:-1]
distances = np.linalg.norm(
(np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1],
axis=1
)
acc = (np.hstack((distances, [0])) - np.hstack(([0], distances)))[1:-1]
ride_length = distances.sum()
ride_speed = ride_length / len(ride)
distances_no_stops = distances[distances > 1.5]
stops_ratio = len(distances[distances < 1.5]) / (len(distances) + 1.0)
ride_length_no_stops = distances_no_stops.sum()
ride_speed_no_stops = ride_length_no_stops / (len(distances_no_stops) + 1)
features = [
ride_length,
ride_speed,
ride_length_no_stops,
stops_ratio,
euclidian_distance(ride[0], ride[-1]),
]
move_vectors = (np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1]
m1 = move_vectors[1:]
m2 = move_vectors[:-1]
distances1 = np.linalg.norm(m1, axis=1)
distances2 = np.linalg.norm(m2, axis=1)
dot_product = (m1 * m2).sum(1)
denominator = np.maximum(distances1 * distances2, 0.01)
angles = np.arccos(np.maximum(np.minimum(dot_product / denominator, 1.0), -1.0))
angles = angles * 180 / math.pi
g_forces = angles * (distances1 + distances2)
features.extend(np.percentile(angles, [1, 5, 25, 50, 75, 90, 95, 99]))
acc_for_acc = (np.hstack((acc, [0])) - np.hstack(([0], acc)))[1:-1]
acc_for_acc_orig = (np.hstack((acc_orig, [0])) - np.hstack(([0], acc_orig)))[1:-1]
acc5 = np.pad(acc, (0,4), 'constant') + \
np.pad(acc, (1,3), 'constant') + \
np.pad(acc, (2,2), 'constant') + \
np.pad(acc, (3,1), 'constant') + \
np.pad(acc, (4,0), 'constant')
acc5 = acc5[5:-5]
hists = [
np.histogram(distances, bins=range(0, 50, 3))[0] / (len(distances) + 1.0),
np.histogram(distances[distances < 20], bins=range(0, 20, 1))[0],
np.histogram(acc_orig, bins=[-4 + i * 0.7 for i in range(10)])[0],
np.histogram(acc, bins=[-4 + i * 0.7 for i in range(10)])[0],
np.percentile(acc_orig, [1, 5, 10, 25, 50, 75, 90, 95, 99]),
np.percentile(acc, [1, 5, 10, 25, 50, 75, 90, 95, 99]),
np.histogram(g_forces, bins=range(0, 600, 10))[0],
np.histogram(acc * distances2, bins=range(-500, 500, 20))[0],
np.histogram(acc_orig * distances2, bins=range(-500, 500, 20))[0],
np.histogram(acc_for_acc, bins=[-2.1 + i * 0.3 for i in range(15)])[0],
np.percentile(acc_for_acc, [1, 5, 10, 25, 50, 75, 90, 95, 99]),
np.percentile(acc_for_acc_orig, [1, 5, 10, 25, 50, 75, 90, 95, 99]),
np.percentile(acc5, [1, 5, 10, 25, 50, 75, 90, 95, 99]),
np.histogram(acc_for_acc, bins=[-1.2 + i * 0.2 for i in range(12)])[0],
np.histogram(acc_for_acc_orig, bins=[-1.2 + i * 0.2 for i in range(12)])[0],
]
features.extend(list(itertools.chain(*hists)))
for step in [10, 30, 50]:
distances = np.linalg.norm((np.roll(ride, step) - ride)[step:], axis=1)
features.extend(np.percentile(distances, [1, 5, 20, 50, 80, 95, 99]))
dist_slice = distances[distances < 10 * step]
if not len(dist_slice):
dist_slice = [0]
features.extend(np.percentile(dist_slice, [1, 5, 20, 50, 80, 95, 99]))
return np.array(features)
def get_angle(p1, p2, p3):
dot_product = (p1[0] - p2[0]) * (p3[0] - p2[0]) + (p1[1] - p2[1]) * (p3[1] - p2[1])
denominator = max(euclidian_distance(p1, p2) * euclidian_distance(p2, p3), 0.1)
# just in case dot_product is infinitesimaly larger than denominator
ratio = dot_product / denominator
if ratio > 1:
ratio = 1
if ratio < -1:
ratio = -1
angle = math.acos(ratio)
return angle * 180 / math.pi
def bucket(values, bins, cutoff):
bucketed = []
diff = cutoff[1] - cutoff[0]
for value in values:
if value < cutoff[0]:
bucketed.append(0)
continue
if value >= cutoff[1]:
bucketed.append(bins - 1)
continue
ratio = (value - cutoff[0]) / diff
bin = int(ratio * bins)
bucketed.append(bin)
return bucketed
def get_accelerations(ride):
distances = euclidian_distances(ride)
    accelerations = [distances[i] - distances[i-1] for i in range(1, len(distances))]
    bucketed = bucket(accelerations, 10, [-2,2])
    words = ['a%s_%s' % (bucketed[i-1], bucketed[i]) for i in range(1, len(bucketed))]
return words
def get_accelerations_v2(ride):
distances = euclidian_distances(ride)
    accelerations = [distances[i] - distances[i-1] for i in range(1, len(distances))]
    bucketed = np.digitize(accelerations, np.array(range(-30, 30, 3)) / 10.0)
    words = ['a%s_%s' % (bucketed[i-1], bucketed[i]) for i in range(1, len(bucketed))]
return words
def _get_cache_file(model, get_data, driver_id, test, repeat):
cache_folder = settings.CACHE[repeat]
filename = '%s/%s_%s_%s/%s.npy' % (
cache_folder,
'TEST' if test else 'TRAIN',
        get_data.__name__,
model.__name__,
driver_id
)
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
return filename
def get_results(model, get_data, driver_id, test, repeat):
filename = _get_cache_file(model, get_data, driver_id, test, repeat)
if not os.path.isfile(filename):
return False
return np.load(filename)
def cache_results(model, get_data, driver_id, test, data, repeat):
filename = _get_cache_file(model, get_data, driver_id, test, repeat)
np.save(filename, data)
def build_features3(ride, step=5, version=1):
if version == 3:
ride = savgol_filter(np.array(ride).T, 7, 3).T
ride2 = np.array(ride)
ride1 = np.roll(ride2, step, axis=0)
ride0 = np.roll(ride1, step, axis=0)
ride0 = ride0[step*2:]
ride1 = ride1[step*2:]
ride2 = ride2[step*2:]
a1 = ride1 - ride0
a2 = ride2 - ride1
distances1 = np.linalg.norm(a1, axis=1)
distances2 = np.linalg.norm(a2, axis=1)
distances = distances1 + distances2
np.seterr(all='ignore')
angles = np.arccos((a1 * a2).sum(1) / (distances1 * distances2))
np.seterr(all='print')
if version == 1:
angles[distances1 < 7] = 0
angles[distances2 < 7] = 0
else:
angles[distances1 < 0.5] = 0
angles[distances2 < 0.5] = 0
angles = angles * 180 / math.pi
if version == 1:
        DIST_THR = np.array([1, 11, 16, 26, 36, 56, 80])
# -*- coding: utf-8 -*-
# CCP in Tomographic Imaging (CCPi) Core Imaging Library (CIL).
# Copyright 2017 UKRI-STFC
# Copyright 2017 University of Manchester
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import numpy as np
from ccpi.framework import DataContainer
from ccpi.framework import ImageData
from ccpi.framework import AcquisitionData, VectorData
from ccpi.framework import ImageGeometry,VectorGeometry
from ccpi.framework import AcquisitionGeometry
from ccpi.optimisation.algorithms import FISTA
from ccpi.optimisation.functions import LeastSquares
from ccpi.optimisation.functions import ZeroFunction
from ccpi.optimisation.functions import L1Norm
from ccpi.optimisation.operators import LinearOperatorMatrix
from ccpi.optimisation.operators import Identity
from ccpi.optimisation.operators import LinearOperator
import numpy.testing
try:
from cvxpy import *
cvx_not_installable = False
except ImportError:
cvx_not_installable = True
def aid(x):
# This function returns the memory
# block address of an array.
return x.__array_interface__['data'][0]
def dt(steps):
return steps[-1] - steps[-2]
class TestAlgorithms(unittest.TestCase):
def assertNumpyArrayEqual(self, first, second):
res = True
try:
numpy.testing.assert_array_equal(first, second)
except AssertionError as err:
res = False
print(err)
self.assertTrue(res)
def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
res = True
try:
numpy.testing.assert_array_almost_equal(first, second, decimal)
except AssertionError as err:
res = False
print(err)
self.assertTrue(res)
def test_FISTA_cvx(self):
if False:
if not cvx_not_installable:
try:
# Problem data.
m = 30
n = 20
np.random.seed(1)
Amat = np.random.randn(m, n)
A = LinearOperatorMatrix(Amat)
bmat = np.random.randn(m)
bmat.shape = (bmat.shape[0], 1)
# A = Identity()
# Change n to equal to m.
#b = DataContainer(bmat)
vg = VectorGeometry(m)
b = vg.allocate('random')
# Regularization parameter
lam = 10
opt = {'memopt': True}
# Create object instances with the test data A and b.
f = LeastSquares(A, b, c=0.5)
g0 = ZeroFunction()
# Initial guess
#x_init = DataContainer(np.zeros((n, 1)))
x_init = vg.allocate()
f.gradient(x_init, out = x_init)
# Run FISTA for least squares plus zero function.
#x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt)
fa = FISTA(x_init=x_init, f=f, g=g0)
fa.max_iteration = 10
fa.run(10)
# Print solution and final objective/criterion value for comparison
print("FISTA least squares plus zero function solution and objective value:")
print(fa.get_output())
print(fa.get_last_objective())
# Compare to CVXPY
# Construct the problem.
x0 = Variable(n)
objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]))
prob0 = Problem(objective0)
# The optimal objective is returned by prob.solve().
result0 = prob0.solve(verbose=False, solver=SCS, eps=1e-9)
# The optimal solution for x is stored in x.value and optimal objective value
# is in result as well as in objective.value
print("CVXPY least squares plus zero function solution and objective value:")
print(x0.value)
print(objective0.value)
self.assertNumpyArrayAlmostEqual(
numpy.squeeze(x_fista0.array), x0.value, 6)
except SolverError as se:
print (str(se))
self.assertTrue(True)
else:
self.assertTrue(cvx_not_installable)
def stest_FISTA_Norm1_cvx(self):
if not cvx_not_installable:
try:
opt = {'memopt': True}
# Problem data.
m = 30
n = 20
np.random.seed(1)
Amat = np.random.randn(m, n)
A = LinearOperatorMatrix(Amat)
bmat = np.random.randn(m)
#bmat.shape = (bmat.shape[0], 1)
# A = Identity()
# Change n to equal to m.
vgb = VectorGeometry(m)
vgx = VectorGeometry(n)
b = vgb.allocate()
b.fill(bmat)
#b = DataContainer(bmat)
# Regularization parameter
lam = 10
opt = {'memopt': True}
# Create object instances with the test data A and b.
f = LeastSquares(A, b, c=0.5)
g0 = ZeroFunction()
# Initial guess
#x_init = DataContainer(np.zeros((n, 1)))
x_init = vgx.allocate()
# Create 1-norm object instance
g1 = lam * L1Norm()
g1(x_init)
g1.prox(x_init, 0.02)
# Combine with least squares and solve using generic FISTA implementation
#x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)
fa = FISTA(x_init=x_init, f=f, g=g1)
fa.max_iteration = 10
fa.run(10)
# Print for comparison
print("FISTA least squares plus 1-norm solution and objective value:")
print(fa.get_output())
print(fa.get_last_objective())
# Compare to CVXPY
# Construct the problem.
x1 = Variable(n)
objective1 = Minimize(
0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1, 1))
prob1 = Problem(objective1)
# The optimal objective is returned by prob.solve().
result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)
# The optimal solution for x is stored in x.value and optimal objective value
# is in result as well as in objective.value
print("CVXPY least squares plus 1-norm solution and objective value:")
print(x1.value)
print(objective1.value)
self.assertNumpyArrayAlmostEqual(
numpy.squeeze(x_fista1.array), x1.value, 6)
except SolverError as se:
print (str(se))
self.assertTrue(True)
else:
self.assertTrue(cvx_not_installable)
def skip_test_FBPD_Norm1_cvx(self):
print ("test_FBPD_Norm1_cvx")
if not cvx_not_installable:
opt = {'memopt': True}
# Problem data.
m = 30
n = 20
np.random.seed(1)
            Amat = np.random.randn(m, n)
import numpy as np
from scipy.integrate import trapz
from eqsig import single
import liquepy as lq
def calculate_factor_safety(q_c1ncs, p_a, magnitude, pga, depth, soil_profile):
"""
Calculate the liquefaction factor of safety at a given depth.
:param q_c1ncs: float, normalised cone tip resistance corrected to equivalent clean sand
:param p_a: float, atmospheric pressure
:param magnitude: float, earthquake magnitude
:param pga: float, peak ground acceleration
:param depth: float, depth from surface
:param soil_profile: SoilProfile, A soil profile object
:return:
"""
crr_m7p5 = np.exp(q_c1ncs / 113 + (q_c1ncs / 1000) ** 2 - (q_c1ncs / 140) ** 3 + (q_c1ncs / 137) ** 4 - 2.8)
c_sigma = 1.0 / (37.3 - (8.27 * (q_c1ncs ** 0.264)))
sigma_v = soil_profile.vertical_total_stress(depth)
sigma_veff = soil_profile.vertical_effective_stress(depth)
k_sigma = np.clip(1.0 - c_sigma * np.log(sigma_veff / p_a), -1000, 1.1)
msf_max = 1.09 + (q_c1ncs / 180) ** 3
msf = 1 + ((msf_max - 1) * ((8.64 * np.exp(-magnitude / 4)) - 1.325))
alpha = -1.012 - 1.126 * np.sin(depth / 11.73 + 5.133)
beta = 0.106 + 0.118 * np.sin(depth / 11.28 + 5.142)
r_d = np.exp(alpha + (beta * magnitude))
csr = 0.65 * pga * sigma_v / sigma_veff * r_d
fs_liq = crr_m7p5 * k_sigma * msf / csr
return fs_liq
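# Illustrative usage (values are hypothetical; soil_profile must provide
# vertical_total_stress / vertical_effective_stress at the requested depth):
#   fs = calculate_factor_safety(q_c1ncs=106, p_a=101, magnitude=6.6, pga=0.25,
#                                depth=5.0, soil_profile=sp)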
def calc_degraded_phi(phi, sigma_v_eff, q, a=0.9, ru_ff=1.):
"""
Equivalent degraded friction angle of liquefied soil under a foundation
Ref: Cascone and Bouckovalas (1998)
:param phi: float, friction angle
:param sigma_v_eff: float, vertical effective stress
:param q: float, bearing pressure of foundation
:param a: float, adjustment parameter
:param ru_ff: float, pore pressure ratio in the free-field
:return:
"""
u_foot = a / (1 + (q / sigma_v_eff))
big_u = (u_foot + ru_ff) / 2 # for strip foundations
degraded_phi = np.degrees(np.arctan((1 - big_u) * np.tan(np.deg2rad(phi))))
return degraded_phi
def cal_z_c(fd, z_liq, h0):
"""
Calculation of characteristic depth from Karamitros et al. (2013)
:param fd:
:param z_liq:
:param h0:
:return:
"""
if fd.width > z_liq:
z_c = h0 + z_liq
else:
z_c = h0 + fd.b
return z_c
def karamitros_settlement(fd, z_liq, q, q_ult, acc, dt):
"""
Calculate the settlement using the method proposed by Karamitros et al. 2013 - sett
:param sss:
:return:
"""
sett_dyn_ts = karamitros_settlement_time_series(fd, z_liq, q, q_ult, acc, dt)
return sett_dyn_ts[-1]
def karamitros_settlement_time_series(fd, z_liq, q, q_ult, acc, dt): # units: m, Pa, s
"""
Calculate the settlement using the method proposed by Karamitros et al. 2013 - sett
:param sss:
:return:
"""
c_dash = 0.003 # Karamitros 2013 sett
c_factor = min(c_dash * (1.0 + 1.65 * fd.length / fd.width), 11.65 * c_dash) # Karamitros 2013 sett
int_vel = integral_of_velocity(acc, dt)
amax_t2_n = (np.pi ** 2) * int_vel
fs_deg = (q_ult) / q
sett_dyn_ts = c_factor * amax_t2_n * (z_liq / fd.width) ** 1.5 * (1.0 / fs_deg) ** 3
return sett_dyn_ts
def integral_of_velocity(acc, dt):
delta_vel = acc * dt
vel = np.cumsum(delta_vel)
abs_vel = abs(vel)
vel_int = np.cumsum(abs_vel * dt)
return vel_int
def integral_of_acceleration(acc, dt):
abs_acc = abs(acc)
acc_int = np.cumsum(abs_acc * dt)
return acc_int
def calculate_cav_dp(acc, dt):
start = 0
pga_max = 0
CAVdp = 0
num_points = (int(1 / dt))
total_time = int(dt * (len(acc) - 1))
for i in range(0, total_time):
end = start + num_points
interval_total_time = (start * dt) + 1
interval_time = np.arange(start * dt, interval_total_time, dt)
acc_interval = []
for j in range(start, end + 1):
acc_interval.append(acc[j])
acc_interval = np.array(acc_interval)
abs_acc_interval = abs(acc_interval)
x_lower = start * dt # the lower limit of x
x_upper = end * dt # the upper limit of x
x_int = interval_time[np.where((x_lower <= interval_time) * (interval_time <= x_upper))]
y_int = np.abs(np.array(abs_acc_interval)[np.where((x_lower <= interval_time) * (interval_time <= x_upper))])
int_acc = trapz(y_int, x_int)
# print (x_lower, x_upper)
# calculation of pga (g)
pga = (max(abs_acc_interval))
if pga > pga_max:
pga_max = pga
if (pga - 0.025) < 0:
H=0
if (pga - 0.025) >= 0:
H=1
#H = 1 # what is x??
#CAVdp = CAVdp + (H * (pga - 0.025) * int_acc)
CAVdp = CAVdp + (H * int_acc)
start = end
return CAVdp
def calculate_cav_dp_time_series(acc, dt):
start = 0
pga_max = 0
CAVdp = 0
num_points = (int(1 / dt))
total_time = int(dt * (len(acc) - 1))
CAVdp_time_series = []
for i in range(0, total_time):
end = start + num_points
interval_total_time = (start * dt) + 1
interval_time = np.arange(start * dt, interval_total_time, dt)
acc_interval = []
for j in range(start, end + 1):
acc_interval.append(acc[j])
acc_interval = np.array(acc_interval)
abs_acc_interval = abs(acc_interval)
x_lower = start * dt # the lower limit of x
x_upper = end * dt # the upper limit of x
x_int = interval_time[np.where((x_lower <= interval_time) * (interval_time <= x_upper))]
y_int = np.abs(np.array(abs_acc_interval)[np.where((x_lower <= interval_time) * (interval_time <= x_upper))])
int_acc = trapz(y_int, x_int)
# print (x_lower, x_upper)
# calculation of pga (g)
pga = (max(abs_acc_interval))
if pga > pga_max:
pga_max = pga
if (pga - 0.025) < 0:
H=0
if (pga - 0.025) >= 0:
H=1
CAVdp = CAVdp + (H * int_acc)
CAVdp_time_series.append(CAVdp)
start = end
return CAVdp_time_series
def bray_and_macedo_settlement(acc, dt, z_liq, q, fd, soil_profile):
"""
Calculates foundation settlement using Bray and Macedo (2017)
:param acc: array, acceleration time series
:param dt: float, time step of acceleration time series
:param z_liq:
:param q: float, foundation bearing pressure
:param fd: Foundation, foundation object
:param soil_profile: SoilProfile, soil profile object
:return:
"""
sett_dyn_ts = bray_and_macedo_settlement_time_series(acc, dt, z_liq, q, fd, soil_profile)
return sett_dyn_ts[-1]
def bray_and_macedo_settlement_time_series(acc, dt, z_liq, q, fd, soil_profile):
"""
Calculates foundation settlement using Bray and Macedo (2017)
:param acc: array, acceleration time series
:param dt: float, time step of acceleration time series
:param z_liq:
:param q: float, foundation bearing pressure
:param fd: Foundation, foundation object
:param soil_profile: SoilProfile, soil profile object
"""
# calculation of CAVdp
cavdp_time_series = calculate_cav_dp_time_series(acc, dt)
pga_max = max(abs(acc))
# calculation of LBS
# calculation of Maximum Cyclic Shear Strains
z = np.arange((soil_profile.layer_depth(1)) + 1, (soil_profile.layer_depth(2) + 1), 1)
xmax = len(z) - 1
lbs = []
for item in z:
fs = calculate_factor_safety(q_c1ncs=106, p_a=101, magnitude=6.6, pga=pga_max, depth=item, soil_profile=soil_profile)
d_r = soil_profile.layer(2).relative_density
e_shear = lq.calculate_shear_strain(fs=fs, d_r=d_r)
w = 1
lbs1 = w * e_shear/item
lbs.append(lbs1)
x_lower = z[0] # the lower limit of x
x_upper = z[xmax] # the upper limit of x
x_int = z[np.where((x_lower <= z) * (z <= x_upper))]
y_int = np.abs(np.array(lbs)[np.where((x_lower <= z) * (z <= x_upper))])
int_lbs = trapz(y_int, x_int) # lbs value
# calculation of c_1 and c_2
if int_lbs <= 16:
c_1 = -8.35
c_2 = 0.072
else:
c_1 = -7.48
c_2 = 0.014
acc_signal = single.AccSignal(acc, dt)
acc_signal.generate_response_spectrum(response_times=np.array([1.]), xi=0.01)
sa1 = acc_signal.s_a
sett_dyn_ts = np.exp(c_1 + (4.59 * np.log(q)) - (0.42 * ((np.log(q)) ** 2)) + (c_2 * int_lbs) + (0.58 * np.log(np.tanh(z_liq / 6))) - (0.02 * fd.width) + (0.84 * np.log(cavdp_time_series)) + (0.41 * np.log(sa1)))
return sett_dyn_ts # TODO: Should return metres not millimetres
def lu_settlements(q, fd, Dr, acc):
# TODO: q should be in Pa not kPa
# TODO: DR should be a ratio
Dr_1b=[30.057, 32.004, 34.065, 35.954, 37.958, 39.962, 41.966, 43.969, 45.973, 47.920, 49.866, 51.985, 53.817, 55.935, 57.939, 59.943]
N_lr_1b = [204.739, 208.531, 212.322, 216.114, 219.905, 231.280, 242.654, 257.820, 276.777, 299.526, 329.858, 382.938, 428.436, 496.682, 561.137, 636.967]
Dr_2a = [30.000, 32.004, 33.950, 35.954, 38.187, 40.019, 41.966, 43.969, 46.031, 48.034, 49.981, 51.927, 53.989, 55.992, 57.882, 59.943]
N_lr_2a = [37.915, 37.915, 45.498, 53.081, 56.872, 60.664, 75.829, 79.621, 90.995, 98.578, 98.578, 109.953, 117.536, 128.910, 140.284, 155.450]
Dr_2b = [30.115, 31.947, 34.008, 36.298, 37.901, 40.076, 42.137, 43.511, 45.744, 47.748, 49.122, 51.126, 53.130, 55.649, 57.424, 59.943]
N_lr_2b = [299.526, 299.526, 318.483, 329.858, 337.441, 367.773, 401.896, 424.645, 492.891, 553.555, 587.678, 659.716, 724.171, 800.000, 845.498, 936.493]
Dr_3a = [30.115, 31.947, 34.179, 35.954, 38.073, 40.019, 42.023, 43.969, 46.088, 48.092, 50.095, 51.985, 53.989, 55.763, 57.767, 59.828]
N_lr_3a = [60.664, 60.664, 68.246, 75.829, 83.412, 90.995, 98.578, 106.161, 121.327, 128.910, 151.659, 170.616, 189.573, 216.114, 250.237, 288.152]
Dr_3b = [30.057, 31.897, 33.678, 35.230, 37.356, 39.483, 41.034, 42.414, 43.563, 46.264, 49.080, 50.862, 52.644, 54.713, 57.011, 60.000]
N_lr_3b = [778.202, 793.461, 820.164, 831.608, 865.940, 900.272, 926.975, 946.049, 972.752, 1022.343, 1079.564, 1121.526, 1163.488, 1205.450, 1251.226, 1319.891]
x_int = Dr
abs_acc = abs(acc)
pga = (max(abs_acc))
x_pga = [0.1, 0.4]
if 10 <= q < 30:
        N_lr_1_b = np.interp(x_int, Dr_1b, N_lr_1b)
import numpy as np
# 2013-10-31 Added MultiRate class, simplified fitting methods, removed full_output parameter
# 2014-12-18 Add loading of Frequency, Integration time and Iterations, calculate lower
# bound on errors from Poisson distribution
# 2015-01-28 Simplified fitting again. Needs more work
# 2015-03-02 Added functions for number density calculation
_verbosity = 2
def set_verbosity(level):
"""
0: serious/unrecoverable error
1: recoverable error
2: warning
3: information
"""
global _verbosity
_verbosity = level
def warn(message, level):
if level <= _verbosity:
print(message)
def fitter(p0, errfunc, args):
from lmfit import minimize
result = minimize(errfunc, p0, args=args, nan_policy="omit")
if not result.success:
msg = " Optimal parameters not found: " + result.message
raise RuntimeError(msg)
for i, name in enumerate(result.var_names):
if result.params[name].value == result.init_vals[i]:
warn("Warning: fitter: parameter \"%s\" was not changed, it is probably redundant"%name, 2)
from scipy.stats import chi2
chi = chi2.cdf(result.chisqr, result.nfree)
if chi > 0.5: pval = -(1-chi)*2
else: pval = chi*2
pval = 1-chi
return result.params, pval, result
def dict2Params(dic):
from lmfit import Parameters
if isinstance(dic, Parameters): return dic.copy()
p = Parameters()
for key, val in dic.items():
p.add(key, value=val)
return p
P = dict2Params
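def _example_fitter_usage():
    # Illustrative sketch (added, not part of the original module): fit a single
    # exponential decay with `fitter`.  The parameter names "a" and "k" and the
    # synthetic data are assumptions made only for this example.
    t = np.linspace(0.0, 10.0, 50)
    y = 3.0 * np.exp(-0.5 * t) + np.random.normal(0.0, 0.05, t.size)
    def residual(params, t, y):
        return params["a"].value * np.exp(-params["k"].value * t) - y
    p0 = dict2Params({"a": 1.0, "k": 0.1})
    params, pval, result = fitter(p0, residual, args=(t, y))
    return params, pval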
class Rate:
def __init__(self, fname, full_data=False, skip_iter=[]):
import re
import datetime as dt
fr = open(fname)
state = -1
npoints = 0
nions = 0
pointno = 0
iterno = 0
ioniter = []
ionname = []
frequency = 0
integration = 0
poisson_error = True
# -1 header
# 0 init
# 1 read time
# 2 read data
for lineno, line in enumerate(fr):
# read header
if state == -1:
if lineno == 2:
T1 = line[:22].split()
T2 = line[22:].split()
self.starttime = dt.datetime.strptime(" ".join(T1), "%Y-%m-%d %H:%M:%S.%f")
self.stoptime = dt.datetime.strptime(" ".join(T2), "%Y-%m-%d %H:%M:%S.%f")
if lineno == 3:
state = 0
toks = line.split()
if len(toks) == 0:
continue
if state == 0:
if re.search("Period \(s\)=", line):
frequency = 1/float(re.search("Period \(s\)=([0-9.]+)", line).group(1))
if re.search("Frequency=", line):
frequency = float(re.search("Frequency=([0-9.]+)", line).group(1))
if re.search("Integration time \(s\)", line):
integration = float(re.search("Integration time \(s\)=([0-9.]+)", line).group(1))
if re.search("Number of Points=", line):
npoints = int(re.search("Number of Points=(\d+)", line).group(1))
if re.search("Number of Iterations=", line):
self.niter = int(re.search("Number of Iterations=(\d+)", line).group(1))
if toks[0] == "[Ion":
nions += 1
if re.search("^Iterations=", line) :
ioniter.append(int(re.search("Iterations=(\d+)", line).group(1)))
if re.search("^Name=", line) :
ionname.append(re.search("Name=(.+)$", line).group(1).strip('\"'))
if toks[0] == "Time":
if len(toks)-2 != nions:
print("Corrupt file", fname, "Wrong number of ions in the header. Trying to recover")
# Assume that the Time header is correct:
nions = len(toks)-2
ioniter = ioniter[:nions]
if len(ioniter) < nions:
warn("Corrupt file " + str(fname) + ": Iterations for all species not recorded, guessing...", 1)
while len(ioniter) < nions:
ioniter.append(ioniter[-1])
if len(ionname) < nions:
warn("Corrupt file " + str(fname) + ": Names for all species not recorded, making something up...", 2)
ionname += toks[len(ionname)+2:]
state = 1
time = []
data = np.zeros((nions, npoints, self.niter))
continue
if state == 1:
try:
newtime = float(toks[0])
except ValueError:
if pointno != npoints:
warn("Corrupt file " + fname + " trying to guess number of points", 2)
npoints = pointno
data.resize((nions, npoints, self.niter))
time = np.array(time)
state = 2
else:
time.append(newtime)
pointno += 1
if state == 2:
if toks[0] == "Iteration":
iterno = int(toks[1])-1
if iterno+1 > self.niter:
warn("Corrupt file " + fname + " trying to guess number of iterations", 2)
#msg = "Corrupt file: " + fname
#raise IOError(msg)
self.niter = iterno+1
data.resize((nions, npoints, self.niter))
pointno = 0
continue
try:
data[:, pointno, iterno] = [float(x) for x in toks][1:-1]
except ValueError:
warn("Error in file " + fname + " number of ions probably wrong")
pointno += 1
ioniter = np.array(ioniter)
# in case of multiple measurements per iteration
if iterno+1 != self.niter:
if self.niter % (iterno+1) != 0:
msg = "Corrupt file: " + fname
print(("Corrupt file " + fname + " trying to guess number of iterations:" + str(iterno+1)))
if iterno+1 < self.niter:
data = data[:,:,:iterno+1]
else:
newdata = np.zeros((nions, npoints, iterno+1))
newdata[:,:,:self.niter] = data
print(data, newdata)
data = newdata
#data.resize((nions, npoints, iterno+1))
self.niter = iterno+1
data = data[:,:,:iterno+1]
#print skip_iter, np.shape(skip_iter)
if len(skip_iter)!=0:
skip_iter = np.array(skip_iter)
indices = np.ones(self.niter, dtype=bool)
indices[skip_iter] = False
data = data[:,:,indices]
# XXX frequency is sometimes wrong in the files
# use some heuristics to estimate the frequency
# repetition time is usually set in multiples of 0.1s
measurement_time = np.ceil(time[-1]/0.1)*0.1
if frequency*measurement_time > 1.1 or frequency*measurement_time < 0.4:
warn("Recorded frequency in " + fname + " is probably wrong. Using estimate %f" % (1/measurement_time), 1)
frequency = 1/measurement_time
# this is later used to estimate Poisson error
self.total_iterations = ioniter[:,None]*integration*frequency*self.niter
self.nions = nions
self.ionname = ionname
self.time = time
self.data = data
self.fname = fname
self.average()
if not full_data:
self.data = None
self.mask = None
def average(self):
data_mean = np.mean(self.data, axis=2)
data_std = np.std(self.data, axis=2)/np.sqrt(self.niter)
#print(np.shape(self.data), np.shape(data_mean), np.shape(self.total_iterations))
data_counts = data_mean*self.total_iterations
# divide by sqrt(total_iterations) twice - once to get Poisson
# variance of measured data and once to the error of estimated mean
# this should be verified, but it is in agreement with errors obtained
# by treating data as normal variables for large numbers
data_poiss_err = np.sqrt(np.maximum(data_counts, 3))/self.total_iterations
# we assume that if 0 counts are observed, 3 counts is within confidence interval
# we use std error if it is larger than poisson error to capture other sources
# of error e.g. fluctuations
data_std = np.maximum(data_std, data_poiss_err)
self.data_mean = data_mean
self.data_std = data_std
def merge(self, rate2):
self.data_mean = np.concatenate((self.data_mean, rate2.data_mean), axis=1)
self.data_std = np.concatenate((self.data_std, rate2.data_std), axis=1)
self.time = np.concatenate((self.time, rate2.time), axis=0)
#print " ** merging ** "
#print self.data_mean, self.data_std, self.time
def poisson_test1(self):
shape = np.shape(self.data_mean)
#check only H- XXX
shape = (1, shape[1])
pval = np.zeros(shape)
for specno in range(shape[0]):
for pointno in range(shape[1]):
                if self.mask is not None:
dataline = self.data[specno, pointno, self.mask[specno, pointno, :]]
else:
dataline = self.data[specno, pointno, :]
mean = np.mean(dataline)
Q = np.sum((dataline-mean)**2)/mean
niter = len(dataline[~np.isnan(dataline)])
dof = niter-1
from scipy.stats import chi2
chi = chi2.cdf(Q, dof)
if chi > 0.5: pval[specno, pointno] = (1-chi)*2
else: pval[specno, pointno] = chi*2
print((chi, Q, pval[specno, pointno]))
return np.min(pval)
def cut3sigma(self, nsigma=3):
shape = np.shape(self.data)
self.mask = np.zeros(shape, dtype=bool)
for specno in range(shape[0]):
for pointno in range(shape[1]):
stddev = self.data_std[specno, pointno]*np.sqrt(self.niter)
low = self.data_mean[specno, pointno] - nsigma*stddev
high = self.data_mean[specno, pointno] + nsigma*stddev
dataline = self.data[specno, pointno, :]
mask = (dataline > low) & (dataline < high)
#self.data[specno, pointno, ~mask] = float("nan")
self.mask[specno, pointno, :] = mask
self.data_mean[specno, pointno] = np.mean(dataline[mask])
self.data_std[specno, pointno] = np.std(dataline[mask])/np.sqrt(self.niter)
#data_mean = np.mean(self.data[self.mask], axis=2)
#data_std = np.std(self.data, axis=2)/np.sqrt(self.niter)
#print self.data_mean, self.data_std
#self.data[self.data<120] = 130
def fit_ode_mpmath(self, p0=[60.0, .1], columns=[0]):
from mpmath import odefun
def fitfunc(p, x):
eqn = lambda x, y: -p[1]*y
y0 = p[0]
f = odefun(eqn, 0, y0)
g = np.vectorize(lambda x: float(f(x)))
return g(x)
return self._fit(fitfunc, p0, columns)
def fit_ode_scipy(self, p0=[60.0, .1], columns=[0]):
from scipy.integrate import odeint
def fitfunc(p, x):
eqn = lambda y, x: -p[1]*y
y0 = p[0]
t = np.r_[0., x]
y = odeint(eqn, y0, t)
return y[1:,0]
return self._fit(fitfunc, p0, columns)
def fit_inc(self, p0=[1.0, .01, 0.99], columns=[1]):
#fitfuncinc = lambda p, x: p[0]*(1-np.exp(-x/p[1]))+p[2]
fitfunc = lambda p, x: -abs(p[0])*np.exp(-x/abs(p[1]))+abs(p[2])
return self._fit(fitfunc, p0, columns)
def fit_equilib(self, p0=[70.0, .1, 1], columns=[0]):
fitfunc = lambda p, x: abs(p[0])*np.exp(-x/abs(p[1]))+abs(p[2])
return self._fit(fitfunc, p0, columns)
class MultiRate:
def __init__(self, fnames, directory=""):
if isinstance(fnames, str): fnames = [fnames]
self.rates = [Rate(directory+fname, full_data=True) for fname in fnames]
# if True, a normalization factor for each rate with respect to rates[0] is a free fitting param
self.normalized = True
self.norms = [1]*len(self.rates)
self.fitfunc = None
self.fitparam = None
self.fitresult = None
self.fitcolumns = None
self.fitmask = slice(None)
self.fnames = fnames
self.sigma_min = 0.01 # lower bound on the measurement accuracy
def plot_to_file(self, fname, comment=None, figsize=(6,8.5), logx=False, *args, **kwargs):
import matplotlib.pyplot as plt
from lmfit import fit_report
f = plt.figure(figsize=figsize)
ax = f.add_axes([.15, .5, .8, .45])
self.plot(ax=ax, show=False, *args, **kwargs)
ax.set_yscale("log")
if logx: ax.set_xscale("log")
ax.legend(loc="lower right", fontsize=5)
ax.set_title(comment, size=8)
if self.fitresult is not None:
f.text(0.1, 0.44, "p-value = %.2g\n"%self.fitpval
+ fit_report(self.fitresult, min_correl=0.5), size=6, va="top", family='monospace')
if ax.get_ylim()[0] < 1e-4: ax.set_ylim(bottom=1e-4)
ax.set_xlabel(r"$t (\rm s)$")
ax.set_ylabel(r"$N_{\rm i}$")
if self.fitresult is not None:
ax2 = f.add_axes([.55, .345, .40, .10])
if logx: ax2.set_xscale("log")
self.plot_residuals(ax=ax2, show=False, weighted=True)
ax2.tick_params(labelsize=7)
ax2.set_title("weighted residuals", size=7)
ax2.set_xlabel(r"$t (\rm s)$", size=7)
ax2.set_ylabel(r"$R/\sigma$", size=7)
f.savefig(fname, dpi=200)
plt.close(f)
def plot(self, ax=None, show=False, plot_fitfunc=True, symbols=["o", "s", "v", "^", "D", "h"], colors=["r", "g", "b", "m", "k", "orange"],\
opensymbols=False, fitfmt="-", fitcolor=None, hide_uncertain=False, plot_columns=None):
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
lines = {}
if plot_columns is None: plot_columns = range(self.rates[0].nions)
for i in plot_columns:
if opensymbols:
kwargs = {"markeredgewidth":1, "markerfacecolor":"w", "markeredgecolor": colors[i], "color":colors[i]}
else:
kwargs = {"markeredgewidth":0, "color":colors[i]}
l = None
for j, rate in enumerate(self.rates):
norm = 1/self.norms[j]
I = rate.data_std[i] < rate.data_mean[i] if hide_uncertain else slice(None)
                if l is None:
l = ax.errorbar(rate.time[I], rate.data_mean[i][I]*norm, yerr=rate.data_std[i][I]*norm, label=rate.ionname[i],
fmt = symbols[i], **kwargs)
color = l.get_children()[0].get_color()
else:
l = ax.errorbar(rate.time[I], rate.data_mean[i][I]*norm, yerr=rate.data_std[i][I]*norm,
fmt = symbols[i], color=color, markeredgewidth=0)
lines[i] = l
# plot sum
for j, rate in enumerate(self.rates):
# calculate the sum over the plotted data only
S = np.sum(rate.data_mean[plot_columns], axis=0)
label = "sum" if j==0 else None
ax.plot(rate.time, S/self.norms[j], ".", c="0.5", label=label)
        if self.fitfunc is not None and self.fitparam is not None:
mintime = np.min([np.min(r.time[self.fitmask]) for r in self.rates])
maxtime = np.max([np.max(r.time[self.fitmask]) for r in self.rates])
x = np.logspace(np.log10(mintime), np.log10(maxtime), 500)-self.fit_t0
x = x[x>=0.]
fit = self.fitfunc(self.fitparam, x)
for i, column in enumerate(self.fitcolumns):
if column not in plot_columns: continue
                if fitcolor is None: c = lines[column].get_children()[0].get_color()
else: c = fitcolor
ax.plot(x+self.fit_t0, fit[i], fitfmt, c=c)
if len(self.fitcolumns) > 1:
ax.plot(x+self.fit_t0, np.sum(fit, axis=0), c="k")
if show == True:
ax.set_yscale("log")
ax.legend()
plt.show()
return ax
def plot_residuals(self, ax=None, show=False, weighted=False, symbols=["o", "s", "v", "^", "D", "h"], colors=["r", "g", "b", "m", "k", "orange"],\
opensymbols=False, plot_columns=None):
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if plot_columns is None: plot_columns = range(self.rates[0].nions)
cdict = {col: i for i, col in enumerate(plot_columns)}
lines = {}
for j, rate in enumerate(self.rates):
t = rate.time[self.fitmask]
#print("\n"*3 + "*"*80)
#print(rate.fname)
fit = self.fitfunc(self.fitparam, t-self.fit_t0)
for i, column in enumerate(self.fitcolumns):
"""
print("\n"*2 + "*"*3 + " " + rate.ionname[column])
print(rate.time)
print(t - self.fit_t0)
print(rate.data_mean[column])
print(rate.data_std[column])
print(fit[i])
print((rate.data_mean[column][self.fitmask] - fit[i])/rate.data_std[column][self.fitmask])
"""
if column in plot_columns:
j = cdict[column]
if weighted:
ax.plot(t, (rate.data_mean[column][self.fitmask] - fit[i])/rate.data_std[column][self.fitmask],
symbols[j], color=colors[j], lw=0.5, ms=2)
else:
ax.errorbar(t, rate.data_mean[column][self.fitmask] - fit[i], yerr=rate.data_std[column][self.fitmask],
fmt=symbols[j], color=colors[j], lw=0.5, ms=2)
#ax.set_yscale("symlog", linthresh=10)
if show == True:
ax.set_yscale("log")
ax.legend()
plt.show()
return ax
def save_data(self, filename):
to_save = []
for j, rate in enumerate(self.rates):
norm = 1/self.norms[j]
to_save.append(np.hstack((rate.time[:,np.newaxis], rate.data_mean.T, rate.data_std.T)))
        to_save = np.vstack(to_save)
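        # Assumed completion (added): the original method ends without using
        # `filename`; write the stacked columns to disk as plain text.
        np.savetxt(filename, to_save)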
# CREATED: 4/15/14 9:42 AM by <NAME> <<EMAIL>>
'''
Unit tests for mir_eval.melody
'''
import numpy as np
import json
import nose.tools
import mir_eval
import glob
import warnings
A_TOL = 1e-12
# Path to the fixture files
REF_GLOB = 'data/melody/ref*.txt'
EST_GLOB = 'data/melody/est*.txt'
SCORES_GLOB = 'data/melody/output*.json'
def test_hz2cents():
# Unit test some simple values
hz = np.array([0., 10., 5., 320., 1420.31238974231])
# Expected cent conversion
expected_cent = np.array([0., 0., -1200., 6000., 8580.0773605])
assert np.allclose(mir_eval.melody.hz2cents(hz), expected_cent)
def test_freq_to_voicing():
# Unit test some simple values
hz = np.array([0., 100., -132.])
expected_hz = np.array([0., 100., 132.])
expected_voicing = np.array([0, 1, 0])
# Check voicing conversion
res_hz, res_voicing = mir_eval.melody.freq_to_voicing(hz)
assert np.all(res_hz == expected_hz)
assert np.all(res_voicing == expected_voicing)
# Unit test some simple values where voicing is given
hz = np.array([0., 100., -132., 0, 131.])
voicing = np.array([0.8, 0.0, 1.0, 0.0, 0.5])
expected_hz = np.array([0., 100., 132., 0., 131.])
expected_voicing = np.array([0.0, 0.0, 1.0, 0.0, 0.5])
# Check voicing conversion
res_hz, res_voicing = mir_eval.melody.freq_to_voicing(hz, voicing=voicing)
assert np.all(res_hz == expected_hz)
assert np.all(res_voicing == expected_voicing)
def test_constant_hop_timebase():
hop = .1
end_time = .35
expected_times = np.array([0, .1, .2, .3])
res_times = mir_eval.melody.constant_hop_timebase(hop, end_time)
assert np.allclose(res_times, expected_times)
def test_resample_melody_series():
# Check for a small example including a zero transition
times = np.arange(4)/35.0
cents = np.array([2., 0., -1., 1.])
voicing = np.array([1, 0, 1, 1])
times_new = np.linspace(0, .08, 9)
expected_cents = np.array([2., 2., 2., 0., 0., 0., -.8, -.1, .6])
expected_voicing = np.array([1, 1, 1, 0, 0, 0, 1, 1, 1])
(res_cents,
res_voicing) = mir_eval.melody.resample_melody_series(times, cents,
voicing, times_new)
assert np.allclose(res_cents, expected_cents)
assert np.allclose(res_voicing, expected_voicing)
# Check for a small example including a zero transition - nonbinary voicing
times = np.arange(4)/35.0
cents = np.array([2., 0., -1., 1.])
voicing = np.array([0.8, 0.0, 0.2, 1.0])
times_new = np.linspace(0, .08, 9)
expected_cents = np.array([2., 2., 2., 0., 0., 0., -.8, -.1, .6])
expected_voicing = np.array(
[0.8, 0.52, 0.24, 0.01, 0.08, 0.15, 0.28, 0.56, 0.84]
)
(res_cents,
res_voicing) = mir_eval.melody.resample_melody_series(times, cents,
voicing, times_new)
assert np.allclose(res_cents, expected_cents)
assert np.allclose(res_voicing, expected_voicing)
def test_resample_melody_series_same_times():
# Check the case where the time bases are identical
times = np.array([0.0, 0.1, 0.2, 0.3])
times_new = np.array([0.0, 0.1, 0.2, 0.3])
cents = np.array([2., 0., -1., 1.])
voicing = np.array([0, 0, 1, 1])
expected_cents = np.array([2., 0., -1., 1.])
expected_voicing = np.array([False, False, True, True])
(res_cents,
res_voicing) = mir_eval.melody.resample_melody_series(times, cents,
voicing, times_new)
assert np.allclose(res_cents, expected_cents)
assert np.allclose(res_voicing, expected_voicing)
# Check the case where the time bases are identical - nonbinary voicing
times = np.array([0.0, 0.1, 0.2, 0.3])
times_new = np.array([0.0, 0.1, 0.2, 0.3])
cents = np.array([2., 0., -1., 1.])
voicing = np.array([0.5, 0.8, 0.9, 1.0])
expected_cents = np.array([2., 0., -1., 1.])
expected_voicing = np.array([0.5, 0.8, 0.9, 1.0])
(res_cents,
res_voicing) = mir_eval.melody.resample_melody_series(times, cents,
voicing, times_new)
assert np.allclose(res_cents, expected_cents)
assert np.allclose(res_voicing, expected_voicing)
def test_to_cent_voicing():
# We'll just test a few values from one of the test annotations
ref_file = sorted(glob.glob(REF_GLOB))[0]
ref_time, ref_freq = mir_eval.io.load_time_series(ref_file)
est_file = sorted(glob.glob(EST_GLOB))[0]
est_time, est_freq = mir_eval.io.load_time_series(est_file)
ref_v, ref_c, est_v, est_c = mir_eval.melody.to_cent_voicing(ref_time,
ref_freq,
est_time,
est_freq)
# Expected values
test_range = np.arange(220, 225)
expected_ref_v = np.array([False, False, False, True, True])
expected_ref_c = np.array([0., 0., 0., 6056.8837818916609,
6028.5504583021921])
expected_est_v = np.array([False]*5)
expected_est_c = np.array([5351.3179423647571]*5)
assert np.allclose(ref_v[test_range], expected_ref_v)
assert np.allclose(ref_c[test_range], expected_ref_c)
assert np.allclose(est_v[test_range], expected_est_v)
assert np.allclose(est_c[test_range], expected_est_c)
# Test that a 0 is added to the beginning
for return_item in mir_eval.melody.to_cent_voicing(
np.array([1., 2.]), np.array([440., 442.]), np.array([1., 2.]),
np.array([441., 443.])):
assert len(return_item) == 3
assert return_item[0] == return_item[1]
# Test custom voicings
ref_time, ref_freq = mir_eval.io.load_time_series(ref_file)
_, ref_reward = mir_eval.io.load_time_series("data/melody/reward00.txt")
_, est_voicing = mir_eval.io.load_time_series(
"data/melody/voicingest00.txt"
)
(ref_v, ref_c,
est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
ref_freq,
est_time,
est_freq,
est_voicing=est_voicing,
ref_reward=ref_reward)
# Expected values
test_range = np.arange(220, 225)
expected_ref_v = np.array([0., 0., 0., 1., 0.3])
expected_ref_c = np.array([0., 0., 0., 6056.8837818916609,
6028.5504583021921])
expected_est_v = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
expected_est_c = np.array([5351.3179423647571]*5)
assert np.allclose(ref_v[test_range], expected_ref_v)
assert np.allclose(ref_c[test_range], expected_ref_c)
assert np.allclose(est_v[test_range], expected_est_v)
assert np.allclose(est_c[test_range], expected_est_c)
def test_continuous_voicing_metrics():
ref_time = np.array([0.0, 0.1, 0.2, 0.3])
ref_freq = np.array([440.0, 0.0, 220.0, 220.0])
est_time = np.array([0.0, 0.1, 0.2, 0.3])
est_freq = np.array([440.1, 330.0, 440.0, 330.0])
# test different estimate voicings
all_est_voicing = [
np.array([1.0, 0.0, 1.0, 1.0]), # perfect
np.array([0.0, 1.0, 0.0, 0.0]), # all wrong
np.array([0.5, 0.5, 0.5, 0.5]), # all 0.5
np.array([0.8, 0.2, 0.8, 0.8]), # almost right
np.array([0.2, 0.8, 0.2, 0.2]), # almost wrong
]
all_expected = [
# perfect
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 0.0,
'Raw Pitch Accuracy': 1. / 3.,
'Raw Chroma Accuracy': 2. / 3.,
'Overall Accuracy': 0.5,
},
# all wrong
{
'Voicing Recall': 0.0,
'Voicing False Alarm': 1.0,
'Raw Pitch Accuracy': 1. / 3.,
'Raw Chroma Accuracy': 2. / 3.,
'Overall Accuracy': 0.0,
},
# all 0.5
{
'Voicing Recall': 0.5,
'Voicing False Alarm': 0.5,
'Raw Pitch Accuracy': 1. / 3.,
'Raw Chroma Accuracy': 2. / 3.,
'Overall Accuracy': 0.25,
},
# almost right
{
'Voicing Recall': 0.8,
'Voicing False Alarm': 0.2,
'Raw Pitch Accuracy': 1. / 3.,
'Raw Chroma Accuracy': 2. / 3.,
'Overall Accuracy': 0.4,
},
# almost wrong
{
'Voicing Recall': 0.2,
'Voicing False Alarm': 0.8,
'Raw Pitch Accuracy': 1. / 3.,
'Raw Chroma Accuracy': 2. / 3.,
'Overall Accuracy': 0.1,
},
]
for est_voicing, expected_scores in zip(all_est_voicing, all_expected):
actual_scores = mir_eval.melody.evaluate(ref_time, ref_freq, est_time,
est_freq,
est_voicing=est_voicing)
for metric in actual_scores:
assert np.isclose(actual_scores[metric], expected_scores[metric])
# test different rewards
all_rewards = [
np.array([0.5, 0.5, 0.5, 0.5]), # uniform
np.array([0.3, 0.3, 0.3, 0.3]), # uniform - different number
np.array([0.0, 0.0, 0.0, 0.0]), # all zero
np.array([1.0, 0.0, 0.0, 0.0]), # one weight
np.array([1.0, 0.0, 1.0, 0.0]), # two weights
np.array([1.0, 0.0, 0.5, 0.5]), # slightly generous
np.array([0.1, 0.0, 0.1, 0.8]), # big penalty
]
est_voicing = np.array([1.0, 0.0, 1.0, 1.0])
all_expected = [
# uniform
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 0.0,
'Raw Pitch Accuracy': 1. / 3.,
'Raw Chroma Accuracy': 2. / 3.,
'Overall Accuracy': 0.5,
},
# uniform - different number
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 0.0,
'Raw Pitch Accuracy': 1. / 3.,
'Raw Chroma Accuracy': 2. / 3.,
'Overall Accuracy': 0.5,
},
# all zero
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 0.75,
'Raw Pitch Accuracy': 0.0,
'Raw Chroma Accuracy': 0.0,
'Overall Accuracy': 0.25,
},
# one weight
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 2. / 3.,
'Raw Pitch Accuracy': 1.0,
'Raw Chroma Accuracy': 1.0,
'Overall Accuracy': 0.5,
},
# two weights
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 0.5,
'Raw Pitch Accuracy': 0.5,
'Raw Chroma Accuracy': 1.0,
'Overall Accuracy': 0.5,
},
# slightly generous
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 0.0,
'Raw Pitch Accuracy': 0.5,
'Raw Chroma Accuracy': 0.75,
'Overall Accuracy': 0.625,
},
# big penalty
{
'Voicing Recall': 1.0,
'Voicing False Alarm': 0.0,
'Raw Pitch Accuracy': 0.1,
'Raw Chroma Accuracy': 0.2,
'Overall Accuracy': 0.325,
},
]
for ref_reward, expected_scores in zip(all_rewards, all_expected):
actual_scores = mir_eval.melody.evaluate(ref_time, ref_freq, est_time,
est_freq,
est_voicing=est_voicing,
ref_reward=ref_reward)
for metric in actual_scores:
assert np.isclose(actual_scores[metric], expected_scores[metric])
def __unit_test_voicing_measures(metric):
# We need a special test for voicing_measures because it only takes 2 args
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# First, test for warnings due to empty voicing arrays
score = metric(np.array([]), np.array([]))
assert len(w) == 4
assert np.all([issubclass(wrn.category, UserWarning) for wrn in w])
assert [str(wrn.message)
for wrn in w] == ["Reference voicing array is empty.",
"Estimated voicing array is empty.",
"Reference melody has no voiced frames.",
"Estimated melody has no voiced frames."]
# And that the metric is 0
assert np.allclose(score, 0)
# Also test for a warning when the arrays have non-voiced content
metric(np.ones(10), np.zeros(10))
assert len(w) == 5
assert issubclass(w[-1].category, UserWarning)
assert str(w[-1].message) == "Estimated melody has no voiced frames."
# Now test validation function - voicing arrays must be the same size
nose.tools.assert_raises(ValueError, metric, np.ones(10), np.ones(12))
def __unit_test_melody_function(metric):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# First, test for warnings due to empty voicing arrays
score = metric(np.array([]), np.array([]), np.array([]), np.array([]))
assert len(w) == 6
assert np.all([issubclass(wrn.category, UserWarning) for wrn in w])
assert [str(wrn.message)
for wrn in w] == ["Reference voicing array is empty.",
"Estimated voicing array is empty.",
"Reference melody has no voiced frames.",
"Estimated melody has no voiced frames.",
"Reference frequency array is empty.",
"Estimated frequency array is empty."]
# And that the metric is 0
assert np.allclose(score, 0)
# Also test for a warning when the arrays have non-voiced content
metric(np.ones(10), np.arange(10), np.zeros(10), np.arange(10))
assert len(w) == 7
assert issubclass(w[-1].category, UserWarning)
assert str(w[-1].message) == "Estimated melody has no voiced frames."
# Now test validation function - all inputs must be same length
nose.tools.assert_raises(ValueError, metric, np.ones(10),
np.ones(12), np.ones(10), np.ones(10))
def __check_score(sco_f, metric, score, expected_score):
    assert np.allclose(score, expected_score, atol=A_TOL)
import numpy as np
import scipy.sparse as sp
def subsetNpMatrix(matrix, row_bounds, column_bounds):
rows = np.array([x for x in range(row_bounds[0], row_bounds[1]) if 0 <= int(x) < matrix.shape[0]])
cols = np.array([y for y in range(column_bounds[0], column_bounds[1]) if 0 <= int(y) < matrix.shape[1]])
if len(rows)==0 or len(cols)==0:
return np.empty(0)
subset = (matrix.ravel()[(cols + (rows * matrix.shape[1]).reshape((-1, 1))).ravel()]).reshape(rows.size, cols.size)
return subset
def strata2triu(strata):
mat = np.zeros((len(strata[0]), len(strata[0])))
for i in range(len(strata)):
for j in range(len(strata[i])):
mat[j, j + i] = strata[i][j]
return mat
def strata2horizontal(strata):
hmat = np.zeros((len(strata[0]), len(strata)))
for i in range(len(strata)):
hmat[:len(strata[i]), i] = strata[i]
return hmat
def strata2vertical(strata):
vmat = np.zeros((len(strata[0]), len(strata)))
for i in range(len(strata)):
vmat[i:, i] = strata[i]
return vmat
def blank_diagonal2(matr, strata = False):
"""
in: edgelist, strata (n entries off main diagonal to zero)
out:matrix with n blanked diagonal
"""
# int_shape = (int(max(matr[:,1])+1), int(max(matr[:,1])+1))
# coo = sp.coo_matrix((matr[:, 2], (matr[:, 0], matr[:, 1])), shape=int_shape, dtype=matr.dtype)
# csr = coo.tocsr()
# csr_org = csr.copy()
coo = sp.coo_matrix(matr)
csr = coo.tocsr()
lil = csr.tolil()
for i in range(lil.shape[0]):
lil[i:i+strata+1,i:i+strata+1] = 0
csr = lil.tocsr()
return csr
def blank_diagonal(mat, nstrata=0):
return np.triu(mat, nstrata) if nstrata > 0 else mat
def blank_diagonal_sparse_from_strata(strata, nstrata=0):
"""
# >>> strata = [np.array([1, 2, 3, 4]), np.array([5, 6, 7]), np.array([8, 9])]
# >>> blank_diagonal_sparse_from_strata(strata, nstrata=1)
"""
size = len(strata[0])
padded_strata = [
np.concatenate([np.zeros(shape=(i,)), strata[i]]) for i in range(len(strata))
]
for i in range(nstrata):
padded_strata[i] = np.zeros((size,))
diags = np.arange(len(strata))
return sp.spdiags(padded_strata, diags, size, size, format='csr')
if __name__ == '__main__':
    strata = [np.array([1, 2, 3, 4]), np.array([5, 6, 7]), np.array([8, 9])]
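    # Assumed completion (added): the original __main__ block is truncated here;
    # exercise the sparse helper on the strata defined above.
    print(blank_diagonal_sparse_from_strata(strata, nstrata=1).toarray())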
from math import sqrt
import numpy as np
def rms_error(base_curve, fitted_curve):
error = base_curve - fitted_curve
return np.sqrt(np.mean(error ** 2))
def nrms_error(base_curve, fitted_curve, method="mean"):
if method == "mean":
        Norm = np.mean(base_curve)
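    # Assumed completion (added): the original function is truncated here.
    # Normalise the RMS error by the chosen statistic; only "mean" and "range"
    # are sketched, other methods raise.
    elif method == "range":
        Norm = np.max(base_curve) - np.min(base_curve)
    else:
        raise ValueError("unknown normalisation method: %s" % method)
    return rms_error(base_curve, fitted_curve) / Norm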
import random
import numpy as np
class Generative():
def __init__(self, itemNum, userNum, emb_dim, lamda, param, initdelta, learning_rate):
self.itemNum = itemNum
self.userNum = userNum
self.emb_dim = emb_dim
self.lamda = lamda
self.param = param
self.initdelta = initdelta
self.learning_rate = learning_rate
self.alpha_u = self.alpha_v = self.beta_v = lamda
self.users_embedding = np.array([([0.] * self.emb_dim) for i in range(self.userNum)])
self.items_embedding = np.array([([0.] * self.emb_dim) for i in range(self.itemNum)])
self.items_bias = np.array([0.] * self.itemNum)
## init the user latent feature matrix and item latent feature matrix
        if param is None:
i = 0
while (i < self.userNum):
j = 0
while (j < self.emb_dim):
self.users_embedding[i][j] = random.uniform(-self.initdelta, self.initdelta)
j += 1
i += 1
i = 0
while (i < self.itemNum):
j = 0
while (j < self.emb_dim):
self.items_embedding[i][j] = random.uniform(-self.initdelta, self.initdelta)
j += 1
i += 1
self.items_bias = np.array([0.] * self.itemNum)
else:
self.users_embedding = self.param[0]
self.users_embedding = np.array(self.users_embedding)
self.items_embedding = self.param[1]
self.items_embedding = np.array(self.items_embedding)
self.items_bias = self.param[2]
self.items_bias = np.array(self.items_bias)
def get_all_logits(self, u, all_logits):
all_items = np.arange(self.itemNum)
        all_logits = np.sum(np.multiply(self.users_embedding[u], self.items_embedding[all_items]), axis=1)
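        # Assumed completion (added): the original file is truncated here; add
        # the item bias terms and return the per-item scores.
        all_logits = all_logits + self.items_bias
        return all_logits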
import pandas as pd
import numpy as np
import random
import torch
"""
specify a certain GPU
"""
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = '6'
# random seed
np.random.seed(1)
random.seed(2)
torch.manual_seed(1)
config_initial_swarm_positions = pd.read_excel("Configurations/swarm_positions_200.xlsx")
config_initial_swarm_positions = config_initial_swarm_positions.values[:, 1:4]
config_initial_swarm_positions = np.array(config_initial_swarm_positions, dtype=np.float64)
# configurations on swarm
config_num_of_agents = 200
config_communication_range = 120
# configurations on environment
config_width = 1000.0
config_length = 1000.0
config_height = 100.0
config_constant_speed = 1
# configurations on destroy
config_maximum_destroy_num = 50
config_minimum_remain_num = 5
# configurations on meta learning
config_meta_training_epi = 500
# configurations on Graph Convolutional Network
config_K = 1 / 100
config_best_eta = 0.3
config_best_epsilon = 0.99
# configurations on one-off UEDs
config_num_destructed_UAVs = 100 # should be in the range of [1, num_of_UAVs-2]
config_normalize_positions = True
# configurations on training GCN
config_alpha_k = [0.01, 0.05, 0.1, 0.15, 0.2, 0.5, 0.9, 0.95, 1, 1.5, 2, 3, 5]
config_gcn_repeat = 100
config_expension_alpha = [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
config_d0_alpha = [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2]
config_representation_step = 450
# configurations on continuous destroy setting 1
config_destroy_step_list_1 = [10, 90, 100, 131, 230, 310, config_representation_step + 100]
config_destroy_mode_list_1 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
# config_destroy_num_list_1 = [50, 8, 9, 25, 1, 0]
config_destroy_num_list_1 = [50, 8, 9, 7, 20, 1]
config_destroy_range_list_1 = [0, 0, 0, 0, 0, 0, 0, 0, 50, 10, 0]
config_destroy_center_list_1 = [None, None, None, None, None, None, None, None, np.array([300, 200, 50]),
np.array([600, 750, 50]), None]
# configurations on continuous destroy setting 2
config_destroy_step_list_2 = [3, 21, 40, 56, 70, 125, 145, 160, 176, 190, config_representation_step + 100]
config_destroy_mode_list_2 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
config_destroy_num_list_2 = [10, 20, 20, 30, 40, 10, 10, 30, 10, 10]
config_destroy_range_list_2 = [0, 0, 0, 0, 0, 0, 0, 0, 50, 10]
config_destroy_center_list_2 = [None, None, None, None, None, None, None, None, np.array([300, 200, 50]),
np.array([600, 750, 50]), None]
# configurations on continuous destroy setting 3
config_destroy_step_list_3 = [9, 15, 20, 56, 60, 70, 103, 156, 170, config_representation_step + 100]
config_destroy_mode_list_3 = [2, 2, 2, 2, 2, 2, 2, 2, 2]
config_destroy_num_list_3 = [10, 30, 15, 8, 50, 20, 10, 10, 10]
config_destroy_range_list_3 = [0, 0, 0, 0, 0, 0, 0, 0, 50, 10]
config_destroy_center_list_3 = [None, None, None, None, None, None, None, None, np.array([300, 200, 50]),
np.array([600, 750, 50]), None]
# configurations on continuous destroy setting 4
config_destroy_step_list_4 = [10, 51, 70, 91, 100, 120, 135, 150, 170, 198, 210, config_representation_step + 100]
config_destroy_mode_list_4 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
config_destroy_num_list_4 = [50, 8, 10, 3, 40, 5, 5, 30, 1, 0, 4]
config_destroy_range_list_4 = [10, 20, 30, 40, 30, 20, 40, 20, 50, 10, 10]
config_destroy_center_list_4 = [np.array([60, 70, 60]), np.array([500, 750, 30]), np.array([30, 75, 70]),
                                np.array([500, 500, 50]), np.array([100, 150, 20]), np.array([600, 750, 30]),
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
def safe_margin(val, low=True, pct: float = 0.05):
low_pct, high_pct = 1 - pct, 1 + pct
func = min if low else max
return func(val * low_pct, val * high_pct)
def safe_bounds(array, pct: float = 0.05):
low_x, high_x = array.min(), array.max()
low_x = safe_margin(low_x, pct=pct)
high_x = safe_margin(high_x, pct=pct, low=False)
return low_x, high_x
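# Example (added): safe_bounds pads the data range by 5% on each side, which is
# handy for axis limits that do not clip boundary points, e.g.
# safe_bounds(np.array([1.0, 10.0])) returns (0.95, 10.5).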
def example_meshgrid(X, n_bins=100, low_th: float = 0.95, high_th: float = 1.05):
low_x, high_x = X[:, 0].min(), X[:, 0].max()
low_y, high_y = X[:, 1].min(), X[:, 1].max()
low_x = safe_margin(low_x)
low_y = safe_margin(low_y)
high_x = safe_margin(high_x, False)
high_y = safe_margin(high_y, False)
    xs = np.linspace(low_x, high_x, n_bins)
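    # Assumed completion (added): the original function is truncated here; build
    # the matching y grid and return the meshgrid.
    ys = np.linspace(low_y, high_y, n_bins)
    return np.meshgrid(xs, ys)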
import numpy as np
from holoviews.core import NdOverlay
from holoviews.core.spaces import HoloMap
from holoviews.element import Polygons, Contours, Path
from .testplot import TestMPLPlot, mpl_renderer
class TestPathPlot(TestMPLPlot):
def test_path_continuously_varying_color_op(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
color = [998, 999, 998, 994]
data = {'x': xs, 'y': ys, 'color': color}
levels = [0, 38, 73, 95, 110, 130, 156, 999]
colors = ['#5ebaff', '#00faf4', '#ffffcc', '#ffe775', '#ffc140', '#ff8f20', '#ff6060']
path = Path([data], vdims='color').options(
color='color', color_levels=levels, cmap=colors)
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([998, 999, 998]))
self.assertEqual(artist.get_clim(), (994, 999))
def test_path_continuously_varying_alpha_op(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
alpha = [0.1, 0.7, 0.3, 0.2]
data = {'x': xs, 'y': ys, 'alpha': alpha}
path = Path([data], vdims='alpha').options(alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(path)
def test_path_continuously_varying_line_width_op(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
line_width = [1, 7, 3, 2]
data = {'x': xs, 'y': ys, 'line_width': line_width}
path = Path([data], vdims='line_width').options(linewidth='line_width')
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [1, 7, 3])
def test_path_continuously_varying_line_width_op_update(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
path = HoloMap({
0: Path([{'x': xs, 'y': ys, 'line_width': [1, 7, 3, 2]}], vdims='line_width'),
1: Path([{'x': xs, 'y': ys, 'line_width': [3, 8, 2, 3]}], vdims='line_width')
}).options(linewidth='line_width')
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [1, 7, 3])
plot.update((1,))
self.assertEqual(artist.get_linewidths(), [3, 8, 2])
class TestPolygonPlot(TestMPLPlot):
def test_polygons_colored(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
for j in range(5)})
plot = mpl_renderer.get_plot(polygons)
for j, splot in enumerate(plot.subplots.values()):
artist = splot.handles['artist']
            self.assertEqual(artist.get_array(), np.array([j]))
import abc
import glob
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
from tensormate.graph.base import TfGgraphBuilder
class DataSetParams(object):
def __init__(self, path_pattern_train, path_pattern_validation):
self.path_pattern_train = path_pattern_train
self.path_pattern_validation = path_pattern_validation
self.samples_train = None
self.samples_validation = None
class ImageDataSetParams(DataSetParams):
def __init__(self, *args, **kwargs):
super(ImageDataSetParams, self).__init__(*args, **kwargs)
self.image_height = None
self.image_weight = None
self.image_channels = 3
self.pixel_mean = None
self.pixel_std = None
@property
def pixel_bounds(self):
        pixel_min = np.min((0.0 - self.pixel_mean) / self.pixel_std)
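        # Assumed completion (added): the original property is truncated here;
        # mirror the lower bound for the top of the 8-bit pixel range.
        pixel_max = np.max((255.0 - self.pixel_mean) / self.pixel_std)
        return pixel_min, pixel_max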
import cv2
import collections
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.optimizers import SGD
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from sklearn import datasets
from sklearn.cluster import KMeans
import numpy as np
import scipy.signal
import os
from fuzzywuzzy import fuzz
import matplotlib
import matplotlib.pyplot as plt
import math
def train_or_load_character_recognition_model(train_image_paths, serialization_folder):
"""
    Receives the paths to the training photographs (the dataset consists of different photographs of the alphabet) and
    the path to the folder in which the model should be saved after training (so it is not retrained every time).
    The procedure should train the model and save it in the "serialization_folder" folder under an arbitrary name.
    When called, the procedure should train the model if it has not been trained yet, or simply load it if it has
    already been trained and is present in the serialization folder.
    :param train_image_paths: paths to the alphabet photographs
    :param serialization_folder: folder in which the serialized model should be saved
    :return: Model object
"""
try:
with open(os.path.join(serialization_folder, 'NeuralNetParams.json'), 'r') as nnp_file:
nnmodel = model_from_json(nnp_file.read())
nnmodel.load_weights(os.path.join(serialization_folder, 'NeuralNetWeights.h5'))
except Exception as e:
nnmodel = train_ocr(train_image_paths)
params = nnmodel.to_json()
try:
with open(os.path.join(serialization_folder, 'NeuralNetParams.json'), 'w') as nnp_file:
nnp_file.write(params)
nnmodel.save_weights(os.path.join(serialization_folder, 'NeuralNetWeights.h5'))
except Exception as e:
print(e)
pass
return nnmodel
alphabet = ['A', 'B', 'C', 'Č', 'Ć', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'Š', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'Ž', 'a', 'b', 'c', 'č', 'ć', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 'š', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ž']
def train_ocr(train_image_paths):
datagen = ImageDataGenerator(
rotation_range=25,
fill_mode="constant",
cval=0,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.5,
zoom_range=0.15
)
if train_image_paths[0][-5] == '1':
train_image_paths = train_image_paths[::-1]
nn = Sequential()
nn.add(Dense(192, input_dim=28*28, activation='sigmoid'))
nn.add(Dropout(0.3))
#nn.add(Dense(256, activation='sigmoid'))
#nn.add(Dropout(0.3))
nn.add(Dense(128, activation='sigmoid'))
nn.add(Dropout(0.3))
nn.add(Dense(len(alphabet), activation='softmax'))
y = np.array(np.eye(len(alphabet)), np.float32)
x = []
#b = False
for path in train_image_paths:
vectorimagerois, _ = extract_rois(path)
for im in vectorimagerois:
#if b:
# plt.imshow(im)
# plt.show()
#x.append(im.flatten())
x.append(resize_and_flatten(im, (28, 28), flatten=False).tolist())
#b = True
x = np.array(x)
#print(len(x))
#exit(0)
print(x.shape, y.shape)
sgd = SGD(lr=0.4, momentum=0.9)
nn.compile(loss='mean_squared_error', optimizer=sgd)
#nn.fit(x, y, epochs=700, batch_size=1, verbose=2, shuffle=False)
#return nn
#print(x.shape)
x = np.expand_dims(x, axis=3)
print(x.shape)
round = 0
for x_batch, y_batch in datagen.flow(x, y, batch_size=y.shape[0], shuffle=False):
round += 1
x = []
for a in x_batch:
#plt.imshow(a)
#plt.show()
x.append(a.flatten())#resize_and_flatten(a, (28, 28)))
x = np.array(x)
print(round)
nn.fit(x, y_batch, epochs=1, steps_per_epoch=x.shape[0], verbose=2, shuffle=False)
if round >= 6000:
break
#nn.fit_generator(inputdata, steps_per_epoch=len(x) / 30, epochs=4000)
return nn
def nn_predict_text(trained_model, vectorcharimgrois):
extracted_text = ''
for i in range(len(vectorcharimgrois)):
vectorcharimgrois[i] = resize_and_flatten(vectorcharimgrois[i], (28, 28))
if vectorcharimgrois[i].ndim == 1:
vectorcharimgrois[i] = np.array([vectorcharimgrois[i]])
index = np.argmax(trained_model.predict(vectorcharimgrois[i]))
extracted_text += alphabet[index]
return extracted_text
def add_spaces_to_nn_text_output(extracted_text, distancerois):
try:
distances = np.array(distancerois).reshape(len(distancerois), 1)
k_means = KMeans(n_clusters=2, max_iter=2000, tol=0.000001, n_init=100)
k_means.fit(distances)
w_space_group = max(enumerate(k_means.cluster_centers_), key=lambda x: x[1])[0]
except Exception as e:
print(e)
return extracted_text
charsnum = len(extracted_text)
insertedwhitespaces = 0
ret = ''
for i in range(charsnum):
ret += extracted_text[i]
if i < len(distancerois) and k_means.labels_[i] == w_space_group:
ret += ' '
#extracted_text = extracted_text[:i + insertedwhitespaces + 1] + ' ' + extracted_text[i + insertedwhitespaces + 1:]
#insertedwhitespaces += 1
return ret #extracted_text
def guess_text_by_distance(extracted_text, vocabulary):
words = extracted_text.split(' ')
extracted_text = ''
for i in range(0, len(words)):
wordguess = []
for vword in vocabulary.keys():
wordguess.append((vword, fuzz.ratio(words[i], vword), vocabulary[vword]))
wordguess.sort(key=lambda x: x[1], reverse=True)
for j in range(0, len(wordguess)):
if j == 0:
continue
elif wordguess[0][1] != wordguess[j][1]:
wordguess = wordguess[:j]
break
wordguess.sort(key=lambda x: x[2], reverse=True)
extracted_text += wordguess[0][0]
if i + 1 != len(words):
extracted_text += ' '
return extracted_text
def extract_text_from_image(trained_model, image_path, vocabulary):
"""
    Receives the trained character recognition model, the path to the photograph containing the text to be
    extracted, and a dictionary of all known words that may appear in the photograph.
    The procedure should load the photograph from the given path, extract all of its text using
    OpenCV (character detection) and the previously trained model (character recognition), and return the recognized
    text as a string.
    This procedure is called automatically from the main procedure, so there is no need to add its call to main.py.
    :param trained_model: <Model> Trained model for character recognition
    :param image_path: <String> Path to the photograph from which the text should be read.
    :param vocabulary: <Dict> Dictionary of ALL known words and the frequency of their occurrence in the text
    :return: <String> Text read from the input image
"""
extracted_text = ""
vectorcharimgrois, distancerois = extract_rois(image_path)
if vectorcharimgrois is None: return extracted_text
extracted_text = nn_predict_text(trained_model, vectorcharimgrois)
print("NeuralNet, preprocessed, predicted characters : ", extracted_text)
extracted_text = add_spaces_to_nn_text_output(extracted_text, distancerois)
print("Kmeans, added spaces after aneuralnet results: ", extracted_text)
extracted_text = guess_text_by_distance(extracted_text, vocabulary)
print("Levenshtein, guess word by distance, end result: ", extracted_text)
return extracted_text
def histogram(image, xmax):
height, width = image.shape[0:2]
x = range(0, xmax + 1)
y = np.zeros(xmax + 1)
for i in range(0, height):
for j in range(0, width):
pixel = image[i, j]
y[pixel] += 1
return x, y
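# Note (added): the nested per-pixel loops in `histogram` can be replaced by a
# single np.bincount call; this equivalent helper is kept separate so the
# original implementation stays untouched.
def histogram_fast(image, xmax):
    counts = np.bincount(np.asarray(image, dtype=np.int64).ravel(), minlength=xmax + 1).astype(float)
    return range(0, xmax + 1), counts[:xmax + 1]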
def distinctHist(image, xmax, sourcevalid):
height, width = image.shape[0:2]
x = range(0, xmax + 1)
y = np.zeros(xmax + 1)
for i in range(0, height):
for j in range(0, width):
if sourcevalid[i, j]:
pixel = image[i, j]
y[pixel] += 1
return x, y
def rectPoints(r):
pts = [[r[0], r[1]], [r[0], r[1]], [r[0], r[1]], [r[0], r[1]]]
pts[1][0] = pts[1][0] + r[2]
pts[2][0] = pts[1][0]
pts[2][1] = pts[2][1] + r[3]
pts[3][1] = pts[2][1]
return pts
def isInside(rectangle, contour):
pts = rectPoints(rectangle)
rectcontour = cv2.convexHull(np.array([pts[0], pts[1], pts[2], pts[3]], dtype=np.int32))
for coor in contour:
point = (coor[0][0], coor[0][1])
if cv2.pointPolygonTest(rectcontour, point, False) < 0:
return False
return True
def expandRect(rectangle):
wxsideshift = 0 # int(0.15 * rectangle[2])
hyupshift = int(0.5 * rectangle[3])
hydownshift = int(0.15 * rectangle[3])
rectw = int(2 * wxsideshift + rectangle[2])
recth = int(hyupshift + hydownshift + rectangle[3])
rectx = rectangle[0] - wxsideshift
recty = rectangle[1] - hyupshift
return rectx, recty, rectw, recth
def cropMultipleContoursBoundingRect(baseimg, cnts, allcontours):
    img = np.copy(baseimg)
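    # Assumed completion (added): the original function is truncated here; crop
    # the rectangle that bounds all supplied contours.  The intended role of
    # `allcontours` is unknown, so this sketch ignores it.
    points = np.vstack([c.reshape(-1, 2) for c in cnts])
    x, y, w, h = cv2.boundingRect(points)
    return img[y:y + h, x:x + w]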
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from .core import (Fittable1DModel, Fittable2DModel)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D',
'Linear1D', 'Lorentz1D', 'RickerWavelet1D', 'RickerWavelet2D',
'RedshiftScaleFactor', 'Multiply', 'Planar2D', 'Scale',
'Sersic1D', 'Sersic2D', 'Shift',
'Sine1D', 'Cosine1D', 'Tangent1D',
'ArcSine1D', 'ArcCosine1D', 'ArcTangent1D',
'Trapezoid1D', 'TrapezoidDisk2D', 'Ring2D', 'Voigt1D',
'KingProjectedAnalytic1D', 'Exponential1D', 'Logarithmic1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Gaussian")
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian")
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'mean': inputs_unit[self.inputs[0]],
'stddev': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
Rotation angle (value in radians). The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(default=0, description="Peak position (along x axis) of Gaussian")
y_mean = Parameter(default=0, description="Peak position (along y axis) of Gaussian")
x_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along x axis)")
y_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along y axis)")
theta = Parameter(default=0.0, description="Rotation angle [in radians] (Optional parameter)")
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
            # Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit,
self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_mean': inputs_unit[self.inputs[0]],
'y_mean': inputs_unit[self.inputs[0]],
'x_stddev': inputs_unit[self.inputs[0]],
'y_stddev': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
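# A minimal numerical sketch (example covariance values assumed) of the equivalence stated
# in the Notes above: a Gaussian2D built from ``cov_matrix`` evaluates to the quadratic-form
# expression A * exp(-0.5 * (x - x0)^T Sigma^-1 (x - x0)).
def _gaussian2d_cov_matrix_sketch():
    sx, sy, rho = 2.0, 1.0, 0.5
    cov = np.array([[sx ** 2, rho * sx * sy],
                    [rho * sx * sy, sy ** 2]])
    g = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, cov_matrix=cov)
    x, y = np.meshgrid(np.linspace(-3, 3, 7), np.linspace(-3, 3, 7))
    cov_inv = np.linalg.inv(cov)
    quad = cov_inv[0, 0] * x ** 2 + 2 * cov_inv[0, 1] * x * y + cov_inv[1, 1] * y ** 2
    # True: the (x_stddev, y_stddev, theta) recovered from the eigendecomposition in
    # __init__ reproduce the covariance parametrization exactly.
    return np.allclose(g(x, y), np.exp(-0.5 * quad))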
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.offset) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'offset': outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box())
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box())
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='Redshift', default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.z) for x in self.bounding_box.bounding_box())
return inv
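# A minimal sketch (example values assumed) of the inverse defined above: since
# z_inv = 1/(1 + z) - 1, applying the model and its inverse in sequence is the identity.
def _redshift_roundtrip_sketch():
    m = RedshiftScaleFactor(z=1.5)
    x = np.linspace(1000., 2000., 5)  # e.g. rest-frame wavelengths
    return np.allclose(m.inverse(m(x)), x)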
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_eff': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
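# A minimal sketch (example values assumed) of the property implied by the definition of
# b_n in the Notes above: the profile evaluates to ``amplitude`` exactly at r = r_eff.
def _sersic1d_half_light_sketch():
    s = Sersic1D(amplitude=10., r_eff=5., n=4.)
    return np.isclose(float(s(5.)), 10.)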
class _Trigonometric1D(Fittable1DModel):
"""
Base class for one dimensional trigonometric and inverse trigonometric models
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'frequency': inputs_unit[self.inputs[0]] ** -1,
'amplitude': outputs_unit[self.outputs[0]]}
class Sine1D(_Trigonometric1D):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Sine"""
return ArcSine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
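# A minimal sketch (example parameter values assumed) of the inverse defined above: within
# the principal branch of arcsin, composing Sine1D with its ArcSine1D inverse recovers x.
def _sine1d_inverse_sketch():
    s = Sine1D(amplitude=2., frequency=0.25, phase=0.1)
    x = 0.3  # chosen so that 2*pi*(f*x + p) stays inside [-pi/2, pi/2]
    return np.isclose(float(s.inverse(s(x))), x)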
class Cosine1D(_Trigonometric1D):
"""
One dimensional Cosine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Cosine1D
plt.figure()
s1 = Cosine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Cosine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.cos(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Cosine model derivative"""
d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
d_frequency = - (TWOPI * x * amplitude *
np.sin(TWOPI * frequency * x + TWOPI * phase))
d_phase = - (TWOPI * amplitude *
np.sin(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Cosine"""
return ArcCosine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
class Tangent1D(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
    Note that the tangent function is undefined for inputs of the form
    pi/2 + n*pi for all integers n. Thus the default bounding box
    has been restricted to:
    .. math:: [(-1/4 - p)/f, (1/4 - p)/f]
    which is the smallest interval on which the tangent function is
    continuous.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative"""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase))**2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent"""
return ArcTangent1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
"""
bbox = [(-1/4 - self.phase) / self.frequency, (1/4 - self.phase) / self.frequency]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
class _InverseTrigonometric1D(_Trigonometric1D):
"""
Base class for one dimensional inverse trigonometric models
"""
@property
def input_units(self):
if self.amplitude.unit is None:
return None
return {self.inputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'frequency': outputs_unit[self.outputs[0]] ** -1,
'amplitude': inputs_unit[self.inputs[0]]}
class ArcSine1D(_InverseTrigonometric1D):
"""
One dimensional ArcSine model returning values between -pi/2 and pi/2
only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Sine
frequency : float
Oscillation frequency for corresponding Sine
phase : float
Oscillation phase for corresponding Sine
See Also
--------
Sine1D, ArcCosine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f
The arcsin function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcSine1D
plt.figure()
s1 = ArcSine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcSine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_sine = np.arcsin(argument) / TWOPI
return (arc_sine - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcSine model derivative"""
d_amplitude = - x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2))
d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
d_phase = - 1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcSine"""
return Sine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
class ArcCosine1D(_InverseTrigonometric1D):
"""
One dimensional ArcCosine returning values between 0 and pi only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Cosine
frequency : float
Oscillation frequency for corresponding Cosine
phase : float
Oscillation phase for corresponding Cosine
See Also
--------
Cosine1D, ArcSine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((arccos(x / A) / 2pi) - p) / f
The arccos function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcCosine1D
plt.figure()
s1 = ArcCosine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, 0, np.pi])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_cos = np.arccos(argument) / TWOPI
return (arc_cos - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model derivative"""
d_amplitude = x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2))
d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2
        d_phase = - 1 / frequency * np.ones(x.shape)
        return [d_amplitude, d_frequency, d_phase]
# -*- coding: utf-8 -*-
"""HousingModel
Main model class
"""
import time
import numpy as np
import pandas as pd
# consav package
from consav import ModelClass
from consav.misc import elapsed, nonlinspace, markov_rouwenhorst
# local
import post_decision
import negm
import nvfi
import simulate
def solve_model(model,t_min=0,do_print=True):
""" solve the model """
par = model.par
sol = model.sol
t0_outer = time.time()
# a. re-set up grids
t0 = time.time()
model.create_grids()
if do_print: print(f'setup grids in {elapsed(t0)}')
# c. time loop
for t in reversed(range(t_min,par.T)):
t0 = time.time()
        # i. post-decisions
t0_pd = time.time()
post_decision.compute_wq_renters(par,sol,t)
post_decision.compute_wq_owners(par,sol,t)
t_pd = elapsed(t0_pd)
# ii. negm
t0_negm = time.time()
negm.solve_renters(par,sol,t)
negm.solve_owners(par,sol,t)
t_negm = elapsed(t0_negm)
# iii. evaluate values of each discrete choice
t0_evaluate = time.time()
nvfi.evaluate_rt(par,sol,t)
nvfi.evaluate_ft(par,sol,t)
nvfi.evaluate_bt(par,sol,t)
t_evaluate = elapsed(t0_evaluate)
# iv. final nvfi
t0_nvfi = time.time()
nvfi.solve_renters(par,sol,t)
t_nvfi_r = elapsed(t0_nvfi)
t0_nvfi = time.time()
nvfi.solve_owners(par,sol,t)
t_nvfi_o = elapsed(t0_nvfi)
if do_print:
msg = f't = {t:2d} solved in {elapsed(t0)}'
msg += f'[pd: {t_pd}, negm: {t_negm}, evaluate: {t_evaluate}, nvfi_r: {t_nvfi_r}, nvfi_o: {t_nvfi_o}]'
print(msg)
if do_print: print(f'model solved in {elapsed(t0_outer)}')
def simulate_model(model,do_print=True,seed=1986):
""" simulate the model """
if not seed is None: np.random.seed(seed)
par = model.par
sol = model.sol
sim = model.sim
t0_outer = time.time()
# a. draw random numbers
sim.i_beta[:] = np.random.choice(par.Nbeta,size=par.simN) # preferences
sim.a0[:] = np.random.gamma(par.a0_shape,par.a0_scale,size=par.simN) # initial assets
sim.pi_p[:] = np.random.uniform(size=(par.simN,par.T)) # income process
sim.pi_c[:] = np.random.uniform(size=(par.simN,par.T)) # discrete choice
# b. simulate
simulate.simulate(par,sol,sim)
if do_print: print(f'model simulated in {elapsed(t0_outer)}')
# class
class HousingModelClass(ModelClass):
def setup(self):
""" set baseline parameters in .par """
par = self.par
# specify list over parameters, which are allowed not to be floats
self.not_float_list = ['T','TR','age_min','t_min','Delta_iota',
'Nbeta','Na','Niota','Nh','Nht','Np','NLTV','Nm','Nx','Nz',
'Nrt','Nbt','Nft','Nkt','Ncr','Nco','do_sim','simN']
# a. economic parameters
# life-cycle
par.T = 55 # life-span from age_min
par.TR = 37 # working-life-span from age_min
par.age_min = 25 # only used in figures
par.t_min = 0 # used when solving
# income
par.rho_p = 0.99 # persistence of income shocks
par.sigma_p = 0.30 # std. of income shocks
par.G = np.ones(par.T) # age-specific growth factors of income
par.G[:20] = 1.066
par.G[20:par.TR] = 1.015
par.G[par.TR:] = 0.96
par.retirement_drop = 1.00 # drop in income at retirement
# assets and housing
par.ra = 0.035 # return on liquid assets
par.rm = 0.040 # mortgage interest rate
par.rb = 0.070 # bank loan interest rate
par.ph = 1.000 # housing price
par.rh = 0.045 # rental price
par.delta = 0.0075 # mortgage interest only spread
par.gamma_m = 0.050 # mortgage repayment rate
par.gamma_b = 0.100 # bank loan repayment rate
par.tau_f = 0.100 # loan refinancing cost
par.tau_h = 0.200 # moving-in cost for owners
par.tau_ht = 0.010 # moving-in cost for renters
par.kappa_p = 4.00 # loan-to-income ratio
par.kappa_h = 0.95 # loan-to-value ratio
par.kappa_h_mortgage = 0.80 # loan-to-value ratio (mortgage)
        par.grid_h = np.array([2.0,4.0,6.0,8.0,10.0,15.0,20.0,25.0,30.0,35.0],dtype=np.float_)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
# create data
be_short_data = np.arange(20, dtype='>i2')
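# Illustrative follow-up (the intended use of the array is an assumption): inspect the
# explicit big-endian byte order and convert to a little-endian copy if needed.
print(be_short_data.dtype.byteorder)      # '>' marks big-endian storage
le_short_data = be_short_data.astype('<i2')
print(le_short_data.dtype.byteorder)      # '<' after the explicit conversion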
"""
Usage:
run.py [options]
Options:
-h --help Show this screen.
    --data_dir DATA_DIR            Directory(*) of the dataset. (*)=relative to repository root ProGraML/.
Will overwrite the per-dataset defaults if provided.
--log_dir LOG_DIR Directory(*) to store logfiles and trained models relative to repository dir.
[default: deeplearning/ml4pl/poj104/logs/unspecified]
--model MODEL The model to run.
    --dataset DATASET              The dataset to use.
--config CONFIG Path(*) to a config json dump with params.
--config_json CONFIG_JSON Config json with params.
--restore CHECKPOINT Path(*) to a model file to restore from.
--skip_restore_config Whether to skip restoring the config from CHECKPOINT.
--test Test the model without training.
--restore_by_pattern PATTERN Restore newest model of this name from log_dir and
continue training. (AULT specific!)
PATTERN is a string that can be grep'ed for.
--kfold Run kfold cross-validation iff kfold is set.
Splits are currently dataset specific.
--transfer MODEL The model-class to transfer to.
                                   The args specified will be applied to the transferred model to the extent applicable, e.g.
training params and Readout module specifications, but not to the transferred model trunk.
However, we strongly recommend to make all trunk-parameters match, in order to be able
to restore from transferred checkpoints without having to pass a matching config manually.
--transfer_mode MODE One of frozen, finetune (but not yet implemented) [default: frozen]
Mode frozen also sets all dropout in the restored model to zero (the newly initialized
readout function can have dropout nonetheless, depending on the config provided).
    --skip_save_every_epoch        Skip saving the latest model after every epoch (by default it is saved on a rolling basis).
"""
import pickle, time, os, json, sys
from pathlib import Path
from docopt import docopt
import tqdm
import numpy as np
import torch
from torch_geometric.data import Data, InMemoryDataset, DataLoader # (see below)
# make this file executable from anywhere
#if __name__ == '__main__':
full_path = os.path.realpath(__file__)
print(full_path)
REPO_ROOT = full_path.rsplit('ProGraML', maxsplit=1)[0] + 'ProGraML'
print(REPO_ROOT)
#insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, REPO_ROOT)
REPO_ROOT = Path(REPO_ROOT)
from deeplearning.ml4pl.poj104.dataloader import NodeLimitedDataLoader
from deeplearning.ml4pl.models.ggnn.modeling import (
GGNNModel,
GraphTransformerModel,
)
from deeplearning.ml4pl.models.ggnn.configs import (
ProGraMLBaseConfig,
GGNN_POJ104_Config,
GGNN_ForPretraining_Config,
GGNN_Devmap_Config,
GGNN_Threadcoarsening_Config,
GGNN_BranchPrediction_Config,
GraphTransformer_POJ104_Config,
GraphTransformer_Devmap_Config,
GraphTransformer_Threadcoarsening_Config,
GraphTransformer_BranchPrediction_Config,
GraphTransformer_ForPretraining_Config,
)
from deeplearning.ml4pl.poj104.dataset import (
POJ104Dataset,
NCCDataset,
ThreadcoarseningDataset,
DevmapDataset,
BranchPredictionDataset,
)
# Importing twice like this enables restoring
from deeplearning.ml4pl.models.ggnn import modeling
from deeplearning.ml4pl.models.ggnn import configs
# Slurm gives us among others: SLURM_JOBID, SLURM_JOB_NAME,
# SLURM_JOB_DEPENDENCY (set to the value of the --dependency option)
if os.environ.get('SLURM_JOBID'):
print('SLURM_JOB_NAME', os.environ.get('SLURM_JOB_NAME', ''))
print('SLURM_JOBID', os.environ.get('SLURM_JOBID', ''))
RUN_ID = "_".join([os.environ.get('SLURM_JOB_NAME', ''), os.environ.get('SLURM_JOBID')])
else:
RUN_ID = str(os.getpid())
MODEL_CLASSES = {
'ggnn_poj104': (GGNNModel, GGNN_POJ104_Config),
'ggnn_devmap': (GGNNModel, GGNN_Devmap_Config),
'ggnn_threadcoarsening': (GGNNModel, GGNN_Threadcoarsening_Config),
'ggnn_branch_prediction': (GGNNModel, GGNN_BranchPrediction_Config),
'ggnn_pretraining': (GGNNModel, GGNN_ForPretraining_Config),
'transformer_poj104': (GraphTransformerModel, GraphTransformer_POJ104_Config),
'transformer_devmap': (GraphTransformerModel, GraphTransformer_Devmap_Config),
'transformer_threadcoarsening': (GraphTransformerModel, GraphTransformer_Threadcoarsening_Config),
'transformer_branch_prediction': (GraphTransformerModel, GraphTransformer_BranchPrediction_Config),
'transformer_pretraining': (GraphTransformerModel, GraphTransformer_ForPretraining_Config),
}
DATASET_CLASSES = { #DS, default data_dir,
'poj104': (POJ104Dataset, 'deeplearning/ml4pl/poj104/classifyapp_data'),
'ncc': (NCCDataset, 'deeplearning/ml4pl/poj104/ncc_data'),
'devmap_amd': (DevmapDataset, 'deeplearning/ml4pl/poj104/devmap_data'),
'devmap_nvidia': (DevmapDataset, 'deeplearning/ml4pl/poj104/devmap_data'),
'threadcoarsening_Cypress': (ThreadcoarseningDataset, 'deeplearning/ml4pl/poj104/threadcoarsening_data'),
'threadcoarsening_Tahiti': (ThreadcoarseningDataset, 'deeplearning/ml4pl/poj104/threadcoarsening_data'),
'threadcoarsening_Fermi': (ThreadcoarseningDataset, 'deeplearning/ml4pl/poj104/threadcoarsening_data'),
'threadcoarsening_Kepler': (ThreadcoarseningDataset, 'deeplearning/ml4pl/poj104/threadcoarsening_data'),
'branch_prediction': (BranchPredictionDataset, 'deeplearning/ml4pl/poj104/branch_prediction_data'),
}
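# Example invocation (illustrative only; the script path and log_dir below are placeholders):
#   python run.py --model ggnn_poj104 --dataset poj104 \
#       --log_dir deeplearning/ml4pl/poj104/logs/ggnn_poj104_example
# --model must be one of the MODEL_CLASSES keys and --dataset one of the DATASET_CLASSES
# keys defined above; per-dataset default data_dirs are taken from DATASET_CLASSES.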
DEBUG = False
if DEBUG:
torch.autograd.set_detect_anomaly(True)
class Learner(object):
def __init__(self, model, dataset, args=None, current_kfold_split=None):
# Make class work without file being run as main
self.args = docopt(__doc__, argv=[])
if args:
self.args.update(args)
# prepare logging
self.parent_run_id = None # for restored models
self.run_id = f"{time.strftime('%Y-%m-%d_%H:%M:%S')}_{RUN_ID}"
if args['--kfold']:
self.run_id += f'_{current_kfold_split}'
log_dir = REPO_ROOT / self.args.get("--log_dir", '.')
log_dir.mkdir(parents=True, exist_ok=True)
self.log_file = log_dir / f"{self.run_id}_log.json"
self.best_model_file = log_dir / f"{self.run_id}_model_best.pickle"
self.last_model_file = log_dir / f"{self.run_id}_model_last.pickle"
# ~~~~~~~~~~ load model ~~~~~~~~~~~~~
if self.args.get('--restore'):
self.model = self.restore_model(path=REPO_ROOT / self.args['--restore'])
elif self.args.get('--restore_by_pattern'):
self.model = self.restore_by_pattern(pattern=self.args['--restore_by_pattern'],
log_dir=log_dir,
current_kfold_split=current_kfold_split)
else: # initialize fresh model
# get model and dataset
assert model, "Need to provide --model to initialize freshly."
Model, Config = MODEL_CLASSES[model]
self.global_training_step = 0
self.current_epoch = 1
# get config
params = self.parse_config_params(args)
self.config = Config.from_dict(params=params)
test_only = self.args.get('--test', False)
self.model = Model(self.config, test_only=test_only)
# set seeds, NB: the NN on CUDA is partially non-deterministic!
torch.manual_seed(self.config.random_seed)
        np.random.seed(self.config.random_seed)
#!/usr/bin/env python3
import argparse
import os
import sys
import re
import math
import warnings
import time
import struct
from collections import defaultdict
import pandas as pd
import numpy as np
import hicstraw
import cooler
from scipy.stats import expon
from scipy.ndimage import gaussian_filter
from scipy.ndimage.filters import maximum_filter
from scipy.signal import convolve2d
import scipy.ndimage.measurements as scipy_measurements
from scipy import sparse
from statsmodels.stats.multitest import multipletests
from multiprocessing import Process, Manager
def parseBP(s):
"""
:param s: string
:return: string converted to number, taking account for kb or mb
"""
if not s:
return False
if s.isnumeric():
return int(s)
s = s.lower()
if "kb" in s:
n = s.split("kb")[0]
if not n.isnumeric():
return False
return int(n) * 1000
elif "mb" in s:
n = s.split("mb")[0]
if not n.isnumeric():
return False
return int(n) * 1000000
return False
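# Worked examples for parseBP (the values follow directly from the function above; this
# helper is only an illustrative self-check).
def _parseBP_examples():
    assert parseBP("5000") == 5000
    assert parseBP("5kb") == 5000
    assert parseBP("2Mb") == 2000000
    assert parseBP("5.5kb") is False  # only integer prefixes are accepted
    assert parseBP("") is False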
def parse_args(args):
parser = argparse.ArgumentParser(description="Check the help flag")
parser.add_argument("-f",
"--file",
dest="f_path",
help="REQUIRED: Contact map",
required=False)
parser.add_argument("-d",
"--distance",
dest="distFilter",
help="REQUIRED: Maximum distance (in bp) allowed between loop loci",
required=False)
parser.add_argument("-o",
"--outfile",
dest="outdir",
help="REQUIRED: Name of the output file.\
Output is a numpy binary.",
required=True)
parser.add_argument("-r",
"--resolution",
dest="resolution",
help="REQUIRED: Resolution used for the contact maps",
required=True)
parser.add_argument("-bed", "--bed", dest="bed",
help="BED file for HiC-Pro type input",
default="",
required=False)
parser.add_argument("-m", "--matrix", dest="mat",
help="MATRIX file for HiC-Pro type input",
default="",
required=False)
parser.add_argument("-b", "--biases", dest="biasfile",
help="RECOMMENDED: biases calculated by\
ICE or KR norm for each locus for contact map are read from BIASFILE",
required=False)
parser.add_argument(
"-cz",
"--chromosomeSize",
default="",
dest="chrSize_file",
help="RECOMMENDED: .hic corressponfing chromosome size file.",
required=False)
parser.add_argument(
"-norm",
"--normalization",
default=False,
dest="norm_method",
help="RECOMMENDED: Hi-C normalization method (KR, VC,...).",
required=False)
# parser.add_argument("-cb",
# '--cooler-balance',
# dest='cooler_balance',
# default=False,
# #action='store_false',
# required=False,
# help="OPTIONAL: The cooler data was normalized prior to creating the .cool file.")
# parser.set_defaults(cooler_balance=False)
parser.add_argument(
"-st",
"--sparsityThreshold",
dest="st",
type=float,
default=0.88,
help="OPTIONAL: Mustache filters out contacts in sparse areas, you can relax this for sparse datasets(i.e. -st 0.8). Default value is 0.88.",
required=False)
parser.add_argument(
"-pt",
"--pThreshold",
dest="pt",
type=float,
default=0.2,
help="OPTIONAL: P-value threshold for the results in the final output. Default is 0.2",
required=False)
parser.add_argument(
"-sz",
"--sigmaZero",
dest="s_z",
type=float,
default=1.6,
help="OPTIONAL: sigma0 value for the method. DEFAULT is 1.6. \
Experimentally chosen for 5Kb resolution",
required=False)
parser.add_argument("-oc", "--octaves", dest="octaves", default=2,
type=int,
help="OPTIONAL: Octave count for the method. \
DEFAULT is 2.",
required=False)
parser.add_argument("-i", "--iterations", dest="s", default=10,
type=int,
help="OPTIONAL: iteration count for the method. \
DEFAULT is 10. Experimentally chosen for \
5Kb resolution",
required=False)
parser.add_argument("-p", "--processes", dest="nprocesses", default=4, type=int,
help="OPTIONAL: Number of parallel processes to run. DEFAULT is 4. Increasing this will also increase the memory usage",
required=False)
# parser.add_argument("-c",
# "--changefile",
# dest="changedir",
# help="...",
# required=False,
# default="")
parser.add_argument(
"-ch",
"--chromosome",
dest="chromosome",
nargs='+',
help="REQUIRED: Specify which chromosome to run the program for. Optional for cooler files.",
default='n',
required=False)
parser.add_argument(
"-ch2",
"--chromosome2",
dest="chromosome2",
nargs='+',
help="Optional: Specify the second chromosome for interchromosomal analysis.",
default='n',
required=False)
parser.add_argument("-v",
"--verbose",
dest="verbose",
type=bool,
default=True,
help="OPTIONAL: Verbosity of the program",
required=False)
return parser.parse_args()
def kth_diag_indices(a, k):
rows, cols = np.diag_indices_from(a)
if k < 0:
return rows[-k:], cols[:k]
elif k > 0:
return rows[:-k], cols[k:]
else:
return rows, cols
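# Worked example for kth_diag_indices (an illustrative self-check): k > 0 selects the
# k-th superdiagonal and k < 0 the k-th subdiagonal of a square array.
def _kth_diag_indices_example():
    a = np.arange(16).reshape(4, 4)
    assert list(a[kth_diag_indices(a, 1)]) == [1, 6, 11]
    assert list(a[kth_diag_indices(a, -1)]) == [4, 9, 14]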
def is_chr(s, c):
# if 'X' == c or 'chrX':
# return 'X' in c
# if 'Y' == c:
# return 'Y' in c
return str(c).replace('chr', '') == str(s).replace('chr', '') # re.findall("[1-9][0-9]*", str(s))
def get_sep(f):
"""
:param f: file path
:return: Guesses the value separator in the file.
"""
with open(f) as file:
for line in file:
if "\t" in line:
return '\t'
if " " in line.strip():
return ' '
if "," in line:
return ','
if len(line.split(' ')) == 1:
return ' '
break
raise FileNotFoundError
def read_bias(f, chromosome, res):
"""
:param f: Path to the bias file
:return: Dictionary where keys are the bin coordinates and values are the bias values.
"""
d = defaultdict(lambda: 1.0)
if f:
sep = get_sep(f)
with open(f) as file:
for pos, line in enumerate(file):
line = line.strip().split(sep)
if len(line) == 3:
if is_chr(line[0], chromosome):
val = float(line[2])
if not np.isnan(val):
if val < 0.2:
d[(float(line[1]) // res)] = np.Inf
else:
d[(float(line[1]) // res)] = val
else:
d[(float(line[1]) // res)] = np.Inf
elif len(line) == 1:
val = float(line[0])
if not np.isnan(val):
if val < 0.2:
d[pos] = np.Inf
else:
d[pos] = val
else:
d[pos] = np.Inf
return d
return False
def read_pd(f, distance_in_bp, bias, chromosome, res):
sep = get_sep(f)
df = pd.read_csv(f, sep=sep, header=None)
df.dropna(inplace=True)
if df.shape[1] == 5:
df = df[np.vectorize(is_chr)(df[0], chromosome)]
if df.shape[0] == 0:
            print('Couldn\'t read any interaction for this chromosome!')
return
df = df[np.vectorize(is_chr)(df[2], chromosome)]
df = df.loc[np.abs(df[1] - df[3]) <= ((distance_in_bp / res + 1) * res), :]
df[1] //= res
df[3] //= res
bias = read_bias(bias, chromosome, res)
if bias:
factors = np.vectorize(bias.get)(df[1], 1)
df[4] = np.divide(df[4], factors)
factors = np.vectorize(bias.get)(df[3], 1)
df[4] = np.divide(df[4], factors)
df = df.loc[df[4] > 0, :]
x = np.min(df.loc[:, [1, 3]], axis=1)
y = np.max(df.loc[:, [1, 3]], axis=1)
val = np.array(df[4])
elif df.shape[1] == 3:
df = df.loc[np.abs(df[1] - df[0]) <= ((distance_in_bp / res + 1) * res), :]
df[0] //= res
df[1] //= res
bias = read_bias(bias, chromosome, res)
if bias:
factors = np.vectorize(bias.get)(df[0], 1)
df[2] = np.divide(df[2], factors)
factors = np.vectorize(bias.get)(df[1], 1)
df[2] = np.divide(df[2], factors)
df = df.loc[df[2] > 0, :]
x = np.min(df.loc[:, [0, 1]], axis=1)
y = np.max(df.loc[:, [0, 1]], axis=1)
val = np.array(df[2])
return x, y, val
def read_hic_file(f, norm_method, CHRM_SIZE, distance_in_bp, chr1, chr2, res):
"""
:param f: .hic file path
:param chr: Which chromosome to read the file for
:param res: Resolution to extract information from
:return: Numpy matrix of contact counts
"""
if not CHRM_SIZE:
hic = hicstraw.HiCFile(f)
chromosomes = hic.getChromosomes()
chrSize_in_bp = {}
for i in range(1, len(chromosomes)):
chrSize_in_bp["chr" + str(chromosomes[i].name).replace("chr", '')] = chromosomes[i].length
CHRM_SIZE = chrSize_in_bp["chr" + chr1.replace("chr", '')]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHRM_SIZE, CHUNK_SIZE * res) # CHUNK_SIZE*res
result = []
val = []
while start < CHRM_SIZE:
print(int(start), int(end))
if not norm_method:
temp = hicstraw.straw("observed", "KR", f, str(chr1) + ":" + str(int(start)) + ":" + str(int(end)),
str(chr2) + ":" + str(int(start)) + ":" + str(int(end)), "BP", res)
else:
temp = hicstraw.straw("observed", str(norm_method), f,
str(chr1) + ":" + str(int(start)) + ":" + str(int(end)),
str(chr2) + ":" + str(int(start)) + ":" + str(int(end)), "BP", res)
if len(temp) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
if result == []:
result += [[int(record.binX), int(record.binY), record.counts] for record in temp]
prev_block = set([(record.binX, record.binY, record.counts) for record in temp])
else:
cur_block = set([(int(record.binX), int(record.binY), record.counts) for record in temp])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
x = np.array(result[0]) // res
y = np.array(result[1]) // res
val = np.array(result[2])
nan_indx = np.logical_or.reduce((np.isnan(result[0]), np.isnan(result[1]), np.isnan(result[2])))
x = x[~nan_indx]
y = y[~nan_indx]
val = val[~nan_indx]
x = x.astype(int)
y = y.astype(int)
if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
else:
val[np.isnan(val)] = 0
if (chr1 == chr2):
dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
x = x[dist_f]
y = y[dist_f]
val = val[dist_f]
if len(val > 0):
return x, y, val
else:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
def read_cooler(f, distance_in_bp, chr1, chr2, cooler_balance):
"""
:param f: .cool file path
:param chr: Which chromosome to read the file for
:return: Numpy matrix of contact counts
"""
clr = cooler.Cooler(f)
res = clr.binsize
print(f'Your cooler data resolution is {res}')
if chr1 not in clr.chromnames or chr2 not in clr.chromnames:
raise NameError('wrong chromosome name!')
CHRM_SIZE = clr.chromsizes[chr1]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHUNK_SIZE * res, CHRM_SIZE) # CHUNK_SIZE*res
result = []
val = []
###########################
if chr1 == chr2:
# try:
# normVec = clr.bins()['weight'].fetch(chr1)
# result = clr.matrix(balance=True,sparse=True).fetch(chr1)#as_pixels=True, join=True
while start < CHRM_SIZE:
print(int(start), int(end))
if not cooler_balance:
temp = clr.matrix(balance=True, sparse=True).fetch((chr1, int(start), int(end)))
else:
temp = clr.matrix(balance=cooler_balance, sparse=True).fetch((chr1, int(start), int(end)))
temp = sparse.triu(temp)
np.nan_to_num(temp, copy=False, nan=0, posinf=0, neginf=0)
start_in_px = int(start / res)
if len(temp.row) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
if result == []:
result += [list(start_in_px + temp.row), list(start_in_px + temp.col), list(temp.data)]
prev_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
else:
cur_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
# except:
# raise NameError('Reading from the file failed!')
if len(result) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
x = np.array(result[0])
y = np.array(result[1])
val = np.array(result[2])
else:
result = clr.matrix(balance=True, sparse=True).fetch(chr1, chr2)
result = sparse.triu(result)
np.nan_to_num(result, copy=False, nan=0, posinf=0, neginf=0)
x = result.row
y = result.col
val = result.data
##########################
if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
else:
val[np.isnan(val)] = 0
if (chr1 == chr2):
dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
x = x[dist_f]
y = y[dist_f]
val = val[dist_f]
# return np.array(x),np.array(y),np.array(val), res, normVec
if len(val > 0):
return np.array(x), np.array(y), np.array(val), res
else:
            print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
def read_mcooler(f, distance_in_bp, chr1, chr2, res, cooler_balance):
"""
:param f: .cool file path
:param chr: Which chromosome to read the file for
:param res: Resolution to extract information from
:return: Numpy matrix of contact counts
"""
uri = '%s::/resolutions/%s' % (f, res)
# uri = '%s::/7' % (f)
clr = cooler.Cooler(uri)
# print(clr.bins()[:100])
if chr1 not in clr.chromnames or chr2 not in clr.chromnames:
raise NameError('wrong chromosome name!')
CHRM_SIZE = clr.chromsizes[chr1]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHRM_SIZE, CHUNK_SIZE * res) # CHUNK_SIZE*res
result = []
val = []
if chr1 == chr2:
try:
# result = clr.matrix(balance=True,sparse=True).fetch(chr1)#as_pixels=True, join=True
while start < CHRM_SIZE:
print(int(start), int(end))
if not cooler_balance:
temp = clr.matrix(balance=True, sparse=True).fetch((chr1, int(start), int(end)))
else:
temp = clr.matrix(balance=cooler_balance, sparse=True).fetch((chr1, int(start), int(end)))
temp = sparse.triu(temp)
np.nan_to_num(temp, copy=False, nan=0, posinf=0, neginf=0)
start_in_px = int(start / res)
if len(temp.row) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
if result == []:
result += [list(start_in_px + temp.row), list(start_in_px + temp.col), list(temp.data)]
prev_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
# print('result==[]')
else:
cur_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
except:
raise NameError('Reading from the file failed!')
if len(result) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
x = np.array(result[0])
y = np.array(result[1])
val = np.array(result[2])
else:
result = clr.matrix(balance=True, sparse=True).fetch(chr1, chr2)
result = sparse.triu(result)
        np.nan_to_num(result, copy=False, nan=0, posinf=0, neginf=0)
        x = result.row
        y = result.col
        val = result.data
    if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
        return [], [], []
    else:
        val[np.isnan(val)] = 0
        if (chr1 == chr2):
            dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
            x = x[dist_f]
            y = y[dist_f]
            val = val[dist_f]
        if len(val) > 0:
            return x, y, val
        else:
            print(f'There is no contact in chromosome {chr1} to work on.')
            return [], [], []
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import numpy as np
from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV, LinearRegression,\
ElasticNet, ElasticNetCV, MultiTaskElasticNet, MultiTaskElasticNetCV
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
import mliv.dgps as dgps
from mliv.ensemble import EnsembleIV, EnsembleIVStar
from mliv.rkhs import ApproxRKHSIVCV
from mliv.shape import LipschitzShapeIV, ShapeIV
from mliv.linear import OptimisticHedgeVsOptimisticHedge, StochasticOptimisticHedgeVsOptimisticHedge
from mliv.linear import L2OptimisticHedgeVsOGD, L2ProxGradient
from sklearn.pipeline import Pipeline
from mcpy.utils import filesafe
def _get(opts, key, default):
return opts[key] if (key in opts) else default
def gen_data(opts):
"""
opts : the dgp_opts from the config file
"""
tau_fn = dgps.get_tau_fn(opts['fn'])
z, p, y, tau_fn = dgps.get_data(opts['n_samples'], opts['n_instruments'],
opts['iv_strength'], tau_fn, opts['dgp_num'])
if opts['gridtest']:
T_test = np.zeros((opts['n_test'], p.shape[1]))
T_test += np.median(p, axis=0, keepdims=True)
T_test[:, 0] = np.linspace(np.percentile(
p[:, 0], 5), np.percentile(p[:, 0], 95), 1000)
else:
_, T_test, _, _ = dgps.get_data(opts['n_test'], opts['n_instruments'],
opts['iv_strength'], tau_fn, opts['dgp_num'])
T_test = T_test[np.argsort(T_test[:, 0])]
expected_te = tau_fn(T_test)
# data, true_param
return (T_test, z, p, y), expected_te
def ivfit(data, model, fitargs=None):
T_test, Z, T, Y = data
if fitargs is not None:
model.fit(Z, T, Y, **fitargs)
else:
model.fit(Z, T, Y)
y_pred = model.predict(T_test)
return y_pred.reshape(T_test.shape[:1] + Y.shape[1:])
def nystromrkhsfit(data, opts):
"""
data: the data returned by gen_data
opts: the method_opts from the config file
"""
    alpha_scales = np.geomspace(1, 10000, 10)
import os
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from P13pt.rfspectrum import Network
from PyQt5.QtCore import QSignalMapper, pyqtSignal, pyqtSlot, QTimer
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLineEdit, QLabel,
QFileDialog, QMessageBox, QDialog)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
def check_deembedding_compatibility(ntwk1, ntwk2):
# TODO: careful when de-embedding thru from thru
    return ntwk1.number_of_ports == ntwk2.number_of_ports and len(ntwk1.f) == len(ntwk2.f) and np.max(np.abs(ntwk1.f - ntwk2.f)) == 0
#!/usr/bin/env python
# coding: utf-8
# # Analyzing Student's Behavior and Model suggestion for classification levels
# ### <NAME>
# > #### This Data Science project was made under Capstone Data Science IBM Certification Program.
# ## Table of contents
# * [Introduction: Business Problem](#introduction)
# * [Data](#data)
# * [Methodology](#methodology)
# * [Analysis](#analysis)
# * [Results and Discussion](#results)
# * [Conclusion](#conclusion)
# # 1. Introduction <a name="introduction"></a>
# A description of the problem and a discussion of the background
#
# The Internet revolution brought more than social media and faster information exchange. It also brought a generation of people who study through digital environments. In this context, online education evolved quickly and a real transformation of societies began. Nowadays, people in distant places and in poor countries can benefit from technology to access information, and Massive Open Online Courses (MOOCs) have had a major role in this.
# MOOCs join people from all around the world and build understanding in a wide range of areas, delivering science and culture.
#
# It is also known that online learning suffers from massive unenrollment. The lack of physical presence and the lack of motivation can make students leave. In this context, which features are related to this behavior? How can we understand the student's situation and predict churn or low grades?
# I think this is a relevant problem. If MOOC platforms can understand and predict student behavior, it becomes possible to manage student churn and find a way to give students the motivation they need.
#
# With this in mind, I searched for MOOC-generated student data to investigate and draw some conclusions about the theme.
#
# # 2. Data
# A description of the data and how it will be used to solve the problem
#
# To guide my investigation, I looked for a dataset that helps to understand the student's behavior, motivation and correlated characteristics, in order to better understand why an enrollment ends with a given result. So, it is important to find a dataset with some key features like grade, gender, enrollment level, and so on. Location data is also important to understand cultural traits, which can be explored with location APIs.
# Guided by the exploratory analysis, I'll be able to build a model to predict the student's behavior or results.
# After querying related datasets in order to find those with the most useful columns, I found a nice dataset on Kaggle called "Students' Academic Performance Dataset". You can check it here: https://www.kaggle.com/aljarah/xAPI-Edu-Data.
# The data comprises 16 columns with aggregated information about over 480 students of a learning platform called Kalboard360. The details are shown in the next section.
#
# ## 2.1 Data Structure
# As previously mentioned, this dataset includes 16 columns:
#
# 1. Gender - student's gender (nominal: 'Male' or 'Female’)
#
# 2. Nationality- student's nationality (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’)
#
# 3. Place of birth- student's Place of birth (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’)
#
# 4. Educational Stages- educational level student belongs (nominal: ‘lowerlevel’,’MiddleSchool’,’HighSchool’)
#
# 5. Grade Levels- grade student belongs (nominal: ‘G-01’, ‘G-02’, ‘G-03’, ‘G-04’, ‘G-05’, ‘G-06’, ‘G-07’, ‘G-08’, ‘G-09’, ‘G-10’, ‘G-11’, ‘G-12 ‘)
#
# 6. Section ID- classroom student belongs (nominal:’A’,’B’,’C’)
#
# 7. Topic- course topic (nominal:’ English’,’ Spanish’, ‘French’,’ Arabic’,’ IT’,’ Math’,’ Chemistry’, ‘Biology’, ‘Science’,’ History’,’ Quran’,’ Geology’)
#
# 8. Semester- school year semester (nominal:’ First’,’ Second’)
#
# 9. Parent responsible for student (nominal:’mom’,’father’)
#
# 10. Raised hand- how many times the student raises his/her hand on classroom (numeric:0-100)
#
# 11. Visited resources- how many times the student visits a course content(numeric:0-100)
#
# 12. Viewing announcements-how many times the student checks the new announcements(numeric:0-100)
#
# 13. Discussion groups- how many times the student participate on discussion groups (numeric:0-100)
#
# 14. Parent Answering Survey- parent answered the surveys which are provided from school or not (nominal:’Yes’,’No’)
#
# 15. Parent School Satisfaction- the Degree of parent satisfaction from school(nominal:’Yes’,’No’)
#
# 16. Student Absence Days-the number of absence days for each student (nominal: above-7, under-7)
#
# The most important characteristic of this dataset is that it includes the parents' data, which is a nice way to better understand the student.
# # 3. Methodology
#
# The first steps are data exploration and insight taking, in order to better understand the data and its columns. The purpose of this exploratory analysis is to identify hidden features and understand the relations between them.
# Next, I'll do a descriptive analysis by building a dataset for a clustering algorithm. This way, the data understanding will support more powerful decision making, focused on the students' behaviors.
# Finally, I'll create my predictive analysis by building a dataset with the best features for a supervised learning algorithm to predict the students' behavior under certain conditions, which will achieve my final objective. A minimal sketch of this pipeline is shown below.
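#
# The cell below is only an illustrative sketch of that pipeline, not part of the analysis itself. It assumes a hypothetical dataframe `df_model` that already holds one-hot encoded features plus the 'Class' target column; the actual feature selection and modeling are done in the following sections.
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
def sketch_pipeline(df_model):
    """Illustrative sketch: cluster students, then predict their 'Class' label."""
    X = df_model.drop(columns=['Class'])
    y = df_model['Class']
    # Descriptive step: group students into behavioral clusters.
    clusters = KMeans(n_clusters=3, random_state=0).fit_predict(X)
    # Predictive step: learn to predict the final classification.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
    clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
    return clusters, clf.score(X_test, y_test)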
# # 4. Analysis
# As mentioned, this section explores the data in order to compose the clustering dataset.
# ### 4.1 Exploratory Analysis
# In[110]:
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# In[111]:
dataset = pd.read_csv("../../../input/aljarah_xAPI-Edu-Data/xAPI-Edu-Data.csv")
dataset.head(5)
# To understand the students and their results, we set up a dataframe with the relevant columns
# In[112]:
df = dataset[['gender','PlaceofBirth','StageID','Topic','raisedhands','VisITedResources','AnnouncementsView','Discussion', 'ParentAnsweringSurvey','ParentschoolSatisfaction','StudentAbsenceDays', 'Class']]
df.head()
# Try to understand the results from countries
# In[113]:
df.groupby(['ParentschoolSatisfaction'])['Class'].value_counts(normalize=True)
# In[114]:
df.groupby(['ParentAnsweringSurvey'])['ParentschoolSatisfaction'].value_counts(normalize=True)
# It seems that parents who aren't involved in answering the school's surveys are more likely to become unsatisfied with the school. This can mean that well-informed parents better understand the student's enrollment and reality, and are therefore more satisfied.
# ### Question: What is the relation between active parents and student's classification?
# In[115]:
df.groupby(['ParentAnsweringSurvey'])['Class'].value_counts(normalize=True)
# So, the parents' active behavior definitely plays an important role in the student's growth.
# ## Understanding student's behavior
# Next, it is important to know which characteristics are linked to students' success. So, we're going to test the related features.
# In[116]:
df2 = dataset[['gender','raisedhands','VisITedResources','AnnouncementsView','Discussion','StudentAbsenceDays', 'Class']]
df2.head()
# ### Question: What's the relation between raising hands and classification?
# In[117]:
df2['raisedhands'] = pd.cut(df2.raisedhands, bins=3, labels=np.arange(3), right=False)
df2.groupby(['raisedhands'])['Class'].value_counts(normalize=True)
# So, it seems that students with low levels of raising hands are most likely to have a Low classification. On the other side, a high frequency of raising hands is linked to a higher classification.
# Next, we're going to check the act of visiting the course resources.
# In[118]:
df2['VisITedResources'] = pd.cut(df2.VisITedResources, bins=3, labels=np.arange(3), right=False)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import operator
import tvm
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
from tvm import relay
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.model_zoo import vision
import model_zoo
def verify_mxnet_frontend_impl(mx_symbol,
data_shape=(1, 3, 224, 224),
out_shape=(1, 1000),
gluon_impl=False,
name=None,
dtype='float32'):
"""Use name different from test to avoid pytest picking it up"""
if gluon_impl:
def get_gluon_output(name, x):
net = vision.get_model(name)
net.collect_params().initialize(mx.init.Xavier())
net_sym = gluon.nn.SymbolBlock(outputs=net(mx.sym.var('data')),
inputs=mx.sym.var('data'),
params=net.collect_params())
out = net_sym(mx.nd.array(x.astype(dtype))).asnumpy()
return out, net_sym
else:
def get_mxnet_output(symbol, x, dtype='float32'):
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
mod = mx.mod.Module(symbol, label_names=None)
mod.bind(data_shapes=[('data', x.shape)], for_training=False)
mod.init_params()
mod.forward(Batch([mx.nd.array(x.astype(dtype))]))
out = mod.get_outputs()[0].asnumpy()
args, auxs = mod.get_params()
return out, args, auxs
def get_tvm_output(symbol, x, args, auxs, target, ctx, dtype='float32'):
shape_dict = {"data": x.shape}
if gluon_impl:
mod, params = relay.frontend.from_mxnet(symbol, shape_dict)
else:
mod, params = relay.frontend.from_mxnet(symbol,
shape_dict,
arg_params=args,
aux_params=auxs)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod, target, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input("data", tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
# get outputs
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
# random input
x = np.random.uniform(size=data_shape)
if gluon_impl:
gluon_out, gluon_sym = get_gluon_output(name, x)
for target, ctx in ctx_list():
tvm_out = get_tvm_output(gluon_sym, x, None, None, target, ctx, dtype)
tvm.testing.assert_allclose(gluon_out, tvm_out, rtol=1e-5, atol=1e-5)
else:
mx_out, args, auxs = get_mxnet_output(mx_symbol, x, dtype)
assert "data" not in args
for target, ctx in ctx_list():
tvm_out = get_tvm_output(mx_symbol, x, args, auxs, target, ctx, dtype)
tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_mlp():
mlp = model_zoo.mx_mlp()
verify_mxnet_frontend_impl(mlp,
data_shape=(1, 1, 28, 28),
out_shape=(1, 10))
def test_forward_vgg():
for n in [11]:
mx_sym = model_zoo.mx_vgg(n)
verify_mxnet_frontend_impl(mx_sym)
def test_forward_resnet():
for n in [18]:
mx_sym = model_zoo.mx_resnet(18)
verify_mxnet_frontend_impl(mx_sym)
def test_forward_elu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='elu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_rrelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='rrelu', lower_bound=0.3, upper_bound=0.7)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_prelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='prelu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_softrelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.Activation(data, act_type='softrelu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_fc_flatten():
# test flatten=True option in mxnet 0.11.1
data = mx.sym.var('data')
try:
mx_sym = mx.sym.FullyConnected(data, num_hidden=100, flatten=True)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
mx_sym = mx.sym.FullyConnected(mx.sym.Flatten(data), num_hidden=100, flatten=False)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
except:
pass
def test_forward_clip():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.clip(data, a_min=0, a_max=1)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_split():
data = mx.sym.var('data')
mx_sym = mx.sym.split(data, axis=1, num_outputs=4, squeeze_axis=False)
verify_mxnet_frontend_impl(mx_sym, (1, 4, 2, 1), (1, 1, 2, 1))
def test_forward_split_squeeze():
data = mx.sym.var('data')
mx_sym = mx.sym.split(data, axis=1, num_outputs=4, squeeze_axis=True)
verify_mxnet_frontend_impl(mx_sym, (1, 4, 2, 1), (1, 2, 1))
def test_forward_expand_dims():
data = mx.sym.var('data')
mx_sym = mx.sym.expand_dims(data, axis=1)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 1, 3, 4))
def test_forward_pooling():
data = mx.sym.var('data')
mx_sym = mx.sym.Pooling(data, kernel=(3, 3), pad=(1, 1), pool_type='avg')
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 8, 8))
mx_sym = mx.sym.Pooling(data, kernel=(3, 3), pad=(1, 1), pool_type='max')
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 8, 8))
def test_forward_adaptive_pooling():
data = mx.sym.var('data')
mx_sym = mx.sym.contrib.AdaptiveAvgPooling2D(data, output_size=(1,))
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 1, 1))
mx_sym = mx.sym.contrib.AdaptiveAvgPooling2D(data, output_size=(3, 3))
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 3, 3))
def test_forward_lrn():
data = mx.sym.var('data')
mx_sym = mx.sym.LRN(data, alpha=2, beta=2, knorm=1, nsize=5)
verify_mxnet_frontend_impl(mx_sym, (1, 10, 24, 24), (1, 10, 24, 24))
def test_forward_ones():
data = mx.sym.var('data')
ones = mx.sym.ones(shape=(2, 3, 4), dtype='float32')
mx_sym = mx.sym.elemwise_add(data, ones)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_zeros():
data = mx.sym.var('data')
zeros = mx.sym.zeros(shape=(2, 3, 4), dtype='float32')
mx_sym = mx.sym.elemwise_add(data, zeros)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_ones_like():
data = mx.sym.var('data')
mx_sym = mx.sym.ones_like(data, dtype='float32')
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_zeros_like():
data = mx.sym.var('data')
mx_sym = mx.sym.zeros_like(data, dtype='float32')
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_argmax():
data = mx.sym.var('data')
mx_sym = mx.sym.argmax(data, axis=1)
verify_mxnet_frontend_impl(mx_sym, (5, 3), (5,))
def test_forward_argmin():
data = mx.sym.var('data')
mx_sym = mx.sym.argmin(data, axis=0)
verify_mxnet_frontend_impl(mx_sym, (5, 4), (4,))
def test_forward_slice():
data = mx.sym.var('data')
mx_sym = mx.sym.slice(data, begin=(0, 1), end=(2, 4))
verify_mxnet_frontend_impl(mx_sym, (3, 4), (2, 3))
mx_sym = mx.sym.slice(data, begin=(-1, 1), end=(-3, 4), step=(-1, 2))
verify_mxnet_frontend_impl(mx_sym, (3, 4), (2, 2))
def test_forward_where():
cond = mx.sym.var('cond')
x = mx.sym.var('x')
y = mx.sym.var('y')
dshape = (2, 2)
dtype = 'float32'
mx_sym = mx.sym.where(cond, x, y)
np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
np_x = np.random.uniform(size=dshape).astype(dtype)
np_y = np.random.uniform(size=dshape).astype(dtype)
mx_cond = mx.nd.array(np_cond)
mx_x = mx.nd.array(np_x)
mx_y = mx.nd.array(np_y)
shapes = {'cond': dshape, 'x': dshape, 'y': dshape}
mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
mod.bind(data_shapes=shapes.items(), for_training=False)
mod.init_params()
args, auxs = mod.get_params()
mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, args, auxs)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(np_cond, np_x, np_y)
tvm.testing.assert_allclose(op_res.asnumpy(), mx_out)
def test_forward_arange():
def _mx_symbol(F, start, stop, step):
if start is None and step is None:
sym = F.arange(stop)
elif start is None:
sym = F.arange(stop, step=step)
elif step is None:
sym = F.arange(start, stop)
else:
sym = F.arange(start, stop, step)
return sym
def verify(start, stop, step):
ref_res = _mx_symbol(mx.nd, start, stop, step).asnumpy()
mx_sym = _mx_symbol(mx.sym, start, stop, step)
mod, _ = relay.frontend.from_mxnet(mx_sym, {})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
verify(0, 20, None)
verify(0, 20, 2)
verify(1, 20, None)
verify(1, 20, 2)
verify(1, 20, 1.5)
verify(1, 20.5, None)
verify(1, 20, 3)
verify(20, 1, -1)
verify(20, 1, -1.5)
def _mx_symbol(F, op_name, inputs):
op = getattr(F, op_name)
return op(*inputs)
def test_forward_broadcast_ops():
for op in ["broadcast_add", "broadcast_sub", "broadcast_mul",
"broadcast_div", "broadcast_mod", "broadcast_maximum",
"broadcast_minimum", "broadcast_equal", "broadcast_not_equal",
"broadcast_greater", "broadcast_greater_equal",
"broadcast_lesser", "broadcast_lesser_equal"]:
a_shape = (3, 4, 5)
b_shape = (4, 5)
if op == "broadcast_mod":
dtype = 'int32'
a_np = np.random.randint(1, 100, size=a_shape).astype(dtype)
b_np = np.random.randint(1, 100, size=b_shape).astype(dtype)
else:
dtype = 'float32'
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_np = np.random.uniform(size=b_shape).astype(dtype)
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
shapes = {'a': a_shape, 'b': b_shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np, b_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_elemwise_ops():
for op in ["elemwise_add", "elemwise_sub", "elemwise_mul",
"elemwise_div", "maximum", "minimum"]:
shape = (3, 4, 5)
dtype = 'float32'
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = np.random.uniform(size=shape).astype(dtype)
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
shapes = {'a': shape, 'b': shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np, b_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_scalar_ops():
for op in [operator.add, operator.sub, operator.mul, operator.truediv,
operator.pow, operator.lt, operator.le, operator.eq,
operator.ne, operator.gt, operator.ge]:
dtype='float32'
a_shape = (3, 4, 5)
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_scalar = 2.3
mx_sym = op(mx.sym.var('a'), b_scalar)
ref_res = op(mx.nd.array(a_np), b_scalar)
shapes = {'a': a_shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
for op in ["maximum", "minimum"]:
dtype='float32'
a_shape = (3, 4, 5)
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_scalar = 2.3
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), b_scalar])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), b_scalar])
shapes = {'a': a_shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_slice_axis():
def verify(shape, axis, begin, end):
data_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.slice_axis(mx.nd.array(data_np), axis, begin, end)
mx_sym = mx.sym.slice_axis(mx.sym.var("data"), axis, begin, end)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(data_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4), 0, 1, 2)
verify((3, 4), 0, 1, None)
verify((3, 4), 1, 0, 2)
verify((3, 4), 1, -3, -1)
verify((3, 4), -1, -3, -1)
def test_forward_slice_like():
def verify(x_shape, y_shape, axes):
x_np = np.random.uniform(size=x_shape).astype("float32")
y_np = np.random.uniform(size=y_shape).astype("float32")
if axes is None:
ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np))
mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"))
else:
ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np), axes=axes)
mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"), axes=axes)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": x_shape, "y": y_shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np, y_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4), (2, 3), None)
verify((3, 4), (2, 3), (0, 1))
verify((3, 4), (2, 3), (0))
verify((3, 4), (2, 3), (-1))
def test_forward_l2_normalize():
data = mx.sym.var('data')
mx_sym = mx.sym.L2Normalization(data, mode="channel")
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4, 5), (2, 3, 4, 5))
def test_forward_shape_array():
def verify(shape):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.shape_array(mx.nd.array(x_np))
mx_sym = mx.sym.shape_array(mx.sym.var("x"))
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1,))
verify((3, 4, 5))
verify((3, 4, 5, 6))
def test_forward_squeeze():
def verify(shape, axis):
x_np = np.random.uniform(size=shape).astype("float32")
if axis is None:
ref_res = mx.nd.squeeze(mx.nd.array(x_np))
mx_sym = mx.sym.squeeze(mx.sym.var("x"))
else:
ref_res = mx.nd.squeeze(mx.nd.array(x_np), axis=axis)
mx_sym = mx.sym.squeeze(mx.sym.var("x"), axis=axis)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 3, 1), None)
verify((1, 3, 1), 0)
verify((1, 3, 1), 2)
verify((1, 3, 1), (0, 2))
def test_forward_broadcast_axis():
def verify(shape, axis, size):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.broadcast_axis(mx.nd.array(x_np), axis=axis, size=size)
mx_sym = mx.sym.broadcast_axis(mx.sym.var("x"), axis=axis, size=size)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 2, 1), 2, 3)
verify((1, 2, 1), (0, 2), (2, 3))
def test_forward_full():
def verify(val, shape, dtype):
ctx = mx.cpu()
ref_res = mx.nd.full(shape, val, dtype=dtype)
mx_sym = mx.sym.full(shape, val, dtype=dtype)
mod, _ = relay.frontend.from_mxnet(mx_sym, {})
for target, ctx in ctx_list():
# Skip testing graph runtime because this op will be optimized out
# by constant folding.
for kind in ["debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify(2, (3, 4), "float32")
verify(2, (3, 4), "int32")
verify(3.5, (1, 3, 4), "float32")
def test_forward_embedding():
def verify(data_shape, weight_shape):
in_dim, out_dim = weight_shape
x_np = np.random.randint(0, weight_shape[0], size=data_shape).astype("float32")
w_np = np.random.uniform(size=weight_shape).astype("float32")
ref_res = mx.nd.Embedding(mx.nd.array(x_np), mx.nd.array(w_np),
input_dim=in_dim, output_dim=out_dim)
mx_sym = mx.sym.Embedding(mx.sym.var("x"), mx.sym.var("w"),
input_dim=in_dim, output_dim=out_dim)
mod, _ = relay.frontend.from_mxnet(
mx_sym, {"x": data_shape, "w": weight_shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x=x_np, w=w_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2, 2), (4, 5))
verify((2, 3, 4), (4, 5))
def test_forward_smooth_l1():
data = mx.sym.var('data')
mx_sym = mx.sym.smooth_l1(data)
verify_mxnet_frontend_impl(mx_sym, (3, 4), (3, 4))
mx_sym = mx.sym.smooth_l1(data, scalar=1.0)
verify_mxnet_frontend_impl(mx_sym, (3, 4), (3, 4))
def test_forward_take():
def verify(shape, indices_src, axis, mode="clip"):
x_np = np.random.uniform(size=shape).astype("float32")
indices_np = np.array(indices_src, dtype="float32")
ref_res = mx.nd.take(mx.nd.array(x_np), mx.nd.array(indices_np), axis, mode)
mx_sym = mx.sym.take(mx.sym.var("x"), mx.sym.var("y"), axis, mode)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape, "y": indices_np.shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np, indices_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2,2), [[[1,0],[0,1]]], 0)
verify((2,2), [[[1,0],[0,1]]], 1)
verify((4,3,5,6), [[2,1,0,0]], -2)
verify((3,4), [-1, 5], 0)
verify((3,4), [-1, 5], 0, mode="wrap")
verify((3,4), [-1, 5], 1)
verify((3,4), [-1, 5], 1, mode="wrap")
def test_forward_gather_nd():
def verify(xshape, yshape, y_data):
x_data = np.random.uniform(size=xshape).astype("float32")
ref_res = mx.nd.gather_nd(mx.nd.array(x_data), mx.nd.array(y_data))
mx_sym = mx.sym.gather_nd(mx.sym.var("x_data"), mx.sym.var("y_data"))
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x_data": xshape, "y_data": yshape}, {"x_data": "float32", "y_data": "int32"})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
def test_forward_bilinear_resize():
# add tests including scale_height and scale_width when mxnet is updated to version 1.5
data = mx.sym.var('data')
mx_sym = mx.sym.contrib.BilinearResize2D(data, height=5, width=10)
verify_mxnet_frontend_impl(mx_sym, (1, 2, 3, 4), (1, 2, 5, 10))
def test_forward_rnn_layer():
def verify(mode, seq_len, input_size, hidden_size, num_layers,
batch=1, init_states=True, bidirectional=False):
if mode == "rnn":
layer = gluon.rnn.RNN(hidden_size, num_layers, bidirectional=bidirectional)
elif mode == "gru":
layer = gluon.rnn.GRU(hidden_size, num_layers, bidirectional=bidirectional)
else: # mode == "lstm"
layer = gluon.rnn.LSTM(hidden_size, num_layers, bidirectional=bidirectional)
num_states = 2 if mode == "lstm" else 1
layer.initialize()
layer.hybridize()
dtype = "float32"
directions = 2 if bidirectional else 1
data_np = np.random.uniform(size=(seq_len, batch, input_size)).astype(dtype)
data_mx = mx.nd.array(data_np)
if init_states:
shape_dict = {'data0': data_np.shape}
inputs = {'data0': data_np}
state_shape = (num_layers*directions, batch, hidden_size)
states_np = []
states_mx = []
for i in range(num_states):
s = np.random.uniform(size=state_shape).astype(dtype)
states_np.append(s)
states_mx.append(mx.nd.array(s))
shape_dict['data%s' % (i+1)] = s.shape
inputs['data%s' % (i+1)] = s
mx_out, mx_states = layer(data_mx, states_mx)
mx_res = [mx_out] + mx_states
else:
shape_dict = {'data': data_np.shape}
inputs = {'data': data_np}
mx_res = layer(data_mx)
mx_sym = layer._cached_graph[1]
mx_params = {}
for name, param in layer.collect_params().items():
mx_params[name] = param._reduce()
mod, params = relay.frontend.from_mxnet(
mx_sym, shape=shape_dict, arg_params=mx_params)
for target, ctx in ctx_list():
# only test graph runtime because debug runtime is too slow
for kind in ["graph"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(**inputs, **params)
if init_states:
assert len(op_res) == len(mx_res)
for i, val in enumerate(op_res):
tvm.testing.assert_allclose(
val.asnumpy(), mx_res[i].asnumpy(), rtol=1e-3)
else:
tvm.testing.assert_allclose(
op_res.asnumpy(), mx_res.asnumpy(), rtol=1e-3)
for mode in ["rnn", "gru", "lstm"]:
verify(mode, 1, 64, 64, 1)
verify(mode, 10, 64, 64, 2)
verify(mode, 10, 64, 32, 2)
verify(mode, 10, 64, 32, 2, batch=2)
verify(mode, 10, 32, 64, 1, bidirectional=True)
# The following two codeblocks need to be fixed for mxnet 1.5
# verify(mode, 10, 64, 64, 3, init_states=False)
# verify(mode, 10, 64, 64, 3, batch=2, bidirectional=True, init_states=False)
def test_forward_Crop():
def verify(xshape, yshape, offset=None):
x_data = np.random.uniform(size=xshape).astype("float32")
y_data = np.random.uniform(size=yshape).astype("float32")
if offset is None:
mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"))
ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data))
else:
mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"), offset=offset)
ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data), offset=offset)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": xshape, "y": yshape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
if offset is None or offset == (0, 0):
op_res = intrp.evaluate()(x_data, y_data)
else:
op_res = intrp.evaluate()(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 3, 40, 40), (1, 3, 20, 20))
verify((1, 3, 40, 40), (1, 3, 20, 20), (0, 0))
verify((1, 3, 40, 40), (1, 3, 20, 20), (10, 10))
verify((5, 32, 40, 40), (5, 32, 25, 25))
verify((5, 32, 40, 40), (5, 32, 25, 25), (5, 5))
def test_forward_argsort():
def verify(shape, axis, is_ascend, dtype="float32"):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.argsort(mx.nd.array(x_np), axis=axis, is_ascend=is_ascend, dtype=dtype)
mx_sym = mx.sym.argsort(mx.sym.var("x"), axis=axis, is_ascend=is_ascend, dtype=dtype)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2, 3, 4), axis=0, is_ascend=False)
verify((1, 4, 6), axis=1, is_ascend=True)
verify((3, 5, 6), axis=-3, is_ascend=False, dtype="int32")
def test_forward_topk():
def verify(shape, k, axis, ret_type, is_ascend=False, dtype="float32"):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.topk(mx.nd.array(x_np), k=k, axis=axis, ret_typ=ret_type,
is_ascend=is_ascend, dtype=dtype)
mx_sym = mx.sym.topk(mx.sym.var("x"), k=k, axis=axis, ret_typ=ret_type,
is_ascend=is_ascend, dtype=dtype)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
if isinstance(ref_res, list):
assert len(op_res) == len(ref_res)
for i, t in enumerate(op_res):
tvm.testing.assert_allclose(t.asnumpy(), ref_res[i].asnumpy())
else:
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4), k=1, axis=0, ret_type="both")
verify((3, 4), k=1, axis=-1, ret_type="indices")
verify((3, 5, 6), k=2, axis=2, ret_type="value")
verify((3, 5, 6), k=2, axis=1, ret_type="value", is_ascend=True)
verify((3, 5, 6), k=0, axis=2, ret_type="both", dtype="int32")
def test_forward_sequence_mask():
def verify(shape, use_sequence_length, value, axis, dtype, itype):
data_np = np.random.uniform(size=shape).astype(dtype)
valid_length_np = np.random.randint(0, shape[axis], size=shape[1-axis]).astype(itype)
if use_sequence_length:
ref_res = mx.nd.SequenceMask(mx.nd.array(data_np, dtype=dtype),
sequence_length=mx.nd.array(valid_length_np, dtype=itype),
use_sequence_length=use_sequence_length,
value=value,
axis=axis)
mx_sym = mx.sym.SequenceMask(mx.sym.var('data'),
sequence_length=mx.sym.var('valid_length'),
use_sequence_length=use_sequence_length,
value=value,
axis=axis)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape,
'valid_length': valid_length_np.shape},
dtype={"data": dtype,
"valid_length": itype})
else:
ref_res = mx.nd.SequenceMask(mx.nd.array(data_np, dtype=dtype),
use_sequence_length=use_sequence_length,
value=value,
axis=axis)
mx_sym = mx.sym.SequenceMask(mx.sym.var('data'),
use_sequence_length=use_sequence_length,
value=value,
axis=axis)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape}, dtype={"data": dtype})
for target, ctx in ctx_list():
for kind in ['graph', 'debug']:
if use_sequence_length is False and kind == 'graph':
# Disable the test for 'graph' when it's identity.
continue
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
if use_sequence_length:
op_res = intrp.evaluate()(data_np, valid_length_np)
else:
op_res = intrp.evaluate()(data_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((5, 10), True, 0.0, 0, 'float32', 'float32')
verify((5, 4, 3), True, 1.0, 1, 'float32', 'float32')
verify((5, 4, 3), False, 1.0, 1, 'float64', 'float64')
verify((5, 4, 3, 2), True, 1.0, 0, 'float32', 'float32')
def test_forward_contrib_div_sqrt_dim():
def verify(shape):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.contrib.div_sqrt_dim(mx.nd.array(x_np))
mx_sym = mx.sym.contrib.div_sqrt_dim(mx.sym.var("x"))
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4))
verify((3, 4, 5))
def test_forward_batch_norm():
def verify(shape, axis=1, fix_gamma=False):
x = np.random.uniform(size=shape).astype("float32")
gamma = np.random.uniform(size=(shape[axis])).astype("float32")
beta = np.random.uniform(size=(shape[axis])).astype("float32")
moving_mean = np.random.uniform(size=(shape[axis])).astype("float32")
moving_var = np.abs(np.random.uniform(size=(shape[axis])).astype("float32")) + 0.5
ref_res = mx.nd.BatchNorm(mx.nd.array(x), mx.nd.array(gamma), mx.nd.array(beta),
mx.nd.array(moving_mean), mx.nd.array(moving_var),
axis=axis, use_global_stats=True, fix_gamma=fix_gamma)
mx_sym = mx.sym.BatchNorm(mx.sym.var("x"), mx.sym.var("gamma"),
mx.sym.var("beta"), mx.sym.var("mean"),
mx.sym.var("var"), axis=axis, use_global_stats=True,
fix_gamma=fix_gamma)
shape_dict = {"x": x.shape, "gamma": gamma.shape, "beta": beta.shape,
"mean": moving_mean.shape, "var": moving_var.shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
#print(mod)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x, gamma, beta, moving_mean, moving_var)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3)
verify((2, 3, 4, 5))
verify((2, 3, 4, 5), axis=0)
verify((2, 3, 4, 5), axis=-1)
verify((2, 3, 4, 5), fix_gamma=True)
def test_forward_instance_norm():
def verify(shape, axis=1, epsilon=1e-5):
x = np.random.uniform(size=shape).astype("float32")
gamma = np.random.uniform(size=(shape[axis])).astype("float32")
beta = np.random.uniform(size=(shape[axis])).astype("float32")
ref_res = mx.nd.InstanceNorm(mx.nd.array(x), mx.nd.array(gamma), mx.nd.array(beta), epsilon)
mx_sym = mx.sym.InstanceNorm(mx.sym.var("x"), mx.sym.var("gamma"), mx.sym.var("beta"), epsilon)
shape_dict = {"x": x.shape, "gamma": gamma.shape, "beta": beta.shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x, gamma, beta)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5)
verify((2, 3, 4, 5))
verify((32, 64, 80, 64))
verify((8, 6, 5))
verify((8, 7, 6, 5, 4))
def test_forward_layer_norm():
def verify(shape, axis=-1):
x = np.random.uniform(size=shape).astype("float32")
gamma = np.random.uniform(size=(shape[axis])).astype("float32")
beta = np.random.uniform(size=(shape[axis])).astype("float32")
ref_res = mx.nd.LayerNorm(mx.nd.array(x), mx.nd.array(gamma), mx.nd.array(beta),
axis=axis)
mx_sym = mx.sym.LayerNorm(mx.sym.var("x"), mx.sym.var("gamma"),
mx.sym.var("beta"), axis=axis)
shape_dict = {"x": x.shape, "gamma": gamma.shape, "beta": beta.shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x, gamma, beta)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)
verify((2, 5))
verify((2, 5), axis=0)
verify((2, 5, 6))
def test_forward_one_hot():
def verify(indices_shape, depth, on_value, off_value, dtype):
x = np.random.randint(0, 5, size=indices_shape)
ref_res = mx.nd.one_hot(mx.nd.array(x), depth, on_value, off_value, dtype)
mx_sym = mx.sym.one_hot(mx.sym.var("x"), depth, on_value, off_value, dtype)
shape_dict = {"x": x.shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x.astype("float32"))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)
verify((3,), 3, 1, 0, "int32")
verify((3,), 3, 1.0, 0.0, "float32")
verify((2, 2), 5, 2, -2, "int32")
verify((2, 2), 5, 0.5, -0.5, "float32")
verify((3, 2, 4, 5), 6, 1, 0, "int32")
verify((3, 2, 4, 5), 6, 1.0, 0.0, "float32")
def test_forward_pad():
def verify(data_shape, out_shape, mode, pad_width, constant_value=0.0):
data = mx.sym.var('data')
mx_sym = mx.sym.pad(data, mode=mode, pad_width=pad_width, constant_value=constant_value)
verify_mxnet_frontend_impl(mx_sym, data_shape=data_shape, out_shape=out_shape)
verify(data_shape=(1,1,3,5), out_shape=(1,1,6,12), mode="constant",
pad_width=(0,0,0,0,1,2,3,4))
verify(data_shape=(1,1,3,5), out_shape=(1,1,6,12), mode="constant",
pad_width=(0,0,0,0,1,2,3,4), constant_value=3.0)
verify(data_shape=(1,1,3,5), out_shape=(1,1,6,12), mode="edge",
pad_width=(0,0,0,0,1,2,3,4))
verify(data_shape=(1,1,3,5), out_shape=(1,1,6,12), mode="reflect",
pad_width=(0,0,0,0,1,2,3,4))
verify(data_shape=(1,1,3,5,7), out_shape=(1,1,6,12,18), mode="constant",
pad_width=(0,0,0,0,1,2,3,4,5,6))
verify(data_shape=(1,1,3,5,7), out_shape=(1,1,6,12,18), mode="constant",
pad_width=(0,0,0,0,1,2,3,4,5,6), constant_value=3.0)
verify(data_shape=(1,1,3,5,7), out_shape=(1,1,6,12,18), mode="edge",
pad_width=(0,0,0,0,1,2,3,4,5,6))
verify(data_shape=(1,1,3,5,7), out_shape=(1,1,6,12,18), mode="reflect",
pad_width=(0,0,0,0,1,2,3,4,5,6))
def test_forward_slice():
def verify(data_shape, out_shape, begin, end):
data = mx.sym.var('data')
mx_sym = mx.sym.slice(data, begin=begin, end=end)
verify_mxnet_frontend_impl(mx_sym, data_shape=data_shape, out_shape=out_shape)
verify(data_shape=(1,1,10), out_shape=(1,1,8), begin=(0, 0, 2), end=(1, 1, 10))
verify(data_shape=(1,1,10), out_shape=(1,1,8), begin=(None, None, 2), end=(None, None, None))
def test_forward_convolution():
def verify(data_shape, kernel_size, stride, pad, num_filter):
weight_shape=(num_filter, data_shape[1],) + kernel_size
        x = np.random.uniform(size=data_shape)
#!/usr/bin/env python3
"""
Investigate DSC data.
Created on Fri Sep 13 12:44:01 2019
@author: slevy
"""
import dsc_extract_physio
import nibabel as nib
import numpy as np
import os
import matplotlib.pyplot as plt
import scipy.signal
import scipy.stats
import pydicom
from matplotlib import cm
from lmfit.models import GaussianModel
from datetime import datetime
import warnings
def extract_signal_within_roi(image, mask):
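    """Compute the mean signal within a binary mask; for 4D data, per repetition (and per slice)."""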
if len(image.shape) > 3:
nrep = image.shape[3]
s_along_reps = np.zeros((nrep))
s_along_reps_by_slice = np.zeros((nrep, image.shape[2]))
for i_rep in range(nrep):
img_rep_i = image[:, :, :, i_rep]
            s_along_reps[i_rep] = np.mean(img_rep_i[mask > 0])
###################################################################
# EEPOWER.PY
#
# A library of functions, constants and more
# that are related to Power in Electrical Engineering.
#
# Written by <NAME>
#
# Special Thanks To:
# <NAME> - Idaho Power
#
# Included Constants:
# - Micro (mu) Multiple: u
# - Mili Multiple: m
# - Kilo Multiple: k
# - Mega Multiple: M
# - 'A' Operator for Symmetrical Components: a
# - Not a Number value (NaN): NAN
#
# Symmetrical Components Matricies:
# - ABC to 012 Conversion: Aabc
# - 012 to ABC Conversion: A012
#
# Included Functions
# - Phasor V/I Generator: phasor
# - Phasor Impedance Generator: phasorz
# - Complex Display Function: cprint
# - Parallel Impedance Adder: parallelz
# - V/I Line/Phase Converter: phaseline
# - Power Set Values: powerset
# - Power Triangle Function: powertriangle
# - Transformer SC OC Tests: transformertest
# - Phasor Plot Generator: phasorplot
# - Total Harmonic Distortion: thd
# - Total Demand Distortion: tdd
# - Reactance Calculator: reactance
# - Non-Linear PF Calc: nlinpf
# - Harmonic Limit Calculator: harmoniclimit
# - Power Factor Distortion: pfdist
# - Short-Circuit RL Current: iscrl
# - Voltage Divider: voltdiv
# - Current Divider: curdiv
# - Instantaneous Power Calc.: instpower
# - Delta-Wye Network Converter: dynetz
# - Single Line Power Flow: powerflow
# - Thermocouple Temperature: thermocouple
# - Cold Junction Voltage: coldjunction
# - RTD Temperature Calculator: rtdtemp
#
# Additional functions available in sub-modules:
# - passives.py (renamed from capacitor.py)
# - fault.py
# - electronics.py
# - perunit.py
# - systemsolution.py
###################################################################
name = "eepower"
ver = "2.2.1"
# Import Submodules
from .passives import *
from .perunit import *
from .systemsolution import *
# Import Submodules as External Functions
from . import fault
from . import electronics
# Import libraries as needed:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cmath as c
# Define constants
a = c.rect(1,np.radians(120)) # A Operator for Sym. Components
p = 1e-12 # Pico Multiple
n = 1e-9 # Nano Multiple
u = 1e-6 # Micro (mu) Multiple
m = 1e-3 # Mili Multiple
k = 1e+3 # Kilo Multiple
M = 1e+6 # Mega Multiple
NAN = float('nan')
VLLcVLN = c.rect(np.sqrt(3),np.radians(30)) # Conversion Operator
ILcIP = c.rect(np.sqrt(3),np.radians(-30)) # Conversion Operator
# Define symmetrical components matricies
Aabc = 1/3 * np.array([[ 1, 1, 1 ], # Convert ABC to 012
[ 1, a, a**2 ], # (i.e. phase to sequence)
[ 1, a**2, a ]])
A012 = np.array([[ 1, 1, 1 ], # Convert 012 to ABC
[ 1, a**2, a ], # (i.e. sequence to phase)
[ 1, a, a**2 ]])
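# Usage note (illustrative): for a 3x1 column vector of phase phasors Vabc,
# V012 = Aabc @ Vabc converts phase (ABC) quantities to sequence (012)
# components, and Vabc = A012 @ V012 converts back.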
# Define type constants
matrix = "<class 'numpy.matrixlib.defmatrix.matrix'>"
ndarr = "<class 'numpy.ndarray'>"
# Define Phasor Generator
def phasor( mag, ang ):
"""
phasor Function:
Generates the standard Pythonic complex representation
of a phasor voltage or current when given the magnitude
and angle of the specific voltage or current.
Parameters
----------
mag: float
The Magnitude of the Voltage/Current
ang: float
The Angle (in degrees) of the Voltage/Current
Returns
-------
phasor: complex
Standard Pythonic Complex Representation of
the specified voltage or current.
"""
return( c.rect( mag, np.radians( ang ) ) )
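# Usage example (illustrative, values are arbitrary):
#   phasor(67, 120) -> approximately (-33.5+58.024j), i.e. magnitude 67 at an angle of 120 degrees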
# Define Reactance Calculator
def reactance(z,f=60,sensetivity=1e-12):
"""
reactance Function:
    Calculates the Capacitance or Inductance in Farads or Henrys
(respectively) provided the impedance of an element.
Will return capacitance (in Farads) if ohmic impedance is
negative, or inductance (in Henrys) if ohmic impedance is
positive. If imaginary: calculate with j factor (imaginary number).
Parameters
----------
z: complex
The Impedance Provided, may be complex (R+jI)
f: float, optional
The Frequency Base for Provided Impedance, default=60
sensetivity: float, optional
The sensetivity used to check if a resistance was
provided, default=1e-12
Returns
-------
out: float
Capacitance or Inductance of Impedance
"""
# Evaluate Omega
w = 2*np.pi*f
# Input is Complex
if isinstance(z, complex):
# Test for Resistance
if(abs(z.real) > sensetivity):
R = z.real
else:
R = 0
if (z.imag > 0):
out = z/(w*1j)
else:
out = 1/(w*1j*z)
out = abs(out)
# Combine with resistance if present
if(R!=0): out = (R, out)
else:
if (z > 0):
out = z/(w)
else:
out = 1/(w*z)
out = abs(out)
# Return Output
return(out)
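# Usage examples (illustrative, values are arbitrary; f defaults to 60 Hz):
#   reactance(0.25j) -> ~6.63e-4, an inductance in Henrys (L = X/(2*pi*f))
#   reactance(-100j) -> ~2.65e-5, a capacitance in Farads (C = 1/(2*pi*f*X))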
# Define display function
def cprint(val,unit="",label="",printval=True,ret=False,round=3):
"""
cprint Function
This function is designed to accept a complex value (val) and print
the value in the standard electrical engineering notation:
**magnitude ∠ angle °**
    This function will print the angle in degrees, and can print
a unit and label in addition to the value itself.
Parameters
----------
val: complex
The Complex Value to be Printed, may be singular value,
tuple of values, or list/array.
unit: string, optional
The string to be printed corresponding to the unit mark.
default=""
label: string, optional
The pre-pended string used as a descriptive labeling string.
default=""
printval: bool, optional
Control argument enabling/disabling printing of the string.
default=True
ret: bool, optional
Control argument allowing the evaluated value to be returned.
default=False
round: int, optional
Control argument specifying how many decimals of the complex
value to be printed. May be negative to round to spaces
to the left of the decimal place (follows standard round()
functionality). default=3
Returns
-------
numarr: numpy.ndarray
The array of values corresponding to the magnitude and angle,
values are returned in the form: [[ mag, ang ],...,[ mag, ang ]]
where the angles are evaluated in degrees.
"""
printarr = np.array([]) # Empty array
    numarr = np.array([]) # Empty array
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
numCols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
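# flip the sign of the step when stop < start so the generated sequence is non-empty and decreasing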
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
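# compare the values gathered by each index array rather than the index arrays themselves,
# since the ordering of equal elements may legitimately differ between NumCpp and numpy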
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
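# verify column by column (rows of data.T) that gathering with the NumCpp indices
# reproduces the same ordering as gathering with the numpy indices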
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
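# the boolean mask itself is loaded into the array, so argwhere should return the
# flat indices of its nonzero (True) entries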
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
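# casting complex to double discards the imaginary part, which makes numpy emit a
# ComplexWarning; suppress it just for the comparison below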
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
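# choose a minlength larger than the largest value so the zero-padded tail of the counts is exercised too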
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
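# note: only the shape of the byte-swapped result is verified here; the swapped values are not compared against numpy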
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
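# for complex inputs numpy clips using lexicographic ordering (real part first, then imaginary),
# so the expected result is still well defined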
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
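    # 2-element vectors: the cross product collapses to a scalar (the z-component)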
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
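    # 3-element vectors: the cross product is itself a 3-vector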
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
def test_diagonal():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
def test_divide():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
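    # re-draw the scaler until it is non-zero so the division below never divides by zero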
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    value = 0
    while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
def test_dot():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_find():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
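    # find() returns the flattened (row-major) indices of the true mask elements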
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
def test_fromfile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
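    # an empty separator selects binary mode, mirroring numpy.fromfile's sep argument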
    tempDir = tempfile.gettempdir()  # portable temp directory instead of a hard-coded Windows path
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
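    # a '\n' separator writes and reads a newline-delimited text file instead of binary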
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
    # complex-valued variant; NumCpp.fullShapeComplex mirrors the fullSquareComplex/fullRowColComplex pattern above
    shapeInput = np.random.randint(1, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
    cArray = NumCpp.fullShapeComplex(shape, value)
    assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
            cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
def test_gcd():
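    # The scalar overload is available with either Boost or the C++17 STL gcd/lcm;
    # the array reduction is only compiled in when Boost is enabled.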
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
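    # NumCpp.Axis.ROW corresponds to numpy axis=0, Axis.COL to axis=1, and
    # Axis.NONE operates on the flattened array.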
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
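    # First case: histogram with a requested number of bins; second case: explicit bin edges.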
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
    data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + \
        np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
    data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + \
        np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
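    # np.hypot only takes two arguments, so the three-argument overload is checked
    # against an explicit sqrt of the sum of squares.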
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
    assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(),
                          np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
def test_lcm():
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
def test_ldexp():
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
def test_left_shift():
shapeInput = np.random.randint(20, 100, [2, ])
bitsToshift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.left_shift(cArray, bitsToshift).getNumpyArray(),
np.left_shift(data, bitsToshift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
def test_matmul():
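    # Multiplies an (m, n) array by an (n, m) array for every real/complex operand combination.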
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArray(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
####################################################################################
def test_max():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
####################################################################################
def test_maximum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
####################################################################################
def test_mean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
####################################################################################
def test_median():
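    # Force an odd number of elements so the median is a single middle value
    # rather than the average of two.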
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    assert (NumCpp.median(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
            np.median(data, axis=None).item())
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.median(data, axis=0))
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.median(data, axis=1))
####################################################################################
def test_meshgrid():
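    # Build NumCpp slices from the same start/stop/step used for np.arange so the
    # meshgrid outputs can be compared directly.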
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataI = np.arange(start, end, step)
iSlice = NumCpp.Slice(start, end, step)
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataJ = np.arange(start, end, step)
jSlice = NumCpp.Slice(start, end, step)
iMesh, jMesh = np.meshgrid(dataI, dataJ)
iMeshC, jMeshC = NumCpp.meshgrid(iSlice, jSlice)
assert np.array_equal(iMeshC.getNumpyArray(), iMesh)
assert np.array_equal(jMeshC.getNumpyArray(), jMesh)
####################################################################################
def test_min():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
####################################################################################
def test_minimum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
####################################################################################
def test_mod():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.mod(cArray1, cArray2).getNumpyArray(), np.mod(data1, data2))
####################################################################################
def test_multiply():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
####################################################################################
def test_nan_to_num():
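    # Inject NaN, +inf, and -inf at random flat indices, then check that each is
    # replaced with the requested finite value.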
shapeInput = np.random.randint(50, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.size(), ]).astype(np.double)
nan_idx = np.random.choice(range(data.size), 10, replace=False)
pos_inf_idx = np.random.choice(range(data.size), 10, replace=False)
neg_inf_idx = np.random.choice(range(data.size), 10, replace=False)
data[nan_idx] = np.nan
data[pos_inf_idx] = np.inf
data[neg_inf_idx] = -np.inf
data = data.reshape(shapeInput)
cArray.setArray(data)
nan_replace = float(np.random.randint(100))
pos_inf_replace = float(np.random.randint(100))
neg_inf_replace = float(np.random.randint(100))
assert np.array_equal(NumCpp.nan_to_num(cArray, nan_replace, pos_inf_replace, neg_inf_replace),
np.nan_to_num(data, nan=nan_replace, posinf=pos_inf_replace, neginf=neg_inf_replace))
####################################################################################
def test_nanargmax():
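    # Sprinkle NaNs into roughly 10% of the elements; the NaN-aware reductions should ignore them.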
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmax(cArray, NumCpp.Axis.NONE).item() == np.nanargmax(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmax(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmax(data, axis=1))
####################################################################################
def test_nanargmin():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmin(cArray, NumCpp.Axis.NONE).item() == np.nanargmin(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmin(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmin(data, axis=1))
####################################################################################
def test_nancumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumprod(data, axis=None))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumprod(data, axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumprod(data, axis=1))
####################################################################################
def test_nancumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumsum(data, axis=None))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumsum(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumsum(data, axis=1))
####################################################################################
def test_nanmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmax(cArray, NumCpp.Axis.NONE).item() == np.nanmax(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmax(data, axis=1))
####################################################################################
def test_nanmean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmean(cArray, NumCpp.Axis.NONE).item() == np.nanmean(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmean(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmean(data, axis=1))
####################################################################################
def test_nanmedian():
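    # Mirror test_median by forcing an odd total element count before injecting NaNs.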
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert (NumCpp.nanmedian(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
np.nanmedian(data, axis=None).item())
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[0].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
# np.nanmedian(data, axis=0))
#
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[1].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
# np.nanmedian(data, axis=1))
####################################################################################
def test_nanmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmin(cArray, NumCpp.Axis.NONE).item() == np.nanmin(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmin(data, axis=1))
####################################################################################
def test_nanpercentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_nanprod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanprod(cArray, NumCpp.Axis.NONE).item() == np.nanprod(data, axis=None)
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nanprod(data, axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nanprod(data, axis=1))
####################################################################################
def test_nans():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.nansSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.nansRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.nansShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
####################################################################################
def test_nans_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.nans_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(np.isnan(cArray2.getNumpyArray())))
####################################################################################
def test_nanstd():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.nanstd(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=1), 9))
####################################################################################
def test_nansum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nansum(cArray, NumCpp.Axis.NONE).item() == np.nansum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nansum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nansum(data, axis=1))
####################################################################################
def test_nanvar():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanvar(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.nanvar(data), 8)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=1), 8))
####################################################################################
def test_nbytes():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 8
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 16
####################################################################################
def test_negative():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
####################################################################################
def test_newbyteorderArray():
value = np.random.randint(1, 100, [1, ]).item()
assert (NumCpp.newbyteorderScaler(value, NumCpp.Endian.BIG) ==
np.asarray([value], dtype=np.uint32).newbyteorder().item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.newbyteorderArray(cArray, NumCpp.Endian.BIG),
data.newbyteorder())
####################################################################################
def test_none():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
####################################################################################
def test_nonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
####################################################################################
def test_norm():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).flatten() == np.linalg.norm(data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
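    # Axis.ROW reduces along rows, yielding one norm per column, so the loop below compares
    # against the columns of data via the transpose (assumed NumCpp axis convention)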
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
allPass = True
for idx, row in enumerate(data.transpose()):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.COL).getNumpyArray().flatten()
allPass = True
for idx, row in enumerate(data):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
assert norms is not None
####################################################################################
def test_not_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
####################################################################################
def test_ones():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == 1))
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquareComplex(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowColComplex(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShapeComplex(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
####################################################################################
def test_ones_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_likeComplex(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == complex(1, 0)))
####################################################################################
def test_outer():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
####################################################################################
def test_pad():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item()
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray = NumCpp.NdArray(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray = NumCpp.NdArrayComplexDouble(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
####################################################################################
def test_partition():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
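    # partition invariant: every element left of the kth position is <= the kth element and
    # every element to its right is >= it (matching np.partition semantics)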
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
####################################################################################
def test_percentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.percentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.percentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.percentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.percentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.percentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_polar():
components = np.random.rand(2).astype(np.double)
assert NumCpp.polarScaler(components[0], components[1])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
magArray = NumCpp.NdArray(shape)
angleArray = NumCpp.NdArray(shape)
mag = np.random.rand(shape.rows, shape.cols)
angle = np.random.rand(shape.rows, shape.cols)
magArray.setArray(mag)
angleArray.setArray(angle)
assert NumCpp.polarArray(magArray, angleArray) is not None
####################################################################################
def test_power():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_powerf():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
exponents = np.random.rand(shape.rows, shape.cols) * 3
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.rand(shape.rows, shape.cols) * 3 + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_prod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
####################################################################################
def test_proj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert NumCpp.projScaler(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cData = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cData.setArray(data)
assert NumCpp.projArray(cData) is not None
####################################################################################
def test_ptp():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.COL).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=1))
####################################################################################
def test_put():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
indices = np.asarray(range(numIndices), np.uint32)
value = np.random.randint(1, 500)
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cIndices.setArray(indices)
NumCpp.put(cArray, cIndices, value)
data.put(indices, value)
assert np.array_equal(cArray.getNumpyArray(), data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
indices = np.asarray(range(numIndices), dtype=np.uint32)
values = np.random.randint(1, 500, [numIndices, ])
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cValues = NumCpp.NdArray(1, numIndices)
cIndices.setArray(indices)
cValues.setArray(values)
NumCpp.put(cArray, cIndices, cValues)
data.put(indices, values)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_rad2deg():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.rad2degScaler(value), 9) == np.round(np.rad2deg(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rad2degArray(cArray), 9), np.round(np.rad2deg(data), 9))
####################################################################################
def test_radians():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.radiansScaler(value), 9) == np.round(np.radians(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.radiansArray(cArray), 9), np.round(np.radians(data), 9))
####################################################################################
def test_ravel():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
cArray2 = NumCpp.ravel(cArray)
assert np.array_equal(cArray2.getNumpyArray().flatten(), np.ravel(data))
####################################################################################
def test_real():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.realScaler(value), 9) == np.round(np.real(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.realArray(cArray), 9), np.round(np.real(data), 9))
####################################################################################
def test_reciprocal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
imag = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
####################################################################################
def test_remainder():
# numpy and cmath remainders are calculated differently, so convert for testing purposes
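    # e.g. std::remainder(5.3, 2.0) == -0.7 (quotient rounded to nearest) while
    # np.remainder(5.3, 2.0) == 1.3 (floored quotient); adding the divisor to a
    # negative result maps the cmath convention onto numpy's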
values = np.random.rand(2) * 100
values = np.sort(values)
res = NumCpp.remainderScaler(values[1].item(), values[0].item())
if res < 0:
res += values[0].item()
assert np.round(res, 9) == np.round(np.remainder(values[1], values[0]), 9)
# numpy and cmath remainders are calculated differently, so convert for testing purposes
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols) * 100 + 10
data2 = data1 - np.random.rand(shape.rows, shape.cols) * 10
cArray1.setArray(data1)
cArray2.setArray(data2)
res = NumCpp.remainderArray(cArray1, cArray2)
res[res < 0] = res[res < 0] + data2[res < 0]
assert np.array_equal(np.round(res, 9), np.round(np.remainder(data1, data2), 9))
####################################################################################
def test_replace():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
####################################################################################
def test_reshape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = data.size
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(1, newShape))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshapeList(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumCols = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, -1, newNumCols)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(-1, newNumCols))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumRows = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, newNumRows, -1)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(newNumRows, -1))
####################################################################################
def test_resize():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeFast(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeSlow(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
####################################################################################
def test_right_shift():
shapeInput = np.random.randint(20, 100, [2, ])
bitsToshift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.right_shift(cArray, bitsToshift).getNumpyArray(),
np.right_shift(data, bitsToshift))
####################################################################################
def test_rint():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.rintScaler(value) == np.rint(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.rintArray(cArray), np.rint(data))
####################################################################################
def test_rms():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
####################################################################################
def test_roll():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, data.size, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.NONE).getNumpyArray(),
np.roll(data, amount, axis=None))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.cols, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.ROW).getNumpyArray(),
np.roll(data, amount, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.rows, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.COL).getNumpyArray(),
np.roll(data, amount, axis=1))
####################################################################################
def test_rot90():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(1, 4, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.rot90(cArray, amount).getNumpyArray(), np.rot90(data, amount))
####################################################################################
def test_round():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.roundScaler(value, 10) == np.round(value, 10)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.roundArray(cArray, 9), np.round(data, 9))
####################################################################################
def test_row_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.row_stack(cArray1, cArray2, cArray3, cArray4),
np.row_stack([data1, data2, data3, data4]))
####################################################################################
def test_setdiff1d():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
####################################################################################
def test_shape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.shape().rows == shape.rows and cArray.shape().cols == shape.cols
####################################################################################
def test_sign():
value = np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
value = np.random.randn(1).item() * 100 + 1j * np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
####################################################################################
def test_signbit():
value = np.random.randn(1).item() * 100
assert NumCpp.signbitScaler(value) == np.signbit(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signbitArray(cArray), np.signbit(data))
####################################################################################
def test_sin():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
####################################################################################
def test_sinc():
value = np.random.randn(1)
assert np.round(NumCpp.sincScaler(value.item()), 9) == np.round(np.sinc(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sincArray(cArray), 9), np.round(np.sinc(data), 9))
####################################################################################
def test_sinh():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
value = np.random.randn(1).item() + 1j * np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randn(shape.rows, shape.cols) + 1j * np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
####################################################################################
def test_size():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.size() == shapeInput.prod().item()
####################################################################################
def test_sort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
####################################################################################
def test_sqrt():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
####################################################################################
def test_square():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
####################################################################################
def test_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.vstack([data1, data2, data3, data4]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_stdev():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.stdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.std(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_subtract():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
####################################################################################
def test_sumo():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
    assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
import modules.plotting2 as p2
import modules.dataprocessing as dp
import xarray as xr
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import sys
from pyproj import Proj, transform
import scipy
from matplotlib.colors import LinearSegmentedColormap, TwoSlopeNorm
import cartopy.crs as ccrs
from matplotlib import cm
import numpy as np
import pandas as pd
import modules.week2 as w2
from tqdm import tqdm
import itertools
import time
from scipy.linalg import lstsq
def load_data(variables,projection, temporal_resolution,temporal_decomposition,detrend):
"""
Loads in all of the data
"""
seaice = 'seaice' in variables
variables_to_load = [v for v in variables if v in ['u10', 'v10', 'si10', 't2m', 'sst', 'skt', 'ssr', 'sp', 'ssrd']]
    indicies_to_load = [v for v in variables if v in ['IPO','nina34','nina12','DMI','SAM','meiv2','SOI']]
geopotential_to_load = [v for v in variables if v in ['geopotential']]
    x = 10*np.arange(-395000,395000,2500)
import sys
from os.path import dirname
from OpenGL.GLUT import *
from OpenGL.GL import *
import math
from pyoptix.matrix4x4 import Matrix4x4
sys.path.append(dirname(dirname(dirname(__file__))))
import numpy as np
from pyoptix import Context, Compiler, Buffer, Program, Geometry, Material, GeometryInstance, EntryPoint, \
GeometryGroup, Acceleration
from examples.image_window_base import ImageWindowBase, calculate_camera_variables
ESCAPE_KEY = 27
width = 512
height = 512
Compiler.add_program_directory(dirname(__file__))
class ImageWindow(ImageWindowBase):
def __init__(self, context, width, height):
super().__init__(context, width, height)
# will be called before display
self.display_callbacks.append(self.set_camera)
self.frame_number = 1
self.mouse_button = None
        self.mouse_prev_pose = None
self.camera_eye = np.array([278, 273, -900], dtype=np.float32)
self.camera_lookat = np.array([278, 273, 0], dtype=np.float32)
self.camera_rotate = Matrix4x4()
def glut_resize(self, w, h):
if self.width == w and self.height == h: return
if w <= 0: w = 1
if h <= 0: h = 1
self.width = w
self.height = h
self.frame_number = 1
self.context["output_buffer"].set_size(self.width, self.height)
glViewport(0, 0, self.width, self.height)
glutPostRedisplay()
def glut_keyboard_press(self, k, x, y):
if ord(k) == ESCAPE_KEY:
exit()
def glut_mouse_press(self, button, state, x, y):
if state == GLUT_DOWN:
self.mouse_button = button
self.mouse_prev_pose = (x, y)
def glut_mouse_motion(self, x, y):
if self.mouse_button == GLUT_RIGHT_BUTTON:
dx = float(x - self.mouse_prev_pose[0]) / float(self.width)
dy = float(y - self.mouse_prev_pose[1]) / float(self.height)
dmax = dx if abs(dx) > abs(dy) else dy
scale = min(dmax, 0.9)
self.camera_eye = self.camera_eye + (self.camera_lookat - self.camera_eye) * scale
self.frame_number = 1
#todo implement arcball rotation...
self.mouse_prev_pose = (x, y)
def set_camera(self):
camera_up = np.array([0, 1, 0], dtype=np.float32)
fov = 35.0
aspect_ratio = float(width) / float(height)
        # calculate camera variables
W = self.camera_lookat - self.camera_eye
wlen = np.sqrt(np.sum(W ** 2))
U = normalize(np.cross(W, camera_up))
V = normalize(np.cross(U, W))
vlen = wlen * math.tan(0.5 * fov * math.pi / 180)
V *= vlen
ulen = vlen * aspect_ratio
U *= ulen
# compute transformations
frame = Matrix4x4.from_basis(normalize(U),
normalize(V),
normalize(-W),
self.camera_lookat)
frame_inv = frame.inverse()
# apply transformation
# print(frame.to_parameters(True))
self.context["frame_number"] = np.array(self.frame_number, dtype=np.uint32)
self.context["eye"] = | np.array(self.camera_eye, dtype=np.float32) | numpy.array |
import numpy as np
import os
from PIL import Image
from keras.preprocessing import image
def preprocess_input(x):
x = x.astype(np.float32)
x /= 255.
return x
def decode_output(x):
x = x.astype(np.float32)
x *= 255.
return x
def make_paths_from_directory(root):
input_paths = []
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
with open(filepath, 'rb') as fp:
magic = fp.read(8)
if magic.startswith(b'GIF89a') or magic.startswith(b'GIF87a'):
filetype = 'gif'
elif magic == b'\xff\xd8\xff\xe0\x00\x10JF':
filetype = 'jpeg'
elif magic.startswith(b'\x89PNG'):
filetype = 'png'
else:
print(' unsupported file type', repr(magic), filepath)
continue
input_paths.append(filepath)
return input_paths
def make_arrays_from_paths(paths, preprocess=None, target_size=None):
rv = []
for path in paths:
img = image.load_img(path, target_size=target_size)
ar = image.img_to_array(img)
if preprocess:
ar = preprocess(ar)
rv.append(ar)
return np.array(rv)
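# A minimal usage sketch for the helpers above (not part of the original module): walk a
# hypothetical "images/" directory, load every supported image at a 64x64 target size, and
# scale pixel values into [0, 1] with preprocess_input. The directory name and target size
# are illustrative assumptions.
def _example_load_directory(root='images/', target_size=(64, 64)):
    paths = make_paths_from_directory(root)
    arrays = make_arrays_from_paths(paths, preprocess=preprocess_input, target_size=target_size)
    return arrays  # shape: (num_images, 64, 64, 3), float32 values in [0, 1]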
def generate_img(xgenerater):
num_generate_imgs = 144
z_dim = xgenerater.input_shape[-1]
z = np.random.normal(size=(num_generate_imgs, 1, 1, z_dim))
x_gen = xgenerater.predict_on_batch(z)
x_gen = decode_output(x_gen)
x_gen = np.clip(x_gen, 0., 255.).astype(np.uint8)
# Concatenate generated images
    grid_size = int(np.sqrt(num_generate_imgs))
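    # NOTE: a plausible completion of this truncated function, not part of the original
    # source: tile the generated images into a grid_size x grid_size mosaic and return it
    # as a single uint8 image array. The exact layout is an assumption.
    rows = []
    for i in range(grid_size):
        row = np.concatenate(x_gen[i * grid_size:(i + 1) * grid_size], axis=1)
        rows.append(row)
    return np.concatenate(rows, axis=0)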
# Copyright (c) 2015 MaxPoint Interactive, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, print_function
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm
def logistic_prob(X, w):
""" MAP (Bayes point) logistic regression probability with overflow prevention via exponent truncation
Parameters
----------
X : array-like, shape (N, p)
Feature matrix
w : array-like, shape (p, )
Parameter vector
Returns
-------
pr : array-like, shape (N, )
vector of logistic regression probabilities
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# set a truncation exponent.
trunc = 8. # exp(8)/(1+exp(8)) = 0.9997 which is close enough to 1 as to not matter in most cases.
    # calculate the logit (argument of the logistic function)
z = np.dot(X, w)
# truncate to avoid numerical over/underflow
z = np.clip(z, -trunc, trunc)
    # calculate logistic probability
pr = np.exp(z)
pr = pr / (1. + pr)
return pr
def f_log_posterior(w, wprior, H, y, X, weights=None):
"""Returns negative log posterior probability.
Parameters
----------
w : array-like, shape (p, )
vector of parameters at which the negative log posterior is to be evaluated
wprior : array-like, shape (p, )
vector of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
Array of prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
vector of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
vector of data point weights. Should be within [0,1]
Returns
-------
neg_log_post : float
negative log posterior probability
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate negative log posterior
    eps = 1e-6  # defined to ensure we never take the log of zero
mu = logistic_prob(X, w)
if len(H.shape) == 2:
neg_log_post = (- (np.dot(y.T, weights * np.log(mu + eps))
+ np.dot((1. - y).T, weights * np.log(1. - mu + eps)))
+ 0.5 * np.dot((w - wprior).T, np.dot(H, (w - wprior))))
elif len(H.shape) == 1:
neg_log_post = (- (np.dot(y.T, weights * np.log(mu + eps))
+ np.dot((1. - y).T, weights * np.log(1. - mu + eps)))
+ 0.5 * np.dot((w - wprior).T, H * (w - wprior)))
else:
raise ValueError('Incompatible Hessian')
return float(neg_log_post)
def g_log_posterior(w, wprior, H, y, X, weights=None):
"""Returns gradient of the negative log posterior probability.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the gradient is to be evaluated
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
grad_log_post : array-like, shape (p, )
gradient of negative log posterior
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate gradient
mu_ = logistic_prob(X, w)
if len(H.shape) == 2:
grad_log_post = np.dot(X.T, weights * (mu_ - y)) + np.dot(H, (w - wprior))
elif len(H.shape) == 1:
grad_log_post = np.dot(X.T, weights * (mu_ - y)) + H * (w - wprior)
else:
raise ValueError('Incompatible Hessian')
return grad_log_post
def g_log_posterior_small(w, wprior, H, y, X, weights=None):
"""Returns normalized (to 1) gradient of the negative log posterior probability.
This is used for BFGS and L-BFGS-B solvers which tend to not converge unless
the gradient is normalized.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the gradient is to be evaluated
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
grad_log_post : array-like, shape (p, )
normalized (to 1) gradient of negative log posterior
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate gradient
mu = logistic_prob(X, w)
if len(H.shape) == 2:
grad_log_post = np.dot(X.T, weights * (mu - y)) + np.dot(H, (w - wprior))
elif len(H.shape) == 1:
grad_log_post = np.dot(X.T, weights * (mu - y)) + H * (w - wprior)
else:
raise ValueError('Incompatible Hessian')
# normalize gradient to length 1
grad_log_post = grad_log_post / np.sqrt(np.sum(grad_log_post * grad_log_post))
return grad_log_post
def H_log_posterior(w, wprior, H, y, X, weights=None):
"""Returns Hessian (either full or diagonal) of the negative log posterior probability.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the Hessian is to be evaluated
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of log prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
H_log_post : array-like, shape like `H`
Hessian of negative log posterior
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate log posterior Hessian
mu = logistic_prob(X, w)
S = mu * (1. - mu) * weights
if len(H.shape) == 2:
H_log_post = np.dot(X.T, X * S[:, np.newaxis]) + H
elif len(H.shape) == 1:
H_log_post = np.diag(np.dot(X.T, X * S[:, np.newaxis])) + H
else:
raise ValueError('Incompatible Hessian')
return H_log_post
def HP_log_posterior(w, q, wprior, H, y, X, weights=None):
"""Returns diagonal Hessian of the negative log posterior probability multiplied by an arbitrary vector.
This is useful for the Newton-CG solver, particularly when we only want to store a diagonal Hessian.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the Hessian is to be evaluated
q : array-like, shape (p, )
arbitrary vector to multiply Hessian by
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, )
array of diagonal log prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
HP : array-like, shape (p, )
Hessian of log posterior (diagonal approx) multiplied by arbitrary vector
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
HP = H_log_posterior(w, wprior, H, y, X, weights)
HP = HP * q
return HP
def fit_bayes_logistic(y, X, wprior, H, weights=None, solver='Newton-CG', bounds=None, maxiter=100):
""" Bayesian Logistic Regression Solver. Assumes Laplace (Gaussian) Approximation
to the posterior of the fitted parameter vector. Uses scipy.optimize.minimize
Parameters
----------
y : array-like, shape (N, )
array of binary {0,1} responses
X : array-like, shape (N, p)
array of features
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of prior Hessian (inverse covariance of prior distribution of parameters)
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
solver : string
scipy optimize solver used. this should be either 'Newton-CG', 'BFGS' or 'L-BFGS-B'.
The default is Newton-CG.
bounds : iterable of length p
a length p list (or tuple) of tuples each of length 2.
This is only used if the solver is set to 'L-BFGS-B'. In that case, a tuple
(lower_bound, upper_bound), both floats, is defined for each parameter. See the
scipy.optimize.minimize docs for further information.
maxiter : int
maximum number of iterations for scipy.optimize.minimize solver.
Returns
-------
w_fit : array-like, shape (p, )
posterior parameters (MAP estimate)
H_fit : array-like, shape like `H`
posterior Hessian (Hessian of negative log posterior evaluated at MAP parameters)
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# Check that dimensionality of inputs agrees
# check X
if len(X.shape) != 2:
raise ValueError('X must be a N*p matrix')
(nX, pX) = X.shape
# check y
if len(y.shape) > 1:
        raise ValueError('y must be a vector of shape (N, )')
if len(np.atleast_1d(y)) != nX:
raise ValueError('y and X do not have the same number of rows')
# check wprior
if len(wprior.shape) > 1:
raise ValueError('prior should be a vector of shape (p, )')
if len(np.atleast_1d(wprior)) != pX:
raise ValueError('prior mean has incompatible length')
# check H
if len(H.shape) == 1:
if np.atleast_1d(H).shape[0] != pX:
raise ValueError('prior Hessian is diagonal but has incompatible length')
elif len(H.shape) == 2:
(h1,h2) = np.atleast_2d(H).shape
if h1 != h2:
            raise ValueError('prior Hessian must either be a p*p square matrix or a vector of shape (p, ) ')
if h1 != pX:
raise ValueError('prior Hessian is square but has incompatible size')
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# Do the regression
if solver == 'Newton-CG':
if len(H.shape) == 2:
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior,
hess=H_log_posterior, method='Newton-CG', options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
elif len(H.shape) == 1:
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior,
hessp=HP_log_posterior, method='Newton-CG', options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
else:
raise ValueError(' You must either use the full Hessian or its diagonal as a vector')
elif solver == 'BFGS':
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior_small,
method='BFGS', options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
elif solver == 'L-BFGS-B':
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior_small,
method='L-BFGS-B', bounds=bounds, options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
else:
raise ValueError('Unknown solver specified: "{0}"'.format(solver))
return w_fit, H_fit
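# A minimal usage sketch (not part of the original module): fit a Bayesian logistic
# regression on synthetic data with a zero-mean prior and a diagonal (unit) prior Hessian,
# then compute p-values and moderated predictive probabilities. The sizes, the true
# parameter vector and the sampling scheme are illustrative assumptions.
def _example_fit():
    rng = np.random.RandomState(0)
    n_samples, n_features = 500, 3
    X = rng.randn(n_samples, n_features)
    w_true = np.array([1.0, -2.0, 0.5])
    y = (rng.rand(n_samples) < logistic_prob(X, w_true)).astype(float)
    wprior = np.zeros(n_features)
    H_prior = np.ones(n_features)  # diagonal prior Hessian, i.e. unit prior precision
    w_fit, H_fit = fit_bayes_logistic(y, X, wprior, H_prior)
    pvals = get_pvalues(w_fit, H_fit)
    probs = bayes_logistic_prob(X, w_fit, H_fit)
    return w_fit, pvals, probs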
def get_pvalues(w, H):
""" Calculates p-values on fitted parameters. This can be used for variable selection by,
    for example, discarding every parameter with a p-value greater than 0.05 (or some other cutoff)
Parameters
----------
w : array-like, shape (p, )
array of posterior means on the fitted parameters
H : array-like, shape (p, p) or (p, )
array of log posterior Hessian
Returns
-------
pvals : array-like, shape (p, )
array of p-values for each of the fitted parameters
References
----------
    Chapter 2 of Pawitan, Y. 'In All Likelihood', Oxford University Press (2013)
Also see: <NAME>. 'Extraction of network topology from multi-electrode recordings: is there
a small world effect', Frontiers in Computational Neuroscience (2011) for a use case of
p-value based variable selection.
"""
    # the log posterior Hessian equals the Fisher information; the square root of its
    # diagonal gives the inverse standard error of each parameter
if len(H.shape) == 2:
inv_std_err = np.sqrt(np.diag(H))
elif len(H.shape) == 1:
inv_std_err = np.sqrt(H)
else:
raise ValueError("Incompatible Hessian provided")
# calculate Wald statistic
z_ = w * inv_std_err
# get p-value by comparing Wald statistic to cdf of Normal distribution
pvals = 2. * (1. - norm.cdf(np.abs(z_)))
return pvals
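# A small sketch of the p-value based variable selection mentioned in the docstring above
# (not part of the original module): keep only the columns whose fitted parameters are
# significant at a cutoff. The 0.05 default is an illustrative choice.
def _select_significant_columns(w, H, X, cutoff=0.05):
    pvals = get_pvalues(w, H)
    keep = pvals < cutoff
    return X[:, keep], keep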
def bayes_logistic_prob(X, w, H):
""" Posterior predictive logistic regression probability. Uses probit approximation
to the logistic regression sigmoid. Also has overflow prevention via exponent truncation.
Parameters
----------
X : array-like, shape (N, p)
array of covariates
w : array-like, shape (p, )
array of fitted MAP parameters
H : array-like, shape (p, p) or (p, )
array of log posterior Hessian (covariance matrix of fitted MAP parameters)
Returns
-------
pr : array-like, shape (N, )
moderated (by full distribution) logistic probability
References
----------
    Chapter 8 of Murphy, K. 'Machine Learning: A Probabilistic Perspective', MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# set a truncation exponent
trunc = 8. # exp(8)/(1+exp(8)) = 0.9997 which is close enough to 1 as to not matter in most cases.
# unmoderated argument of exponent
z_a = np.dot(X, w)
# find the moderation
if len(H.shape) == 2:
H_inv_ = np.linalg.inv(H)
sig2_a = np.sum(X * np.dot(H_inv_, X.T).T, axis=1)
elif len(H.shape) == 1:
H_inv_ = 1. / H
        sig2_a = np.sum(X * (H_inv_ * X), axis=1)
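    # NOTE: the lines below are a plausible reconstruction of the truncated original.
    # The 'else' guard mirrors the other functions in this module; the moderation uses
    # the standard probit approximation kappa(sig2) = 1 / sqrt(1 + pi * sig2 / 8).
    else:
        raise ValueError('Incompatible Hessian')
    kappa_sig2_a = 1. / np.sqrt(1. + 0.125 * np.pi * sig2_a)
    # moderated argument of the logistic function
    z = kappa_sig2_a * z_a
    # truncate to avoid numerical over/underflow
    z = np.clip(z, -trunc, trunc)
    # moderated (posterior predictive) logistic probability
    pr = np.exp(z)
    pr = pr / (1. + pr)
    return pr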
#!/usr/bin/python
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.linalg as la
import tensorflow as tf
import tools.shrinkage as shrinkage
def adaptive_computation_time(halting_probas,eps=.06):
sh=halting_probas.get_shape().as_list()
batch=sh[0]
max_units=sh[1]
    zero_col = tf.ones((batch, 1)) * 1e-8  # LISTA and LAMP use 1e-6; LISTA-CPSS uses 1e-8
halting_padded = tf.concat([halting_probas[:, :-1], zero_col], axis=1)
    halt_flag_final = (halting_padded <= eps)  # [batchsize, T]; the last layer's h is effectively 1 (it always halts)
decay=1./(10.+tf.to_float(tf.range(max_units)))
halt_flag_final_with_decay=tf.to_float(halt_flag_final)+decay[None,:]
N=tf.to_int32(tf.argmax(halt_flag_final_with_decay,dimension=1))
N=tf.stop_gradient(N)
    num_units = N + 1  # number of layers actually used, [batchsize, 1]
unit_index = tf.range(max_units)
p = tf.where(tf.less_equal(unit_index, (max_units-1)*tf.ones((max_units),dtype=tf.int32)), tf.ones((max_units)), tf.zeros((max_units)))
return num_units,halting_padded,tf.to_float(p),tf.reduce_max(num_units)
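# A small NumPy sketch (not part of the original source) of the index-selection trick used
# above: adding a strictly decreasing "decay" to the boolean halt flags makes argmax return
# the FIRST position whose flag is set, because every set flag scores above 1 and earlier
# positions receive the larger decay. The probabilities below are illustrative values.
def _act_first_halt_index_sketch():
    halting_padded = np.array([[0.30, 0.04, 0.50, 1e-8]])  # last column always halts
    eps = 0.06
    halt_flag = (halting_padded <= eps).astype(float)       # [[0., 1., 0., 1.]]
    decay = 1. / (10. + np.arange(halting_padded.shape[1]))
    n = np.argmax(halt_flag + decay[None, :], axis=1)       # -> array([1])
    return n + 1                                            # number of units used: 2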
def build_LISTA_act(prob,tao,T,eps=0.01,initial_lambda=.1,untied=False):
"""
Builds a LISTA network to infer x from prob.y_ = matmul(prob.A,x) + AWGN
return a list of layer info (name,xhat_,newvars)
name : description, e.g. 'LISTA T=1'
xhat_ : that which approximates x_ at some point in the algorithm
newvars : a tuple of layer-specific trainable variables
"""
assert not untied,'TODO: untied'
eta = shrinkage.simple_soft_threshold
xhats = []
halting_distribs=[]
layers=[]
batchsize=prob.L
A = prob.A
M,N = A.shape
B = A.T / (1.01 * la.norm(A, 2) ** 2 + 1e-10)
# B = A.T
B_ = tf.Variable(B,dtype=tf.float32,name='B_0')
S_ = tf.Variable( np.identity(N) - np.matmul(B,A),dtype=tf.float32,name='S_0')
By_ = tf.matmul( B_ , prob.y_ )
initial_lambda = np.array(initial_lambda).astype(np.float32)
# if getattr(prob,'iid',True) == False:
# create a parameter for each coordinate in x
# initial_lambda = initial_lambda*np.ones( (N,1),dtype=np.float32 )
lam0_ = tf.Variable( initial_lambda,name='lam_0')
xhat_ = eta( By_, lam0_)
xhats.append(xhat_) #tf.reduce_sum(tf.squared_difference(prob.y_,tf.matmul(A,xhat_)),axis=0,keep_dims=True)
# halting_distrib=tf.nn.sigmoid(fully_connect(tf.transpose(tf.concat([tf.reduce_sum(tf.squared_difference(prob.y_,tf.matmul(A,tf.stop_gradient(xhat_))),axis=0,keep_dims=True),tf.norm(tf.stop_gradient(xhat_),ord=1,axis=0,keepdims=True)],axis=0)),output_size=1,scope='LISTA_T_1'))
w1=tf.Variable(1,name='h_w1_0',dtype=tf.float32)
C = tf.Variable(np.eye(M), name='h_w2_0', dtype=tf.float32)
b=tf.Variable(0,name='h_b_0',dtype=tf.float32)
halting_distrib = tf.nn.sigmoid(w1*tf.transpose(
tf.reduce_sum(tf.squared_difference(tf.matmul(C,prob.y_), tf.matmul(C,tf.matmul(prob.A_, tf.stop_gradient(xhat_)))), axis=0, keep_dims=True)
)+b)
halting_distribs.append(halting_distrib)
for t in range(1,T):
lam_ = tf.Variable(initial_lambda,name='lam_{0}'.format(t) )
xhat_ = eta( tf.matmul(S_,xhat_) + By_, lam_ ) #[N,batchsize]
xhats.append(xhat_)
# if t<T-1:
w1 = tf.Variable(1, name='h_w1_{0}'.format(t),dtype=tf.float32)
# C = tf.Variable(np.eye(M), name='h_w2_{0}'.format(t), dtype=tf.float32)
b = tf.Variable(0, name='h_b_{0}'.format(t),dtype=tf.float32)
halting_distrib = tf.nn.sigmoid(w1*tf.transpose(tf.reduce_sum(
tf.squared_difference(tf.matmul(C,prob.y_), tf.matmul(C,tf.matmul(prob.A_, tf.stop_gradient(xhat_)))), axis=0, keep_dims=True) )
+b)
halting_distribs.append(halting_distrib)
halting_distribs=tf.concat(halting_distribs,1)
num_units, halting_distribution,p,max_num=adaptive_computation_time(halting_distribs,eps=eps)
xhat_final1 = tf.zeros((N, batchsize))
xhat_final3= []
for i in range(T):
xhat_final1 = xhat_final1 + tf.to_float(
tf.equal(tf.squeeze(tf.reshape([i] * batchsize, shape=(batchsize, 1))),
num_units - 1)) * xhats[i]
xhat_final3.append(tf.reduce_sum(tf.squared_difference(xhats[i], prob.x_), axis=0,keep_dims=True))
xhat_final3=tf.transpose(tf.concat(xhat_final3,axis=0))
xhat_final2=tf.reduce_sum(p*(xhat_final3/(halting_distribution+1e-6)+tao*halting_distribution),axis=1)
layers.append((xhat_final1,xhat_final2,xhat_final3,tf.transpose(num_units),xhats,halting_distribution,max_num,p))
return layers
def build_LAMP_act(prob,tao,T,eps,shrink,untied):
"""
Builds a LAMP network to infer x from prob.y_ = matmul(prob.A,x) + AWGN
return a list of layer info (name,xhat_,newvars)
name : description, e.g. 'LISTA T=1'
xhat_ : that which approximates x_ at some point in the algorithm
newvars : a tuple of layer-specific trainable variables
"""
eta,theta_init = shrinkage.get_shrinkage_function(shrink)
print('theta_init='+repr(theta_init))
xhats = []
halting_distribs = []
layers = []
batchsize = prob.L
A = prob.A
M,N = A.shape
B = A.T / (1.01 * la.norm(A,2)**2)
B_ = tf.Variable(B,dtype=tf.float32,name='B_0')
By_ = tf.matmul( B_ , prob.y_ )
if getattr(prob,'iid',True) == False:
# set up individual parameters for every coordinate
theta_init = theta_init*np.ones( (N,1),dtype=np.float32 )
theta_ = tf.Variable(theta_init,dtype=tf.float32,name='theta_0')
OneOverM = tf.constant(float(1)/M,dtype=tf.float32)
NOverM = tf.constant(float(N)/M,dtype=tf.float32)
rvar_ = tf.reduce_sum(tf.square(prob.y_),0) * OneOverM
(xhat_,dxdr_) = eta( By_,rvar_ , theta_ )
xhats.append(xhat_)
w1 = tf.Variable(1, name='h_w1_0', dtype=tf.float32)
C = tf.Variable(np.eye(M), name='h_w2_0', dtype=tf.float32)
b = tf.Variable(0, name='h_b_0', dtype=tf.float32)
halting_distrib = tf.nn.sigmoid(w1 * tf.transpose(
tf.reduce_sum(tf.square(tf.matmul(C, prob.y_-tf.matmul(prob.A_, tf.stop_gradient(xhat_)))),
axis=0, keep_dims=True)
) + b)
halting_distribs.append(halting_distrib)
vt_ = prob.y_
for t in range(1,T):
if len(dxdr_.get_shape())==2:
dxdr_ = tf.reduce_mean(dxdr_,axis=0)
bt_ = dxdr_ * NOverM
vt_ = prob.y_ - tf.matmul( prob.A_ , xhat_ ) + bt_ * vt_
rvar_ = tf.reduce_sum(tf.square(vt_),0) * OneOverM
theta_ = tf.Variable(theta_init,name='theta_'+str(t))
if untied:
B_ = tf.Variable(B,dtype=tf.float32,name='B_'+str(t))
rhat_ = xhat_ + tf.matmul(B_,vt_)
else:
rhat_ = xhat_ + tf.matmul(B_,vt_)
(xhat_,dxdr_) = eta( rhat_ ,rvar_ , theta_ )
xhats.append(xhat_)
w1 = tf.Variable(1, name='h_w1_{0}'.format(t), dtype=tf.float32)
b = tf.Variable(0, name='h_b_{0}'.format(t), dtype=tf.float32)
halting_distrib = tf.nn.sigmoid(w1 * tf.transpose(tf.reduce_sum(
tf.square(tf.matmul(C, prob.y_-tf.matmul(prob.A_, tf.stop_gradient(xhat_)))), axis=0,keep_dims=True))
+ b)
halting_distribs.append(halting_distrib)
halting_distribs = tf.concat(halting_distribs, 1)
num_units, halting_distribution, p, max_num = adaptive_computation_time(halting_distribs, eps=eps)
# xhat_final2 = tf.zeros((batchsize))
xhat_final1 = tf.zeros((N, batchsize))
xhat_final3 = []
for i in range(T):
xhat_final1 = xhat_final1 + tf.to_float(
tf.equal(tf.squeeze(tf.reshape([i] * batchsize, shape=(batchsize, 1))),
num_units - 1)) * xhats[i]
# xhat_final2 = xhat_final2 + tf.transpose(halting_distribution[:, i])
xhat_final3.append(tf.reduce_sum(tf.squared_difference(xhats[i], prob.x_), axis=0, keep_dims=True))
xhat_final3 = tf.transpose(tf.concat(xhat_final3, axis=0))
xhat_final2 = tf.reduce_sum(p * (xhat_final3 / (halting_distribution + 1e-6) + tao * halting_distribution), axis=1)
layers.append(
(xhat_final1, xhat_final2, xhat_final3, tf.transpose(num_units), xhats, halting_distribution, max_num, p))
return layers
def build_LISTA_cpss_act(prob,tao,T,eps=0.01,initial_lambda=.1,p=1.2, maxp=13,untied=False):
"""
Builds a LISTA network to infer x from prob.y_ = matmul(prob.A,x) + AWGN
return a list of layer info (name,xhat_,newvars)
name : description, e.g. 'LISTA T=1'
xhat_ : that which approximates x_ at some point in the algorithm
newvars : a tuple of layer-specific trainable variables
"""
assert not untied,'TODO: untied'
eta = shrinkage.shrink_ss
xhats = []
halting_distribs = []
layers = []
A = prob.A
M,N = A.shape
batchsize = prob.L
    B = A.T / (1.01 * la.norm(A, 2) ** 2)
import optuna
import pandas as pd
import numpy as np
from scipy.stats import rankdata
import pandas_ta as pta
from finta import TA as fta
import talib as tta
import re
import warnings
import pareto
warnings.filterwarnings("ignore")
from timeit import default_timer as timer
def col_name(function, study_best_params):
"""
Create consistent column names given string function and params
:param function: Function represented as string
:param study_best_params: Params for function
    :return: column name string built from the function name and its parameters
"""
# Optuna string of indicator
function_name = function.split("(")[0].replace(".", "_")
# Optuna string of parameters
params = re.sub('[^0-9a-zA-Z_:,]', '', str(study_best_params)).replace(",", "_").replace(":", "_")
# Concatenate name and params to define
col = f"{function_name}_{params}"
return col
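# Usage sketch (hypothetical indicator string and parameter dict, for illustration only):
def _demo_col_name():
    assert col_name("pta.rsi(df.close, length=14)", {"length": 14}) == "pta_rsi_length_14"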
def _weighted_pearson(y, y_pred, w=None, pearson=True):
"""Calculate the weighted Pearson correlation coefficient."""
if pearson:
if w is None:
w = np.ones(len(y))
# idx = ~np.logical_or(np.isnan(y_pred), np.isnan(y)) # Drop NAs w/boolean mask
# y = np.compress(idx, np.array(y))
# y_pred = np.compress(idx, np.array(y_pred))
# w = np.compress(idx, w)
with np.errstate(divide='ignore', invalid='ignore'):
y_pred_demean = y_pred - np.average(y_pred, weights=w)
y_demean = y - np.average(y, weights=w)
corr = ((np.sum(w * y_pred_demean * y_demean) / np.sum(w)) /
np.sqrt((np.sum(w * y_pred_demean ** 2) *
np.sum(w * y_demean ** 2)) /
(np.sum(w) ** 2)))
if np.isfinite(corr):
return np.abs(corr)
return 0.
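# Sanity-check sketch (arbitrary illustrative data): with unit weights the statistic
# reduces to the absolute Pearson correlation coefficient.
def _demo_weighted_pearson():
    y = np.array([1., 2., 3., 4.])
    y_pred = np.array([1.1, 1.9, 3.2, 3.8])
    ref = abs(np.corrcoef(y, y_pred)[0, 1])
    assert abs(_weighted_pearson(y, y_pred) - ref) < 1e-9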
def _weighted_spearman(y, y_pred, w=None):
"""Calculate the weighted Spearman correlation coefficient."""
# idx = ~np.logical_or(np.isnan(y_pred), np.isnan(y)) # Drop NAs w/boolean mask
# y = np.compress(idx, np.array(y))
# y_pred = np.compress(idx, np.array(y_pred))
# w = np.compress(idx, w)
y_pred_ranked = np.apply_along_axis(rankdata, 0, y_pred)
    y_ranked = np.apply_along_axis(rankdata, 0, y)
"""Test the basic DAP functions."""
import numpy as np
from six import MAXSIZE
from pydap.model import (DatasetType, BaseType,
StructureType)
from pydap.exceptions import ConstraintExpressionError
from pydap.lib import (quote, encode, fix_slice, combine_slices, hyperslab,
walk, fix_shorthand, get_var)
import unittest
class TestQuote(unittest.TestCase):
"""Test quoting.
According to the DAP 2 specification a variable name MUST contain only
upper or lower case letters, numbers, or characters from the set
_ ! ~ * ' - "
All other characters must be escaped. This includes the period, which is
normally not quoted by ``urllib.quote``.
"""
def test_quoting(self):
"""Test a simple quoting."""
self.assertEqual(quote("White space"), "White%20space")
def test_quoting_period(self):
"""Test if period is also quoted."""
self.assertEqual(quote("Period."), "Period%2E")
class TestEncode(unittest.TestCase):
"""Test encoding.
According to the DAP 2 specification, numbers must be encoded using the C
notation "%.6g". Other objects are encoded as escaped strings.
"""
def test_integer(self):
"""Test integer encoding."""
self.assertEqual(encode(1), "1")
def test_float(self):
"""Test floating encoding."""
self.assertEqual(encode(np.pi), "3.14159")
def test_string(self):
"""Test string encoding."""
self.assertEqual(encode("test"), '"test"')
def test_string_with_quotation(self):
"""Test encoding a string with a quotation mark."""
self.assertEqual(encode('this is a "test"'), '"this is a \"test\""')
def test_unicode(self):
"""Unicode objects are encoded just like strings."""
self.assertEqual(encode(u"test"), '"test"')
def test_obj(self):
"""Other objects are encoded according to their ``repr``."""
self.assertEqual(encode({}), '"{}"')
def test_numpy_string(self):
self.assertEqual(encode(np.array('1', dtype='<U1')), '"1"')
class TestFixSlice(unittest.TestCase):
"""Test the ``fix_slice`` function."""
def test_not_tuple(self):
"""Non tuples should be converted and handled correctly."""
        x = np.arange(10)
#!/usr/bin/env python
"""
roms.interp
Methods to interpolate ROMS fields onto other grids
Written by <NAME> on 11/02/13
Copyright (c)2010--2021 University of Hawaii under the MIT-License.
"""
import numpy as np
import netCDF4
import os
import seapy
from seapy.timeout import timeout, TimeoutError
from joblib import Parallel, delayed
from warnings import warn
from rich.progress import (
track, BarColumn, TextColumn, TimeElapsedColumn, Progress)
_up_scaling = {"zeta": 1.0, "u": 1.0, "v": 1.0, "temp": 1.0, "salt": 1.0}
_down_scaling = {"zeta": 1.0, "u": 0.999,
"v": 0.999, "temp": 0.999, "salt": 1.001}
_ksize_range = (7, 15)
# Limit amount of memory in bytes to process in a single read. This determines how to
# divide up the time-records in interpolation
_max_memory = 768 * 1024 * 1024 # 768 MBytes
def __mask_z_grid(z_data, src_depth, z_depth):
"""
When interpolating to z-grid, we need to apply depth dependent masking
based on the original ROMS depths
"""
for k in np.arange(0, z_depth.shape[0]):
idx = np.nonzero(z_depth[k, :, :] < src_depth)
if z_data.ndim == 4:
z_data.mask[:, k, idx[0], idx[1]] = True
elif z_data.ndim == 3:
z_data.mask[k, idx[0], idx[1]] = True
def __interp2_thread(rx, ry, data, zx, zy, pmap, weight, nx, ny, mask):
"""
internal routine: 2D interpolation thread for parallel interpolation
"""
data = np.ma.fix_invalid(data, copy=False)
# Convolve the water over the land
ksize = 2 * np.round(np.sqrt((nx / np.ma.median(np.ma.diff(rx)))**2 +
(ny / np.ma.median(np.ma.diff(ry.T)))**2)) + 1
if ksize < _ksize_range[0]:
warn("nx or ny values are too small for stable OA, {:f}".format(ksize))
ksize = _ksize_range[0]
elif ksize > _ksize_range[1]:
warn("nx or ny values are too large for stable OA, {:f}".format(ksize))
ksize = _ksize_range[1]
data = seapy.convolve_mask(data, ksize=ksize, copy=False)
# Interpolate the field and return the result
with timeout(minutes=30):
res, pm = seapy.oasurf(rx, ry, data, zx, zy, pmap, weight, nx, ny)
return np.ma.masked_where(np.logical_or(mask == 0, np.abs(res) > 9e4), res,
copy=False)
def __interp3_thread(rx, ry, rz, data, zx, zy, zz, pmap,
weight, nx, ny, mask, up_factor=1.0, down_factor=1.0):
"""
internal routine: 3D interpolation thread for parallel interpolation
"""
# Make the mask 3D
mask = seapy.adddim(mask, zz.shape[0])
data = np.ma.fix_invalid(data, copy=False)
# To avoid extrapolation, we are going to convolve ocean over the land
# and add a new top and bottom layer that replicates the data of the
# existing current and top. 1) iteratively convolve until we have
# filled most of the points, 2) Determine which way the
# depth goes and add/subtract new layers, and 3) fill in masked values
# from the layer above/below.
gradsrc = (rz[0, 1, 1] - rz[-1, 1, 1]) > 0
# Convolve the water over the land
ksize = 2 * np.round(np.sqrt((nx / np.ma.median(np.ma.diff(rx)))**2 +
(ny / np.ma.median(np.ma.diff(ry.T)))**2)) + 1
if ksize < _ksize_range[0]:
warn("nx or ny values are too small for stable OA, {:f}".format(ksize))
ksize = _ksize_range[0]
elif ksize > _ksize_range[1]:
warn("nx or ny values are too large for stable OA, {:f}".format(ksize))
ksize = _ksize_range[1]
# Iterate at most 5 times, but we will hopefully break out before that by
# checking if we have filled at least 40% of the bottom to be like
# the surface
bot = -1 if gradsrc else 0
top = 0 if gradsrc else -1
topmask = np.maximum(1, np.ma.count_masked(data[top, :, :]))
if np.ma.count_masked(data[bot, :, :]) > 0:
for iter in range(5):
# Check if we have most everything by checking the bottom
data = seapy.convolve_mask(data, ksize=ksize + iter, copy=False)
if topmask / np.maximum(1, np.ma.count_masked(data[bot, :, :])) > 0.4:
break
# Now fill vertically
nrz = np.zeros((data.shape[0] + 2, data.shape[1], data.shape[2]))
nrz[1:-1, :, :] = rz
nrz[bot, :, :] = rz[bot, :, :] - 5000
nrz[top, :, :] = 1
if not gradsrc:
# The first level is the bottom
# factor = down_factor
levs = np.arange(data.shape[0], 0, -1) - 1
else:
# The first level is the top
# factor = up_factor
levs = np.arange(0, data.shape[0])
# Fill in missing values where we have them from the shallower layer
for k in levs[1:]:
if np.ma.count_masked(data[k, :, :]) == 0:
continue
idx = np.nonzero(np.logical_xor(data.mask[k, :, :],
data.mask[k - 1, :, :]))
data.mask[k, idx[0], idx[1]] = data.mask[k - 1, idx[0], idx[1]]
data[k, idx[0], idx[1]] = data[k - 1, idx[0], idx[1]] * down_factor
# Add upper and lower boundaries
ndat = np.zeros((data.shape[0] + 2, data.shape[1], data.shape[2]))
ndat[bot, :, :] = data[bot, :, :].filled(np.nan) * down_factor
ndat[1:-1, :, :] = data.filled(np.nan)
ndat[top, :, :] = data[top, :, :].filled(np.nan) * up_factor
# Interpolate the field and return the result
with timeout(minutes=30):
if gradsrc:
res, pm = seapy.oavol(rx, ry, nrz[::-1, :, :], ndat[::-1, :, :],
zx, zy, zz, pmap, weight, nx, ny)
else:
res, pm = seapy.oavol(rx, ry, nrz, ndat, zx, zy, zz,
pmap, weight, nx, ny)
return np.ma.masked_where(np.logical_or(mask == 0, np.abs(res) > 9e4), res,
copy=False)
def __interp3_vel_thread(rx, ry, rz, ra, u, v, zx, zy, zz, za, pmap,
weight, nx, ny, mask):
"""
internal routine: 3D velocity interpolation thread for parallel interpolation
"""
# Put on the same grid
if u.shape != v.shape:
u = seapy.model.u2rho(u, fill=True)
v = seapy.model.v2rho(v, fill=True)
# Rotate the fields (NOTE: ROMS angle is negative relative to "true")
if ra is not None:
u, v = seapy.rotate(u, v, ra)
# Interpolate
u = __interp3_thread(rx, ry, rz, u, zx, zy, zz, pmap,
weight, nx, ny, mask, _up_scaling["u"],
_down_scaling["u"])
v = __interp3_thread(rx, ry, rz, v, zx, zy, zz, pmap,
weight, nx, ny, mask, _up_scaling["v"],
_down_scaling["v"])
# Rotate to destination (NOTE: ROMS angle is negative relative to "true")
if za is not None:
u, v = seapy.rotate(u, v, -za)
# Return the masked data
return u, v
def __interp_grids(src_grid, child_grid, ncsrc, ncout, records=None,
threads=2, nx=0, ny=0, weight=10, vmap=None, z_mask=False,
pmap=None):
"""
internal method: Given a model file (average, history, etc.),
interpolate the fields onto another gridded file.
Parameters
----------
src_grid : seapy.model.grid data of source
child_grid : seapy.model.grid output data grid
ncsrc : netcdf input file (History, Average, etc. file)
ncout : netcdf output file
[records] : array of the record indices to interpolate
[threads] : number of processing threads
[nx] : decorrelation length in grid-cells for x
[ny] : decorrelation length in grid-cells for y
[vmap] : variable name mapping
[z_mask] : mask out depths in z-grids
[pmap] : use the specified pmap rather than compute it
Returns
-------
None
"""
# If we don't have a variable map, then do a one-to-one mapping
if vmap is None:
vmap = dict()
for k in seapy.roms.fields:
vmap[k] = k
# Generate a file to store the pmap information
sname = getattr(src_grid, 'name', None)
cname = getattr(child_grid, 'name', None)
pmap_file = None if any(v is None for v in (sname, cname)) else \
sname + "_" + cname + "_pmap.npz"
# Create or load the pmaps depending on if they exist
if nx == 0:
if hasattr(src_grid, "dm") and hasattr(child_grid, "dm"):
nx = np.ceil(np.ma.mean(src_grid.dm) / np.ma.mean(child_grid.dm))
else:
nx = 5
if ny == 0:
if hasattr(src_grid, "dn") and hasattr(child_grid, "dn"):
ny = np.ceil(np.ma.mean(src_grid.dn) / np.ma.mean(child_grid.dn))
else:
ny = 5
if pmap is None:
if pmap_file is not None and os.path.isfile(pmap_file):
pmap = np.load(pmap_file)
else:
tmp = np.ma.masked_equal(src_grid.mask_rho, 0)
tmp, pmaprho = seapy.oasurf(src_grid.lon_rho, src_grid.lat_rho,
tmp, child_grid.lon_rho, child_grid.lat_rho,
weight=weight, nx=nx, ny=ny)
            tmp = np.ma.masked_equal(src_grid.mask_u, 0)
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-22 23:55:19
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-23 22:59:45
from aces.tools import parseyaml
from aces.graph import fig, pl
from aces.f import binmeanx
import numpy as np
dir = "bi4i4c.1/0/secondorder/"
file = dir + "groupv/mesh.yaml"
data = parseyaml(file)
freqs = []
gvs = []
for phonon in data['phonon']:
qp = phonon['q-position']
for band in phonon['band']:
frequency = band['frequency']
gv = np.array(band['group_velocity'])
freqs.append(frequency)
gvs.append(gv)
freqs = np.array(freqs)
gvs = np.array(gvs)
gvs = np.abs(gvs)
import numpy
import numpy.matlib
import copy
import pandas
import wave
import struct
import os
import math
import ctypes
import multiprocessing
import warnings
import logging
import audioop
import scipy
from scipy import ndimage
import scipy.stats as stats
from scipy.fftpack import fft
from scipy.signal import decimate
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
def read_sph(input_file_name, mode='p'):
"""
Read a SPHERE audio file
:param input_file_name: name of the file to read
:param mode: specifies the following (\* =default)
.. note::
- Scaling:
- 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
- 'r' Raw unscaled data (integer values)
- 'p' Scaled to make +-1 equal full scale
- 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values,
can be combined with n+p,r,s modes)
- 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values,
can be combined with o+p,r,s modes)
- Format
- 'l' Little endian data (Intel,DEC) (overrides indication in file)
- 'b' Big endian data (non Intel/DEC) (overrides indication in file)
- File I/O
- 'f' Do not close file on exit
- 'd' Look in data directory: voicebox('dir_data')
- 'w' Also read the annotation file \*.wrd if present (as in TIMIT)
- 't' Also read the phonetic transcription file \*.phn if present (as in TIMIT)
- NMAX maximum number of samples to read (or -1 for unlimited [default])
- NSKIP number of samples to skip from start of file (or -1 to continue from previous read when FFX
is given instead of FILENAME [default])
:return: a tupple such that (Y, FS)
.. note::
- Y data matrix of dimension (samples,channels)
- FS sample frequency in Hz
- WRD{\*,2} cell array with word annotations: WRD{\*,:)={[t_start t_end],'text'} where times are in seconds
only present if 'w' option is given
- PHN{\*,2} cell array with phoneme annotations: PHN{\*,:)={[t_start t_end],'phoneme'} where times
are in seconds only present if 't' option is present
- FFX Cell array containing
1. filename
2. header information
1. first header field name
2. first header field value
3. format string (e.g. NIST_1A)
4.
1. file id
2. current position in file
3. dataoff byte offset in file to start of data
4. order byte order (l or b)
5. nsamp number of samples
6. number of channels
7. nbytes bytes per data value
8. bits number of bits of precision
9. fs sample frequency
10. min value
11. max value
12. coding 0=PCM,1=uLAW + 0=no compression, 0=shorten,20=wavpack,30=shortpack
13. file not yet decompressed
5. temporary filename
If no output parameters are specified,
header information will be printed.
    The code to decode shorten-encoded files is
    not yet released with this toolkit.
"""
codings = dict([('pcm', 1), ('ulaw', 2)])
compressions = dict([(',embedded-shorten-', 1),
(',embedded-wavpack-', 2),
(',embedded-shortpack-', 3)])
byteorder = 'l'
endianess = dict([('l', '<'), ('b', '>')])
if not mode == 'p':
mode = [mode, 'p']
k = list((m >= 'p') & (m <= 's') for m in mode)
# scale to input limits not output limits
mno = all([m != 'o' for m in mode])
sc = ''
if k[0]:
sc = mode[0]
# Get byte order (little/big endian)
if any([m == 'l' for m in mode]):
byteorder = 'l'
elif any([m == 'b' for m in mode]):
byteorder = 'b'
ffx = ['', '', '', '', '']
if isinstance(input_file_name, str):
if os.path.exists(input_file_name):
fid = open(input_file_name, 'rb')
elif os.path.exists("".join((input_file_name, '.sph'))):
input_file_name = "".join((input_file_name, '.sph'))
fid = open(input_file_name, 'rb')
else:
raise Exception('Cannot find file {}'.format(input_file_name))
ffx[0] = input_file_name
elif not isinstance(input_file_name, str):
ffx = input_file_name
else:
fid = input_file_name
# Read the header
if ffx[3] == '':
fid.seek(0, 0) # go to the begining of the file
l1 = fid.readline().decode("utf-8")
l2 = fid.readline().decode("utf-8")
if not (l1 == 'NIST_1A\n') & (l2 == ' 1024\n'):
logging.warning('File does not begin with a SPHERE header')
ffx[2] = l1.rstrip()
hlen = int(l2[3:7])
hdr = {}
while True: # Read the header and fill a dictionary
st = fid.readline().decode("utf-8").rstrip()
if st[0] != ';':
elt = st.split(' ')
if elt[0] == 'end_head':
break
if elt[1][0] != '-':
logging.warning('Missing ''-'' in SPHERE header')
break
if elt[1][1] == 's':
hdr[elt[0]] = elt[2]
elif elt[1][1] == 'i':
hdr[elt[0]] = int(elt[2])
else:
hdr[elt[0]] = float(elt[2])
if 'sample_byte_format' in list(hdr.keys()):
if hdr['sample_byte_format'][0] == '0':
bord = 'l'
else:
bord = 'b'
if (bord != byteorder) & all([m != 'b' for m in mode]) \
& all([m != 'l' for m in mode]):
byteorder = bord
icode = 0 # Get encoding, default is PCM
if 'sample_coding' in list(hdr.keys()):
icode = -1 # unknown code
for coding in list(codings.keys()):
if hdr['sample_coding'].startswith(coding):
# is the signal compressed
# if len(hdr['sample_coding']) > codings[coding]:
if len(hdr['sample_coding']) > len(coding):
for compression in list(compressions.keys()):
if hdr['sample_coding'].endswith(compression):
icode = 10 * compressions[compression] \
+ codings[coding] - 1
break
else: # if the signal is not compressed
icode = codings[coding] - 1
break
# initialize info of the files with default values
info = [fid, 0, hlen, ord(byteorder), 0, 1, 2, 16, 1, 1, -1, icode]
# Get existing info from the header
if 'sample_count' in list(hdr.keys()):
info[4] = hdr['sample_count']
if not info[4]: # if no info sample_count or zero
# go to the end of the file
fid.seek(0, 2) # Go to te end of the file
# get the sample count
info[4] = int(math.floor((fid.tell() - info[2]) / (info[5] * info[6]))) # get the sample_count
if 'channel_count' in list(hdr.keys()):
info[5] = hdr['channel_count']
if 'sample_n_bytes' in list(hdr.keys()):
info[6] = hdr['sample_n_bytes']
if 'sample_sig_bits' in list(hdr.keys()):
info[7] = hdr['sample_sig_bits']
if 'sample_rate' in list(hdr.keys()):
info[8] = hdr['sample_rate']
if 'sample_min' in list(hdr.keys()):
info[9] = hdr['sample_min']
if 'sample_max' in list(hdr.keys()):
info[10] = hdr['sample_max']
ffx[1] = hdr
ffx[3] = info
info = ffx[3]
ksamples = info[4]
if ksamples > 0:
fid = info[0]
if (icode >= 10) & (ffx[4] == ''): # read compressed signal
# need to use a script with SHORTEN
raise Exception('compressed signal, need to unpack in a script with SHORTEN')
info[1] = ksamples
# use modes o and n to determine effective peak
pk = 2 ** (8 * info[6] - 1) * (1 + (float(mno) / 2 - int(all([m != 'b'
for m in
mode]))) / 2 **
info[7])
fid.seek(1024) # jump after the header
nsamples = info[5] * ksamples
if info[6] < 3:
if info[6] < 2:
logging.debug('Sphere i1 PCM')
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
if info[11] % 10 == 1:
if y.shape[0] % 2:
y = numpy.frombuffer(audioop.ulaw2lin(
numpy.concatenate((y, numpy.zeros(1, 'int8'))), 2),
numpy.int16)[:-1]/32768.
else:
y = numpy.frombuffer(audioop.ulaw2lin(y, 2), numpy.int16)/32768.
pk = 1.
else:
y = y - 128
else:
logging.debug('Sphere i2')
y = numpy.fromfile(fid, endianess[byteorder]+"i2", -1)
else: # non verifie
if info[6] < 4:
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
y = y.reshape(nsamples, 3).transpose()
y = (numpy.dot(numpy.array([1, 256, 65536]), y) - (numpy.dot(y[2, :], 2 ** (-7)).astype(int) * 2 ** 24))
else:
y = numpy.fromfile(fid, endianess[byteorder]+"i4", -1)
if sc != 'r':
if sc == 's':
if info[9] > info[10]:
info[9] = numpy.min(y)
info[10] = numpy.max(y)
sf = 1 / numpy.max(list(list(map(abs, info[9:11]))), axis=0)
else:
sf = 1 / pk
y = sf * y
if info[5] > 1:
y = y.reshape(ksamples, info[5])
else:
y = numpy.array([])
if mode != 'f':
fid.close()
info[0] = -1
if not ffx[4] == '':
pass # VERIFY SCRIPT, WHICH CASE IS HANDLED HERE
return y.astype(numpy.float32), int(info[8]), int(info[6])
def read_wav(input_file_name):
"""
:param input_file_name:
:return:
"""
wfh = wave.open(input_file_name, "r")
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wfh.getparams()
raw = wfh.readframes(nframes * nchannels)
out = struct.unpack_from("%dh" % nframes * nchannels, raw)
sig = numpy.reshape(numpy.array(out), (-1, nchannels)).squeeze()
wfh.close()
return sig.astype(numpy.float32), framerate, sampwidth
def read_pcm(input_file_name):
"""Read signal from single channel PCM 16 bits
:param input_file_name: name of the PCM file to read.
:return: the audio signal read from the file in a ndarray encoded on 16 bits, None and 2 (depth of the encoding in bytes)
"""
with open(input_file_name, 'rb') as f:
f.seek(0, 2) # Go to te end of the file
# get the sample count
sample_count = int(f.tell() / 2)
f.seek(0, 0) # got to the begining of the file
data = numpy.asarray(struct.unpack('<' + 'h' * sample_count, f.read()))
return data.astype(numpy.float32), None, 2
def read_audio(input_file_name, framerate=None):
""" Read a 1 or 2-channel audio file in SPHERE, WAVE or RAW PCM format.
The format is determined from the file extension.
If the sample rate read from the file is a multiple of the one given
as parameter, we apply a decimation function to subsample the signal.
:param input_file_name: name of the file to read from
:param framerate: frame rate, optional, if lower than the one read from the file, subsampling is applied
:return: the signal as a numpy array and the sampling frequency
"""
if framerate is None:
raise TypeError("Expected sampling frequency required in sidekit.frontend.io.read_audio")
ext = os.path.splitext(input_file_name)[-1]
if ext.lower() == '.sph':
sig, read_framerate, sampwidth = read_sph(input_file_name, 'p')
elif ext.lower() == '.wav' or ext.lower() == '.wave':
sig, read_framerate, sampwidth = read_wav(input_file_name)
elif ext.lower() == '.pcm' or ext.lower() == '.raw':
sig, read_framerate, sampwidth = read_pcm(input_file_name)
read_framerate = framerate
else:
raise TypeError("Unknown extension of audio file")
# Convert to 16 bit encoding if needed
sig *= (2**(15-sampwidth))
if framerate > read_framerate:
print("Warning in read_audio, up-sampling function is not implemented yet!")
elif read_framerate % float(framerate) == 0 and not framerate == read_framerate:
print("downsample")
sig = decimate(sig, int(read_framerate / float(framerate)), n=None, ftype='iir', axis=0)
return sig.astype(numpy.float32), framerate
def rasta_filt(x):
"""Apply RASTA filtering to the input signal.
:param x: the input audio signal to filter.
cols of x = critical bands, rows of x = frame
same for y but after filtering
default filter is single pole at 0.94
"""
x = x.T
numerator = numpy.arange(.2, -.3, -.1)
denominator = numpy.array([1, -0.94])
# Initialize the state. This avoids a big spike at the beginning
# resulting from the dc offset level in each band.
# (this is effectively what rasta/rasta_filt.c does).
# Because Matlab uses a DF2Trans implementation, we have to
# specify the FIR part to get the state right (but not the IIR part)
y = numpy.zeros(x.shape)
zf = numpy.zeros((x.shape[0], 4))
for i in range(y.shape[0]):
y[i, :4], zf[i, :4] = lfilter(numerator, 1, x[i, :4], axis=-1, zi=[0, 0, 0, 0])
# .. but don't keep any of these values, just output zero at the beginning
y = numpy.zeros(x.shape)
# Apply the full filter to the rest of the signal, append it
for i in range(y.shape[0]):
y[i, 4:] = lfilter(numerator, denominator, x[i, 4:], axis=-1, zi=zf[i, :])[0]
return y.T
def cms(features, label=None, global_mean=None):
"""Performs cepstral mean subtraction
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: a logical vector
:param global_mean: pre-computed mean to use for feature normalization if given
:return: a feature stream
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if label.sum() == 0:
mu = numpy.zeros((features.shape[1]))
if global_mean is not None:
mu = global_mean
else:
mu = numpy.mean(features[label, :], axis=0)
features -= mu
def cmvn(features, label=None, global_mean=None, global_std=None):
"""Performs mean and variance normalization
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param global_mean: pre-computed mean to use for feature normalization if given
:param global_std: pre-computed standard deviation to use for feature normalization if given
:param label: a logical verctor
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if global_mean is not None and global_std is not None:
mu = global_mean
stdev = global_std
features -= mu
features /= stdev
elif not label.sum() == 0:
mu = numpy.mean(features[label, :], axis=0)
stdev = numpy.std(features[label, :], axis=0)
features -= mu
features /= stdev
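# Illustrative sketch (synthetic data): after in-place mean/variance normalization
# every column has zero mean and unit standard deviation.
def _demo_cmvn():
    feats = numpy.random.randn(1000, 5) * 3. + 7.
    cmvn(feats)
    assert numpy.allclose(feats.mean(axis=0), 0., atol=1e-6)
    assert numpy.allclose(feats.std(axis=0), 1., atol=1e-6)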
def stg(features, label=None, win=301):
"""Performs feature warping on a sliding window
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: label of selected frames to compute the Short Term Gaussianization, by default, al frames are used
:param win: size of the frame window to consider, must be an odd number to get a symetric context on left and right
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
speech_features = features[label, :]
add_a_feature = False
if win % 2 == 1:
# one feature per line
nframes, dim = numpy.shape(speech_features)
# If the number of frames is not enough for one window
if nframes < win:
# if the number of frames is not odd, duplicate the last frame
# if nframes % 2 == 1:
if not nframes % 2 == 1:
nframes += 1
add_a_feature = True
speech_features = numpy.concatenate((speech_features, [speech_features[-1, ]]))
win = nframes
# create the output feature stream
stg_features = numpy.zeros(numpy.shape(speech_features))
# Process first window
r = numpy.argsort(speech_features[:win, ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[: (win - 1) // 2] + 0.5) / win
        stg_features[: (win - 1) // 2, :] = stats.norm.ppf(arg, 0, 1)
# process all following windows except the last one
for m in range(int((win - 1) / 2), int(nframes - (win - 1) / 2)):
idx = list(range(int(m - (win - 1) / 2), int(m + (win - 1) / 2 + 1)))
foo = speech_features[idx, :]
            r = numpy.sum(foo < foo[(win - 1) // 2], axis=0) + 1
arg = (r - 0.5) / win
stg_features[m, :] = stats.norm.ppf(arg, 0, 1)
# Process the last window
r = numpy.argsort(speech_features[list(range(nframes - win, nframes)), ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[(win + 1) // 2: win, :] + 0.5) / win
stg_features[list(range(int(nframes - (win - 1) / 2), nframes)), ] = stats.norm.ppf(arg, 0, 1)
else:
# Raise an exception
raise Exception('Sliding window should have an odd length')
# wrapFeatures = np.copy(features)
if add_a_feature:
stg_features = stg_features[:-1]
features[label, :] = stg_features
def cep_sliding_norm(features, win=301, label=None, center=True, reduce=False):
"""
Performs a cepstal mean substitution and standard deviation normalization
in a sliding windows. MFCC is modified.
:param features: the MFCC, a numpy array
:param win: the size of the sliding windows
:param label: vad label if available
:param center: performs mean subtraction
:param reduce: performs standard deviation division
"""
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if numpy.sum(label) <= win:
if reduce:
cmvn(features, label)
else:
cms(features, label)
else:
d_win = win // 2
df = pandas.DataFrame(features[label, :])
r = df.rolling(window=win, center=True)
mean = r.mean().values
std = r.std().values
mean[0:d_win, :] = mean[d_win, :]
mean[-d_win:, :] = mean[-d_win-1, :]
std[0:d_win, :] = std[d_win, :]
std[-d_win:, :] = std[-d_win-1, :]
if center:
features[label, :] -= mean
if reduce:
features[label, :] /= std
def pre_emphasis(input_sig, pre):
"""Pre-emphasis of an audio signal.
:param input_sig: the input vector of signal to pre emphasize
:param pre: value that defines the pre-emphasis filter.
"""
if input_sig.ndim == 1:
return (input_sig - numpy.c_[input_sig[numpy.newaxis, :][..., :1],
input_sig[numpy.newaxis, :][..., :-1]].squeeze() * pre)
else:
return input_sig - numpy.c_[input_sig[..., :1], input_sig[..., :-1]] * pre
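# Illustrative sketch: pre-emphasis is the first-order high-pass y[n] = x[n] - pre * x[n-1],
# with the first sample filtered against itself as in the implementation above.
def _demo_pre_emphasis():
    x = numpy.array([1., 2., 3., 4.])
    y = pre_emphasis(x, 0.97)
    assert numpy.allclose(y[1:], x[1:] - 0.97 * x[:-1])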
"""Generate a new array that chops the given array along the given axis
into overlapping frames.
This method has been implemented by <NAME>,
as part of the talk box toolkit
example::
segment_axis(arange(10), 4, 2)
array([[0, 1, 2, 3],
           [2, 3, 4, 5],
[4, 5, 6, 7],
[6, 7, 8, 9]])
:param a: the array to segment
:param length: the length of each frame
:param overlap: the number of array elements by which the frames should overlap
:param axis: the axis to operate on; if None, act on the flattened array
:param end: what to do with the last frame, if the array is not evenly
divisible into pieces. Options are:
- 'cut' Simply discard the extra values
- 'wrap' Copy values from the beginning of the array
- 'pad' Pad with a constant value
:param endvalue: the value to use for end='pad'
:return: a ndarray
The array is not copied unless necessary (either because it is unevenly
strided and being flattened or because end is set to 'pad' or 'wrap').
"""
if axis is None:
a = numpy.ravel(a) # may copy
axis = 0
l = a.shape[axis]
if overlap >= length:
raise ValueError("frames cannot overlap by more than 100%")
if overlap < 0 or length <= 0:
raise ValueError("overlap must be nonnegative and length must" +
"be positive")
if l < length or (l - length) % (length - overlap):
if l > length:
roundup = length + (1 + (l - length) // (length - overlap)) * (length - overlap)
rounddown = length + ((l - length) // (length - overlap)) * (length - overlap)
else:
roundup = length
rounddown = 0
assert rounddown < l < roundup
assert roundup == rounddown + (length - overlap) or (roundup == length and rounddown == 0)
a = a.swapaxes(-1, axis)
if end == 'cut':
a = a[..., :rounddown]
l = a.shape[0]
elif end in ['pad', 'wrap']: # copying will be necessary
s = list(a.shape)
s[-1] = roundup
b = numpy.empty(s, dtype=a.dtype)
b[..., :l] = a
if end == 'pad':
b[..., l:] = endvalue
elif end == 'wrap':
b[..., l:] = a[..., :roundup - l]
a = b
a = a.swapaxes(-1, axis)
if l == 0:
raise ValueError("Not enough data points to segment array " +
"in 'cut' mode; try 'pad' or 'wrap'")
assert l >= length
assert (l - length) % (length - overlap) == 0
n = 1 + (l - length) // (length - overlap)
s = a.strides[axis]
new_shape = a.shape[:axis] + (n, length) + a.shape[axis + 1:]
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
try:
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
except TypeError:
a = a.copy()
# Shape doesn't change but strides does
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
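# Illustrative sketch reproducing the docstring example above: 10 samples cut into
# frames of length 4 with an overlap of 2 samples.
def _demo_segment_axis():
    frames = segment_axis(numpy.arange(10), 4, 2)
    assert frames.shape == (4, 4)
    assert (frames[1] == numpy.array([2, 3, 4, 5])).all()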
def speech_enhancement(X, Gain, NN):
"""This program is only to process the single file seperated by the silence
section if the silence section is detected, then a counter to number of
buffer is set and pre-processing is required.
Usage: SpeechENhance(wavefilename, Gain, Noise_floor)
:param X: input audio signal
:param Gain: default value is 0.9, suggestion range 0.6 to 1.4,
higher value means more subtraction or noise redcution
:param NN:
:return: a 1-dimensional array of boolean that
is True for high energy frames.
Copyright 2014 <NAME> and <NAME>
"""
    if X.shape[0] < 512:  # should raise an exception here instead
return X
    num1 = 40  # disable buffer number
Alpha = 0.75 # original value is 0.9
FrameSize = 32 * 2 # 256*2
FrameShift = int(FrameSize / NN) # FrameSize/2=128
nfft = FrameSize # = FrameSize
Fmax = int(numpy.floor(nfft / 2) + 1) # 128+1 = 129
# arising hamming windows
Hamm = 1.08 * (0.54 - 0.46 * numpy.cos(2 * numpy.pi * numpy.arange(FrameSize) / (FrameSize - 1)))
y0 = numpy.zeros(FrameSize - FrameShift) # 128 zeros
Eabsn = numpy.zeros(Fmax)
Eta1 = Eabsn
###################################################################
# initial parameter for noise min
mb = numpy.ones((1 + FrameSize // 2, 4)) * FrameSize / 2 # 129x4 set four buffer * FrameSize/2
im = 0
Beta1 = 0.9024 # seems that small value is better;
pxn = numpy.zeros(1 + FrameSize // 2) # 1+FrameSize/2=129 zeros vector
###################################################################
old_absx = Eabsn
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[
numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
###################################################################
# add the pre-noise estimates
for i in range(200):
Frame += 1
fftn = fft(x * Hamm) # get its spectrum
absn = numpy.abs(fftn[0:Fmax]) # get its amplitude
# add the following part from noise estimation algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # Beta=0.9231 recursive pxn
im = (im + 1) % 40 # noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
# 0-2 vector shifted to 1 to 3
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
# over_sub_noise= oversubtraction factor
# end of noise detection algotihm
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
if In_data.shape[0] < FrameShift: # to check file is out
EOF = 1
break
else:
x[FrameSize - FrameShift:FrameSize] = In_data # shift new 128 to position 129 to FrameSize location
# end of for loop for noise estimation
# end of prenoise estimation ************************
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
X1 = numpy.zeros(X.shape)
Frame = 0
while EOF == 0:
Frame += 1
xwin = x * Hamm
fftx = fft(xwin, nfft) # FrameSize FFT
absx = numpy.abs(fftx[0:Fmax]) # Fmax=129,get amplitude of x
argx = fftx[:Fmax] / (absx + numpy.spacing(1)) # normalize x spectrum phase
absn = absx
# add the following part from rainer algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # s Beta=0.9231 recursive pxn
im = int((im + 1) % (num1 * NN / 2)) # original =40 noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
Eabsn = pn
Gaina = Gain
temp1 = Eabsn * Gaina
Eta1 = Alpha * old_absx + (1 - Alpha) * numpy.maximum(absx - temp1, 0)
new_absx = (absx * Eta1) / (Eta1 + temp1) # wiener filter
old_absx = new_absx
ffty = new_absx * argx # multiply amplitude with its normalized spectrum
        y = numpy.real(numpy.fft.ifft(numpy.concatenate((ffty,
                                                          numpy.conj(ffty[numpy.arange(Fmax - 2, 0, -1)])))))
y[:FrameSize - FrameShift] = y[:FrameSize - FrameShift] + y0
y0 = y[FrameShift:FrameSize] # keep 129 to FrameSize point samples
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
z = 2 / NN * y[:FrameShift] # left channel is the original signal
z /= 1.15
z = numpy.minimum(z, 32767)
z = numpy.maximum(z, -32768)
index0 = numpy.arange(FrameShift * (Frame - 1), FrameShift * Frame)
if not all(index0 < X1.shape[0]):
idx = 0
while (index0[idx] < X1.shape[0]) & (idx < index0.shape[0]):
X1[index0[idx]] = z[idx]
idx += 1
else:
X1[index0] = z
if In_data.shape[0] == 0:
EOF = 1
else:
x[numpy.arange(FrameSize - FrameShift, FrameSize + In_data.shape[0] - FrameShift)] = In_data
X1 = X1[X1.shape[0] - X.shape[0]:]
# }
# catch{
# }
return X1
def vad_percentil(log_energy, percent):
"""
:param log_energy:
:param percent:
:return:
"""
thr = numpy.percentile(log_energy, percent)
return log_energy > thr, thr
def vad_energy(log_energy,
distrib_nb=3,
nb_train_it=8,
flooring=0.0001, ceiling=1.0,
alpha=2):
# center and normalize the energy
log_energy = (log_energy - numpy.mean(log_energy)) / numpy.std(log_energy)
# Initialize a Mixture with 2 or 3 distributions
world = Mixture()
# set the covariance of each component to 1.0 and the mean to mu + meanIncrement
world.cst = numpy.ones(distrib_nb) / (numpy.pi / 2.0)
world.det = numpy.ones(distrib_nb)
world.mu = -2 + 4.0 * numpy.arange(distrib_nb) / (distrib_nb - 1)
world.mu = world.mu[:, numpy.newaxis]
world.invcov = numpy.ones((distrib_nb, 1))
# set equal weights for each component
world.w = numpy.ones(distrib_nb) / distrib_nb
world.cov_var_ctl = copy.deepcopy(world.invcov)
# Initialize the accumulator
accum = copy.deepcopy(world)
# Perform nbTrainIt iterations of EM
for it in range(nb_train_it):
accum._reset()
# E-step
world._expectation(accum, log_energy)
# M-step
world._maximization(accum, ceiling, flooring)
# Compute threshold
threshold = world.mu.max() - alpha * numpy.sqrt(1.0 / world.invcov[world.mu.argmax(), 0])
# Apply frame selection with the current threshold
label = log_energy > threshold
return label, threshold
def vad_snr(sig, snr, fs=16000, shift=0.01, nwin=256):
"""Select high energy frames based on the Signal to Noise Ratio
of the signal.
Input signal is expected encoded on 16 bits
:param sig: the input audio signal
:param snr: Signal to noise ratio to consider
:param fs: sampling frequency of the input signal in Hz. Default is 16000.
:param shift: shift between two frames in seconds. Default is 0.01
:param nwin: number of samples of the sliding window. Default is 256.
"""
overlap = nwin - int(shift * fs)
sig /= 32768.
sig = speech_enhancement(numpy.squeeze(sig), 1.2, 2)
# Compute Standard deviation
sig += 0.1 * numpy.random.randn(sig.shape[0])
std2 = segment_axis(sig, nwin, overlap, axis=None, end='cut', endvalue=0).T
std2 = numpy.std(std2, axis=0)
std2 = 20 * numpy.log10(std2) # convert the dB
# APPLY VAD
label = (std2 > numpy.max(std2) - snr) & (std2 > -75)
return label
def label_fusion(label, win=3):
"""Apply a morphological filtering on the label to remove isolated labels.
In case the input is a two channel label (2D ndarray of boolean of same
length) the labels of two channels are fused to remove
overlaping segments of speech.
:param label: input labels given in a 1D or 2D ndarray
:param win: parameter or the morphological filters
"""
channel_nb = len(label)
if channel_nb == 2:
overlap_label = numpy.logical_and(label[0], label[1])
label[0] = numpy.logical_and(label[0], ~overlap_label)
label[1] = numpy.logical_and(label[1], ~overlap_label)
for idx, lbl in enumerate(label):
cl = ndimage.grey_closing(lbl, size=win)
label[idx] = ndimage.grey_opening(cl, size=win)
return label
def hz2mel(f, htk=True):
"""Convert an array of frequency in Hz into mel.
:param f: frequency to convert
:return: the equivalence on the mel scale.
"""
if htk:
return 2595 * numpy.log10(1 + f / 700.)
else:
f = numpy.array(f)
        # Mel fn to match Slaney's Auditory Toolbox mfcc.m
f_0 = 0.
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = f < brkfrq
z = numpy.zeros_like(f)
# fill in parts separately
z[linpts] = (f[linpts] - f_0) / f_sp
z[~linpts] = brkpt + (numpy.log(f[~linpts] / brkfrq)) / numpy.log(logstep)
if z.shape == (1,):
return z[0]
else:
return z
def mel2hz(z, htk=True):
"""Convert an array of mel values in Hz.
:param m: ndarray of frequencies to convert in Hz.
:return: the equivalent values in Hertz.
"""
if htk:
return 700. * (10**(z / 2595.) - 1)
else:
z = numpy.array(z, dtype=float)
f_0 = 0
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = (z < brkpt)
f = numpy.zeros_like(z)
# fill in parts separately
f[linpts] = f_0 + f_sp * z[linpts]
f[~linpts] = brkfrq * numpy.exp(numpy.log(logstep) * (z[~linpts] - brkpt))
if f.shape == (1,):
return f[0]
else:
return f
def hz2bark(f):
"""
Convert frequencies (Hertz) to Bark frequencies
:param f: the input frequency
:return:
"""
return 6. * numpy.arcsinh(f / 600.)
def bark2hz(z):
"""
Converts frequencies Bark to Hertz (Hz)
:param z:
:return:
"""
return 600. * numpy.sinh(z / 6.)
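# Illustrative sketch: the Hz<->mel (HTK flavour) and Hz<->Bark conversions above
# are inverses of each other.
def _demo_scale_conversions():
    f = numpy.array([200., 1000., 4000.])
    assert numpy.allclose(mel2hz(hz2mel(f)), f)
    assert numpy.allclose(bark2hz(hz2bark(f)), f)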
def compute_delta(features,
win=3,
method='filter',
filt=numpy.array([.25, .5, .25, 0, -.25, -.5, -.25])):
"""features is a 2D-ndarray each row of features is a a frame
:param features: the feature frames to compute the delta coefficients
:param win: parameter that set the length of the computation window.
The size of the window is (win x 2) + 1
:param method: method used to compute the delta coefficients
can be diff or filter
:param filt: definition of the filter to use in "filter" mode, default one
is similar to SPRO4: filt=numpy.array([.2, .1, 0, -.1, -.2])
:return: the delta coefficients computed on the original features.
"""
# First and last features are appended to the begining and the end of the
# stream to avoid border effect
x = numpy.zeros((features.shape[0] + 2 * win, features.shape[1]), dtype=numpy.float32)
x[:win, :] = features[0, :]
x[win:-win, :] = features
x[-win:, :] = features[-1, :]
delta = numpy.zeros(x.shape, dtype=numpy.float32)
if method == 'diff':
filt = numpy.zeros(2 * win + 1, dtype=numpy.float32)
filt[0] = -1
filt[-1] = 1
for i in range(features.shape[1]):
delta[:, i] = numpy.convolve(features[:, i], filt)
return delta[win:-win, :]
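# Illustrative sketch (random features): the delta coefficients keep the
# (n_frames, n_coeffs) shape of the input stream.
def _demo_compute_delta():
    feats = numpy.random.randn(50, 13).astype(numpy.float32)
    assert compute_delta(feats).shape == feats.shape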
def pca_dct(cep, left_ctx=12, right_ctx=12, p=None):
"""Apply DCT PCA as in [McLaren 2015] paper:
<NAME> and <NAME>, 'Improved Speaker Recognition
Using DCT coefficients as features' in ICASSP, 2015
A 1D-dct is applied to the cepstral coefficients on a temporal
sliding window.
The resulting matrix is then flatten and reduced by using a Principal
Component Analysis.
:param cep: a matrix of cepstral cefficients, 1 line per feature vector
:param left_ctx: number of frames to consider for left context
:param right_ctx: number of frames to consider for right context
:param p: a PCA matrix trained on a developpment set to reduce the
dimension of the features. P is a portait matrix
"""
y = numpy.r_[numpy.resize(cep[0, :], (left_ctx, cep.shape[1])),
cep,
numpy.resize(cep[-1, :], (right_ctx, cep.shape[1]))]
ceps = framing(y, win_size=left_ctx + 1 + right_ctx).transpose(0, 2, 1)
dct_temp = (dct_basis(left_ctx + 1 + right_ctx, left_ctx + 1 + right_ctx)).T
if p is None:
p = numpy.eye(dct_temp.shape[0] * cep.shape[1], dtype=numpy.float32)
return (numpy.dot(ceps.reshape(-1, dct_temp.shape[0]),
dct_temp).reshape(ceps.shape[0], -1)).dot(p)
def shifted_delta_cepstral(cep, d=1, p=3, k=7):
"""
Compute the Shifted-Delta-Cepstral features for language identification
:param cep: matrix of feature, 1 vector per line
:param d: represents the time advance and delay for the delta computation
:param k: number of delta-cepstral blocks whose delta-cepstral
coefficients are stacked to form the final feature vector
:param p: time shift between consecutive blocks.
return: cepstral coefficient concatenated with shifted deltas
"""
y = numpy.r_[numpy.resize(cep[0, :], (d, cep.shape[1])),
cep,
numpy.resize(cep[-1, :], (k * 3 + d, cep.shape[1]))]
delta = compute_delta(y, win=d, method='diff')
sdc = numpy.empty((cep.shape[0], cep.shape[1] * k))
idx = numpy.zeros(delta.shape[0], dtype='bool')
for ii in range(k):
idx[d + ii * p] = True
for ff in range(len(cep)):
sdc[ff, :] = delta[idx, :].reshape(1, -1)
idx = numpy.roll(idx, 1)
return numpy.hstack((cep, sdc))
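# Illustrative sketch (random cepstra): with the common 7-1-3-7 configuration the output
# stacks the original coefficients with k shifted-delta blocks, i.e. n_ceps * (1 + k) columns.
def _demo_shifted_delta_cepstral():
    cep = numpy.random.randn(200, 7).astype(numpy.float32)
    sdc = shifted_delta_cepstral(cep, d=1, p=3, k=7)
    assert sdc.shape == (200, 7 * (1 + 7))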
def trfbank(fs, nfft, lowfreq, maxfreq, nlinfilt, nlogfilt, midfreq=1000):
"""Compute triangular filterbank for cepstral coefficient computation.
:param fs: sampling frequency of the original signal.
:param nfft: number of points for the Fourier Transform
:param lowfreq: lower limit of the frequency band filtered
:param maxfreq: higher limit of the frequency band filtered
:param nlinfilt: number of linear filters to use in low frequencies
:param nlogfilt: number of log-linear filters to use in high frequencies
:param midfreq: frequency boundary between linear and log-linear filters
:return: the filter bank and the central frequencies of each filter
"""
# Total number of filters
nfilt = nlinfilt + nlogfilt
# ------------------------
# Compute the filter bank
# ------------------------
# Compute start/middle/end points of the triangular filters in spectral
# domain
frequences = numpy.zeros(nfilt + 2, dtype=numpy.float32)
if nlogfilt == 0:
linsc = (maxfreq - lowfreq) / (nlinfilt + 1)
frequences[:nlinfilt + 2] = lowfreq + numpy.arange(nlinfilt + 2) * linsc
elif nlinfilt == 0:
low_mel = hz2mel(lowfreq)
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2)
# mels[nlinfilt:]
melsc = (max_mel - low_mel) / (nfilt + 1)
mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc
# Back to the frequency domain
frequences = mel2hz(mels)
else:
# Compute linear filters on [0;1000Hz]
linsc = (min([midfreq, maxfreq]) - lowfreq) / (nlinfilt + 1)
frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc
# Compute log-linear filters on [1000;maxfreq]
low_mel = hz2mel(min([1000, maxfreq]))
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2, dtype=numpy.float32)
melsc = (max_mel - low_mel) / (nlogfilt + 1)
# Verify that mel2hz(melsc)>linsc
while mel2hz(melsc) < linsc:
# in this case, we add a linear filter
nlinfilt += 1
nlogfilt -= 1
frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc
low_mel = hz2mel(frequences[nlinfilt - 1] + 2 * linsc)
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2, dtype=numpy.float32)
melsc = (max_mel - low_mel) / (nlogfilt + 1)
mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc
# Back to the frequency domain
frequences[nlinfilt:] = mel2hz(mels)
heights = 2. / (frequences[2:] - frequences[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2)) + 1), dtype=numpy.float32)
# FFT bins (in Hz)
n_frequences = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nfilt):
low = frequences[i]
cen = frequences[i + 1]
hi = frequences[i + 2]
        lid = numpy.arange(numpy.floor(low * nfft / fs) + 1, numpy.floor(cen * nfft / fs) + 1, dtype=int)
        left_slope = heights[i] / (cen - low)
        rid = numpy.arange(numpy.floor(cen * nfft / fs) + 1,
                           min(numpy.floor(hi * nfft / fs) + 1, nfft), dtype=int)
right_slope = heights[i] / (hi - cen)
fbank[i][lid] = left_slope * (n_frequences[lid] - low)
fbank[i][rid[:-1]] = right_slope * (hi - n_frequences[rid[:-1]])
return fbank, frequences
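# Illustrative sketch: a 24-filter log-mel filterbank for 16 kHz audio and a 512-point FFT
# gives one row per filter and nfft/2 + 1 frequency bins, plus the 26 band-edge frequencies.
def _demo_trfbank():
    fbank, freqs = trfbank(fs=16000, nfft=512, lowfreq=100, maxfreq=8000,
                           nlinfilt=0, nlogfilt=24)
    assert fbank.shape == (24, 257)
    assert freqs.shape == (26,)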
def mel_filter_bank(fs, nfft, lowfreq, maxfreq, widest_nlogfilt, widest_lowfreq, widest_maxfreq,):
"""Compute triangular filterbank for cepstral coefficient computation.
:param fs: sampling frequency of the original signal.
:param nfft: number of points for the Fourier Transform
:param lowfreq: lower limit of the frequency band filtered
:param maxfreq: higher limit of the frequency band filtered
:param widest_nlogfilt: number of log filters
:param widest_lowfreq: lower frequency of the filter bank
    :param widest_maxfreq: higher frequency of the filter bank
:return: the filter bank and the central frequencies of each filter
"""
# ------------------------
# Compute the filter bank
# ------------------------
# Compute start/middle/end points of the triangular filters in spectral
# domain
widest_freqs = numpy.zeros(widest_nlogfilt + 2, dtype=numpy.float32)
low_mel = hz2mel(widest_lowfreq)
max_mel = hz2mel(widest_maxfreq)
mels = numpy.zeros(widest_nlogfilt+2)
melsc = (max_mel - low_mel) / (widest_nlogfilt + 1)
mels[:widest_nlogfilt + 2] = low_mel + numpy.arange(widest_nlogfilt + 2) * melsc
# Back to the frequency domain
widest_freqs = mel2hz(mels)
# Select filters in the narrow band
sub_band_freqs = numpy.array([fr for fr in widest_freqs if lowfreq <= fr <= maxfreq], dtype=numpy.float32)
heights = 2./(sub_band_freqs[2:] - sub_band_freqs[0:-2])
nfilt = sub_band_freqs.shape[0] - 2
# Compute filterbank coeff (in fft domain, in bins)
    fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2)) + 1), dtype=numpy.float32)
# FFT bins (in Hz)
nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nfilt):
low = sub_band_freqs[i]
cen = sub_band_freqs[i+1]
hi = sub_band_freqs[i+2]
        lid = numpy.arange(numpy.floor(low * nfft / fs) + 1, numpy.floor(cen * nfft / fs) + 1, dtype=int)
        left_slope = heights[i] / (cen - low)
        rid = numpy.arange(numpy.floor(cen * nfft / fs) + 1, min(numpy.floor(hi * nfft / fs) + 1,
                                                                 nfft), dtype=int)
right_slope = heights[i] / (hi - cen)
fbank[i][lid] = left_slope * (nfreqs[lid] - low)
fbank[i][rid[:-1]] = right_slope * (hi - nfreqs[rid[:-1]])
return fbank, sub_band_freqs
def power_spectrum(input_sig,
fs=8000,
win_time=0.025,
shift=0.01,
prefac=0.97):
"""
Compute the power spectrum of the signal.
:param input_sig:
:param fs:
:param win_time:
:param shift:
:param prefac:
:return:
"""
window_length = int(round(win_time * fs))
overlap = window_length - int(shift * fs)
framed = framing(input_sig, window_length, win_shift=window_length-overlap).copy()
# Pre-emphasis filtering is applied after framing to be consistent with stream processing
framed = pre_emphasis(framed, prefac)
l = framed.shape[0]
n_fft = 2 ** int(numpy.ceil(numpy.log2(window_length)))
# Windowing has been changed to hanning which is supposed to have less noisy sidelobes
# ham = numpy.hamming(window_length)
window = numpy.hanning(window_length)
spec = numpy.ones((l, int(n_fft / 2) + 1), dtype=numpy.float32)
log_energy = numpy.log((framed**2).sum(axis=1) + 1e-5)
dec = 500000
start = 0
stop = min(dec, l)
while start < l:
ahan = framed[start:stop, :] * window
mag = numpy.fft.rfft(ahan, n_fft, axis=-1)
spec[start:stop, :] = mag.real**2 + mag.imag**2
start = stop
stop = min(stop + dec, l)
return spec, log_energy
def mfcc(input_sig,
lowfreq=100, maxfreq=8000,
nlinfilt=0, nlogfilt=24,
nwin=0.025,
fs=16000,
nceps=13,
shift=0.01,
get_spec=False,
get_mspec=False,
prefac=0.97):
"""Compute Mel Frequency Cepstral Coefficients.
:param input_sig: input signal from which the coefficients are computed.
Input audio is supposed to be RAW PCM 16bits
:param lowfreq: lower limit of the frequency band filtered.
Default is 100Hz.
:param maxfreq: higher limit of the frequency band filtered.
Default is 8000Hz.
:param nlinfilt: number of linear filters to use in low frequencies.
Default is 0.
:param nlogfilt: number of log-linear filters to use in high frequencies.
Default is 24.
:param nwin: length of the sliding window in seconds
Default is 0.025.
:param fs: sampling frequency of the original signal. Default is 16000Hz.
:param nceps: number of cepstral coefficients to extract.
Default is 13.
:param shift: shift between two analyses. Default is 0.01 (10ms).
:param get_spec: boolean, if true returns the spectrogram
:param get_mspec: boolean, if true returns the output of the filter banks
:param prefac: pre-emphasis filter value
    :return: the cepstral coefficients in a ndarray as well as
             the log-spectrum in the mel-domain in a ndarray.
    .. note:: MFCC are computed as follows:
            - Pre-processing in time-domain (pre-emphasizing)
            - Compute the spectrum amplitude by windowing (a Hann window in this implementation)
            - Filter the signal in the spectral domain with a triangular filter-bank, whose filters are
              approximately linearly spaced on the mel scale and have equal bandwidth in the mel scale
            - Compute the DCT of the log-spectrum
            - Log-energy is returned as first coefficient of the feature vector.
For more details, refer to [Davis80]_.
"""
# Compute power spectrum
spec, log_energy = power_spectrum(input_sig,
fs,
win_time=nwin,
shift=shift,
prefac=prefac)
# Filter the spectrum through the triangle filter-bank
n_fft = 2 ** int(numpy.ceil(numpy.log2(int(round(nwin * fs)))))
fbank = trfbank(fs, n_fft, lowfreq, maxfreq, nlinfilt, nlogfilt)[0]
    mspec = numpy.log(numpy.dot(spec, fbank.T) + 1e-5)  # to be tested: log10 vs. natural log
# Use the DCT to 'compress' the coefficients (spectrum -> cepstrum domain)
# The C0 term is removed as it is the constant term
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:, 1:nceps + 1]
lst = list()
lst.append(ceps)
lst.append(log_energy)
if get_spec:
lst.append(spec)
else:
lst.append(None)
del spec
if get_mspec:
lst.append(mspec)
else:
lst.append(None)
del mspec
return lst
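# Illustrative sketch: 13 MFCCs plus log-energy from a short random signal. The input
# values are assumptions made only for the example; mfcc() itself relies on the
# trfbank/framing/pre_emphasis helpers of this module and on scipy's dct.
def _example_mfcc():
    sig = numpy.random.randn(16000).astype(numpy.float32)
    ceps, log_energy, spec, mspec = mfcc(sig, fs=16000, nceps=13,
                                         get_spec=False, get_mspec=True)
    return ceps.shape, log_energy.shape  # (n_frames, 13) and (n_frames,)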
def fft2barkmx(n_fft, fs, nfilts=0, width=1., minfreq=0., maxfreq=8000):
"""
Generate a matrix of weights to combine FFT bins into Bark
bins. n_fft defines the source FFT size at sampling rate fs.
Optional nfilts specifies the number of output bands required
(else one per bark), and width is the constant width of each
band in Bark (default 1).
While wts has n_fft columns, the second half are all zero.
Hence, Bark spectrum is fft2barkmx(n_fft,fs) * abs(fft(xincols, n_fft));
2004-09-05 <EMAIL> based on rastamat/audspec.m
:param n_fft: the source FFT size at sampling rate fs
:param fs: sampling rate
:param nfilts: number of output bands required
:param width: constant width of each band in Bark (default 1)
:param minfreq:
:param maxfreq:
:return: a matrix of weights to combine FFT bins into Bark bins
"""
maxfreq = min(maxfreq, fs / 2.)
min_bark = hz2bark(minfreq)
nyqbark = hz2bark(maxfreq) - min_bark
if nfilts == 0:
        nfilts = int(numpy.ceil(nyqbark)) + 1
wts = numpy.zeros((nfilts, n_fft))
# bark per filt
step_barks = nyqbark / (nfilts - 1)
# Frequency of each FFT bin in Bark
binbarks = hz2bark(numpy.arange(n_fft / 2 + 1) * fs / n_fft)
for i in range(nfilts):
f_bark_mid = min_bark + i * step_barks
# Linear slopes in log-space (i.e. dB) intersect to trapezoidal window
lof = (binbarks - f_bark_mid - 0.5)
hif = (binbarks - f_bark_mid + 0.5)
wts[i, :n_fft // 2 + 1] = 10 ** (numpy.minimum(numpy.zeros_like(hif), numpy.minimum(hif, -2.5 * lof) / width))
return wts
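# Illustrative sketch: combine the bins of a 512-point FFT magnitude spectrum into Bark
# bands at 16 kHz. With nfilts=0 the number of bands is derived from the Nyquist
# frequency in Bark; hz2bark() is assumed to be defined earlier in this module.
def _example_fft2barkmx():
    wts = fft2barkmx(n_fft=512, fs=16000, nfilts=0, width=1., minfreq=0., maxfreq=8000)
    mag = numpy.abs(numpy.fft.fft(numpy.random.randn(512)))
    return wts.dot(mag)  # one energy value per Bark band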
def fft2melmx(n_fft,
fs=8000,
nfilts=0,
width=1.,
minfreq=0,
maxfreq=4000,
htkmel=False,
constamp=False):
"""
Generate a matrix of weights to combine FFT bins into Mel
bins. n_fft defines the source FFT size at sampling rate fs.
Optional nfilts specifies the number of output bands required
(else one per "mel/width"), and width is the constant width of each
band relative to standard Mel (default 1).
While wts has n_fft columns, the second half are all zero.
Hence, Mel spectrum is fft2melmx(n_fft,fs)*abs(fft(xincols,n_fft));
minfreq is the frequency (in Hz) of the lowest band edge;
default is 0, but 133.33 is a common standard (to skip LF).
maxfreq is frequency in Hz of upper edge; default fs/2.
You can exactly duplicate the mel matrix in Slaney's mfcc.m
as fft2melmx(512, 8000, 40, 1, 133.33, 6855.5, 0);
htkmel=1 means use HTK's version of the mel curve, not Slaney's.
constamp=1 means make integration windows peak at 1, not sum to 1.
frqs returns bin center frqs.
% 2004-09-05 <EMAIL> based on fft2barkmx
:param n_fft:
:param fs:
:param nfilts:
:param width:
:param minfreq:
:param maxfreq:
:param htkmel:
:param constamp:
:return:
"""
maxfreq = min(maxfreq, fs / 2.)
if nfilts == 0:
        nfilts = int(numpy.ceil(hz2mel(maxfreq, htkmel) / 2.))
wts = numpy.zeros((nfilts, n_fft))
# Center freqs of each FFT bin
fftfrqs = numpy.arange(n_fft / 2 + 1) / n_fft * fs
# 'Center freqs' of mel bands - uniformly spaced between limits
minmel = hz2mel(minfreq, htkmel)
maxmel = hz2mel(maxfreq, htkmel)
binfrqs = mel2hz(minmel + numpy.arange(nfilts + 2) / (nfilts + 1) * (maxmel - minmel), htkmel)
for i in range(nfilts):
_fs = binfrqs[i + numpy.arange(3, dtype=int)]
# scale by width
_fs = _fs[1] + width * (_fs - _fs[1])
# lower and upper slopes for all bins
        loslope = (fftfrqs - _fs[0]) / (_fs[1] - _fs[0])
        hislope = (_fs[2] - fftfrqs) / (_fs[2] - _fs[1])
        wts[i, 1 + numpy.arange(n_fft // 2 + 1)] = numpy.maximum(numpy.zeros_like(loslope), numpy.minimum(loslope, hislope))
if not constamp:
# Slaney-style mel is scaled to be approx constant E per channel
wts = numpy.dot(numpy.diag(2. / (binfrqs[2 + numpy.arange(nfilts)] - binfrqs[numpy.arange(nfilts)])) , wts)
# Make sure 2nd half of FFT is zero
wts[:, n_fft // 2 + 1: n_fft] = 0
return wts, binfrqs
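# Illustrative sketch: reproduce the Slaney-style mel matrix described in the docstring
# (512-point FFT, 8 kHz, 40 filters, 133.33-6855.5 Hz) and weight a magnitude spectrum
# with it. hz2mel/mel2hz with the htk flag are assumed to be defined in this module.
def _example_fft2melmx():
    wts, binfrqs = fft2melmx(512, 8000, nfilts=40, width=1., minfreq=133.33,
                             maxfreq=6855.5, htkmel=False, constamp=False)
    mag = numpy.abs(numpy.fft.fft(numpy.random.randn(512)))
    return wts.dot(mag), binfrqs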
def audspec(power_spectrum,
fs=16000,
nfilts=None,
fbtype='bark',
minfreq=0,
maxfreq=8000,
sumpower=True,
bwidth=1.):
"""
:param power_spectrum:
:param fs:
:param nfilts:
:param fbtype:
:param minfreq:
:param maxfreq:
:param sumpower:
:param bwidth:
:return:
"""
if nfilts is None:
nfilts = int(numpy.ceil(hz2bark(fs / 2)) + 1)
if not fs == 16000:
maxfreq = min(fs / 2, maxfreq)
nframes, nfreqs = power_spectrum.shape
n_fft = (nfreqs -1 ) * 2
if fbtype == 'bark':
wts = fft2barkmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq)
    elif fbtype == 'mel':
        wts = fft2melmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq)[0]
    elif fbtype == 'htkmel':
        wts = fft2melmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq, True, True)[0]
    elif fbtype == 'fcmel':
        wts = fft2melmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq, True, False)[0]
    else:
        raise ValueError('fbtype {} not recognized'.format(fbtype))
wts = wts[:, :nfreqs]
if sumpower:
audio_spectrum = power_spectrum.dot(wts.T)
else:
audio_spectrum = numpy.dot(numpy.sqrt(power_spectrum), wts.T)**2
return audio_spectrum, wts
def postaud(x, fmax, fbtype='bark', broaden=0):
"""
do loudness equalization and cube root compression
:param x:
:param fmax:
:param fbtype:
:param broaden:
:return:
"""
nframes, nbands = x.shape
# Include frequency points at extremes, discard later
nfpts = nbands + 2 * broaden
if fbtype == 'bark':
bandcfhz = bark2hz(numpy.linspace(0, hz2bark(fmax), num=nfpts))
elif fbtype == 'mel':
        bandcfhz = mel2hz(numpy.linspace(0, hz2mel(fmax), num=nfpts))
elif fbtype == 'htkmel' or fbtype == 'fcmel':
bandcfhz = mel2hz(numpy.linspace(0, hz2mel(fmax,1), num=nfpts),1)
    else:
        raise ValueError('unknown fbtype {}'.format(fbtype))
# Remove extremal bands (the ones that will be duplicated)
bandcfhz = bandcfhz[broaden:(nfpts - broaden)]
# Hynek's magic equal-loudness-curve formula
fsq = bandcfhz ** 2
ftmp = fsq + 1.6e5
eql = ((fsq / ftmp) ** 2) * ((fsq + 1.44e6) / (fsq + 9.61e6))
# weight the critical bands
z = numpy.matlib.repmat(eql.T,nframes,1) * x
# cube root compress
z = z ** .33
# replicate first and last band (because they are unreliable as calculated)
if broaden == 1:
y = z[:, numpy.hstack((0,numpy.arange(nbands), nbands - 1))]
else:
y = z[:, numpy.hstack((1,numpy.arange(1, nbands - 1), nbands - 2))]
return y, eql
def dolpc(x, model_order=8):
"""
compute autoregressive model from spectral magnitude samples
:param x:
:param model_order:
:return:
"""
nframes, nbands = x.shape
r = numpy.real(numpy.fft.ifft(numpy.hstack((x,x[:,numpy.arange(nbands-2,0,-1)]))))
# First half only
r = r[:, :nbands]
# Find LPC coeffs by Levinson-Durbin recursion
y_lpc = numpy.ones((r.shape[0], model_order + 1))
for ff in range(r.shape[0]):
y_lpc[ff, 1:], e, _ = levinson(r[ff, :-1].T, order=model_order, allow_singularity=True)
# Normalize each poly by gain
y_lpc[ff, :] /= e
return y_lpc
def lpc2cep(a, nout):
"""
Convert the LPC 'a' coefficients in each column of lpcas
into frames of cepstra.
nout is number of cepstra to produce, defaults to size(lpcas,1)
2003-04-11 <EMAIL>
:param a:
:param nout:
:return:
"""
ncol , nin = a.shape
order = nin - 1
if nout is None:
nout = order + 1
c = numpy.zeros((ncol, nout))
# First cep is log(Error) from Durbin
c[:, 0] = -numpy.log(a[:, 0])
# Renormalize lpc A coeffs
a /= numpy.tile(a[:, 0][:, None], (1, nin))
for n in range(1, nout):
        accum = 0
        for m in range(1, n):
            accum += (n - m) * a[:, m] * c[:, n - m]
        c[:, n] = -(a[:, n] + accum / n)
return c
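# Illustrative sketch: fit an order-8 all-pole model to each frame of a toy auditory
# spectrum and convert the LPC polynomials into 9 cepstral coefficients, mirroring the
# smoothing step used by plp() below. The random spectrum is only an example input.
def _example_lpc_to_cepstra():
    toy_spectrum = numpy.abs(numpy.random.randn(5, 21)) + 1.0  # 5 frames, 21 bands
    lpcas = dolpc(toy_spectrum, model_order=8)
    cepstra = lpc2cep(lpcas, nout=9)
    return cepstra.shape  # (5, 9)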
def lpc2spec(lpcas, nout=17):
"""
Convert LPC coeffs back into spectra
nout is number of freq channels, default 17 (i.e. for 8 kHz)
:param lpcas:
:param nout:
:return:
"""
[cols, rows] = lpcas.shape
order = rows - 1
gg = lpcas[:, 0]
aa = lpcas / numpy.tile(gg, (rows,1)).T
# Calculate the actual z-plane polyvals: nout points around unit circle
zz = numpy.exp((-1j * numpy.pi / (nout - 1)) * numpy.outer(numpy.arange(nout).T, numpy.arange(order + 1)))
# Actual polyvals, in power (mag^2)
features = ( 1./numpy.abs(aa.dot(zz.T))**2) / numpy.tile(gg, (nout, 1)).T
F = numpy.zeros((cols, rows-1))
M = numpy.zeros((cols, rows-1))
for c in range(cols):
aaa = aa[c, :]
rr = numpy.roots(aaa)
ff = numpy.angle(rr.T)
zz = numpy.exp(1j * numpy.outer(ff, numpy.arange(len(aaa))))
mags = numpy.sqrt(((1./numpy.abs(zz.dot(aaa)))**2)/gg[c])
ix = numpy.argsort(ff)
keep = ff[ix] > 0
ix = ix[keep]
F[c, numpy.arange(len(ix))] = ff[ix]
M[c, numpy.arange(len(ix))] = mags[ix]
F = F[:, F.sum(axis=0) != 0]
M = M[:, M.sum(axis=0) != 0]
return features, F, M
def spec2cep(spec, ncep=13, type=2):
"""
Calculate cepstra from spectral samples (in columns of spec)
Return ncep cepstral rows (defaults to 9)
This one does type II dct, or type I if type is specified as 1
dctm returns the DCT matrix that spec was multiplied by to give cep.
:param spec:
:param ncep:
:param type:
:return:
"""
nrow, ncol = spec.shape
# Make the DCT matrix
    dctm = numpy.zeros((ncep, nrow))
#if type == 2 || type == 3
# # this is the orthogonal one, the one you want
# for i = 1:ncep
# dctm(i,:) = cos((i-1)*[1:2:(2*nrow-1)]/(2*nrow)*pi) * sqrt(2/nrow);
# if type == 2
# # make it unitary! (but not for HTK type 3)
# dctm(1,:) = dctm(1,:)/sqrt(2);
#elif type == 4: # type 1 with implicit repeating of first, last bins
# """
# Deep in the heart of the rasta/feacalc code, there is the logic
# that the first and last auditory bands extend beyond the edge of
# the actual spectra, and they are thus copied from their neighbors.
# Normally, we just ignore those bands and take the 19 in the middle,
# but when feacalc calculates mfccs, it actually takes the cepstrum
# over the spectrum *including* the repeated bins at each end.
# Here, we simulate 'repeating' the bins and an nrow+2-length
# spectrum by adding in extra DCT weight to the first and last
# bins.
# """
# for i = 1:ncep
# dctm(i,:) = cos((i-1)*[1:nrow]/(nrow+1)*pi) * 2;
# # Add in edge points at ends (includes fixup scale)
# dctm(i,1) = dctm(i,1) + 1;
# dctm(i,nrow) = dctm(i,nrow) + ((-1)^(i-1));
# dctm = dctm / (2*(nrow+1));
#else % dpwe type 1 - same as old spec2cep that expanded & used fft
# for i = 1:ncep
# dctm(i,:) = cos((i-1)*[0:(nrow-1)]/(nrow-1)*pi) * 2 / (2*(nrow-1));
# dctm(:,[1 nrow]) = dctm(:, [1 nrow])/2;
    #cep = dctm*log(spec);
    # NOTE: the MATLAB reference implementation above has not been ported yet;
    # this function currently returns placeholders only.
    return None, None, None
def lifter(x, lift=0.6, invs=False):
"""
Apply lifter to matrix of cepstra (one per column)
lift = exponent of x i^n liftering
or, as a negative integer, the length of HTK-style sin-curve liftering.
    If invs is True (default False), undo the liftering.
:param x:
:param lift:
:param invs:
:return:
"""
nfrm , ncep = x.shape
if lift == 0:
y = x
else:
if lift > 0:
if lift > 10:
print('Unlikely lift exponent of {} did you mean -ve?'.format(lift))
liftwts = numpy.hstack((1, numpy.arange(1, ncep)**lift))
elif lift < 0:
# Hack to support HTK liftering
L = float(-lift)
if (L != numpy.round(L)):
print('HTK liftering value {} must be integer'.format(L))
liftwts = numpy.hstack((1, 1 + L/2*numpy.sin(numpy.arange(1, ncep) * numpy.pi / L)))
if invs:
liftwts = 1 / liftwts
y = x.dot(numpy.diag(liftwts))
return y
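# Illustrative sketch: apply exponential liftering to a small cepstral matrix and undo
# it again with invs=True; the round trip recovers the original values.
def _example_lifter():
    ceps = numpy.random.randn(4, 13)
    lifted = lifter(ceps, lift=0.6)
    restored = lifter(lifted, lift=0.6, invs=True)
    return numpy.allclose(ceps, restored)  # True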
def plp(input_sig,
nwin=0.025,
fs=16000,
plp_order=13,
shift=0.01,
get_spec=False,
get_mspec=False,
prefac=0.97,
rasta=True):
"""
output is matrix of features, row = feature, col = frame
% fs is sampling rate of samples, defaults to 8000
% dorasta defaults to 1; if 0, just calculate PLP
% modelorder is order of PLP model, defaults to 8. 0 -> no PLP
:param input_sig:
:param fs: sampling rate of samples default is 8000
:param rasta: default is True, if False, juste compute PLP
:param model_order: order of the PLP model, default is 8, 0 means no PLP
:return: matrix of features, row = features, column are frames
"""
plp_order -= 1
# first compute power spectrum
powspec, log_energy = power_spectrum(input_sig, fs, nwin, shift, prefac)
# next group to critical bands
audio_spectrum = audspec(powspec, fs)[0]
nbands = audio_spectrum.shape[0]
if rasta:
# put in log domain
nl_aspectrum = numpy.log(audio_spectrum)
# next do rasta filtering
ras_nl_aspectrum = rasta_filt(nl_aspectrum)
# do inverse log
audio_spectrum = numpy.exp(ras_nl_aspectrum)
# do final auditory compressions
post_spectrum = postaud(audio_spectrum, fs / 2.)[0]
if plp_order > 0:
# LPC analysis
lpcas = dolpc(post_spectrum, plp_order)
# convert lpc to cepstra
cepstra = lpc2cep(lpcas, plp_order + 1)
# .. or to spectra
spectra, F, M = lpc2spec(lpcas, nbands)
else:
# No LPC smoothing of spectrum
spectra = post_spectrum
cepstra = spec2cep(spectra)
cepstra = lifter(cepstra, 0.6)
lst = list()
lst.append(cepstra)
lst.append(log_energy)
if get_spec:
lst.append(powspec)
else:
lst.append(None)
del powspec
if get_mspec:
lst.append(post_spectrum)
else:
lst.append(None)
del post_spectrum
return lst
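# Illustrative sketch: RASTA-PLP features from one second of random noise. The returned
# list mirrors mfcc(): [cepstra, log_energy, spectrum_or_None, auditory_spectrum_or_None].
# rasta_filt() and the hz2bark/bark2hz helpers are assumed to be defined in this module;
# the input values are examples only.
def _example_plp():
    sig = numpy.random.randn(16000).astype(numpy.float32)
    cepstra, log_energy, _, _ = plp(sig, fs=16000, plp_order=13, rasta=True)
    return cepstra.shape, log_energy.shape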
def framing(sig, win_size, win_shift=1, context=(0, 0), pad='zeros'):
"""
:param sig: input signal, can be mono or multi dimensional
    :param win_size: size of the window in terms of samples
    :param win_shift: shift of the sliding window in terms of samples
:param context: tuple of left and right context
:param pad: can be zeros or edge
"""
dsize = sig.dtype.itemsize
if sig.ndim == 1:
sig = sig[:, numpy.newaxis]
# Manage padding
c = (context, ) + (sig.ndim - 1) * ((0, 0), )
_win_size = win_size + sum(context)
shape = (int((sig.shape[0] - win_size) / win_shift) + 1, 1, _win_size, sig.shape[1])
strides = tuple(map(lambda x: x * dsize, [win_shift * sig.shape[1], 1, sig.shape[1], 1]))
if pad == 'zeros':
return numpy.lib.stride_tricks.as_strided(numpy.lib.pad(sig, c, 'constant', constant_values=(0,)),
shape=shape,
strides=strides).squeeze()
elif pad == 'edge':
return numpy.lib.stride_tricks.as_strided(numpy.lib.pad(sig, c, 'edge'),
shape=shape,
strides=strides).squeeze()
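# Illustrative sketch: slice a 1-D signal into overlapping frames of 200 samples with an
# 80-sample hop, which is exactly how power_spectrum() above frames 25 ms / 10 ms windows
# at 8 kHz.
def _example_framing():
    sig = numpy.arange(1000, dtype=numpy.float32)
    frames = framing(sig, win_size=200, win_shift=80)
    return frames.shape  # (11, 200); frame i starts at sample 80 * i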
def dct_basis(nbasis, length):
"""
    :param nbasis: number of DCT coefficients to keep
:param length: length of the matrix to process
:return: a basis of DCT coefficients
"""
return scipy.fftpack.idct(numpy.eye(nbasis, length), norm='ortho')
def levinson(r, order=None, allow_singularity=False):
r"""Levinson-Durbin recursion.
Find the coefficients of a length(r)-1 order autoregressive linear process
:param r: autocorrelation sequence of length N + 1 (first element being the zero-lag autocorrelation)
:param order: requested order of the autoregressive coefficients. default is N.
:param allow_singularity: false by default. Other implementations may be True (e.g., octave)
:return:
* the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)`
* the prediction errors
* the `N` reflections coefficients values
This algorithm solves the set of complex linear simultaneous equations
using Levinson algorithm.
.. math::
\bold{T}_M \left( \begin{array}{c} 1 \\ \bold{a}_M \end{array} \right) =
\left( \begin{array}{c} \rho_M \\ \bold{0}_M \end{array} \right)
where :math:`\bold{T}_M` is a Hermitian Toeplitz matrix with elements
:math:`T_0, T_1, \dots ,T_M`.
.. note:: Solving this equations by Gaussian elimination would
require :math:`M^3` operations whereas the levinson algorithm
requires :math:`M^2+M` additions and :math:`M^2+M` multiplications.
This is equivalent to solve the following symmetric Toeplitz system of
linear equations
.. math::
\left( \begin{array}{cccc}
r_1 & r_2^* & \dots & r_{n}^*\\
r_2 & r_1^* & \dots & r_{n-1}^*\\
\dots & \dots & \dots & \dots\\
r_n & \dots & r_2 & r_1 \end{array} \right)
\left( \begin{array}{cccc}
a_2\\
a_3 \\
\dots \\
a_{N+1} \end{array} \right)
=
\left( \begin{array}{cccc}
-r_2\\
-r_3 \\
\dots \\
-r_{N+1} \end{array} \right)
where :math:`r = (r_1 ... r_{N+1})` is the input autocorrelation vector, and
:math:`r_i^*` denotes the complex conjugate of :math:`r_i`. The input r is typically
a vector of autocorrelation coefficients where lag 0 is the first
element :math:`r_1`.
.. doctest::
>>> import numpy; from spectrum import LEVINSON
>>> T = numpy.array([3., -2+0.5j, .7-1j])
>>> a, e, k = LEVINSON(T)
"""
#from numpy import isrealobj
T0 = numpy.real(r[0])
T = r[1:]
M = len(T)
if order is None:
M = len(T)
else:
assert order <= M, 'order must be less than size of the input data'
M = order
realdata = numpy.isrealobj(r)
if realdata is True:
A = numpy.zeros(M, dtype=float)
ref = numpy.zeros(M, dtype=float)
else:
A = numpy.zeros(M, dtype=complex)
ref = numpy.zeros(M, dtype=complex)
P = T0
for k in range(M):
save = T[k]
if k == 0:
temp = -save / P
else:
#save += sum([A[j]*T[k-j-1] for j in range(0,k)])
for j in range(0, k):
save = save + A[j] * T[k-j-1]
temp = -save / P
if realdata:
P = P * (1. - temp**2.)
else:
P = P * (1. - (temp.real**2+temp.imag**2))
if (P <= 0).any() and allow_singularity==False:
raise ValueError("singular matrix")
A[k] = temp
ref[k] = temp # save reflection coeff at each step
if k == 0:
continue
khalf = (k+1)//2
if realdata is True:
for j in range(0, khalf):
kj = k-j-1
save = A[j]
A[j] = save + temp * A[kj]
if j != kj:
A[kj] += temp*save
else:
for j in range(0, khalf):
kj = k-j-1
save = A[j]
A[j] = save + temp * A[kj].conjugate()
if j != kj:
A[kj] = A[kj] + temp * save.conjugate()
return A, P, ref
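# Illustrative sketch: recover the coefficients of the AR(2) process
# x[n] = 1.3 x[n-1] - 0.4 x[n-2] + e[n] from its sample autocorrelation; the estimate
# returned by levinson() approaches [-1.3, 0.4] as the amount of data grows.
def _example_levinson():
    rng = numpy.random.RandomState(0)
    n_samples = 10000
    x = numpy.zeros(n_samples)
    e = rng.randn(n_samples)
    for n in range(2, n_samples):
        x[n] = 1.3 * x[n - 1] - 0.4 * x[n - 2] + e[n]
    r = numpy.array([numpy.dot(x[:n_samples - k], x[k:]) / n_samples for k in range(3)])
    a, prediction_error, reflection = levinson(r, order=2)
    return a  # approximately [-1.3, 0.4]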
def sum_log_probabilities(lp):
"""Sum log probabilities in a secure manner to avoid extreme values
:param lp: numpy array of log-probabilities to sum
"""
pp_max = numpy.max(lp, axis=1)
log_lk = pp_max + numpy.log(numpy.sum(numpy.exp((lp.transpose() - pp_max).T), axis=1))
ind = ~numpy.isfinite(pp_max)
if sum(ind) != 0:
log_lk[ind] = pp_max[ind]
pp = numpy.exp((lp.transpose() - log_lk).transpose())
llk = log_lk.sum()
return pp, llk
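# Illustrative sketch: posterior responsibilities and total log-likelihood for two frames
# scored against three Gaussian components, starting from raw log-probabilities.
def _example_sum_log_probabilities():
    lp = numpy.log(numpy.array([[0.1, 0.3, 0.6],
                                [0.2, 0.2, 0.6]]))
    pp, llk = sum_log_probabilities(lp)
    return pp, llk  # each row of pp sums to 1; llk is the summed per-frame log-sum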
class Mixture(object):
"""
A class for Gaussian Mixture Model storage.
For more details about Gaussian Mixture Models (GMM) you can refer to
[Bimbot04]_.
:attr w: array of weight parameters
:attr mu: ndarray of mean parameters, each line is one distribution
:attr invcov: ndarray of inverse co-variance parameters, 2-dimensional
for diagonal co-variance distribution 3-dimensional for full co-variance
:attr invchol: 3-dimensional ndarray containing upper cholesky
decomposition of the inverse co-variance matrices
:attr cst: array of constant computed for each distribution
:attr det: array of determinant for each distribution
"""
@staticmethod
def read_alize(file_name):
"""
:param file_name:
:return:
"""
"""Read a Mixture in alize raw format
:param mixtureFileName: name of the file to read from
"""
mixture = Mixture()
with open(file_name, 'rb') as f:
distrib_nb = struct.unpack("I", f.read(4))[0]
vect_size = struct.unpack("<I", f.read(4))[0]
# resize all attributes
mixture.w = numpy.zeros(distrib_nb, "d")
mixture.invcov = numpy.zeros((distrib_nb, vect_size), "d")
mixture.mu = numpy.zeros((distrib_nb, vect_size), "d")
mixture.cst = numpy.zeros(distrib_nb, "d")
mixture.det = numpy.zeros(distrib_nb, "d")
for d in range(distrib_nb):
mixture.w[d] = struct.unpack("<d", f.read(8))[0]
for d in range(distrib_nb):
mixture.cst[d] = struct.unpack("d", f.read(8))[0]
mixture.det[d] = struct.unpack("d", f.read(8))[0]
f.read(1)
for c in range(vect_size):
mixture.invcov[d, c] = struct.unpack("d", f.read(8))[0]
for c in range(vect_size):
mixture.mu[d, c] = struct.unpack("d", f.read(8))[0]
mixture._compute_all()
return mixture
@staticmethod
def read_htk(filename, begin_hmm=False, state2=False):
"""Read a Mixture in HTK format
:param filename: name of the file to read from
:param begin_hmm: boolean
:param state2: boolean
"""
mixture = Mixture()
with open(filename, 'rb') as f:
lines = [line.rstrip() for line in f]
distrib = 0
vect_size = 0
for i in range(len(lines)):
if lines[i] == '':
break
w = lines[i].split()
if w[0] == '<NUMMIXES>':
distrib_nb = int(w[1])
mixture.w.resize(distrib_nb)
mixture.cst.resize(distrib_nb)
mixture.det.resize(distrib_nb)
if w[0] == '<BEGINHMM>':
begin_hmm = True
if w[0] == '<STATE>':
state2 = True
if begin_hmm & state2:
if w[0].upper() == '<MIXTURE>':
distrib = int(w[1]) - 1
mixture.w[distrib] = numpy.double(w[2])
elif w[0].upper() == '<MEAN>':
if vect_size == 0:
vect_size = int(w[1])
mixture.mu.resize(distrib_nb, vect_size)
i += 1
mixture.mu[distrib, :] = numpy.double(lines[i].split())
elif w[0].upper() == '<VARIANCE>':
if mixture.invcov.shape[0] == 0:
vect_size = int(w[1])
mixture.invcov.resize(distrib_nb, vect_size)
i += 1
C = numpy.double(lines[i].split())
mixture.invcov[distrib, :] = 1 / C
elif w[0].upper() == '<INVCOVAR>':
raise Exception("we don't manage full covariance model")
elif w[0].upper() == '<GCONST>':
mixture.cst[distrib] = numpy.exp(-.05 * numpy.double(w[1]))
mixture._compute_all()
return mixture
def __init__(self,
mixture_file_name='',
name='empty'):
"""Initialize a Mixture from a file or as an empty Mixture.
:param mixture_file_name: name of the file to read from, if empty, initialize
an empty mixture
"""
self.w = numpy.array([])
self.mu = numpy.array([])
self.invcov = numpy.array([])
self.invchol = numpy.array([])
self.cov_var_ctl = numpy.array([])
self.cst = numpy.array([])
self.det = numpy.array([])
self.name = name
self.A = 0
if mixture_file_name != '':
self.read(mixture_file_name)
def __add__(self, other):
"""Overide the sum for a mixture.
Weight, means and inv_covariances are added, det and cst are
set to 0
"""
new_mixture = Mixture()
new_mixture.w = self.w + other.w
new_mixture.mu = self.mu + other.mu
new_mixture.invcov = self.invcov + other.invcov
return new_mixture
def init_from_diag(self, diag_mixture):
"""
:param diag_mixture:
"""
distrib_nb = diag_mixture.w.shape[0]
dim = diag_mixture.mu.shape[1]
self.w = diag_mixture.w
self.cst = diag_mixture.cst
self.det = diag_mixture.det
self.mu = diag_mixture.mu
self.invcov = numpy.empty((distrib_nb, dim, dim))
self.invchol = numpy.empty((distrib_nb, dim, dim))
for gg in range(distrib_nb):
self.invcov[gg] = numpy.diag(diag_mixture.invcov[gg, :])
self.invchol[gg] = numpy.linalg.cholesky(self.invcov[gg])
self.cov_var_ctl = numpy.diag(diag_mixture.cov_var_ctl)
self.name = diag_mixture.name
self.A = numpy.zeros(self.cst.shape) # we keep zero here as it is not used for full covariance distributions
def _serialize(self):
"""
        Serialization is necessary to share memory between multiple processes
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
sh = self.w.shape
tmp = multiprocessing.Array(ctypes.c_double, self.w.size)
self.w = numpy.ctypeslib.as_array(tmp.get_obj())
self.w = self.w.reshape(sh)
sh = self.mu.shape
tmp = multiprocessing.Array(ctypes.c_double, self.mu.size)
self.mu = numpy.ctypeslib.as_array(tmp.get_obj())
self.mu = self.mu.reshape(sh)
sh = self.invcov.shape
tmp = multiprocessing.Array(ctypes.c_double, self.invcov.size)
self.invcov = numpy.ctypeslib.as_array(tmp.get_obj())
self.invcov = self.invcov.reshape(sh)
sh = self.cov_var_ctl.shape
tmp = multiprocessing.Array(ctypes.c_double, self.cov_var_ctl.size)
self.cov_var_ctl = numpy.ctypeslib.as_array(tmp.get_obj())
self.cov_var_ctl = self.cov_var_ctl.reshape(sh)
sh = self.cst.shape
tmp = multiprocessing.Array(ctypes.c_double, self.cst.size)
self.cst = numpy.ctypeslib.as_array(tmp.get_obj())
self.cst = self.cst.reshape(sh)
sh = self.det.shape
tmp = multiprocessing.Array(ctypes.c_double, self.det.size)
self.det = numpy.ctypeslib.as_array(tmp.get_obj())
self.det = self.det.reshape(sh)
def get_distrib_nb(self):
"""
Return the number of Gaussian distributions in the mixture
:return: then number of distributions
"""
return self.w.shape[0]
def read(self, mixture_file_name, prefix=''):
"""Read a Mixture in hdf5 format
:param mixture_file_name: name of the file to read from
:param prefix:
"""
with h5py.File(mixture_file_name, 'r') as f:
self.w = f.get(prefix+'w').value
self.w.resize(numpy.max(self.w.shape))
self.mu = f.get(prefix+'mu').value
self.invcov = f.get(prefix+'invcov').value
self.invchol = f.get(prefix+'invchol').value
self.cov_var_ctl = f.get(prefix+'cov_var_ctl').value
self.cst = f.get(prefix+'cst').value
self.det = f.get(prefix+'det').value
self.A = f.get(prefix+'a').value
def write_alize(self, mixture_file_name):
"""Save a mixture in alize raw format
:param mixture_file_name: name of the file to write in
"""
with open(mixture_file_name, 'wb') as of:
# write the number of distributions per state
of.write(struct.pack("<I", self.distrib_nb()))
# Write the dimension of the features
of.write(struct.pack("<I", self.dim()))
# Weights
of.write(struct.pack("<" + "d" * self.w.shape[0], *self.w))
# For each distribution
for d in range(self.distrib_nb()):
# Write the constant
of.write(struct.pack("<d", self.cst[d]))
# Write the determinant
of.write(struct.pack("<d", self.det[d]))
# write a meaningless char for compatibility purpose
of.write(struct.pack("<c", bytes(1)))
# Covariance
of.write(
struct.pack("<" + "d" * self.dim(), *self.invcov[d, :]))
# Means
of.write(struct.pack("<" + "d" * self.dim(), *self.mu[d, :]))
def write(self, mixture_file_name, prefix='', mode='w'):
"""Save a Mixture in hdf5 format
:param mixture_file_name: the name of the file to write in
:param prefix: prefix of the group in the HDF5 file
:param mode: mode of the opening, default is "w"
"""
f = h5py.File(mixture_file_name, mode)
f.create_dataset(prefix+'w', self.w.shape, "d", self.w,
compression="gzip",
fletcher32=True)
f.create_dataset(prefix+'mu', self.mu.shape, "d", self.mu,
compression="gzip",
fletcher32=True)
f.create_dataset(prefix+'invcov', self.invcov.shape, "d", self.invcov,
compression="gzip",
fletcher32=True)
f.create_dataset(prefix+'invchol', self.invchol.shape, "d", self.invchol,
compression="gzip",
fletcher32=True)
f.create_dataset(prefix+'cov_var_ctl', self.cov_var_ctl.shape, "d",
self.cov_var_ctl,
compression="gzip",
fletcher32=True)
f.create_dataset(prefix+'cst', self.cst.shape, "d", self.cst,
compression="gzip",
fletcher32=True)
f.create_dataset(prefix+'det', self.det.shape, "d", self.det,
compression="gzip",
fletcher32=True)
f.create_dataset(prefix+'a', self.A.shape, "d", self.A,
compression="gzip",
fletcher32=True)
f.close()
def distrib_nb(self):
"""Return the number of distribution of the Mixture
:return: the number of distribution in the Mixture
"""
return self.w.shape[0]
def dim(self):
"""Return the dimension of distributions of the Mixture
:return: an integer, size of the acoustic vectors
"""
return self.mu.shape[1]
def sv_size(self):
"""Return the dimension of the super-vector
:return: an integer, size of the mean super-vector
"""
return self.mu.shape[1] * self.w.shape[0]
def _compute_all(self):
"""Compute determinant and constant values for each distribution"""
if self.invcov.ndim == 2: # for Diagonal covariance only
self.det = 1.0 / numpy.prod(self.invcov, axis=1)
elif self.invcov.ndim == 3: # For full covariance dstributions
for gg in range(self.mu.shape[0]):
self.det[gg] = 1./numpy.linalg.det(self.invcov[gg])
self.invchol[gg] = numpy.linalg.cholesky(self.invcov[gg])
self.cst = 1.0 / (numpy.sqrt(self.det) * (2.0 * numpy.pi) ** (self.dim() / 2.0))
if self.invcov.ndim == 2:
            self.A = (numpy.square(self.mu) * self.invcov).sum(1)
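# Illustrative sketch (values are assumptions for the example): build a tiny
# two-component diagonal GMM by hand and derive its per-distribution constants.
def _example_mixture():
    gmm = Mixture(name='toy')
    gmm.w = numpy.array([0.4, 0.6])
    gmm.mu = numpy.array([[0.0, 0.0], [1.0, 1.0]])
    gmm.invcov = numpy.ones((2, 2))  # two distributions, unit variances (diagonal)
    gmm.cst = numpy.zeros(2)
    gmm.det = numpy.zeros(2)
    gmm._compute_all()
    return gmm.distrib_nb(), gmm.dim(), gmm.sv_size()  # 2, 2, 4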
from __future__ import print_function
import os, json, random, sys, math, torch, copy
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
from sklearn.metrics import average_precision_score, accuracy_score, confusion_matrix
from datasets import get_dataloader, get_num_classes, get_class_names
from models import get_model
from base_trainer import BaseTrainer
from functools import partial
from opts import get_arguments
from core.config import cfg, cfg_from_file, cfg_from_list
from datasets.utils import Colorize
from losses import get_criterion, mask_loss_ce
from utils.timer import Timer
from utils.stat_manager import StatManager
from torchvision.utils import save_image as sv
from torchvision import transforms
import matplotlib.cm as mpl_color_map
from PIL import Image, ImagePalette
# specific to pytorch-v1 cuda-9.0
# see: https://github.com/pytorch/pytorch/issues/15054#issuecomment-450191923
# and: https://github.com/pytorch/pytorch/issues/14456
torch.backends.cudnn.benchmark = True
#torch.backends.cudnn.deterministic = True
DEBUG = False
def rescale_as(x, y, mode="bilinear", align_corners=True):
h, w = y.size()[2:]
x = F.interpolate(x, size=[h, w], mode=mode, align_corners=align_corners)
return x
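# Illustrative sketch: upsample a coarse 4-channel mask to the spatial size of an input
# batch; the tensor shapes are assumptions made only for the example.
def _example_rescale_as():
    masks = torch.randn(2, 4, 56, 56)
    images = torch.randn(2, 3, 448, 448)
    return rescale_as(masks, images).shape  # torch.Size([2, 4, 448, 448])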
class DecTrainer(BaseTrainer):
def __init__(self, args, **kwargs):
super(DecTrainer, self).__init__(args, **kwargs)
# dataloader
self.trainloader = get_dataloader(args, cfg, 'train')
# self.trainloader_val = get_dataloader(args, cfg, 'train_voc')
self.valloader = get_dataloader(args, cfg, 'val')
self.denorm = self.trainloader.dataset.denorm
self.use_triplet = args.use_triplet
self.loss_3d = args.loss_3d
self.normalize_feature = args.normalize_feature
self.nclass = get_num_classes(args)
self.classNames = get_class_names(args)
assert self.nclass == len(self.classNames) - 1
self.classIndex = {}
for i, cname in enumerate(self.classNames):
self.classIndex[cname] = i
# model
self.enc = get_model(cfg.NET, num_classes=self.nclass)
self.criterion_cls = get_criterion(cfg.NET.LOSS)
# optimizer using different LR
enc_params = self.enc.parameter_groups(cfg.NET.LR, cfg.NET.WEIGHT_DECAY)
self.optim_enc = self.get_optim(enc_params, cfg.NET)
# checkpoint management
self._define_checkpoint('enc', self.enc, self.optim_enc)
self._load_checkpoint(args.resume)
self.fixed_batch = None
self.fixed_batch_path = args.fixed_batch_path
if os.path.isfile(self.fixed_batch_path):
print("Loading fixed batch from {}".format(self.fixed_batch_path))
self.fixed_batch = torch.load(self.fixed_batch_path)
# using cuda
if cfg.NUM_GPUS != 0:
self.enc = nn.DataParallel(self.enc)
self.criterion_cls = nn.DataParallel(self.criterion_cls)
self.enc = self.enc.cuda()
self.criterion_cls = self.criterion_cls.cuda()
# CHANGE: visual
self.visual_times = 0
self.dataset = args.dataset.lower()
def step(self, epoch, image, gt_labels, train=False, visualise=False, save_image=False, info=None):
PRETRAIN = epoch < (11 if DEBUG else cfg.TRAIN.PRETRAIN)
if self.dataset == "wikiscenes_corr":
corr = image["corr"]
image = image["image"]
# denorm image
image_raw = self.denorm(image.clone())
# classification
cls_out, cls_fg, masks, mask_logits, pseudo_gt, loss_mask = self.enc(image, image_raw, gt_labels)
# classification loss
loss_cls = self.criterion_cls(cls_out, gt_labels).mean()
# keep track of all losses for logging
losses = {"loss_cls": loss_cls.item()}
losses["loss_fg"] = cls_fg.mean().item()
loss_3d = 0
mean_3d_loss = 0
kp_number = 0
norm = 0
if self.dataset == "wikiscenes_corr" and train:
# compute 3D consistency loss
feature = masks["feature"]
# here we normalize the out_feature to make sure it doesn't optimize by scaling the feature vector
if self.normalize_feature:
feature = feature / feature.norm(dim=1, keepdim=True)
b, c, h, w = feature.shape
feature = feature.reshape(2, b // 2, c, h, w)
assert h == w, "not square"
for i in range(b // 2):
if not corr[i]:
continue
# k * 4
coord = torch.tensor(corr[i])
if cfg.NUM_GPUS != 0:
coord = coord.cuda()
# reshape, permute to allow grid_sample, [N, Hout, Wout, 2]
coord = coord.reshape(1, -1, 2, 2).permute(2, 0, 1, 3).contiguous()
if self.use_triplet:
# add a fake kp
coord = torch.cat([coord, coord[1:2]])
# add a random shift in [0.25, 0.75], fmod it
# so the selected fake keypoint is far enough from the correct corresponding one
coord[2,0,:,:] += (torch.randn(coord.shape[2], 2) / 2 - 0.25).cuda()
coord = coord.fmod(1.0)
# modify feature so that it has the same batch size as coord
modified_feature = torch.cat([feature, feature[1:]])
else:
modified_feature = feature
# change range to [-1, 1]
coord = coord * 2 - 1
keypoints = F.grid_sample(modified_feature[:,i,:,:], coord)
loss_func = nn.MSELoss()
with torch.no_grad():
mean_3d_loss += loss_func(feature[:,0,:,:], feature[:,1,:,:])
norm += torch.norm(feature).item()
if self.use_triplet:
distance_p = (keypoints[0] - keypoints[1]).norm(dim=0)
distance_n = (keypoints[0] - keypoints[2]).norm(dim=0)
loss_3d += nn.ReLU()(args.triplet_margin + distance_p - distance_n).mean()
else:
loss_3d += loss_func(keypoints[0], keypoints[1])
kp_number += coord.shape[2]
losses["loss_3d"] = loss_3d
losses["mean_loss_3d"] = mean_3d_loss
losses["feature_norm"] = norm
losses["kp number"] = kp_number
loss = loss_cls.clone()
if "dec" in masks:
loss_mask = loss_mask.mean()
# (CHANGE: disable mask loss, only cls loss)
if not PRETRAIN:
loss += cfg.NET.MASK_LOSS_BCE * loss_mask
assert not "pseudo" in masks
masks["pseudo"] = pseudo_gt
losses["loss_mask"] = loss_mask.item()
# add 3d consistency loss
if self.dataset == "wikiscenes_corr" and train:
loss += losses["loss_3d"] * (self.loss_3d / cfg.TRAIN.BATCH_SIZE)
losses["loss"] = loss.item()
if train:
self.optim_enc.zero_grad()
loss.backward()
self.optim_enc.step()
for mask_key, mask_val in masks.items():
masks[mask_key] = masks[mask_key].detach()
mask_logits = mask_logits.detach()
if visualise:
self._visualise(epoch, image, masks, mask_logits, cls_out, gt_labels, save_image, info)
# make sure to cut the return values from graph
return losses, cls_out.detach(), masks, mask_logits
def train_epoch(self, epoch):
self.enc.train()
stat = StatManager()
stat.add_val("loss")
stat.add_val("loss_cls")
stat.add_val("loss_fg")
stat.add_val("loss_bce")
# adding stats for classes
timer = Timer("New Epoch: ")
# train_step = partial(self.step, train=True, visualise=False)
# CHANGE: get result, not train
train_step = partial(self.step, train=True, visualise=False)
preds_all = list()
targets_all = list()
for i, (image, gt_labels, _) in enumerate(self.trainloader):
if self.dataset == "wikiscenes_corr":
corr = image['corr']
                for j in range(len(corr)):
                    corr[j] = json.loads(corr[j])
image = torch.cat([image['1'], image['2']], 0)
image_corr = {"image": image, "corr": corr}
gt_labels = torch.cat(gt_labels, 0)
losses, cls_out, _, _ = train_step(epoch, image_corr, gt_labels)
else:
losses, cls_out, _, _ = train_step(epoch, image, gt_labels)
cls_sigmoid = torch.sigmoid(cls_out.cpu()).numpy()
preds_all.append(cls_sigmoid)
targets_all.append(gt_labels.cpu().numpy())
if self.fixed_batch is None or "points" not in self.fixed_batch:
self.fixed_batch = {}
self.fixed_batch["image"] = image.clone()
self.fixed_batch["labels"] = gt_labels.clone()
random_points = list()
                for _ in range(cfg.TRAIN.BATCH_SIZE):
                    # 3 points per image in a batch
                    random_points.append([{"rx": random.random(), "ry": random.random()} for _ in range(3)])
self.fixed_batch["points"] = random_points
torch.save(self.fixed_batch, self.fixed_batch_path)
for loss_key, loss_val in losses.items():
stat.update_stats(loss_key, loss_val)
# intermediate logging
if i % 10 == 0:
msg = "Loss [{:04d}]: ".format(i)
for loss_key, loss_val in losses.items():
msg += "{}: {:.4f} | ".format(loss_key, loss_val)
msg += " | Im/Sec: {:.1f}".format(i * cfg.TRAIN.BATCH_SIZE / timer.get_stage_elapsed())
print(msg)
sys.stdout.flush()
del image, gt_labels
if DEBUG and i > 0:
break
def publish_loss(stats, name, t, prefix='data/'):
print("{}: {:4.3f}".format(name, stats.summarize_key(name)))
self.writer.add_scalar(prefix + name, stats.summarize_key(name), t)
for stat_key in stat.vals.keys():
publish_loss(stat, stat_key, epoch)
# plotting learning rate
for ii, l in enumerate(self.optim_enc.param_groups):
print("Learning rate [{}]: {:4.3e}".format(ii, l['lr']))
self.writer.add_scalar('lr/enc_group_%02d' % ii, l['lr'], epoch)
def apply_colormap_on_image(org_im, activation, colormap_name):
"""
Apply heatmap on image
Args:
                org_im (tensor): Original image (C x H x W, values in [0, 1])
                activation (numpy arr): Activation map (grayscale), values in [0, 1]
colormap_name (str): Name of the colormap
"""
# Get colormap
color_map = mpl_color_map.get_cmap(colormap_name)
no_trans_heatmap = color_map(activation)
# Change alpha channel in colormap to make sure original image is displayed
heatmap = copy.copy(no_trans_heatmap)
heatmap[:, :, 3] = 0.4
heatmap = Image.fromarray((heatmap*255).astype(np.uint8)).resize(org_im.shape[1:], Image.ANTIALIAS)
org_im = transforms.ToPILImage()(org_im).convert("RGBA")
no_trans_heatmap = Image.fromarray((no_trans_heatmap*255).astype(np.uint8))
            # Apply heatmap on image
heatmap_on_image = Image.new("RGBA", org_im.size)
heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_im)
heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)
return heatmap_on_image
# self.writer.add_scalar('lr/bg_baseline', self.enc.module.mean.item(), epoch)
with torch.no_grad():
# the second parameter is not used
image_raw = self.denorm(self.fixed_batch["image"].clone())
self.enc.eval()
_, _, masks, _, _, _ = self.enc(self.fixed_batch["image"], image_raw, self.fixed_batch["labels"])
feature = masks["feature"].cpu()
_, _, w, h = feature.shape
colormaps = list()
for i in range(cfg.TRAIN.BATCH_SIZE):
raw = [image_raw[k] for k in [i, i+cfg.TRAIN.BATCH_SIZE]]
for j in range(3):
x = int(self.fixed_batch["points"][i][j]["rx"] * cfg.DATASET.CROP_SIZE)
y = int(self.fixed_batch["points"][i][j]["ry"] * cfg.DATASET.CROP_SIZE)
fx = int(self.fixed_batch["points"][i][j]["rx"] * w)
fy = int(self.fixed_batch["points"][i][j]["ry"] * h)
selected_feature = feature[i][:,fy, fx]
heat = [torch.norm((feature[k] - selected_feature[:,None,None]), dim=0) for k in [i, i+cfg.TRAIN.BATCH_SIZE]]
# normalize separately
min_ = torch.min(heat[0].min(), heat[1].min())
range_ = torch.max(heat[0].max(), heat[1].max()) - min_
heat = [(heat[k] - min_) / range_ for k in [0,1]]
# [0,1] -> [0,1], 0 -> 1, 1 -> 0
heat = [1 - ((heat[k] * 2 - 1) * (heat[k] * 2 - 1) * (heat[k] * 2 - 1) / 2 + 0.5) for k in [0,1]]
# put color
colormap = [apply_colormap_on_image(raw[k], heat[k], 'jet') for k in [0,1]]
# draw a cross
green = (0, 255, 0)
mark_size = 5
for k in range(x - mark_size, x + 1 + mark_size):
if 0 <= k < colormap[0].size[0]:
colormap[0].putpixel((k, y), green)
for k in range(y - mark_size, y + 1 + mark_size):
if 0 <= k < colormap[0].size[1]:
colormap[0].putpixel((x, k), green)
colormap = [transforms.ToTensor()(colormap[k]) for k in [0,1]]
colormaps.append(colormap)
self.write_image(colormaps, epoch)
self.count_acc(targets_all, preds_all, self.writer, epoch)
# visualising
# self.enc.eval()
# with torch.no_grad():
# self.step(epoch, self.fixed_batch["image"], \
# self.fixed_batch["labels"], \
# train=False, visualise=True)
def _mask_rgb(self, masks, image_norm):
# visualising masks
masks_conf, masks_idx = torch.max(masks, 1)
masks_conf = masks_conf - F.relu(masks_conf - 1, 0)
masks_idx_rgb = self._apply_cmap(masks_idx.cpu(), masks_conf.cpu())
return 0.3 * image_norm + 0.7 * masks_idx_rgb
def _init_norm(self):
self.trainloader.dataset.set_norm(self.enc.normalize)
self.valloader.dataset.set_norm(self.enc.normalize)
# self.trainloader_val.dataset.set_norm(self.enc.normalize)
def _apply_cmap(self, mask_idx, mask_conf):
palette = self.trainloader.dataset.get_palette()
masks = []
col = Colorize()
mask_conf = mask_conf.float() / 255.0
for mask, conf in zip(mask_idx.split(1), mask_conf.split(1)):
m = col(mask).float()
m = m * conf
masks.append(m[None, ...])
return torch.cat(masks, 0)
def validation(self, epoch, writer, loader, checkpoint=False):
stat = StatManager()
# Fast test during the training
def eval_batch(image, gt_labels, info):
# do not save the images to save time
losses, cls, masks, mask_logits = \
self.step(epoch, image, gt_labels, train=False, visualise=False, save_image=True, info=info)
for loss_key, loss_val in losses.items():
stat.update_stats(loss_key, loss_val)
return cls.cpu(), masks, mask_logits.cpu()
self.enc.eval()
# class ground truth
targets_all = []
# class predictions
preds_all = []
def add_stats(means, stds, x):
means.append(x.mean())
stds.append(x.std())
for n, (image, gt_labels, info) in enumerate(loader):
if self.dataset == "wikiscenes_corr":
# info = info[0] + info[1]
info = info[0]
corr = image['corr']
for i in range(len(corr)):
corr[i] = json.loads(corr[i])
# image = torch.cat([image['1'], image['2']], 0)
# not validate the random selected ones
image = image['1']
image_corr = {"image": image, "corr": corr}
# gt_labels = torch.cat(gt_labels, 0)
gt_labels = gt_labels[0]
with torch.no_grad():
cls_raw, masks_all, mask_logits = eval_batch(
image_corr if self.dataset == "wikiscenes_corr" else image,
gt_labels, info)
cls_sigmoid = torch.sigmoid(cls_raw).numpy()
preds_all.append(cls_sigmoid)
targets_all.append(gt_labels.cpu().numpy())
self.count_acc(targets_all, preds_all, writer, epoch)
# total classification loss
for stat_key in stat.vals.keys():
writer.add_scalar('all/{}'.format(stat_key), stat.summarize_key(stat_key), epoch)
if checkpoint and epoch >= cfg.TRAIN.PRETRAIN:
# we will use mAP - mask_loss as our proxy score
# to save the best checkpoint so far
proxy_score = 1 - stat.summarize_key("loss")
writer.add_scalar('all/checkpoint_score', proxy_score, epoch)
self.checkpoint_best(proxy_score, epoch)
def count_acc(self, targets_all, preds_all, writer, epoch):
#
# classification
#
targets_stacked = np.vstack(targets_all)
preds_stacked = np.vstack(preds_all)
aps = average_precision_score(targets_stacked, preds_stacked, average=None)
# y_true = targets_stacked.argmax(1)
# y_pred = preds_stacked.argmax(1)
# acc = accuracy_score(y_true, y_pred)
# per class accuracy
# give up accuracy since computationally intensive
# cm = confusion_matrix(y_true, y_pred)
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# cm = cm.diagonal()
# skip BG AP
offset = self.nclass - aps.size
assert offset == 1, 'Class number mismatch'
classNames = self.classNames[offset:-1]
for ni, className in enumerate(classNames):
writer.add_scalar('%02d_%s/AP' % (ni + offset, className), aps[ni], epoch)
# writer.add_scalar('%02d_%s/acc' % (ni + offset, className), cm[ni], epoch)
print("AP_{}: {:4.3f}".format(className, aps[ni]))
meanAP = | np.mean(aps) | numpy.mean |
# Copyright (c) 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import csv
import math
import logging
import random
import yaml
import paddle
import numpy as np
from PIL import Image
import pycocotools.coco as coco
from autoshape.utils.image import flip, color_aug
from autoshape.utils.image import get_affine_transform, affine_transform
from autoshape.utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from autoshape.utils.image import draw_dense_reg
from autoshape.cvlibs import manager
from autoshape.transforms import Compose
@manager.DATASETS.add_component
class Kitti_dataset(paddle.io.Dataset):
"""Parsing KITTI format dataset
Args:
Dataset (class):
"""
def __init__(self, opt, split):
super().__init__()
self.num_class = opt.num_class
self.num_keypoints = opt.num_keypoints
self.max_objs = opt.max_objs
self.data_dir = os.path.join(opt.data_dir, 'kitti')
self.img_dir = os.path.join(self.data_dir, 'images')
self.annot_path = os.path.join(self.data_dir, 'annotations', 'kitti_{}_{}.json').format(split, self.num_keypoints)
self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self.split = split
self.opt = opt
self.alpha_in_degree = False
print('==> initializing kitti{} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
image_ids = self.coco.getImgIds()
if 'train' in split:
self.images = []
for img_id in image_ids:
idxs = self.coco.getAnnIds(imgIds=[img_id])
if len(idxs) > 0:
self.images.append(img_id)
else:
self.images = image_ids
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def __len__(self):
return self.num_samples
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _convert_alpha(self, alpha):
return math.radians(alpha + 45) if self.alpha_in_degree else alpha
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = os.path.join(self.img_dir, file_name)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
img = cv2.imread(img_path)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
flipped = False
# if 'train' in self.split:
# if not self.opt.not_rand_crop:
# s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
# w_border = self._get_border(128, img.shape[1])
# h_border = self._get_border(128, img.shape[0])
# c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
# c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
# else:
# sf = self.opt.scale
# cf = self.opt.shift
# c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
# c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
# s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
trans_input = get_affine_transform(c, s, rot, [self.opt.input_w, self.opt.input_h])
inp = cv2.warpAffine(img, trans_input, (self.opt.input_w, self.opt.input_h), flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
# if 'train' in self.split and not self.opt.no_color_aug:
# color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
# print(inp.shape)
# cv2.imshow('t', inp)
# cv2.imshow('ori', img)
# cv2.waitKey()
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
num_keypoints = self.num_keypoints
trans_output = get_affine_transform(c, s, 0, [self.opt.output_w, self.opt.output_h])
trans_output_inv = get_affine_transform(c, s, 0, [self.opt.output_w, self.opt.output_h], inv=1)
# object class and position in heatmap
        hm = np.zeros((self.num_class, self.opt.output_h, self.opt.output_w), dtype=np.float32)
from functools import lru_cache
import numpy as np
from lie_learn.representations.SO3.irrep_bases import change_of_basis_matrix
from lie_learn.representations.SO3.pinchon_hoggan.pinchon_hoggan_dense import rot_mat, Jd
from lie_learn.representations.SO3.wigner_d import wigner_d_matrix, wigner_D_matrix
import lie_learn.spaces.S3 as S3
from lie_learn.representations.SO3.indexing import flat_ind_zp_so3, flat_ind_so3
from .FFTBase import FFTBase
from scipy.fftpack import fft2, ifft2, fftshift
# TODO:
# Write testing code for these FFTs
# Write fast code for the real, quantum-normalized, centered / block-diagonal bases.
# The real Wigner-d functions d^l_mn are identically 0 whenever either (m < 0 and n >= 0) or (m >= 0 and n < 0),
# so we can save work in the Wigner-d transform
class SO3_FT_Naive(FFTBase):
"""
The most naive implementation of the discrete SO(3) Fourier transform:
explicitly construct the Fourier matrix F and multiply by it to perform the Fourier transform.
We use the following convention:
Let D^l_mn(g) (the Wigner D function) be normalized so that it is unitary.
FFT(f)^l_mn = int_SO(3) f(g) \conj(D^l_mn(g)) dg
where dg is the normalized Haar measure on SO(3).
IFFT(\hat(f))(g) = \sum_{l=0}^L_max (2l + 1) \sum_{m=-l}^l \sum_{n=-l}^l \hat(f)^l_mn D^l_mn(g)
Under this convention, where (2l+1) appears in the IFFT, we have:
- The Fourier transform of D^l_mn is a one-hot vector where FFT(D^l_mn)^l_mn = 1 / (2l + 1),
because 1 / (2l + 1) is the squared norm of D^l_mn.
- The convolution theorem is
FFT(f * psi) = FFT(f) FFT(psi)^{*T},
i.e. the second argument is conjugate-transposed, and there is no normalization constant required.
"""
def __init__(self, L_max, field='complex', normalization='quantum', order='centered', condon_shortley='cs'):
super().__init__()
# TODO allow user to specify the grid (now using SOFT implicitly)
# Explicitly construct the Wigner-D matrices evaluated at each point in a grid in SO(3)
self.D = []
b = L_max + 1
for l in range(b):
self.D.append(np.zeros((2 * b, 2 * b, 2 * b, 2 * l + 1, 2 * l + 1),
dtype=complex if field == 'complex' else float))
for j1 in range(2 * b):
alpha = 2 * np.pi * j1 / (2. * b)
for k in range(2 * b):
beta = np.pi * (2 * k + 1) / (4. * b)
for j2 in range(2 * b):
gamma = 2 * np.pi * j2 / (2. * b)
self.D[-1][j1, k, j2, :, :] = wigner_D_matrix(l, alpha, beta, gamma,
field, normalization, order, condon_shortley)
# Compute quadrature weights
self.w = S3.quadrature_weights(b=b, grid_type='SOFT')
# Stack D into a single Fourier matrix
# The first axis corresponds to the spatial samples.
# The spatial grid has shape (2b, 2b, 2b), so this axis has length (2b)^3.
# The second axis of this matrix has length sum_{l=0}^L_max (2l+1)^2,
# which corresponds to all the spectral coefficients flattened into a vector.
# (normally these are stored as matrices D^l of shape (2l+1)x(2l+1))
self.F = np.hstack([self.D[l].reshape((2 * b) ** 3, (2 * l + 1) ** 2) for l in range(b)])
# For the IFFT / synthesis transform, we need to weight the order-l Fourier coefficients by (2l + 1)
# Here we precompute these coefficients.
ls = [[ls] * (2 * ls + 1) ** 2 for ls in range(b)]
ls = np.array([ll for sublist in ls for ll in sublist]) # (0,) + 9 * (1,) + 25 * (2,), ...
self.l_weights = 2 * ls + 1
def analyze(self, f):
f_hat = []
for l in range(f.shape[0] // 2):
f_hat.append(np.einsum('ijkmn,ijk->mn', self.D[l], f * self.w[None, :, None]))
return f_hat
def analyze_by_matmul(self, f):
f = f * self.w[None, :, None]
f = f.flatten()
return self.F.T.conj().dot(f)
def synthesize(self, f_hat):
b = len(self.D)
f = np.zeros((2 * b, 2 * b, 2 * b), dtype=self.D[0].dtype)
for l in range(b):
f += np.einsum('ijkmn,mn->ijk', self.D[l], f_hat[l] * (2 * l + 1))
return f
def synthesize_by_matmul(self, f_hat):
return self.F.dot(f_hat * self.l_weights)
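# Illustrative sketch: round-trip a random band-limited spectrum through the naive SO(3)
# transform. Under the convention documented above, synthesize() followed by analyze()
# should reproduce the input coefficients up to numerical error.
def _example_so3_ft_naive_roundtrip():
    L_max = 2
    so3_ft = SO3_FT_Naive(L_max)
    f_hat = [np.random.randn(2 * l + 1, 2 * l + 1) for l in range(L_max + 1)]
    f = so3_ft.synthesize(f_hat)
    f_hat_rec = so3_ft.analyze(f)
    return [np.allclose(a, b) for a, b in zip(f_hat, f_hat_rec)]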
class SO3_FFT_SemiNaive_Complex(FFTBase):
def __init__(self, L_max, d=None, w=None, L2_normalized=True,
field='complex', normalization='quantum', order='centered', condon_shortley='cs'):
super().__init__()
if d is None:
self.d = setup_d_transform(
b=L_max + 1, L2_normalized=L2_normalized,
field=field, normalization=normalization,
order=order, condon_shortley=condon_shortley)
else:
self.d = d
if w is None:
self.w = S3.quadrature_weights(b=L_max + 1)
else:
self.w = w
self.wd = weigh_wigner_d(self.d, self.w)
def analyze(self, f):
return SO3_FFT_analyze(f) # , self.wd)
def synthesize(self, f_hat):
"""
Perform the inverse (spectral to spatial) SO(3) Fourier transform.
:param f_hat: a list of matrices of with shapes [1x1, 3x3, 5x5, ..., 2 L_max + 1 x 2 L_max + 1]
"""
return SO3_FFT_synthesize(f_hat) # , self.d)
class SO3_FFT_NaiveReal(FFTBase):
def __init__(self, L_max, d=None, L2_normalized=True):
self.L_max = L_max
self.complex_fft = SO3_FFT_SemiNaive_Complex(L_max=L_max, d=d, L2_normalized=L2_normalized)
# Compute change of basis function:
self.c2b = [change_of_basis_matrix(l,
frm=('complex', 'seismology', 'centered', 'cs'),
to=('real', 'quantum', 'centered', 'cs'))
for l in range(L_max + 1)]
def analyze(self, f):
raise NotImplementedError('SO3 analyze function not implemented yet')
def synthesize(self, f_hat):
"""
"""
# Change basis on f_hat
# We have R = B * C * B.conj().T, where
# B is the real-to-complex change of basis, C are the complex Wigner D functions,
# and R are the real Wigner D functions.
# We want to compute Tr(eta^T R) = Tr( (B.T * eta * B.conj())^T C)
f_hat_complex = [self.c2b[l].T.dot(f_hat[l]).dot(self.c2b[l].conj()) for l in range(self.L_max + 1)]
f = self.complex_fft.synthesize(f_hat_complex)
return f.real
def synthesize_direct(self, f_hat):
pass
# Synthesize without using complex fft
def SO3_FFT_analyze(f):
"""
Compute the complex SO(3) Fourier transform of f.
The standard way to define the FT is:
\hat{f}^l_mn = (2 J + 1)/(8 pi^2) *
int_0^2pi da int_0^pi db sin(b) int_0^2pi dc f(a,b,c) D^{l*}_mn(a,b,c)
The normalizing constant comes about because:
int_SO(3) D^*(g) D(g) dg = 8 pi^2 / (2 J + 1)
where D is any Wigner D function D^l_mn. Note that the factor 8 pi^2 (the volume of SO(3))
goes away if we integrate with the normalized Haar measure.
This function computes the FT using the normalized D functions:
\tilde{D} = 1/2pi sqrt((2J+1)/2) D
where D are the rotation matrices in the basis of complex, seismology-normalized, centered spherical harmonics.
Hence, this function computes:
\hat{f}^l_mn = \int_SO(3) f(g) \tilde{D}^{l*}_mn(g) dg
So that the FT of f = \tilde{D}^l_mn is 1 at (l,m,n) (and zero elsewhere).
Args:
f: an array of shape (2B, 2B, 2B), where B is the bandwidth.
Returns:
f_hat: the Fourier transform of f. A list of length B,
where entry l contains an 2l+1 by 2l+1 array containing the projections
of f onto matrix elements of the l-th irreducible representation of SO(3).
Main source:
SOFT: SO(3) Fourier Transforms
<NAME> and <NAME>
Further information:
Generalized FFTs-a survey of some recent results
Maslen & Rockmore
Engineering Applications of Noncommutative Harmonic Analysis.
9.5 - Sampling and FFT for SO(3) and SU(2)
<NAME>, <NAME>
"""
assert f.shape[0] == f.shape[1]
assert f.shape[1] == f.shape[2]
assert f.shape[0] % 2 == 0
# First, FFT along the alpha and gamma axes (axis 0 and 2, respectively)
F = fft2(f, axes=(0, 2))
F = fftshift(F, axes=(0, 2))
# Then, perform the Wigner-d transform
return wigner_d_transform_analysis(F)
def SO3_FFT_synthesize(f_hat):
"""
Perform the inverse (spectral to spatial) SO(3) Fourier transform.
:param f_hat: a list of matrices of with shapes [1x1, 3x3, 5x5, ..., 2 L_max + 1 x 2 L_max + 1]
"""
F = wigner_d_transform_synthesis(f_hat)
# The rest of the SO(3) FFT is just a standard torus FFT
F = fftshift(F, axes=(0, 2))
f = ifft2(F, axes=(0, 2))
b = len(f_hat)
return f * (2 * b) ** 2
def SO3_ifft(f_hat):
"""
"""
b = len(f_hat)
d = setup_d_transform(b)
df_hat = [d[l] * f_hat[l][:, None, :] for l in range(len(d))]
# Note: the frequencies where m=-B or n=-B are set to zero,
# because they are not used in the forward transform either
# (the forward transform is up to m=-l, l<B
F = np.zeros((2 * b, 2 * b, 2 * b), dtype=complex)
for l in range(b):
F[b - l:b + l + 1, :, b - l:b + l + 1] += df_hat[l]
F = fftshift(F, axes=(0, 2))
f = ifft2(F, axes=(0, 2))
return f * 2 * (b ** 2) / np.pi
def wigner_d_transform_analysis(f):
"""
The discrete Wigner-d transform [1] is defined as
WdT(s)[l, m, n] = sum_k=0^{2b-1} w_b(k) d^l_mn(beta_k) s_k
where:
- w_b(k) is the k-th quadrature weight for an order b grid,
- d^l_mn is a Wigner-d function,
- beta_k = pi(2k + 1) / 4b
- s is a data vector of length 2b
In practice we want to transform many data vectors at once; we have an input array of shape (2b, 2b, 2b)
[1] SOFT: SO(3) Fourier Transforms
<NAME> and <NAME>
    :param f: an array of shape (2b, 2b, 2b) with axes (m, beta, n), as produced by the FFT step
    :return: f_hat, a list of length b whose l-th entry is a (2l + 1) x (2l + 1) array
"""
assert f.shape[0] == f.shape[1]
assert f.shape[1] == f.shape[2]
assert f.shape[0] % 2 == 0
b = f.shape[0] // 2 # The bandwidth
f0 = f.shape[0] // 2 # The index of the 0-frequency / DC component
wd = weighted_d(b)
f_hat = [] # To store the result
Z = 2 * np.pi / ((2 * b) ** 2) # Normalizing constant
# NOTE: the factor 1. / (2 (2b)^2) comes from the quadrature integration - see S3.integrate_quad
# Maybe it makes more sense to integrate this factor into the quadrature weights.
# The factor 4 pi is probably related to the normalization of the Haar measure on S^2
# The array F we have computed so far still has shape (2b, 2b, 2b),
# where the axes correspond to (M, beta, M').
# For each l = 0, ..., b-1, select a subarray of shape (2l + 1, 2b, 2l + 1)
f_sub = [f[f0 - l:f0 + l + 1, :, f0 - l:f0 + l + 1] for l in range(b)]
for l in range(b):
# Dot the vectors F_mn and d_mn over the middle axis (beta),
# where -l <= m,n <= l, which corresponds to
# f0 - l <= m,n < f0 + l + 1
# for 0-based indexing and zero-frequency location f0
f_hat.append(
np.einsum('mbn,mbn->mn', wd[l], f_sub[l]) * Z
)
return f_hat
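# Illustrative sketch (an added example, not in the original module): entry by entry,
# the einsum in wigner_d_transform_analysis is exactly the quadrature sum given in the
# docstring. For a single (l, m, n) with -l <= m, n <= l it can be spelled out as:
def _wigner_d_analysis_entry(f, l, m, n):
    b = f.shape[0] // 2
    f0 = b                              # index of the zero frequency
    wd = weighted_d(b)                  # weighted Wigner-d samples, as used above
    Z = 2 * np.pi / ((2 * b) ** 2)
    total = 0.0
    for k in range(2 * b):              # sum over the beta samples
        total += wd[l][m + l, k, n + l] * f[f0 + m, k, f0 + n]
    return total * Z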
def wigner_d_transform_analysis_vectorized(f, wd_flat, idxs):
""" computes the wigner transform analysis in a vectorized way
returns the flattened blocks of f_hat as a single vector
f: the input signal, shape (2b, 2b, 2b) axes m, beta, n.
wd_flat: the flattened weighted wigner d functions, shape (num_spectral, 2b), axes (l*m*n, beta)
idxs: the array of indices containing all analysis blocks
"""
f_trans = f.transpose([0, 2, 1]) # shape 2b, 2b, 2b, axes m, n, beta
f_trans_flat = f_trans.reshape(-1, f.shape[1]) # shape 4b^2, 2b, axes m*n, beta
f_i = f_trans_flat[idxs] # shape num_spectral, 2b, axes l*m*n, beta
prod = f_i * wd_flat # shape num_spectral, 2b, axes l*m*n, beta
result = prod.sum(axis=1) # shape num_spectral, axes l*m*n
return result
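# One possible way to construct the inputs of the vectorized analysis above
# (a sketch under assumptions -- the original library may build them differently):
# idxs selects, for every spectral entry (l, m, n), the matching row of the flattened
# (m, n, beta) signal, and wd_flat holds the weighted Wigner-d samples flattened in the
# same (l, m, n) order. Note that, unlike the non-vectorized version, no normalization
# constant Z is applied inside the vectorized routine.
def _make_vectorized_analysis_inputs(b):
    wd = weighted_d(b)
    f0 = b
    idxs, wd_rows = [], []
    for l in range(b):
        for i in range(2 * l + 1):          # i = m + l
            for j in range(2 * l + 1):      # j = n + l
                idxs.append((f0 - l + i) * (2 * b) + (f0 - l + j))
                wd_rows.append(wd[l][i, :, j])
    return np.array(idxs), np.array(wd_rows)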
def wigner_d_transform_analysis_vectorized_v2(f, wd_flat_t, idxs):
"""
:param f: the SO(3) signal, shape (2b, 2b, 2b), axes beta, m, n
    :param wd_flat_t: the transposed flattened weighted Wigner-d functions, shape (2b, num_spectral), axes (beta, l*m*n)
    :param idxs: the array of indices selecting all analysis blocks (as in the non-transposed version)
:return:
"""
fr = f.reshape(f.shape[0], -1) # shape 2b, 4b^2, axes beta, m*n
f_i = fr[..., idxs] # shape 2b, num_spectral, axes beta, l*m*n
prod = f_i * wd_flat_t # shape 2b, num_spectral, axes beta, l*m*n
result = prod.sum(axis=0) # shape num_spectral, axes l*m*n
return result
def wigner_d_transform_synthesis(f_hat):
b = len(f_hat)
d = setup_d_transform(b, L2_normalized=False)
# Perform the brute-force Wigner-d transform
    # Note: the frequencies where m=-B or n=-B are set to zero,
    # because they are not used in the forward transform either
    # (the forward transform only uses -l <= m, n <= l with l < B).
df_hat = [d[l] * f_hat[l][:, None, :] for l in range(b)]
    F = np.zeros((2 * b, 2 * b, 2 * b), dtype=complex)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import astropy.units as u
from p_winds import hydrogen, helium, tools, parker
# HD 209458 b
R_pl = (1.39 * u.jupiterRad).value
M_pl = (0.73 * u.jupiterMass).value
m_dot = (5E10 * u.g / u.s).value
T_0 = (9E3 * u.K).value
h_fraction = 0.90
he_fraction = 1 - h_fraction
he_h_fraction = he_fraction / h_fraction
average_f_ion = 0.0
average_mu = (1 + 4 * he_h_fraction) / (1 + he_h_fraction + average_f_ion)
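# Quick numeric check of the line above (added note): for h_fraction = 0.90 the He/H
# number ratio is 0.1 / 0.9 ~= 0.111, so average_mu ~= (1 + 4 * 0.111) / (1 + 0.111) ~= 1.30
# (in units of the proton mass, with no ionization).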
# In the initial state, the fraction of singlet and triplet helium is 1E-6, and
# the optical depths are null
initial_state = np.array([1.0, 0.0])
r = np.logspace(0, np.log10(20))
import numpy as np
import matplotlib.pyplot as plt
import sklearn.feature_selection as fs
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import math
def main():
two = np.genfromtxt('data/two.csv')
two = StandardScaler().fit_transform(two)
pca = PCA()
pca.fit(two)
base_corr = np.corrcoef(two.transpose())
print(
"Q1a)\nVariance weights:\nÂ1: {}\nÂ2: {}".format(pca.explained_variance_ratio_[0],
pca.explained_variance_ratio_[1]))
    pcaed = pca.transform(two)
pcaed_corr = np.corrcoef(pcaed.transpose())
print("Q1b)\nrotated by {}°".format(np.degrees(math.acos(pca.components_[0, 0]))))
print("Q1c)\nOg:\n{}\nPCA:\n{}".format(base_corr, pcaed_corr))
reduced = pcaed[:, 0]
zs = np.zeros((len(reduced)))
    x = np.array([reduced, zs])
from numpy import ones
from numpy import array
from numpy import empty
from numpy import hstack
from numpy import random
from numpy.testing import assert_almost_equal
from limix_math.linalg import qs_decomposition
from limix_qep.ep import BinomialEP
from limix_qep.ep import BernoulliEP
from limix_qep.tool.util import create_binomial
def test_binomial_lml():
random.seed(6)
n = 3
M = ones((n, 1)) * 1.
G = array([[1.2, 3.4], [-.1, 1.2], [0.0, .2]])
(Q, S) = qs_decomposition(G)
y = array([1., 0., 1.])
ep1 = BinomialEP(y, 1, M, hstack(Q), empty((n, 0)), hstack(S) + 1.0)
ep1.beta = array([1.])
ep1.genetic_variance = 1.
ep1.environmental_variance = 1e-7
lml1 = ep1.lml()
ep2 = BernoulliEP(y, M, hstack(Q), empty((n, 0)), hstack(S) + 1.0)
ep2.beta = array([1.])
ep2.genetic_variance = 1.
lml2 = ep2.lml()
assert_almost_equal(lml1 - lml2, 0., decimal=5)
def test_binomial_optimize():
seed = 10
nsamples = 200
nfeatures = 1200
ntrials = random.RandomState(seed).randint(1, 1000, nsamples)
    M = ones((nsamples, 1))
import sys
# sys.path.append('../')
sys.path.insert(0, '../')
import unittest
import scripts.sqp_solver.SQPsolver as solver
from scripts.cvxpy_optimizer.solver_cvxpy import ConvexOptimizer
import numpy as np
import yaml
import os
from easydict import EasyDict as edict
from collections import defaultdict
import random
import copy
class Test_sqp_solver(unittest.TestCase):
@classmethod
def setUpClass(cls):
# self.q = np.asarray([0, 0])
# self.P = 2 * np.asarray([[1., 2.], [0., 4.]])
# self.G = np.asarray([[-1.0, 0.0], [0.0, -1.0]])
# self.lbG = np.asarray([0.0, 0.0])
# self.ubG = np.asarray([3.0, 2.0])
# self.A = np.asarray([[2.0, 1.0], [0.5, 1.2]])
# self.b = np.asarray([2.0, 0.3])
# # self.initial_guess = None
# self.initial_guess = [0.5, 0.0]
cls.P = np.array([[2., -4., 0.],
[0., 4., -4.],
[0., 0., 2.]])
# Make symmetric and not indefinite
cls.P = .5 * (cls.P + cls.P.T) + 1e-08 * np.eye(3)
        cls.q = np.array([0., 0., 0.])
from typing import Mapping, Optional
from abc import ABC, abstractmethod
import numpy as np
from numba import njit
class Transformation(ABC):
""" Transformation object that will handle coordinate frame changes for datasets """
def __init__(self):
pass
@abstractmethod
def rect_from_lidar(self, lidar_points: np.ndarray, frame_data: Optional[Mapping],
only_forward: bool = False) -> np.ndarray:
""" Get 3D points in ego/rect frame from points in LiDAR coordinates
:param lidar_points: Nx3 points
"""
@abstractmethod
def img_from_tracking(self, track_points: np.ndarray, cam: str,
frame_data: Optional[Mapping]) -> np.ndarray:
""" Get image place coordinates from tracking coordinates i.e. rect KITTI coordinate frame
For KITTI, this would be img_from_rect
For NuScenes, tracking coordinates need to be converted back to NuScenes world coordinates,
then to ego frame, then to cam frame, then projected
"""
@njit
def inverse_rigid_transform(transform):
""" Inverse a rigid body transform matrix (3x4 as [R|t]) [R'|-R't; 0|1] """
inverse = np.zeros_like(transform) # 3x4
inverse[0:3, 0:3] = transform[0:3, 0:3].T
inverse[0:3, 3] = (-transform[0:3, 0:3].T) @ transform[0:3, 3]
return inverse
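# Minimal sanity sketch (an assumed example, not part of the original module): composing
# a rigid transform [R|t] with its inverse should undo both the rotation and the translation.
def _check_inverse_rigid_transform():
    angle = 0.3
    c, s = np.cos(angle), np.sin(angle)
    transform = np.zeros((3, 4))
    transform[0:3, 0:3] = np.array([[c, 0., s], [0., 1., 0.], [-s, 0., c]])
    transform[0:3, 3] = np.array([1.0, -2.0, 0.5])
    inv = inverse_rigid_transform(transform)
    # R @ R^T is the identity, and R @ (-R^T t) + t is the zero vector.
    assert np.allclose(transform[:3, :3] @ inv[:3, :3], np.eye(3))
    assert np.allclose(transform[:3, :3] @ inv[:3, 3] + transform[:3, 3], np.zeros(3))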
@njit
def to_homogeneous(points):
return np.hstack((points, np.ones((points.shape[0], 1))))
def get_rotation_matrix_around_y(t):
""" Rotation about the y-axis. """
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
@njit
def cam_points_to_image_coordinates(img_points):
"""
:param img_points: nx3 3D points in camera frame coordinates
:return: nx2 2D coordinates of points in image coordinates
"""
img_points[:, 0] /= img_points[:, 2]
img_points[:, 1] /= img_points[:, 2]
# img_points = img_points[:, :2] / img_points[:, 2].reshape(-1, 1)
    img_plane_points = np.rint(img_points)
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import shapely
import shapely.geometry
import imgaug as ia
from imgaug.testutils import reseed
def main():
time_start = time.time()
test_is_np_array()
test_is_single_integer()
test_is_single_float()
test_is_single_number()
test_is_iterable()
test_is_string()
test_is_single_bool()
test_is_integer_array()
test_is_float_array()
test_is_callable()
test_caller_name()
test_seed()
test_current_random_state()
test_new_random_state()
test_dummy_random_state()
test_copy_random_state()
test_derive_random_state()
test_derive_random_states()
test_forward_random_state()
# test_quokka()
# test_quokka_square()
# test_angle_between_vectors()
# test_draw_text()
test_imresize_many_images()
test_imresize_single_image()
test_pad()
test_compute_paddings_for_aspect_ratio()
test_pad_to_aspect_ratio()
test_pool()
test_avg_pool()
test_max_pool()
test_draw_grid()
# test_show_grid()
# test_do_assert()
# test_HooksImages_is_activated()
# test_HooksImages_is_propagating()
# test_HooksImages_preprocess()
# test_HooksImages_postprocess()
test_Keypoint()
test_KeypointsOnImage()
test_BoundingBox()
test_BoundingBoxesOnImage()
# test_HeatmapsOnImage_get_arr()
# test_HeatmapsOnImage_find_global_maxima()
test_HeatmapsOnImage_draw()
test_HeatmapsOnImage_draw_on_image()
test_HeatmapsOnImage_invert()
test_HeatmapsOnImage_pad()
# test_HeatmapsOnImage_pad_to_aspect_ratio()
test_HeatmapsOnImage_avg_pool()
test_HeatmapsOnImage_max_pool()
test_HeatmapsOnImage_scale()
# test_HeatmapsOnImage_to_uint8()
# test_HeatmapsOnImage_from_uint8()
# test_HeatmapsOnImage_from_0to1()
# test_HeatmapsOnImage_change_normalization()
# test_HeatmapsOnImage_copy()
# test_HeatmapsOnImage_deepcopy()
test_SegmentationMapOnImage_bool()
test_SegmentationMapOnImage_get_arr_int()
# test_SegmentationMapOnImage_get_arr_bool()
test_SegmentationMapOnImage_draw()
test_SegmentationMapOnImage_draw_on_image()
test_SegmentationMapOnImage_pad()
test_SegmentationMapOnImage_pad_to_aspect_ratio()
test_SegmentationMapOnImage_scale()
test_SegmentationMapOnImage_to_heatmaps()
test_SegmentationMapOnImage_from_heatmaps()
test_SegmentationMapOnImage_copy()
test_SegmentationMapOnImage_deepcopy()
test_Polygon___init__()
test_Polygon_xx()
test_Polygon_yy()
test_Polygon_xx_int()
test_Polygon_yy_int()
test_Polygon_is_valid()
test_Polygon_area()
test_Polygon_project()
test_Polygon__compute_inside_image_point_mask()
test_Polygon_is_fully_within_image()
test_Polygon_is_partly_within_image()
test_Polygon_is_out_of_image()
test_Polygon_cut_out_of_image()
test_Polygon_clip_out_of_image()
test_Polygon_shift()
test_Polygon_draw_on_image()
test_Polygon_extract_from_image()
test_Polygon_to_shapely_polygon()
test_Polygon_to_bounding_box()
test_Polygon_from_shapely()
test_Polygon_copy()
test_Polygon_deepcopy()
test_Polygon___repr__()
test_Polygon___str__()
# test_Batch()
test_BatchLoader()
# test_BackgroundAugmenter.get_batch()
# test_BackgroundAugmenter._augment_images_worker()
# test_BackgroundAugmenter.terminate()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_is_np_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((64, 64, 3), dtype=np.uint8),
np.zeros((1, 2), dtype=np.float32),
np.zeros((100,), dtype=np.float64)
]
values_false = [
"A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(),
-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4
]
for value in values_true:
assert ia.is_np_array(value) is True
for value in values_false:
assert ia.is_np_array(value) is False
def test_is_single_integer():
assert ia.is_single_integer("A") is False
assert ia.is_single_integer(None) is False
assert ia.is_single_integer(1.2) is False
assert ia.is_single_integer(1.0) is False
assert ia.is_single_integer(np.ones((1,), dtype=np.float32)[0]) is False
assert ia.is_single_integer(1) is True
assert ia.is_single_integer(1234) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.uint8)[0]) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.int32)[0]) is True
def test_is_single_float():
assert ia.is_single_float("A") is False
assert ia.is_single_float(None) is False
assert ia.is_single_float(1.2) is True
assert ia.is_single_float(1.0) is True
assert ia.is_single_float(np.ones((1,), dtype=np.float32)[0]) is True
assert ia.is_single_float(1) is False
assert ia.is_single_float(1234) is False
assert ia.is_single_float(np.ones((1,), dtype=np.uint8)[0]) is False
assert ia.is_single_float(np.ones((1,), dtype=np.int32)[0]) is False
def test_caller_name():
assert ia.caller_name() == 'test_caller_name'
def test_is_single_number():
class _Dummy(object):
pass
values_true = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4]
values_false = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_single_number(value) is True
for value in values_false:
assert ia.is_single_number(value) is False
def test_is_iterable():
class _Dummy(object):
pass
values_true = [
[0, 1, 2],
["A", "X"],
[[123], [456, 789]],
[],
(1, 2, 3),
(1,),
tuple(),
"A",
"ABC",
"",
np.zeros((100,), dtype=np.uint8)
]
values_false = [1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Dummy()]
for value in values_true:
assert ia.is_iterable(value) is True, value
for value in values_false:
assert ia.is_iterable(value) is False
def test_is_string():
class _Dummy(object):
pass
values_true = ["A", "BC", "1", ""]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0],
_Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_string(value) is True
for value in values_false:
assert ia.is_string(value) is False
def test_is_single_bool():
class _Dummy(object):
pass
values_true = [False, True]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, (1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8), np.zeros((1,), dtype=bool)]
for value in values_true:
assert ia.is_single_bool(value) is True
for value in values_false:
assert ia.is_single_bool(value) is False
def test_is_integer_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_integer_array(value) is True
for value in values_false:
assert ia.is_integer_array(value) is False
def test_is_float_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_float_array(value) is True
for value in values_false:
assert ia.is_float_array(value) is False
def test_is_callable():
def _dummy_func():
pass
_dummy_func2 = lambda x: x
class _Dummy1(object):
pass
class _Dummy2(object):
def __call__(self):
pass
values_true = [_dummy_func, _dummy_func2, _Dummy2()]
values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_callable(value) == True
for value in values_false:
assert ia.is_callable(value) == False
def test_seed():
ia.seed(10017)
    rs = np.random.RandomState(10017)